| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
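Rows with this schema can be streamed and filtered on the metadata and score columns. The following is a minimal sketch, assuming the rows below are published as a Hugging Face dataset; the hub id "my-org/python-code-dump" and the 0.3 documentation threshold are placeholders, not values taken from this page.

```python
# Minimal sketch: stream a dataset with the schema above and keep
# well-documented Python files from permissively licensed repositories.
# The hub id and threshold below are assumptions; substitute real values.
from datasets import load_dataset

ds = load_dataset("my-org/python-code-dump", split="train", streaming=True)

def keep(row):
    # ext/score_documentation/max_stars_repo_licenses are columns from the schema above.
    return (
        row["ext"] == "py"
        and row["score_documentation"] > 0.3
        and any(lic in {"MIT", "Apache-2.0", "0BSD", "Unlicense"}
                for lic in row["max_stars_repo_licenses"])
    )

for row in filter(keep, ds):
    print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
    break  # stop after the first matching row
```

Streaming avoids downloading the whole corpus up front, which matters here because the `content` column alone can hold up to 2.06M characters per row.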
9623258b0aadd8546b69b628ca22ab142e622094 | 1,642 | py | Python | pmfp/entrypoint/grpc/build_/build_pb_go.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | ["MIT"] | 4 | 2017-09-15T03:38:56.000Z | 2019-12-16T02:03:14.000Z | pmfp/entrypoint/grpc/build_/build_pb_go.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | ["MIT"] | 1 | 2021-04-27T10:51:42.000Z | 2021-04-27T10:51:42.000Z | pmfp/entrypoint/grpc/build_/build_pb_go.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | ["MIT"] | null | null | null |

"""Compile the Go language module."""
import warnings
from typing import List, Optional
from pathlib import Path
from pmfp.utils.run_command_utils import run
def _build_grpc(includes: str, flag: str, to: str, target: str, cwd: Path) -> None:
command = f"protoc {includes} {flag} --go_out={to} --go-grpc_out={to} {target}"
try:
run(command, cwd=cwd, visible=True)
except Exception as e:
warnings.warn(f"""根据模板构造grpc项目失败
{str(e)}
编译为go语言依赖如下插件,请检查是否安装:
"google.golang.org/protobuf/cmd/protoc-gen-go"
"google.golang.org/grpc/cmd/protoc-gen-go-grpc"
""")
else:
print(f"编译grpc项目 {target} 为go语言模块完成!")
def build_pb_go(serv_file: str, includes: List[str], to: str,
source_relative: bool, cwd: Path, files: Optional[List[str]] = None, **kwargs: str) -> None:
"""编译grpc的protobuffer定义文件为go语言模块.
Args:
serv_file (str): 定义grpc service的目标proto文件
includes (List[str]): 待编译的protobuffer文件所在的文件夹
to (str): 编译成的模块文件放到的路径
source_relative (bool): 是否使用路径作为包名,只针对go语言
cwd (Path): 执行目录.
files (Optional[List[str]]): 其他待编译的protobuffer文件
"""
includes_str = " ".join([f"-I {include}" for include in includes])
target_str = serv_file
if files:
target_str += " " + " ".join(files)
flag_str = ""
if source_relative:
flag_str += " --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative"
if kwargs:
if flag_str:
flag_str += " "
flag_str += " ".join([f"{k}={v}" for k, v in kwargs.items()])
_build_grpc(includes_str, flag_str, to, target_str, cwd)
| 32.84 | 108 | 0.626066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 972 | 0.518677 |
962392189f97293112a65685c141235eaa945995 | 369 | py | Python | instapp/migrations/0003_auto_20190522_0007.py | imekenye/Instagram-clone | 19c895a7bc4d5137f8df6eab7ade3920dfc3eb39 | ["Unlicense"] | null | null | null | instapp/migrations/0003_auto_20190522_0007.py | imekenye/Instagram-clone | 19c895a7bc4d5137f8df6eab7ade3920dfc3eb39 | ["Unlicense"] | 13 | 2020-02-12T00:19:23.000Z | 2022-03-11T23:47:08.000Z | instapp/migrations/0003_auto_20190522_0007.py | imekenye/Instagram-clone | 19c895a7bc4d5137f8df6eab7ade3920dfc3eb39 | ["Unlicense"] | 1 | 2019-06-07T10:01:06.000Z | 2019-06-07T10:01:06.000Z |

# Generated by Django 2.2.1 on 2019-05-22 00:07
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('instapp', '0002_auto_20190522_0006'),
]
operations = [
migrations.RenameField(
model_name='image',
old_name='profile',
new_name='user_profile',
),
]
| 19.421053 | 47 | 0.590786 | 284 | 0.769648 | 0 | 0 | 0 | 0 | 0 | 0 | 111 | 0.300813 |
9624816b19d6ea779fa1433613690a1826c3af03 | 4,007 | py | Python | app/api/v1_0/users.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | ["MIT"] | 1 | 2021-01-04T21:25:24.000Z | 2021-01-04T21:25:24.000Z | app/api/v1_0/users.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | ["MIT"] | null | null | null | app/api/v1_0/users.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | ["MIT"] | null | null | null |

import json
import logging
from flask import jsonify, make_response, request
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from http import HTTPStatus
from marshmallow import ValidationError, Schema
from werkzeug.security import generate_password_hash
from app.models import db
from app.models.user import User, user_schema
from app.api.utils import get_url
from app.utils.exceptions import ApiException
logger = logging.getLogger(__name__)
class RequestSchema:
class PostUsers(Schema):
name = type(user_schema.fields['name'])(
required=True, validate=user_schema.fields['name'].validate)
email = type(user_schema.fields['email'])(
required=True, validate=user_schema.fields['email'].validate)
password = type(user_schema.fields['password'])(
required=True, validate=user_schema.fields['password'].validate)
role_id = type(user_schema.fields['role_id'])(
required=True, validate=user_schema.fields['role_id'].validate)
class ResponseSchema:
class GetUser(Schema):
id = type(user_schema.fields['id'])(
required=True, validate=user_schema.fields['name'].validate)
name = type(user_schema.fields['name'])(
required=True, validate=user_schema.fields['name'].validate)
email = type(user_schema.fields['email'])(
required=True, validate=user_schema.fields['email'].validate)
class UserListApi(Resource):
"""
GET: Return all users.
POST: Create new user account.
PUT: N/A
DELETE: N/A
"""
def post(self):
"""Sign up"""
status = HTTPStatus.CREATED
ret = {}
error_msg = {}
try:
data = request.get_json()
if data is None:
raise ApiException('Request is empty.', status=HTTPStatus.BAD_REQUEST)
errors = RequestSchema.PostUsers().validate(data)
if errors:
raise ValidationError(errors)
data = RequestSchema.PostUsers().dump(data)
if User.query.filter_by(name=data['name']).count() > 0:
raise ApiException(
f"Username:{data['name']} is already used.", status=HTTPStatus.CONFLICT)
if User.query.filter_by(email=data['email']).count() > 0:
raise ApiException(
f"Email:{data['email']} is already used.", status=HTTPStatus.CONFLICT)
data['password'] = generate_password_hash(data['password'])
user = User(**data)
db.session.add(user)
db.session.commit()
ret['link'] = {'self': get_url(tail_url=user.id)}
except ValidationError as e:
status = HTTPStatus.BAD_REQUEST
error_msg = e.normalized_messages()
except ApiException as e:
status = e.status
error_msg = str(e)
except Exception as e:
error_msg = f'{type(e)} : {str(e)} '
if status == HTTPStatus.CREATED:
status = HTTPStatus.INTERNAL_SERVER_ERROR
error_msg = f'Signup failed due to internal server error. ' + error_msg
finally:
if status != HTTPStatus.CREATED:
db.session.rollback()
ret = { 'error': { 'message': error_msg } }
logger.error(ret)
return make_response(jsonify(ret), status)
class UserApi(Resource):
"""
GET: Return user.
POST: N/A
PUT: Update user data.
DELETE: Delete user account.
"""
@jwt_required
def get(self, id):
"""Return user."""
status = HTTPStatus.OK
ret = {}
error_msg = ''
try:
query = User.query.filter_by(id=id)
user = query.first()
if not user:
raise ApiException(
f'User ID:{id} was not found.', status=HTTPStatus.NOT_FOUND)
ret = ResponseSchema.GetUser().dump(user)
ret['link'] = {'self': get_url(tail_url='')}
except ApiException as e:
status = e.status
error_msg = str(e)
except Exception as e:
      status = HTTPStatus.INTERNAL_SERVER_ERROR
error_msg = str(e)
finally:
if error_msg != '':
ret = { 'error': { 'message': error_msg } }
logger.error(ret)
return make_response(jsonify(ret), status)
| 30.356061 | 82 | 0.660344 | 3,518 | 0.877964 | 0 | 0 | 759 | 0.189419 | 0 | 0 | 619 | 0.15448 |
9625303d504fb10bd57521a4e704cb6335319f31 | 984 | py | Python | src/count_targets.py | kahvel/MAProject | 1c17d0c3fde6d9acc7dd3861f926e8af0ddac222 | ["MIT"] | null | null | null | src/count_targets.py | kahvel/MAProject | 1c17d0c3fde6d9acc7dd3861f926e8af0ddac222 | ["MIT"] | null | null | null | src/count_targets.py | kahvel/MAProject | 1c17d0c3fde6d9acc7dd3861f926e8af0ddac222 | ["MIT"] | null | null | null |

from main import readData, getTrueLabels, binariseLabels, removePacketsAfterChange
label_data = list()
label_data.append(readData("..\\data\\test5_targets_1.csv"))
label_data.append(readData("..\\data\\test5_targets_2.csv"))
label_data.append(readData("..\\data\\test5_targets_3.csv"))
labels = [getTrueLabels(label) for label in label_data]
binarised_labels = dict()
binarised_labels[1] = [binariseLabels(label, 1) for label in labels]
binarised_labels[2] = [binariseLabels(label, 2) for label in labels]
binarised_labels[3] = [binariseLabels(label, 3) for label in labels]
for target in [1,2,3]:
for dataset in [0,1,2]:
_, binarised_labels[target][dataset] =\
removePacketsAfterChange(binarised_labels[target][dataset], binarised_labels[target][dataset], label_data[dataset], 256)
for target in [1,2,3]:
for dataset in [0,1,2]:
print "Dataset:", str(dataset+1), "Target:", str(target), "Count:", str(sum(binarised_labels[target][dataset]))
| 41 | 132 | 0.727642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 120 | 0.121951 |
962533cca6da11f2ce0eaecf148aa3437a906a76 | 14,183 | py | Python | src/imagine/goal_sampler.py | jordyantunes/Imagine | 783cedaa53635b21e18ef41ab1524d56e368d120 | ["MIT"] | 20 | 2020-11-06T10:54:08.000Z | 2022-02-24T15:23:31.000Z | src/imagine/goal_sampler.py | jordyantunes/Imagine | 783cedaa53635b21e18ef41ab1524d56e368d120 | ["MIT"] | null | null | null | src/imagine/goal_sampler.py | jordyantunes/Imagine | 783cedaa53635b21e18ef41ab1524d56e368d120 | ["MIT"] | 4 | 2020-11-17T17:00:02.000Z | 2021-07-08T22:51:14.000Z |

import numpy as np
from mpi4py import MPI
from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic
from src import logger
class GoalSampler:
def __init__(self,
policy_language_model,
reward_language_model,
goal_dim,
one_hot_encoder,
params):
self.policy_language_model = policy_language_model
self.reward_language_model = reward_language_model
self.goal_dim = goal_dim
self.params = params
self.nb_feedbacks = 0
self.nb_positive_feedbacks = 0
self.nb_negative_feedbacks = 0
self.feedback2id = dict()
self.id2feedback = dict()
self.id2oracleid = dict()
self.feedback2one_hot = dict()
self.id2one_hot = dict()
self.feedback_memory = dict(memory_id=[],
string=[],
iter_discovery=[],
target_counter=[],
reached_counter=[],
oracle_id=[],
f1_score=[],
policy_encoding=[],
reward_encoding=[],
imagined=[],
)
self.imagined_goals = dict(string=[],
competence=[],
lp=[])
self.one_hot_encoder = one_hot_encoder
self.goal_generator = SentenceGeneratorHeuristic(params['train_descriptions'],
params['test_descriptions'],
sentences=None,
method=params['conditions']['imagination_method'])
self.nb_discovered_goals = 0
self.score_target_goals = None
self.perceived_learning_progress = None
self.perceived_competence = None
self.feedback_stats = None
self.rank = MPI.COMM_WORLD.Get_rank()
self.num_cpus = params['experiment_params']['n_cpus']
self.rollout_batch_size = params['experiment_params']['rollout_batch_size']
self.not_imagined_goal_ids = np.array([])
self.imagined_goal_ids = np.array([])
def store_reward_function(self, reward_function):
self.reward_function = reward_function
def update_embeddings(self):
# embeddings must be updated when the language model is udpated
for i, goal_str in enumerate(self.feedback_memory['string']):
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'][i] = reward_encoding.copy()
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback_memory['policy_encoding'][i] = policy_encoding.copy()
def add_entries_to_feedback_memory(self, str_list, episode_count, imagined):
for goal_str in str_list:
if goal_str not in self.feedback2id.keys():
memory_id = self.nb_discovered_goals
if goal_str in self.params['train_descriptions']:
oracle_id = self.params['train_descriptions'].index(goal_str)
else:
oracle_id = None
one_hot = self.one_hot_encoder.encode(goal_str.lower().split(" "))
self.feedback2one_hot[goal_str] = one_hot
self.id2one_hot[memory_id] = one_hot
if self.reward_language_model is not None:
reward_encoding = self.reward_language_model.encode(goal_str)
self.feedback_memory['reward_encoding'].append(reward_encoding.copy())
policy_encoding = self.policy_language_model.encode(goal_str)
self.feedback2id[goal_str] = memory_id
self.id2oracleid[memory_id] = oracle_id
self.id2feedback[memory_id] = goal_str
self.feedback_memory['memory_id'].append(memory_id)
self.feedback_memory['oracle_id'].append(oracle_id)
self.feedback_memory['string'].append(goal_str)
self.feedback_memory['target_counter'].append(0)
self.feedback_memory['reached_counter'].append(0)
self.feedback_memory['iter_discovery'].append(episode_count)
self.feedback_memory['f1_score'].append(0)
self.feedback_memory['policy_encoding'].append(policy_encoding.copy())
self.feedback_memory['imagined'].append(imagined)
self.nb_discovered_goals += 1
elif goal_str in self.feedback2id.keys() and not imagined: # if goal previously imagined is discovered later, change its status
ind = self.feedback_memory['string'].index(goal_str)
if self.feedback_memory['imagined'][ind] == 1:
self.feedback_memory['imagined'][ind] = 0
logger.info('Goal already imagined:', goal_str)
def update_discovered_goals(self,
new_goals_str,
episode_count,
epoch):
# only done in cpu 0
self.add_entries_to_feedback_memory(str_list=new_goals_str,
episode_count=episode_count,
imagined=0)
# Decide whether to generate new goals
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = True
if len(new_goals_str) > 0 and imagined:
new_imagined_goals = []
inds_not_imagined = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.goal_generator.update_model(np.array(self.feedback_memory['string'])[inds_not_imagined])
generated_goals = self.goal_generator.generate_sentences(n='all')
for gen_g in generated_goals:
if gen_g not in self.imagined_goals['string']:
self.imagined_goals['string'].append(gen_g)
self.imagined_goals['competence'].append(0)
self.imagined_goals['lp'].append(0)
new_imagined_goals.append(gen_g)
self.add_entries_to_feedback_memory(str_list=new_imagined_goals,
episode_count=episode_count,
imagined=1)
def update(self,
current_episode,
all_episodes,
partner_available,
goals_reached_str,
goals_not_reached_str):
imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 1).flatten()
not_imagined_inds = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
self.not_imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[not_imagined_inds]
self.imagined_goal_ids = np.array(self.feedback_memory['memory_id'])[imagined_inds]
# only done in cpu 0
n_episodes = len(all_episodes)
attempted_goals_ids = []
exploit = []
for ep in all_episodes:
exploit.append(ep['exploit'])
attempted_goals_ids.append(ep['g_id'])
if partner_available:
# if partner is available, simply encodes what it said
assert n_episodes == len(goals_reached_str) == len(goals_not_reached_str) == len(exploit) == len(attempted_goals_ids)
# Get indexes in the order of discovery of the attempted goals, reached_goals, not reached_goals
goals_reached_ids = []
goals_not_reached_ids = []
for i in range(n_episodes):
goals_reached_ids.append([])
goals_not_reached_ids.append([])
for goal_str in goals_reached_str[i]:
goals_reached_ids[-1].append(self.feedback2id[goal_str])
for goal_str in goals_not_reached_str[i]:
goals_not_reached_ids[-1].append(self.feedback2id[goal_str])
else:
goals_reached_ids = []
goals_not_reached_ids = []
final_obs = np.array([ep['obs'][-1] for ep in all_episodes])
# test 50 goals for each episode
discovered_goal_ids = np.array(self.feedback_memory['memory_id'])
not_imagined_ind = np.argwhere(np.array(self.feedback_memory['imagined']) == 0).flatten()
discovered_goal_ids = discovered_goal_ids[not_imagined_ind]
n_attempts = min(50, len(discovered_goal_ids))
goals_to_try = np.random.choice(discovered_goal_ids, size=n_attempts, replace=False)
obs = np.repeat(final_obs, n_attempts, axis=0)
goals = np.tile(goals_to_try, final_obs.shape[0])
rewards = self.reward_function.predict(state=obs, goal_ids=goals)[0]
for i in range(len(all_episodes)):
pos_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == 0)].tolist()
goals_reached_ids.append(pos_goals)
neg_goals = goals_to_try[np.where(rewards[i * n_attempts: (i + 1) * n_attempts] == -1)].tolist()
goals_not_reached_ids.append(neg_goals)
return goals_reached_ids, goals_not_reached_ids
def share_info_to_all_cpus(self):
# share data across cpus
self.feedback_memory = MPI.COMM_WORLD.bcast(self.feedback_memory, root=0)
self.feedback2id = MPI.COMM_WORLD.bcast(self.feedback2id, root=0)
self.id2oracleid = MPI.COMM_WORLD.bcast(self.id2oracleid, root=0)
self.id2feedback = MPI.COMM_WORLD.bcast(self.id2feedback, root=0)
self.feedback2one_hot = MPI.COMM_WORLD.bcast(self.feedback2one_hot, root=0)
self.nb_discovered_goals = MPI.COMM_WORLD.bcast(self.nb_discovered_goals, root=0)
self.imagined_goals = MPI.COMM_WORLD.bcast(self.imagined_goals, root=0)
self.one_hot_encoder = MPI.COMM_WORLD.bcast(self.one_hot_encoder, root=0)
def sample_targets(self, epoch):
"""
Sample targets for all cpus and all batch, then scatter to the different cpus
"""
# Decide whether to exploit or not
exploit = True if np.random.random() < 0.1 else False
strategy = 'random'
goal_invention = self.params['conditions']['goal_invention']
imagined = False
if 'from_epoch' in goal_invention:
from_epoch = int(goal_invention.split('_')[-1])
if epoch > from_epoch:
imagined = np.random.random() < self.params['conditions']['p_imagined']
if self.rank == 0:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
for i in range(self.num_cpus):
goals_str = []
goals_encodings = []
goals_ids = []
for j in range(self.rollout_batch_size):
# when there is no goal in memory, sample random goal from standard normal distribution
if len(self.feedback_memory['memory_id']) == 0:
goals_encodings.append(np.random.normal(size=self.goal_dim))
goals_str.append('Random Goal')
goals_ids.append(-1)
else:
if strategy == 'random':
if imagined and self.imagined_goal_ids.size > 0:
ind = np.random.choice(self.imagined_goal_ids)
else:
ind = np.random.choice(self.not_imagined_goal_ids)
else:
raise NotImplementedError
goals_encodings.append(self.feedback_memory['policy_encoding'][ind])
goals_str.append(self.id2feedback[ind])
goals_ids.append(ind)
all_goals_str.append(goals_str)
all_goals_encodings.append(goals_encodings)
all_goals_ids.append(goals_ids)
else:
all_goals_str = []
all_goals_encodings = []
all_goals_ids = []
goals_str = MPI.COMM_WORLD.scatter(all_goals_str, root=0)
goals_encodings = MPI.COMM_WORLD.scatter(all_goals_encodings, root=0)
goals_ids = MPI.COMM_WORLD.scatter(all_goals_ids, root=0)
return exploit, goals_str, goals_encodings, goals_ids, imagined
class EvalGoalSampler:
def __init__(self, policy_language_model, one_hot_encoder, params):
self.descriptions = params['train_descriptions']
self.nb_descriptions = len(self.descriptions)
self.count = 0
self.policy_language_model = policy_language_model
self.rollout_batch_size = params['evaluation_rollout_params']['rollout_batch_size']
self.params = params
def reset(self):
self.count = 0
def sample(self, method='robin'):
# print(self.descriptions[self.count])
goals_str = []
goals_encodings = []
goals_ids = []
if method == 'robin':
ind = self.count
elif method == 'random':
ind = np.random.randint(self.nb_descriptions)
else:
raise NotImplementedError
for _ in range(self.rollout_batch_size):
g_str = self.descriptions[ind]
goals_str.append(g_str)
policy_encoding = self.policy_language_model.encode(g_str).flatten()
goals_encodings.append(policy_encoding)
goals_ids.append(ind)
self.count += 1
return True, goals_str, goals_encodings, goals_ids
| 47.434783 | 140 | 0.585419 | 14,021 | 0.988578 | 0 | 0 | 0 | 0 | 0 | 0 | 1,476 | 0.104068 |
96262446beb9d081c0d44d53817c947e2939b91a | 711 | py | Python | src/actionsold.py | Grumpy-Old-Tech/WorkshopAssistant | 704e8080e76ba6feabd6eee3e1965439336306ad | ["MIT"] | null | null | null | src/actionsold.py | Grumpy-Old-Tech/WorkshopAssistant | 704e8080e76ba6feabd6eee3e1965439336306ad | ["MIT"] | null | null | null | src/actionsold.py | Grumpy-Old-Tech/WorkshopAssistant | 704e8080e76ba6feabd6eee3e1965439336306ad | ["MIT"] | null | null | null |

#!/usr/bin/env python
#This is different from AIY Kit's actions
#Copying and Pasting AIY Kit's actions commands will not work
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from gmusicapi import Mobileclient
from googletrans import Translator
from gtts import gTTS
import requests
import os
import os.path
import RPi.GPIO as GPIO
import time
import re
import subprocess
import json
import urllib.request
import pafy
#API Key for YouTube and KS Search Engine
google_cloud_api_key='ENTER-YOUR-GOOGLE-CLOUD-API-KEY-HERE'
#YouTube API Constants
DEVELOPER_KEY = google_cloud_api_key
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
playshell = None
| 19.75 | 61 | 0.819972 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.333333 |
96275facff37f1002cea2272aef725bd9db2358a | 2,358 | py | Python | openpype/tools/settings/settings/widgets/window.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | ["MIT"] | null | null | null | openpype/tools/settings/settings/widgets/window.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | ["MIT"] | null | null | null | openpype/tools/settings/settings/widgets/window.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | ["MIT"] | null | null | null |

from Qt import QtWidgets, QtGui
from .categories import (
CategoryState,
SystemWidget,
ProjectWidget
)
from .widgets import ShadowWidget
from .. import style
class MainWidget(QtWidgets.QWidget):
widget_width = 1000
widget_height = 600
def __init__(self, user_role, parent=None):
super(MainWidget, self).__init__(parent)
self.setObjectName("MainWidget")
self.setWindowTitle("OpenPype Settings")
self.resize(self.widget_width, self.widget_height)
stylesheet = style.load_stylesheet()
self.setStyleSheet(stylesheet)
self.setWindowIcon(QtGui.QIcon(style.app_icon_path()))
header_tab_widget = QtWidgets.QTabWidget(parent=self)
studio_widget = SystemWidget(user_role, header_tab_widget)
project_widget = ProjectWidget(user_role, header_tab_widget)
tab_widgets = [
studio_widget,
project_widget
]
header_tab_widget.addTab(studio_widget, "System")
header_tab_widget.addTab(project_widget, "Project")
layout = QtWidgets.QVBoxLayout(self)
layout.setContentsMargins(5, 5, 5, 5)
layout.setSpacing(0)
layout.addWidget(header_tab_widget)
self.setLayout(layout)
self._shadow_widget = ShadowWidget("Working...", self)
for tab_widget in tab_widgets:
tab_widget.saved.connect(self._on_tab_save)
tab_widget.state_changed.connect(self._on_state_change)
self.tab_widgets = tab_widgets
def _on_tab_save(self, source_widget):
for tab_widget in self.tab_widgets:
tab_widget.on_saved(source_widget)
def _on_state_change(self):
any_working = False
for widget in self.tab_widgets:
if widget.state is CategoryState.Working:
any_working = True
break
if (
(any_working and self._shadow_widget.isVisible())
or (not any_working and not self._shadow_widget.isVisible())
):
return
self._shadow_widget.setVisible(any_working)
# Process events to apply shadow widget visibility
app = QtWidgets.QApplication.instance()
if app:
app.processEvents()
def reset(self):
for tab_widget in self.tab_widgets:
tab_widget.reset()
| 29.111111 | 72 | 0.656064 | 2,185 | 0.926633 | 0 | 0 | 0 | 0 | 0 | 0 | 110 | 0.04665 |
824894a056e2da4cc1ec1c9dd0d07d94594ced73 | 6,093 | py | Python | azkaban_zip_uploader/tests/lambda_handler_tests.py | uk-gov-mirror/dwp.aws-azkaban | fa69ddf6e18fccba1fb96f6dd7a234b9441e96da | ["0BSD"] | null | null | null | azkaban_zip_uploader/tests/lambda_handler_tests.py | uk-gov-mirror/dwp.aws-azkaban | fa69ddf6e18fccba1fb96f6dd7a234b9441e96da | ["0BSD"] | null | null | null | azkaban_zip_uploader/tests/lambda_handler_tests.py | uk-gov-mirror/dwp.aws-azkaban | fa69ddf6e18fccba1fb96f6dd7a234b9441e96da | ["0BSD"] | null | null | null |

import lambda_handler
from unittest import TestCase
from mock import call, patch, Mock
from datetime import datetime
import boto3
import json
from botocore.stub import Stubber
import urllib3
mock_s3_client = boto3.client('s3')
s3_stubber = Stubber(mock_s3_client)
list_objects_response = {
'IsTruncated': False,
'Contents': [
{
'Key': 'return1.zip',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
{
'Key': 'do_not_return.txt',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
{
'Key': 'return2.zip',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
],
'Name': 'string',
'EncodingType': 'url',
'KeyCount': 123,
'ContinuationToken': 'string'
}
s3_stubber.add_response('list_objects_v2', list_objects_response)
s3_stubber.activate()
mock_sm_client = boto3.client('secretsmanager')
sm_stubber = Stubber(mock_sm_client)
mock_secret_value_response = {
'ARN': 'arn:aws:secretsmanager:eu-west-7:123456789012:secret:tutorials/MyFirstSecret-jiObOV',
'Name': 'string',
'VersionId': 'EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE',
'SecretBinary': b'{"azkaban_username": "test_user", "azkaban_password": "pw123"}',
'CreatedDate': datetime(2015, 1, 1)
}
sm_stubber.add_response('get_secret_value', mock_secret_value_response)
sm_stubber.add_response('get_secret_value', mock_secret_value_response)
sm_stubber.activate()
data_non_fail = json.dumps({
"status" : "error",
"message" : "Project already exists.",
}).encode('utf-8')
http_non_fail_error= Mock()
http_non_fail_error.data = data_non_fail
data_fail = json.dumps({
"error" : "error",
"message" : "Other message.",
}).encode('utf-8')
http_raise_error = Mock()
http_raise_error.data = data_fail
http_status_error = Mock()
http_status_error.data = "non JSON error response".encode('utf-8')
http_status_error.status = 418
session_data = json.dumps({
"status" : "success",
"session.id" : "test-session-id-12345432"
}).encode('utf-8')
http_session = Mock()
http_session.data = session_data
http_session.status = 200
class LambdaHandlerTests(TestCase):
def test_get_files_from_s3(self):
result = lambda_handler.get_files_from_s3("bucket_id", "s3_dir", mock_s3_client)
assert result == ['return1.zip', 'return2.zip']
@patch('lambda_handler.create_project')
@patch('urllib3.PoolManager')
def test_upload_to_azkaban_api_error_in_response(self, mock_http, mock_create_project):
mock_http.request.return_value = http_raise_error
with self.assertRaises(urllib3.exceptions.ResponseError) as context:
lambda_handler.upload_to_azkaban_api('zip_file', 'zip_file_name', 'session_id', mock_http, 'azkaban_url')
mock_http.request.assert_called_once()
self.assertTrue(str(context.exception) == "Failure uploading zip_file_name to Azkaban API - Error in API response body.")
@patch('lambda_handler.create_project')
@patch('urllib3.PoolManager')
def test_upload_to_azkaban_api_non_200_status(self, mock_http, mock_create_project):
mock_http.request.return_value = http_status_error
with self.assertRaises(urllib3.exceptions.ResponseError) as context:
lambda_handler.upload_to_azkaban_api('zip_file', 'zip_file_name', 'session_id', mock_http, 'azkaban_url')
mock_http.request.assert_called_once()
self.assertTrue(str(context.exception) == "Failure uploading zip_file_name to Azkaban API - non 200 status returned.")
@patch('urllib3.PoolManager')
def test_create_project_error_handling_error_path(self, mock_http):
mock_http.request.return_value = http_raise_error
with self.assertRaises(urllib3.exceptions.ResponseError) as context:
lambda_handler.create_project('azkaban_url', mock_http, 'session_id', 'test_project')
mock_http.request.assert_called_once()
self.assertTrue(str(context.exception) == 'Other message.')
@patch('urllib3.PoolManager')
def test_create_project_error_handling_happy_path(self, mock_http):
mock_http.request.return_value = http_non_fail_error
lambda_handler.create_project('azkaban_url', mock_http, 'session_id', 'test_project')
mock_http.request.assert_called_once()
@patch('lambda_handler.os.getenv')
@patch('urllib3.PoolManager')
@patch('lambda_handler.boto3')
def test_establish_azkaban_session_raise_error(self, mock_boto3, mock_http, mock_getenv):
mock_boto3.client.return_value = mock_sm_client
mock_http.request.return_value = http_non_fail_error
mock_getenv.side_effect = ["www.test_url.com", "test_secret"]
with self.assertRaises(urllib3.exceptions.ResponseError) as context:
lambda_handler.establish_azkaban_session(mock_http)
mock_http.request.assert_called_once()
self.assertTrue(str(context.exception) == 'Failure establising Azkaban API session.')
@patch('lambda_handler.os.getenv')
@patch('urllib3.PoolManager')
@patch('lambda_handler.boto3')
def test_establish_azkaban_session(self, mock_boto3, mock_http, mock_getenv):
mock_boto3.client.return_value = mock_sm_client
mock_http.request.return_value = http_session
mock_getenv.side_effect = ["www.test_url.com", "test_secret"]
result = lambda_handler.establish_azkaban_session(mock_http)
assert result == "test-session-id-12345432"
| 35.841176 | 129 | 0.678976 | 3,355 | 0.550632 | 0 | 0 | 3,099 | 0.508616 | 0 | 0 | 1,767 | 0.290005 |
8249af75d4def2ae40ae7a6a262676d0c39c2b63 | 2,189 | py | Python | cripts/usernames/username.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | ["MIT"] | 2 | 2017-04-06T12:26:11.000Z | 2018-11-05T19:17:15.000Z | cripts/usernames/username.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | ["MIT"] | 9 | 2016-09-28T10:19:10.000Z | 2017-02-24T17:58:43.000Z | cripts/usernames/username.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | ["MIT"] | null | null | null |

import uuid
from mongoengine import Document, StringField, ListField, UUIDField
from django.conf import settings
from cripts.core.cripts_mongoengine import CriptsBaseAttributes, CriptsSourceDocument
from cripts.core.cripts_mongoengine import CriptsActionsDocument
class UserName(CriptsBaseAttributes, CriptsSourceDocument, CriptsActionsDocument,
Document):
"""
UserName class.
"""
meta = {
"collection": settings.COL_USERNAMES,
"cripts_type": 'UserName',
"latest_schema_version": 1,
"schema_doc": {
'name': 'The actual username',
'username_id': 'An ID corresponding to the username since using the raw username as the key can run into little bobby tables issues',
'description': 'Description of the e-mail address',
'datasets': ('List [] of datasets this username'
' appeared in'),
'source': ('List [] of sources who provided information about this'
' username'),
},
"jtable_opts": {
'details_url': 'cripts.usernames.views.username_detail',
'details_url_key': 'username_id',
'default_sort': "name",
'searchurl': 'cripts.usernames.views.usernames_listing',
'fields': [ "name", "created",
"source", "id", "username_id"],
'jtopts_fields': [ "name",
"created",
"source",
"favorite",
"id", "username_id"],
'hidden_fields': ["username_id", "id"],
'linked_fields': ["source", ],
'details_link': 'name',
'no_sort': []
}
}
name = StringField(required=True)
description = StringField(required=True)
username_id = UUIDField(binary=True, required=True, default=uuid.uuid4)
datasets = ListField(required=False)
| 39.8 | 145 | 0.516674 | 1,914 | 0.874372 | 0 | 0 | 0 | 0 | 0 | 0 | 806 | 0.368205 |
824a4f6bf20408ed367c7e9a67c9b62aea2ab1c0 | 7,611 | py | Python | sweetpea/tests/test_encoding_diagram.py | anniecherk/sweetpea-py | 23dbad99a9213ff764ec207b456cf5d002707fd0 | ["MIT"] | 1 | 2018-05-06T03:54:06.000Z | 2018-05-06T03:54:06.000Z | sweetpea/tests/test_encoding_diagram.py | anniecherk/sweetpea-py | 23dbad99a9213ff764ec207b456cf5d002707fd0 | ["MIT"] | 5 | 2018-09-18T02:15:17.000Z | 2018-12-05T20:02:24.000Z | sweetpea/tests/test_encoding_diagram.py | anniecherk/sweetpea-py | 23dbad99a9213ff764ec207b456cf5d002707fd0 | ["MIT"] | null | null | null |

import pytest
import operator as op
from sweetpea import fully_cross_block
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window
from sweetpea.encoding_diagram import __generate_encoding_diagram
color = Factor("color", ["red", "blue"])
text = Factor("text", ["red", "blue"])
con_level = DerivedLevel("con", WithinTrial(op.eq, [color, text]))
inc_level = DerivedLevel("inc", WithinTrial(op.ne, [color, text]))
con_factor = Factor("congruent?", [con_level, inc_level])
color_repeats_factor = Factor("color repeats?", [
DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [color])),
DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [color]))
])
text_repeats_factor = Factor("text repeats?", [
DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [text])),
DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [text]))
])
design = [color, text, con_factor]
crossing = [color, text]
blk = fully_cross_block(design, crossing, [])
def test_generate_encoding_diagram():
assert __generate_encoding_diagram(blk) == "\
----------------------------------------------\n\
| Trial | color | text | congruent? |\n\
| # | red blue | red blue | con inc |\n\
----------------------------------------------\n\
| 1 | 1 2 | 3 4 | 5 6 |\n\
| 2 | 7 8 | 9 10 | 11 12 |\n\
| 3 | 13 14 | 15 16 | 17 18 |\n\
| 4 | 19 20 | 21 22 | 23 24 |\n\
----------------------------------------------\n"
def test_generate_encoding_diagram_with_transition():
block = fully_cross_block([color, text, color_repeats_factor],
[color, text],
[])
assert __generate_encoding_diagram(block) == "\
--------------------------------------------------\n\
| Trial | color | text | color repeats? |\n\
| # | red blue | red blue | yes no |\n\
--------------------------------------------------\n\
| 1 | 1 2 | 3 4 | |\n\
| 2 | 5 6 | 7 8 | 17 18 |\n\
| 3 | 9 10 | 11 12 | 19 20 |\n\
| 4 | 13 14 | 15 16 | 21 22 |\n\
--------------------------------------------------\n"
def test_generate_encoding_diagram_with_constraint_and_multiple_transitions():
block = fully_cross_block([color, text, con_factor, color_repeats_factor, text_repeats_factor],
[color, text],
[])
assert __generate_encoding_diagram(block) == "\
-------------------------------------------------------------------------------\n\
| Trial | color | text | congruent? | color repeats? | text repeats? |\n\
| # | red blue | red blue | con inc | yes no | yes no |\n\
-------------------------------------------------------------------------------\n\
| 1 | 1 2 | 3 4 | 5 6 | | |\n\
| 2 | 7 8 | 9 10 | 11 12 | 25 26 | 31 32 |\n\
| 3 | 13 14 | 15 16 | 17 18 | 27 28 | 33 34 |\n\
| 4 | 19 20 | 21 22 | 23 24 | 29 30 | 35 36 |\n\
-------------------------------------------------------------------------------\n"
def test_generate_encoding_diagram_with_constraint_and_multiple_transitions_in_different_order():
block = fully_cross_block([text_repeats_factor, color, color_repeats_factor, text, con_factor],
[color, text],
[])
assert __generate_encoding_diagram(block) == "\
-------------------------------------------------------------------------------\n\
| Trial | text repeats? | color | color repeats? | text | congruent? |\n\
| # | yes no | red blue | yes no | red blue | con inc |\n\
-------------------------------------------------------------------------------\n\
| 1 | | 1 2 | | 3 4 | 5 6 |\n\
| 2 | 25 26 | 7 8 | 31 32 | 9 10 | 11 12 |\n\
| 3 | 27 28 | 13 14 | 33 34 | 15 16 | 17 18 |\n\
| 4 | 29 30 | 19 20 | 35 36 | 21 22 | 23 24 |\n\
-------------------------------------------------------------------------------\n"
def test_generate_encoding_diagram_with_windows():
color3 = Factor("color3", ["red", "blue", "green"])
yes_fn = lambda colors: colors[0] == colors[1] == colors[2]
no_fn = lambda colors: not yes_fn(colors)
color3_repeats_factor = Factor("color3 repeats?", [
DerivedLevel("yes", Window(yes_fn, [color3], 3, 1)),
DerivedLevel("no", Window(no_fn, [color3], 3, 1))
])
block = fully_cross_block([color3_repeats_factor, color3, text], [color3, text], [])
assert __generate_encoding_diagram(block) == "\
---------------------------------------------------------\n\
| Trial | color3 repeats? | color3 | text |\n\
| # | yes no | red blue green | red blue |\n\
---------------------------------------------------------\n\
| 1 | | 1 2 3 | 4 5 |\n\
| 2 | | 6 7 8 | 9 10 |\n\
| 3 | 31 32 | 11 12 13 | 14 15 |\n\
| 4 | 33 34 | 16 17 18 | 19 20 |\n\
| 5 | 35 36 | 21 22 23 | 24 25 |\n\
| 6 | 37 38 | 26 27 28 | 29 30 |\n\
---------------------------------------------------------\n"
def test_generate_encoding_diagram_with_window_with_stride():
congruent_bookend = Factor("congruent bookend?", [
DerivedLevel("yes", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3)),
DerivedLevel("no", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 1, 3))
])
block = fully_cross_block([color, text, congruent_bookend], [color, text], [])
assert __generate_encoding_diagram(block) == "\
------------------------------------------------------\n\
| Trial | color | text | congruent bookend? |\n\
| # | red blue | red blue | yes no |\n\
------------------------------------------------------\n\
| 1 | 1 2 | 3 4 | 17 18 |\n\
| 2 | 5 6 | 7 8 | |\n\
| 3 | 9 10 | 11 12 | |\n\
| 4 | 13 14 | 15 16 | 19 20 |\n\
------------------------------------------------------\n"
congruent_bookend = Factor("congruent bookend?", [
DerivedLevel("yes", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2)),
DerivedLevel("no", Window(lambda colors, texts: colors[0] == texts[0], [color, text], 2, 2))
])
block = fully_cross_block([color, text, congruent_bookend], [color, text], [])
assert __generate_encoding_diagram(block) == "\
------------------------------------------------------\n\
| Trial | color | text | congruent bookend? |\n\
| # | red blue | red blue | yes no |\n\
------------------------------------------------------\n\
| 1 | 1 2 | 3 4 | |\n\
| 2 | 5 6 | 7 8 | 17 18 |\n\
| 3 | 9 10 | 11 12 | |\n\
| 4 | 13 14 | 15 16 | 19 20 |\n\
------------------------------------------------------\n"
| 48.170886 | 102 | 0.411247 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,375 | 0.574826 |
824adf7af953a3787b6ad72eca002b2f5fa3b943 | 297 | py | Python | Source_Code/Python/ConductedTest/case_generator.py | fenglwh/instruments | 7886158d1ed97fe6bfe372a55f4fca107e834311 | ["MIT"] | null | null | null | Source_Code/Python/ConductedTest/case_generator.py | fenglwh/instruments | 7886158d1ed97fe6bfe372a55f4fca107e834311 | ["MIT"] | 3 | 2018-09-21T00:57:21.000Z | 2018-09-21T01:49:40.000Z | Source_Code/Python/ConductedTest/case_generator.py | fenglwh/instruments | 7886158d1ed97fe6bfe372a55f4fca107e834311 | ["MIT"] | null | null | null |

import json
from labinstrument.SS.CMW500.CMW500_WIFI.CMW500_WIFI import *
if __name__ == '__main__':
new_config_name='emm'
new_config=CMW_WIFI(17).get_parameters()
config=json.load(open('config.txt'))
config[new_config_name]=new_config
    json.dump(config,open('config.txt','w'))

| 33 | 61 | 0.737374 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.141414 |
824c8df0f0e68c3c21ba270b931275c591b881bd | 9,957 | py | Python | internal-export-file/export-report-pdf/src/export-report-pdf.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | ["Apache-2.0"] | null | null | null | internal-export-file/export-report-pdf/src/export-report-pdf.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | ["Apache-2.0"] | null | null | null | internal-export-file/export-report-pdf/src/export-report-pdf.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | ["Apache-2.0"] | null | null | null |

import yaml
import os
import time
import datetime
from pycti.utils.constants import StixCyberObservableTypes
from weasyprint import HTML
from pycti import OpenCTIConnectorHelper, get_config_variable
from jinja2 import Environment, FileSystemLoader
class ExportReportPdf:
def __init__(self):
# Instantiate the connector helper from config
config_file_path = os.path.dirname(os.path.abspath(__file__)) + "/config.yml"
config = (
yaml.load(open(config_file_path), Loader=yaml.FullLoader)
if os.path.isfile(config_file_path)
else {}
)
self.helper = OpenCTIConnectorHelper(config)
# ExportReportPdf specific config settings
self.primary_color = get_config_variable(
"EXPORT_REPORT_PDF_PRIMARY_COLOR",
["export_report_pdf", "primary_color"],
config,
)
self.secondary_color = get_config_variable(
"EXPORT_REPORT_PDF_SECONDARY_COLOR",
["export_report_pdf", "secondary_color"],
config,
)
self.current_dir = os.path.abspath(os.path.dirname(__file__))
self.set_colors()
self.company_address_line_1 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_1",
["export_report_pdf", "company_address_line_1"],
config,
)
self.company_address_line_2 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_2",
["export_report_pdf", "company_address_line_2"],
config,
)
self.company_address_line_3 = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_ADDRESS_LINE_3",
["export_report_pdf", "company_address_line_3"],
config,
)
self.company_phone_number = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_PHONE_NUMBER",
["export_report_pdf", "company_phone_number"],
config,
)
self.company_email = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_EMAIL",
["export_report_pdf", "company_email"],
config,
)
self.company_website = get_config_variable(
"EXPORT_REPORT_PDF_COMPANY_WEBSITE",
["export_report_pdf", "company_website"],
config,
)
self.indicators_only = get_config_variable(
"EXPORT_REPORT_PDF_INDICATORS_ONLY",
["export_report_pdf", "indicators_only"],
config,
)
self.defang_urls = get_config_variable(
"EXPORT_REPORT_PDF_DEFANG_URLS",
["export_report_pdf", "defang_urls"],
config,
)
def _process_message(self, data):
file_name = data["file_name"]
# TODO this can be implemented to filter every entity and observable
# max_marking = data["max_marking"]
entity_type = data["entity_type"]
if entity_type != "Report":
raise ValueError(
f'This Connector can only process entities of type "Report" and not of type "{entity_type}".'
)
# Get the Report
report_dict = self.helper.api.report.read(id=data["entity_id"])
# Extract values for inclusion in output pdf
report_marking = report_dict.get("objectMarking", None)
if report_marking:
report_marking = report_marking[-1]["definition"]
report_name = report_dict["name"]
report_description = report_dict.get("description", "No description available.")
report_confidence = report_dict["confidence"]
report_id = report_dict["id"]
report_external_refs = [
external_ref_dict["url"]
for external_ref_dict in report_dict["externalReferences"]
]
report_objs = report_dict["objects"]
report_date = datetime.datetime.now().strftime("%b %d %Y")
context = {
"report_name": report_name,
"report_description": report_description,
"report_marking": report_marking,
"report_confidence": report_confidence,
"report_external_refs": report_external_refs,
"report_date": report_date,
"company_address_line_1": self.company_address_line_1,
"company_address_line_2": self.company_address_line_2,
"company_address_line_3": self.company_address_line_3,
"company_phone_number": self.company_phone_number,
"company_email": self.company_email,
"company_website": self.company_website,
"entities": {},
"observables": {},
}
# Process each STIX Object
for report_obj in report_objs:
obj_entity_type = report_obj["entity_type"]
obj_id = report_obj["standard_id"]
# Handle StixCyberObservables entities
if obj_entity_type == "StixFile" or StixCyberObservableTypes.has_value(
obj_entity_type
):
observable_dict = self.helper.api.stix_cyber_observable.read(id=obj_id)
# If only include indicators and
# the observable doesn't have an indicator, skip it
if self.indicators_only and not observable_dict["indicators"]:
self.helper.log_info(
f"Skipping {obj_entity_type} observable with value {observable_dict['observable_value']} as it was not an Indicator."
)
continue
if obj_entity_type not in context["observables"]:
context["observables"][obj_entity_type] = []
# Defang urls
if self.defang_urls and obj_entity_type == "Url":
observable_dict["observable_value"] = observable_dict[
"observable_value"
].replace("http", "hxxp", 1)
context["observables"][obj_entity_type].append(observable_dict)
# Handle all other entities
else:
reader_func = self.get_reader(obj_entity_type)
if reader_func is None:
self.helper.log_error(
f'Could not find a function to read entity with type "{obj_entity_type}"'
)
continue
entity_dict = reader_func(id=obj_id)
if obj_entity_type not in context["entities"]:
context["entities"][obj_entity_type] = []
context["entities"][obj_entity_type].append(entity_dict)
# Render html with input variables
env = Environment(loader=FileSystemLoader(self.current_dir))
template = env.get_template("resources/report.html")
html_string = template.render(context)
# Generate pdf from html string
pdf_contents = HTML(string=html_string, base_url="resources").write_pdf()
# Upload the output pdf
self.helper.log_info(f"Uploading: {file_name}")
self.helper.api.stix_domain_object.add_file(
id=report_id,
file_name=file_name,
data=pdf_contents,
mime_type="application/pdf",
)
return "Export done"
def set_colors(self):
with open(
os.path.join(self.current_dir, "resources/report.css.template"), "r"
) as f:
new_css = f.read()
new_css = new_css.replace("<primary_color>", self.primary_color)
new_css = new_css.replace("<secondary_color>", self.secondary_color)
with open(os.path.join(self.current_dir, "resources/report.css"), "w") as f:
f.write(new_css)
def get_reader(self, entity_type):
"""
Returns the function to use for calling the OpenCTI to
read data for a particular entity type.
entity_type: a str representing the entity type, i.e. Indicator
returns: a function or None if entity type is not supported
"""
reader = {
"Stix-Domain-Object": self.helper.api.stix_domain_object.read,
"Attack-Pattern": self.helper.api.attack_pattern.read,
"Campaign": self.helper.api.campaign.read,
"Note": self.helper.api.note.read,
"Observed-Data": self.helper.api.observed_data.read,
"Organization": self.helper.api.identity.read,
"Opinion": self.helper.api.opinion.read,
"Report": self.helper.api.report.read,
"Sector": self.helper.api.identity.read,
"System": self.helper.api.identity.read,
"Course-Of-Action": self.helper.api.course_of_action.read,
"Identity": self.helper.api.identity.read,
"Indicator": self.helper.api.indicator.read,
"Individual": self.helper.api.identity.read,
"Infrastructure": self.helper.api.infrastructure.read,
"Intrusion-Set": self.helper.api.intrusion_set.read,
"Malware": self.helper.api.malware.read,
"Threat-Actor": self.helper.api.threat_actor.read,
"Tool": self.helper.api.tool.read,
"Vulnerability": self.helper.api.vulnerability.read,
"Incident": self.helper.api.incident.read,
"City": self.helper.api.location.read,
"Country": self.helper.api.location.read,
"Region": self.helper.api.location.read,
"Position": self.helper.api.location.read,
"Location": self.helper.api.location.read,
}
return reader.get(entity_type, None)
# Start the main loop
def start(self):
self.helper.listen(self._process_message)
if __name__ == "__main__":
try:
connector_export_report_pdf = ExportReportPdf()
connector_export_report_pdf.start()
except Exception as e:
print(e)
time.sleep(10)
exit(0)
| 40.149194 | 141 | 0.610425 | 9,485 | 0.952596 | 0 | 0 | 0 | 0 | 0 | 0 | 2,942 | 0.295471 |
824e54bffa5be7c6d4c645bb089554003a4f25bc | 189 | py | Python | Lyft-Dental/payments/pay/urls.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | ["MIT"] | 1 | 2021-01-09T08:42:24.000Z | 2021-01-09T08:42:24.000Z | Lyft-Dental/payments/pay/urls.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | ["MIT"] | null | null | null | Lyft-Dental/payments/pay/urls.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | ["MIT"] | null | null | null |

from django.urls import path
from .views import initiate_payment, callback
urlpatterns = [
path('', initiate_payment, name='pay'),
path('callback/', callback, name='callback'),
]
| 21 | 49 | 0.703704 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.148148 |
824eb389c2a7eca319848d5d0b764477a524317f | 544 | py | Python | ibmsecurity/isam/base/overview.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | ["Apache-2.0"] | 46 | 2017-03-21T21:08:59.000Z | 2022-02-20T22:03:46.000Z | ibmsecurity/isam/base/overview.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | ["Apache-2.0"] | 201 | 2017-03-21T21:25:52.000Z | 2022-03-30T21:38:20.000Z | ibmsecurity/isam/base/overview.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | ["Apache-2.0"] | 91 | 2017-03-22T16:25:36.000Z | 2022-02-04T04:36:29.000Z |

def get(isamAppliance, check_mode=False, force=False):
"""
Retrieve an overview of updates and licensing information
"""
return isamAppliance.invoke_get("Retrieve an overview of updates and licensing information",
"/updates/overview")
def get_licensing_info(isamAppliance, check_mode=False, force=False):
"""
Retrieve the licensing information
"""
return isamAppliance.invoke_get("Retrieve the licensing information",
"/lum/is_licensed")
| 36.266667 | 96 | 0.647059 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 255 | 0.46875 |
824f686fbf01dfe1ee2beac723ed207ab4daf6b1 | 1,741 | py | Python | src/sweetrpg_library_api/application/config.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | ["MIT"] | null | null | null | src/sweetrpg_library_api/application/config.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | ["MIT"] | 33 | 2021-09-18T23:52:05.000Z | 2022-03-30T12:25:49.000Z | src/sweetrpg_library_api/application/config.py | sweetrpg/library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | ["MIT"] | null | null | null |

# -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <[email protected]>"
"""
config.py
- settings for the flask application object
"""
import os
import redis
from sweetrpg_library_api.application import constants
class BaseConfig(object):
DEBUG = bool(os.environ.get(constants.DEBUG) or True)
PORT = os.environ.get(constants.PORT) or 5000
# ASSETS_DEBUG = True
LOG_LEVEL = os.environ.get(constants.LOG_LEVEL) or "INFO"
DB_HOST = os.environ[constants.DB_HOST]
# DB_PORT = os.environ.get(constants.DB_PORT) or "27017"
DB_USERNAME = os.environ[constants.DB_USER]
DB_PASSWORD = os.environ[constants.DB_PW]
DB_NAME = os.environ[constants.DB_NAME]
DB_OPTS = os.environ.get(constants.DB_OPTS)
DB_URL = f"mongodb+srv://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}?{DB_OPTS}"
MONGODB_ALIAS_CONNECTION = "default"
MONGODB_URI = DB_URL
MONGODB_SETTINGS = {
"host": DB_URL,
"connect": False,
}
# used for encryption and session management
# SECRET_KEY = os.environ.get('SECRET_KEY') or hashlib.sha256(f"{random.random()}".encode('utf-8')).hexdigest()
# CSRF_TOKEN = os.environ.get('CSRF_TOKEN') or hashlib.sha256(f"{random.random()}".encode('utf-8')).hexdigest()
CACHE_REDIS_HOST = os.environ[constants.REDIS_HOST]
CACHE_REDIS_PORT = int(os.environ.get(constants.REDIS_PORT) or 6379)
# CACHE_REDIS_DB = int(os.environ.get(constants.REDIS_DB) or 7)
SESSION_TYPE = "redis"
SESSION_REDIS = redis.from_url(
f"redis://{os.environ[constants.REDIS_HOST]}:{int(os.environ.get(constants.REDIS_PORT) or 6379)}")
# SEGMENT_WRITE_KEY = os.environ.get(constants.SEGMENT_WRITE_KEY)
SERVER_NAME = os.environ.get(constants.SERVER_NAME)
| 38.688889 | 115 | 0.708788 | 1,521 | 0.873636 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.458357 |
824f8edece08e9acdf645fa301526e669393eaed | 1,711 | py | Python | frames.py | mppc12/special_subject_tea | 945c10ac5a4f0f2fec2fbd6abeb398074801250f | ["MIT"] | null | null | null | frames.py | mppc12/special_subject_tea | 945c10ac5a4f0f2fec2fbd6abeb398074801250f | ["MIT"] | null | null | null | frames.py | mppc12/special_subject_tea | 945c10ac5a4f0f2fec2fbd6abeb398074801250f | ["MIT"] | null | null | null |

import pandas as pd
from group import Group
class Frames:
def __init__(self, frame=None):
self.cleanups = Cleanup()
self.groups = Group()
class Cleanup:
def __init__(self, frame=None):
self.frame = frame
def __call__(self, frame):
self.frame = frame
return self
def dropcol(self):
column = ['貨品號列', '重量(公噸)', '英文貨名', '數量(限11碼貨品)', '數量單位']
frame = self.frame.drop(column, axis=1, inplace=False)
return frame
def droprow(self):
rowitem = ['普洱茶,每包不超過3公斤',
'普洱茶,每包超過3公斤',
'茶或馬黛茶之萃取物、精、濃縮物及以茶、馬黛茶之萃取物、精、濃縮物或以茶、馬黛茶為主要成分之調製品']
frame = self.frame[self.frame['中文貨名'].isin(rowitem) == False]
return frame
def modifydate(self):
rc_to_vi = {'92年':'2003', '93年':'2004', '94年':'2005', '95年':'2006',
'96年':'2007', '97年':'2008', '98年':'2009', '99年':'2010',
'100年':'2011', '101年':'2012', '102年':'2013', '103年':'2014',
'104年':'2015', '105年':'2016', '106年':'2017', '107年':'2018',
'108年':'2019'}
frame = self.frame.replace(rc_to_vi, inplace = False)
return frame
def dtypeint(self):
dtypes = ['重量(公斤)', '美元(千元)']
for i in dtypes:
self.frame[i] = pd.to_numeric(self.frame[i])
frame = self.frame
return frame
def modifyitem(self):
item = {'其他綠茶(未發酵),每包超過3公斤': '綠茶(未發酵),每包超過3公斤',
'薰芬綠茶,每包超過3公斤' : '綠茶(未發酵),每包超過3公斤'}
frame = self.frame.replace(item, inplace = False)
return frame
| 30.017544 | 81 | 0.499123 | 2,013 | 0.971056 | 0 | 0 | 0 | 0 | 0 | 0 | 762 | 0.367583 |
82500b40709a627c2f0699d9319a5f6bbab93bb0 | 20,594 | py | Python | msm/mycroft_skills_manager.py | forslund/mycroft-skills-manager | 825e910a555e1882999647d226a56734a7b75ea4 | ["Apache-2.0"] | null | null | null | msm/mycroft_skills_manager.py | forslund/mycroft-skills-manager | 825e910a555e1882999647d226a56734a7b75ea4 | ["Apache-2.0"] | null | null | null | msm/mycroft_skills_manager.py | forslund/mycroft-skills-manager | 825e910a555e1882999647d226a56734a7b75ea4 | ["Apache-2.0"] | null | null | null |

# Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Install, remove, update and track the skills on a device
MSM can be used on the command line but is also used by Mycroft core daemons.
"""
import time
import logging
import shutil
from functools import wraps
from glob import glob
from multiprocessing.pool import ThreadPool
from os import path
from typing import Dict, List
from xdg import BaseDirectory
from msm import GitException
from msm.exceptions import (
AlreadyInstalled,
AlreadyRemoved,
MsmException,
MultipleSkillMatches,
RemoveException,
SkillNotFound
)
from msm.skill_entry import SkillEntry
from msm.skill_repo import SkillRepo
from msm.skill_state import (
initialize_skill_state,
get_skill_state,
write_device_skill_state,
load_device_skill_state,
device_skill_state_hash
)
from msm.util import cached_property, MsmProcessLock
LOG = logging.getLogger(__name__)
CURRENT_SKILLS_DATA_VERSION = 2
ONE_DAY = 86400
def save_device_skill_state(func):
"""Decorator to overwrite the skills.json file when skill state changes.
The methods decorated with this function are executed in threads. So,
this contains some funky logic to keep the threads from stepping on one
another.
"""
@wraps(func)
def func_wrapper(self, *args, **kwargs):
will_save = False
if not self.saving_handled:
will_save = self.saving_handled = True
try:
ret = func(self, *args, **kwargs)
finally:
if will_save:
self.write_device_skill_state()
# Always restore saving_handled flag
if will_save:
self.saving_handled = False
return ret
return func_wrapper
class MycroftSkillsManager(object):
SKILL_GROUPS = {'default', 'mycroft_mark_1', 'picroft', 'kde',
'respeaker', 'mycroft_mark_2', 'mycroft_mark_2pi'}
def __init__(self, platform='default', old_skills_dir=None,
skills_dir=None, repo=None, versioned=True):
self.platform = platform
        # Keep this variable alive for a while; it is used to move skills
        # from the old config-based location to XDG
self.old_skills_dir = path.expanduser(old_skills_dir or '') or None
self.skills_dir = (skills_dir or
BaseDirectory.save_data_path('mycroft/skills'))
self.repo = repo or SkillRepo()
self.versioned = versioned
self.lock = MsmProcessLock()
# Property placeholders
self._all_skills = None
self._default_skills = None
self._local_skills = None
self._device_skill_state = None
self.saving_handled = False
self.device_skill_state_hash = ''
with self.lock:
self._init_skills_data()
def clear_cache(self):
"""Completely clear the skills cache."""
self._device_skill_state = None
self._invalidate_skills_cache()
@cached_property(ttl=ONE_DAY)
def all_skills(self):
"""Getting a list of skills can take a while so cache it.
The list method is called several times in this class and in core.
Skill data on a device just doesn't change that frequently so
getting a fresh list that many times does not make a lot of sense.
        The cache will expire once a day (ONE_DAY seconds) to pick up any
        changes in the mycroft-skills repo.
Skill installs and updates will invalidate the cache, which will
        cause this property to refresh the next time it is referenced.
The list method can be called directly if a fresh skill list is needed.
"""
if self._all_skills is None:
self._all_skills = self._get_all_skills()
return self._all_skills
def _get_all_skills(self):
LOG.info('building SkillEntry objects for all skills')
self._refresh_skill_repo()
remote_skills = self._get_remote_skills()
all_skills = self._merge_remote_with_local(remote_skills)
return all_skills
def list(self):
"""Load a list of SkillEntry objects from both local and remote skills
It is necessary to load both local and remote skills at
the same time to correctly associate local skills with the name
in the repo and remote skills with any custom path that they
have been downloaded to.
The return value of this function is cached in the all_skills property.
Only call this method if you need a fresh version of the SkillEntry
objects.
"""
all_skills = self._get_all_skills()
self._invalidate_skills_cache(new_value=all_skills)
return all_skills
def _refresh_skill_repo(self):
"""Get the latest mycroft-skills repo code."""
try:
self.repo.update()
except GitException as e:
if not path.isdir(self.repo.path):
raise
LOG.warning('Failed to update repo: {}'.format(repr(e)))
def _get_remote_skills(self):
"""Build a dictionary of skills in mycroft-skills repo keyed by id"""
remote_skills = []
for name, _, url, sha in self.repo.get_skill_data():
skill_dir = SkillEntry.create_path(self.skills_dir, url, name)
sha = sha if self.versioned else ''
remote_skills.append(
SkillEntry(name, skill_dir, url, sha, msm=self)
)
return {skill.id: skill for skill in remote_skills}
def _merge_remote_with_local(self, remote_skills):
"""Merge the skills found in the repo with those installed locally."""
all_skills = []
# First move locally installed skills from old to new location
# TODO: get rid of this at some point
if self.old_skills_dir:
for old_skill_dir in glob(path.join(self.old_skills_dir, '*/')):
skill_name = old_skill_dir.rstrip('/').rsplit('/', 1)[1]
new_skill_path = self.skills_dir + "/" + skill_name
if not path.isdir(new_skill_path):
shutil.move(old_skill_dir, self.skills_dir +
"/" + skill_name)
for skill_file in glob(path.join(self.skills_dir, '*', '__init__.py')):
skill = SkillEntry.from_folder(path.dirname(skill_file), msm=self,
use_cache=False)
if skill.id in remote_skills:
skill.attach(remote_skills.pop(skill.id))
all_skills.append(skill)
all_skills.extend(remote_skills.values())
return all_skills
@property
def local_skills(self):
"""Property containing a dictionary of local skills keyed by name."""
if self._local_skills is None:
self._local_skills = {
s.name: s for s in self.all_skills if s.is_local
}
return self._local_skills
@property
def default_skills(self):
if self._default_skills is None:
default_skill_groups = self.list_all_defaults()
try:
default_skill_group = default_skill_groups[self.platform]
except KeyError:
LOG.error(
'No default skill list found for platform "{}". '
'Using base list.'.format(self.platform)
)
default_skill_group = default_skill_groups.get('default', [])
self._default_skills = {s.name: s for s in default_skill_group}
return self._default_skills
def list_all_defaults(self): # type: () -> Dict[str, List[SkillEntry]]
"""Generate dictionary of default skills in all default skill groups"""
all_skills = {skill.name: skill for skill in self.all_skills}
default_skills = {group: [] for group in self.SKILL_GROUPS}
for group_name, skill_names in self.repo.get_default_skill_names():
group_skills = []
for skill_name in skill_names:
try:
group_skills.append(all_skills[skill_name])
except KeyError:
LOG.warning('No such default skill: ' + skill_name)
default_skills[group_name] = group_skills
return default_skills
def _init_skills_data(self):
"""Initial load of the skill state that occurs upon instantiation.
If the skills state was upgraded after it was loaded, write the
updated skills state to disk.
"""
try:
del(self.device_skill_state['upgraded'])
except KeyError:
self.device_skill_state_hash = device_skill_state_hash(
self.device_skill_state
)
else:
self.write_device_skill_state()
@property
def device_skill_state(self):
"""Dictionary representing the state of skills on a device."""
if self._device_skill_state is None:
self._device_skill_state = load_device_skill_state()
skills_data_version = self._device_skill_state.get('version', 0)
if skills_data_version < CURRENT_SKILLS_DATA_VERSION:
self._upgrade_skills_data()
else:
self._sync_device_skill_state()
return self._device_skill_state
def _upgrade_skills_data(self):
"""Upgrade the contents of the device skills state if needed."""
if self._device_skill_state.get('version', 0) == 0:
self._upgrade_to_v1()
if self._device_skill_state['version'] == 1:
self._upgrade_to_v2()
def _upgrade_to_v1(self):
"""Upgrade the device skills state to version one."""
self._device_skill_state.update(blacklist=[], version=1, skills=[])
for skill in self.local_skills.values():
skill_data = self._device_skill_state.get(skill.name, {})
try:
origin = skill_data['origin']
except KeyError:
origin = self._determine_skill_origin(skill)
beta = skill_data.get('beta', False)
skill_state = initialize_skill_state(
skill.name,
origin,
beta,
skill.skill_gid
)
skill_state['installed'] = skill_data.get('installed', 0)
if isinstance(skill_state['installed'], bool):
skill_state['installed'] = 0
skill_state['updated'] = skill_data.get('updated', 0)
self._device_skill_state['skills'].append(skill_state)
self._device_skill_state.update(upgraded=True)
def _upgrade_to_v2(self):
"""Upgrade the device skills state to version 2.
This adds the skill_gid field to skill entries.
"""
self._update_skill_gid()
self._device_skill_state.update(version=2, upgraded=True)
def _sync_device_skill_state(self):
"""Sync device's skill state with with actual skills on disk."""
self._add_skills_to_state()
self._remove_skills_from_state()
self._update_skill_gid()
def _add_skills_to_state(self):
"""Add local skill to state if it is not already there."""
skill_names = [s['name'] for s in self._device_skill_state['skills']]
for skill in self.local_skills.values():
if skill.name not in skill_names:
origin = self._determine_skill_origin(skill)
skill_state = initialize_skill_state(
skill.name,
origin,
False,
skill.skill_gid
)
self._device_skill_state['skills'].append(skill_state)
def _remove_skills_from_state(self):
"""Remove skills from state that no longer exist in the filesystem."""
skills_to_remove = []
for skill in self._device_skill_state['skills']:
is_not_local = skill['name'] not in self.local_skills
is_installed_state = skill['installation'] == 'installed'
if is_not_local and is_installed_state:
skills_to_remove.append(skill)
for skill in skills_to_remove:
self._device_skill_state['skills'].remove(skill)
def _update_skill_gid(self):
for skill in self._device_skill_state['skills']:
try:
local_skill = self.local_skills[skill['name']]
except KeyError:
skill['skill_gid'] = ''
else:
skill['skill_gid'] = local_skill.skill_gid
def _determine_skill_origin(self, skill):
if skill.name in self.default_skills:
origin = 'default'
elif skill.url:
origin = 'cli'
else:
origin = 'non-msm'
return origin
def write_device_skill_state(self, data=None):
"""Write device's skill state to disk if it has been modified."""
data = data or self.device_skill_state
if device_skill_state_hash(data) != self.device_skill_state_hash:
write_device_skill_state(data)
self.device_skill_state_hash = device_skill_state_hash(data)
@save_device_skill_state
def install(self, param, author=None, constraints=None, origin=''):
"""Install by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
skill_state = initialize_skill_state(
skill.name,
origin,
skill.is_beta,
skill.skill_gid
)
try:
skill.install(constraints)
except AlreadyInstalled:
log_msg = 'Skill {} already installed - ignoring install request'
LOG.info(log_msg.format(skill.name))
skill_state = None
raise
except MsmException as e:
skill_state.update(
installation='failed',
status='error',
failure_message=str(e)
)
raise
else:
skill_state.update(
installed=time.time(),
installation='installed',
status='active',
beta=skill.is_beta
)
finally:
# Store the entry in the list
if skill_state is not None:
self.device_skill_state['skills'].append(skill_state)
self._invalidate_skills_cache()
@save_device_skill_state
def remove(self, param, author=None):
"""Remove by url or name"""
if isinstance(param, SkillEntry):
skill = param
else:
skill = self.find_skill(param, author)
try:
skill.remove()
except AlreadyRemoved:
LOG.info('Skill {} has already been removed'.format(skill.name))
raise
except RemoveException:
LOG.exception('Failed to remove skill ' + skill.name)
raise
else:
remaining_skills = []
for skill_state in self.device_skill_state['skills']:
if skill_state['name'] != skill.name:
remaining_skills.append(skill_state)
self.device_skill_state['skills'] = remaining_skills
self._invalidate_skills_cache()
def update_all(self):
def update_skill(skill):
entry = get_skill_state(skill.name, self.device_skill_state)
if entry:
entry['beta'] = skill.is_beta
if skill.update():
self._invalidate_skills_cache()
self._device_skill_state = None
if entry:
entry['updated'] = time.time()
return self.apply(update_skill, self.local_skills.values())
@save_device_skill_state
def update(self, skill=None, author=None):
"""Update all downloaded skills or one specified skill."""
if skill is None:
return self.update_all()
else:
if isinstance(skill, str):
skill = self.find_skill(skill, author)
skill_state = get_skill_state(skill.name, self.device_skill_state)
if skill_state:
skill_state['beta'] = skill.is_beta
if skill.update():
                # On a successful update, record the update timestamp
if skill_state:
skill_state['updated'] = time.time()
self._invalidate_skills_cache()
@save_device_skill_state
def apply(self, func, skills, max_threads=20):
"""Run a function on all skills in parallel"""
def run_item(skill):
try:
func(skill)
return True
except MsmException as e:
LOG.error('Error running {} on {}: {}'.format(
func.__name__, skill.name, repr(e)
))
return False
except Exception:
LOG.exception('Error running {} on {}:'.format(
func.__name__, skill.name
))
with ThreadPool(max_threads) as tp:
return tp.map(run_item, skills)
@save_device_skill_state
def install_defaults(self):
"""Installs the default skills, updates all others"""
def install_or_update_skill(skill):
if skill.is_local:
self.update(skill)
else:
self.install(skill, origin='default')
return self.apply(
install_or_update_skill,
self.default_skills.values()
)
def _invalidate_skills_cache(self, new_value=None):
"""Reset the cached skill lists in case something changed.
The cached_property decorator builds a _cache instance attribute
storing a dictionary of cached values. Deleting from this attribute
invalidates the cache.
"""
LOG.info('invalidating skills cache')
if hasattr(self, '_cache') and 'all_skills' in self._cache:
del self._cache['all_skills']
self._all_skills = None if new_value is None else new_value
self._local_skills = None
self._default_skills = None
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.all_skills:
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
skill_directory = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, skill_directory, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.all_skills
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
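# Editor's sketch (not part of the upstream module): typical programmatic use
# looks roughly like
#   msm = MycroftSkillsManager(platform='mycroft_mark_1')
#   skill = msm.find_skill('skill-name-or-url')
#   msm.install(skill, origin='cli')
# The skill name above is a placeholder, not a real entry in the skills repo.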
| 37.240506 | 79 | 0.609838 | 17,955 | 0.871856 | 0 | 0 | 6,680 | 0.324366 | 0 | 0 | 5,387 | 0.261581 |
8251357bc0686fc467cb6924c7a1a83a74692825 | 973 | py | Python | ietf/utils/resources.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| 1 | 2015-09-02T19:53:12.000Z | 2015-09-02T19:53:12.000Z | ietf/utils/resources.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| null | null | null | ietf/utils/resources.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
]
| null | null | null | # Autogenerated by the mkresources management command 2014-11-13 05:39
from tastypie.resources import ModelResource
from tastypie.fields import CharField
from tastypie.constants import ALL
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from ietf import api
from ietf.utils.models import DumpInfo
class UserResource(ModelResource):
username = CharField()
class Meta:
queryset = User.objects.all()
serializer = api.Serializer()
class ContentTypeResource(ModelResource):
username = CharField()
class Meta:
queryset = ContentType.objects.all()
serializer = api.Serializer()
class DumpInfoResource(ModelResource):
class Meta:
queryset = DumpInfo.objects.all()
serializer = api.Serializer()
#resource_name = 'dumpinfo'
filtering = {
"date": ALL,
"host": ALL,
}
api.utils.register(DumpInfoResource())
| 27.8 | 70 | 0.70298 | 573 | 0.5889 | 0 | 0 | 0 | 0 | 0 | 0 | 109 | 0.112025 |
825208daaf95b00b8d7fda9692bec10b366dcc4a | 1,624 | py | Python | maskrcnn_benchmark/data/datasets/concat_dataset.py | dukebw/maskrcnn-benchmark | f6710844f8cc6b6ce5345fcdc996f05ec04c3df7 | [
"MIT"
]
| null | null | null | maskrcnn_benchmark/data/datasets/concat_dataset.py | dukebw/maskrcnn-benchmark | f6710844f8cc6b6ce5345fcdc996f05ec04c3df7 | [
"MIT"
]
| null | null | null | maskrcnn_benchmark/data/datasets/concat_dataset.py | dukebw/maskrcnn-benchmark | f6710844f8cc6b6ce5345fcdc996f05ec04c3df7 | [
"MIT"
]
| null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
class ConcatDataset(_ConcatDataset):
"""
    Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
    method for querying the sizes of the images and can optionally sample
    uniformly across the underlying datasets (uniform_datasets=True)
"""
def __init__(self, datasets, uniform_datasets):
_ConcatDataset.__init__(self, datasets)
self.uniform_datasets = uniform_datasets
def get_idxs(self, idx):
if self.uniform_datasets:
dataset_idx = np.random.randint(len(self.cumulative_sizes))
if dataset_idx == 0:
low = 0
else:
low = self.cumulative_sizes[dataset_idx - 1]
sample_idx = np.random.randint(0, self.cumulative_sizes[dataset_idx] - low)
else:
dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
if dataset_idx == 0:
sample_idx = idx
else:
sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
return dataset_idx, sample_idx
def get_img_info(self, idx):
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx].get_img_info(sample_idx)
def __getitem__(self, idx):
if idx < 0:
if -idx > len(self):
raise ValueError("absolute value of index should not exceed dataset length")
idx = len(self) + idx
dataset_idx, sample_idx = self.get_idxs(idx)
return self.datasets[dataset_idx][sample_idx]
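# Editor's sketch (not part of the upstream file): a tiny smoke test showing
# how index resolution works; the TensorDataset contents are placeholders.
if __name__ == "__main__":
    import torch
    from torch.utils.data import TensorDataset
    ds_a = TensorDataset(torch.zeros(4))
    ds_b = TensorDataset(torch.ones(2))
    concat = ConcatDataset([ds_a, ds_b], uniform_datasets=False)
    # cumulative_sizes is [4, 6], so global index 5 maps to dataset 1, item 1
    print(concat.get_idxs(5))  # -> (1, 1)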
| 32.48 | 92 | 0.639163 | 1,446 | 0.890394 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.160099 |
8253b8de6bfcb3c4b2705d87c3cbd83db498bfb5 | 1,129 | py | Python | 153_find_minimum_in_rotated_sorted_array.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
]
| 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 153_find_minimum_in_rotated_sorted_array.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
]
| null | null | null | 153_find_minimum_in_rotated_sorted_array.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
]
| 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | # 153. Find Minimum in Rotated Sorted Array
#
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
#
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
#
# Find the minimum element.
#
# You may assume no duplicate exists in the array.
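#
# A worked trace (editor's illustration, not part of the original problem
# statement) of the binary search below on [4, 5, 6, 7, 0, 1, 2]:
#   l=0, r=6, m=3: nums[3]=7 >= nums[6]=2, minimum is to the right -> l=4
#   l=4, r=6, m=5: nums[5]=1 <  nums[6]=2, minimum is at or left of m -> r=5
#   l=4, r=5, m=4: nums[4]=0 <  nums[5]=1 -> r=4; l == r, answer nums[4] = 0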
class Solution(object):
# http://bookshadow.com/weblog/2014/10/16/leetcode-find-minimum-rotated-sorted-array/
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
l, r = 0, len(nums) - 1
while l < r:
m = (l + r) / 2
# if nums[m] <= nums[r]:
if nums[m] < nums[r]:
r = m
else:
l = m + 1
return nums[l]
# http://www.cnblogs.com/zuoyuan/p/4045742.html
def findMin(self, nums):
l, r = 0, len(nums) - 1
while l < r and nums[l] > nums[r]:
m = (l + r) / 2
if nums[m] < nums[r]:
r = m
else:
l = m + 1
return nums[l]
if __name__ == '__main__':
print Solution().findMin([4, 5, 6, 7, 0, 1, 2])
| 26.255814 | 96 | 0.495128 | 765 | 0.677591 | 0 | 0 | 0 | 0 | 0 | 0 | 502 | 0.444641 |
8254c27453fc702429a4cf3b2f9b5c4318d236f1 | 10,452 | py | Python | tests/bridge/test_bridge.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
]
| 99 | 2020-12-02T08:40:48.000Z | 2022-03-15T05:21:06.000Z | tests/bridge/test_bridge.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
]
| 115 | 2020-12-15T07:15:39.000Z | 2022-03-28T22:21:03.000Z | tests/bridge/test_bridge.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
]
| 56 | 2020-12-11T06:50:04.000Z | 2022-02-21T09:17:38.000Z | import pytest
from brownie import (
accounts,
interface,
MockVault,
BadgerBridgeAdapter,
CurveSwapStrategy,
CurveTokenWrapper,
)
from helpers.constants import AddressZero
from helpers.registry import registry
from config.badger_config import badger_config
from scripts.systems.badger_system import connect_badger
from scripts.systems.bridge_system import connect_bridge
from scripts.systems.swap_system import connect_swap
# Curve lp tokens
RENBTC = "0x49849C98ae39Fff122806C06791Fa73784FB3675"
TBTC = "0x64eda51d3Ad40D56b9dFc5554E06F94e1Dd786Fd"
SBTC = "0x075b1bb99792c9E1041bA13afEf80C91a1e70fB3"
# Bridge mock vaults for testing.
# Each entry maps (inToken, outToken) to a vault: id, symbol, underlying token, sett address, and an upgrade flag.
BRIDGE_VAULTS = [
# TODO: When bridge adapter addr is approved, can test
# directly against badger sett contracts.
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.renCrv",
"symbol": "bcrvrenBTC",
"token": RENBTC,
"address": "0x6dEf55d2e18486B9dDfaA075bc4e4EE0B28c1545",
"upgrade": True,
},
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.tbtcCrv",
"symbol": "bcrvtBTC",
"token": TBTC,
"address": "0xb9D076fDe463dbc9f915E5392F807315Bf940334",
"upgrade": True,
},
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.sbtcCrv",
"symbol": "bcrvsBTC",
"token": SBTC,
"address": "0xd04c48A53c111300aD41190D63681ed3dAd998eC",
"upgrade": True,
},
{
"inToken": registry.tokens.wbtc,
"outToken": registry.tokens.wbtc,
"id": "yearn.wbtc",
"symbol": "byvwBTC",
"token": registry.tokens.wbtc,
"address": "0x4b92d19c11435614cd49af1b589001b7c08cd4d5",
"upgrade": False,
},
]
# Tests mint/burn to/from crv sett.
# We create a mock vault for each pool token.
@pytest.mark.parametrize(
"vault",
BRIDGE_VAULTS,
)
def test_bridge_vault(vault):
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
swap = connect_swap(badger_config.prod_json)
bridge.add_existing_swap(swap)
_deploy_bridge_mocks(badger, bridge)
slippage = 0.03
amount = 1 * 10 ** 8
v = vault["address"]
# TODO: Can interleave these mints/burns.
for accIdx in range(10, 12):
account = accounts[accIdx]
for i in range(0, 2):
balanceBefore = interface.IERC20(v).balanceOf(account)
bridge.adapter.mint(
vault["inToken"],
slippage * 10 ** 4,
account.address,
v,
amount,
# Darknode args hash/sig optional since gateway is mocked.
"",
"",
{"from": account},
)
balance = interface.IERC20(v).balanceOf(account)
assert balance > balanceBefore
interface.IERC20(v).approve(
bridge.adapter.address,
balance,
{"from": account},
)
# Approve mock gateway for transfer of underlying token for "mock" burns.
# NB: In the real world, burns don't require approvals as it's just
            # an internal update to the user's token balance.
interface.IERC20(registry.tokens.renbtc).approve(
bridge.mocks.BTC.gateway, balance, {"from": bridge.adapter}
)
bridge.adapter.burn(
vault["outToken"],
v,
slippage * 10 ** 4,
account.address,
balance,
{"from": account},
)
assert interface.IERC20(v).balanceOf(account) == 0
# Tests swap router failures and wbtc mint/burn.
def test_bridge_basic_swap_fail():
renbtc = registry.tokens.renbtc
wbtc = registry.tokens.wbtc
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
swap = connect_swap(badger_config.prod_json)
bridge.add_existing_swap(swap)
_upgrade_bridge(badger, bridge)
_deploy_bridge_mocks(badger, bridge)
    # NB: If true, fails during router optimizeSwap() call, otherwise the underlying strategy fails.
for router_fail in [True, False]:
_deploy_swap_mocks(badger, bridge, swap, router_fail=router_fail)
# .1% slippage
slippage = 0.001
amount = 1 * 10 ** 8
for accIdx in range(10, 12):
account = accounts[accIdx]
for i in range(0, 2):
balanceBefore = interface.IERC20(renbtc).balanceOf(account)
# Test mints
bridge.adapter.mint(
wbtc,
slippage * 10 ** 4,
account.address,
AddressZero, # No vault.
amount,
# Darknode args hash/sig optional since gateway is mocked.
"",
"",
{"from": account},
)
assert interface.IERC20(renbtc).balanceOf(account) > balanceBefore
# NB: User should not receive any wbtc but rather renbtc as part
# of the fallback mechanism.
assert interface.IERC20(wbtc).balanceOf(account) == 0
# Tests swap router and wbtc mint/burn.
def test_bridge_basic():
renbtc = registry.tokens.renbtc
wbtc = registry.tokens.wbtc
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
swap = connect_swap(badger_config.prod_json)
bridge.add_existing_swap(swap)
_deploy_bridge_mocks(badger, bridge)
router = swap.router
# 3% slippage
slippage = 0.03
amount = 1 * 10 ** 8
# Test estimating slippage from a random account for wbtc <-> renbtc swaps.
_assert_swap_slippage(
router,
renbtc,
wbtc,
amount,
slippage,
)
_assert_swap_slippage(
router,
wbtc,
renbtc,
amount,
slippage,
)
for accIdx in range(10, 12):
account = accounts[accIdx]
for i in range(0, 2):
balanceBefore = interface.IERC20(wbtc).balanceOf(account)
# Test mints
bridge.adapter.mint(
wbtc,
slippage * 10 ** 4,
account.address,
AddressZero, # No vault.
amount,
# Darknode args hash/sig optional since gateway is mocked.
"",
"",
{"from": account},
)
assert interface.IERC20(wbtc).balanceOf(account) > balanceBefore
# Test burns
balance = interface.IERC20(wbtc).balanceOf(account)
interface.IERC20(wbtc).approve(bridge.adapter, balance, {"from": account})
# Approve mock gateway for transfer of underlying token for "mock" burns.
# NB: In the real world, burns don't require approvals as it's
            # just an internal update to the user's token balance.
interface.IERC20(renbtc).approve(
bridge.mocks.BTC.gateway,
balance,
{"from": bridge.adapter},
)
bridge.adapter.burn(
wbtc,
AddressZero, # No vault.
slippage * 10 ** 4,
account.address,
balance,
{"from": account},
)
assert interface.IERC20(wbtc).balanceOf(account) == 0
def test_bridge_sweep():
renbtc = registry.tokens.renbtc
wbtc = registry.tokens.wbtc
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
# Send both renbtc and wbtc to bridge adapter and test sweep.
for (whale, token) in [
(registry.whales.renbtc.whale, interface.IERC20(renbtc)),
(registry.whales.wbtc.whale, interface.IERC20(wbtc)),
]:
token.transfer(
bridge.adapter,
token.balanceOf(whale),
{"from": whale},
)
# Can be called from any account, should always send to governance.
beforeBalance = token.balanceOf(badger.devMultisig)
bridge.adapter.sweep({"from": badger.devMultisig})
assert token.balanceOf(badger.devMultisig) > beforeBalance
def _assert_swap_slippage(router, fromToken, toToken, amountIn, slippage):
# Should be accessible from a random account.
account = accounts[8]
(strategyAddr, amountOut) = router.optimizeSwap.call(
fromToken,
toToken,
amountIn,
{"from": account},
)
assert (1 - (amountOut / amountIn)) < slippage
strategy = interface.ISwapStrategy(strategyAddr)
# Redundant slippage check, but just to be sure.
amountOut = strategy.estimateSwapAmount.call(
fromToken,
toToken,
amountIn,
{"from": account},
)
assert (1 - (amountOut / amountIn)) < slippage
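    # Concretely, with amountIn = 1 * 10 ** 8 and slippage = 0.03 (the values
    # used by test_bridge_basic above), both asserts require
    # amountOut > 0.97 * 10 ** 8, i.e. the quoted swap may lose at most 3%.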
def _deploy_bridge_mocks(badger, bridge):
# NB: Deploy/use mock gateway
bridge.deploy_mocks()
bridge.adapter.setRegistry(
bridge.mocks.registry,
{"from": badger.devMultisig},
)
def _deploy_swap_mocks(badger, bridge, swap, router_fail=False):
swap.deploy_mocks(router_fail=router_fail)
bridge.adapter.setRouter(swap.mocks.router, {"from": badger.devMultisig})
def _upgrade_swap(badger, swap):
badger.deploy_logic("CurveSwapStrategy", CurveSwapStrategy)
logic = badger.logic["CurveSwapStrategy"]
badger.devProxyAdmin.upgrade(
swap.strategies.curve,
logic,
{"from": badger.governanceTimelock},
)
def _upgrade_bridge(badger, bridge):
badger.deploy_logic("BadgerBridgeAdapter", BadgerBridgeAdapter)
logic = badger.logic["BadgerBridgeAdapter"]
badger.devProxyAdmin.upgrade(
bridge.adapter,
logic,
{"from": badger.governanceTimelock},
)
badger.deploy_logic("CurveTokenWrapper", CurveTokenWrapper)
logic = badger.logic["CurveTokenWrapper"]
bridge.adapter.setCurveTokenWrapper(logic, {"from": badger.devMultisig})
| 32.560748 | 99 | 0.604191 | 0 | 0 | 0 | 0 | 1,890 | 0.180827 | 0 | 0 | 2,491 | 0.238328 |
8254e7450b3c4e0f6d891fdfe8c1ab7c064377f8 | 1,423 | py | Python | babylon_server/babylon/config.py | ajponte/babylon | e743f5b3bb5b2eb864247414c4f51962eea9108e | [
"MIT"
]
| null | null | null | babylon_server/babylon/config.py | ajponte/babylon | e743f5b3bb5b2eb864247414c4f51962eea9108e | [
"MIT"
]
| 2 | 2021-11-08T18:09:22.000Z | 2021-11-09T19:22:33.000Z | babylon_server/babylon/config.py | ajponte/babylon | e743f5b3bb5b2eb864247414c4f51962eea9108e | [
"MIT"
]
| null | null | null | import os
class Config:
# Statement for enabling the development environment
DEBUG = True
# Define the application directory
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Logging config.
LOG_DIR = "logs"
LOG_TYPE = ["LOG_TYPE", "watched"]
LOG_LEVEL = "DEBUG"
APP_LOG_NAME = "babylon_server.log"
# WWW_LOG_NAME is for log rotation, which is currently not set up.
# Log files sit in the `logs` directory.
WWW_LOG_NAME = "babylon_server.log"
LOG_MAX_BYTES = 100_000_000 # 100MB in bytes
LOG_COPIES = 5
# All the MySql options are under the assumption that the only database at this time is the
# `activity` database.
MYSQL_DATABASE_HOST = "localhost"
MYSQL_DATABASE_NAME = "activity"
MYSQL_DATABASE_PORT = "3308"
MYSQL_DATABASE_USER = "application"
MYSQL_DATABASE_PWD = "application123"
MYSQL_UNIX_SOCKET = "/var/run/mysqld/mysqld.sock"
    SQLALCHEMY_DATABASE_URI = f'mysql+pymysql://{MYSQL_DATABASE_USER}:{MYSQL_DATABASE_PWD}@{MYSQL_DATABASE_HOST}:{MYSQL_DATABASE_PORT}/{MYSQL_DATABASE_NAME}?unix_socket={MYSQL_UNIX_SOCKET}'  # noqa
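    # With the defaults above this renders roughly as (illustration only):
    #   mysql+pymysql://application:application123@localhost:3308/activity?unix_socket=/var/run/mysqld/mysqld.sock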
# Pool recycle is recommended for MySQL.
# See https://docs.sqlalchemy.org/en/14/core/pooling.html#setting-pool-recycle
SQLALCHEMY_POOL_RECYCLE = 3600
SQLALCHEMY_BINDS = {
'db2': 'mysql://user:pass@localhost/activity',
'db3': 'mysql://user:pass@localhost/user'
}
| 37.447368 | 185 | 0.709065 | 1,410 | 0.990864 | 0 | 0 | 0 | 0 | 0 | 0 | 848 | 0.595924 |
82559085472d1981739859824315a98440b83c6f | 131 | py | Python | etherscan_py/__init__.py | saltduck/etherscan_py | 1a4ac48733d832d6dc4c8f74fafd7af4c3ce675e | [
"MIT"
]
| 6 | 2021-02-20T10:32:36.000Z | 2022-02-10T17:00:00.000Z | etherscan_py/__init__.py | saltduck/etherscan_py | 1a4ac48733d832d6dc4c8f74fafd7af4c3ce675e | [
"MIT"
]
| 2 | 2020-11-19T04:39:25.000Z | 2021-03-05T12:40:21.000Z | etherscan_py/__init__.py | saltduck/etherscan_py | 1a4ac48733d832d6dc4c8f74fafd7af4c3ce675e | [
"MIT"
]
| 3 | 2021-03-03T18:37:26.000Z | 2021-04-04T14:14:05.000Z | """Top-level package for etherscan-py."""
__author__ = """Julian Koh"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| 21.833333 | 41 | 0.687023 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.664122 |
8258e9ef419949e0cfc0082d25711b7eeaaea221 | 427 | py | Python | realtime/realtime.py | mikerah13/python_samples | c4cd8af3cee99a5199dd2231f182240c35984b97 | [
"MIT"
]
| null | null | null | realtime/realtime.py | mikerah13/python_samples | c4cd8af3cee99a5199dd2231f182240c35984b97 | [
"MIT"
]
| null | null | null | realtime/realtime.py | mikerah13/python_samples | c4cd8af3cee99a5199dd2231f182240c35984b97 | [
"MIT"
]
| null | null | null | from subprocess import Popen, PIPE
def run_command(command):
process = subprocess.Popen(shlex.split(command), stdout=subprocess.PIPE)
while True:
output = process.stdout.readline()
if output == '' and process.poll() is not None:
break
if output:
print output.strip()
rc = process.poll()
return rc
if __name__ == "__main__":
run_command("ping google.com")
| 23.722222 | 76 | 0.627635 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.067916 |
825a7d574135cde50db9d1e2e4cce7b2af3b42c9 | 923 | py | Python | resources/model/agenda.py | diegohideky/climatempoworkshop | edb50eec386d6db5ede9b28192520922ed85c55e | [
"MIT"
]
| null | null | null | resources/model/agenda.py | diegohideky/climatempoworkshop | edb50eec386d6db5ede9b28192520922ed85c55e | [
"MIT"
]
| null | null | null | resources/model/agenda.py | diegohideky/climatempoworkshop | edb50eec386d6db5ede9b28192520922ed85c55e | [
"MIT"
]
| null | null | null | from db_connection import db
class Agenda(db.Model):
__tablename__ = "agendas"
id = db.Column(db.Integer, primary_key=True)
date = db.Column(db.Date)
work_start = db.Column(db.Time)
work_end = db.Column(db.Time)
rest_start = db.Column(db.Time)
rest_end = db.Column(db.Time)
user_id = db.Column(db.Integer, db.ForeignKey('usuarios.id'))
user = db.relationship('User')
def __init__(self, date, work_start, work_end, rest_start, rest_end, user_id):
self.date = date
self.work_start = work_start
self.work_end = work_end
self.rest_start = rest_start
self.rest_end = rest_end
self.user_id = user_id
def update(self, date, work_start, work_end, rest_start, rest_end):
self.date = date
self.work_start = work_start
self.work_end = work_end
self.rest_start = rest_start
self.rest_end = rest_end | 31.827586 | 82 | 0.658722 | 892 | 0.966414 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.030336 |
825b17f2327978290f8d614819c14bd2efe19e58 | 661 | py | Python | data/models.py | sarfarazstark/To-Do-Bot | c2d032fa69e42b651d1c574c276161eceb141981 | [
"Apache-2.0"
]
| 4 | 2020-11-21T14:49:00.000Z | 2022-02-21T11:24:17.000Z | data/models.py | sarfarazstark/To-Do-Bot | c2d032fa69e42b651d1c574c276161eceb141981 | [
"Apache-2.0"
]
| null | null | null | data/models.py | sarfarazstark/To-Do-Bot | c2d032fa69e42b651d1c574c276161eceb141981 | [
"Apache-2.0"
]
| null | null | null | """Database models"""
from sqlalchemy import orm
import sqlalchemy
from .db_session import SqlAlchemyBase
# Task database model
class Task(SqlAlchemyBase):
__tablename__ = 'tasks'
id = sqlalchemy.Column(
sqlalchemy.Integer, primary_key=True, autoincrement=True
)
user_id = sqlalchemy.Column(sqlalchemy.Integer)
title = sqlalchemy.Column(sqlalchemy.String)
days_of_the_week = sqlalchemy.Column(sqlalchemy.String)
# User database model
class User(SqlAlchemyBase):
__tablename__ = 'users'
telegram_id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
language_id = sqlalchemy.Column(sqlalchemy.Integer)
| 23.607143 | 73 | 0.753404 | 504 | 0.762481 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.11649 |
825b9506a0a8cc2c13904600639147b936af53d7 | 470 | py | Python | graduated_site/migrations/0029_auto_20191218_2109.py | vbacaksiz/KTU-MEBSIS | e1afaa07a16e00ff9be3f39b728603b64f08590e | [
"MIT"
]
| null | null | null | graduated_site/migrations/0029_auto_20191218_2109.py | vbacaksiz/KTU-MEBSIS | e1afaa07a16e00ff9be3f39b728603b64f08590e | [
"MIT"
]
| null | null | null | graduated_site/migrations/0029_auto_20191218_2109.py | vbacaksiz/KTU-MEBSIS | e1afaa07a16e00ff9be3f39b728603b64f08590e | [
"MIT"
]
| null | null | null | # Generated by Django 3.0 on 2019-12-18 21:09
import ckeditor.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('graduated_site', '0028_auto_20191218_2028'),
]
operations = [
migrations.AlterField(
model_name='user_internship_post',
name='content',
field=ckeditor.fields.RichTextField(max_length=2000, null=True, verbose_name='İçerik'),
),
]
| 23.5 | 99 | 0.651064 | 366 | 0.775424 | 0 | 0 | 0 | 0 | 0 | 0 | 127 | 0.269068 |
825c203e1359d9feaff1e4d74ceac39a9987f062 | 4,945 | py | Python | tests/test_obj.py | runapp/M2Crypto | bc3b54758fe73dce86304663084b40fa5d6973c0 | [
"MIT"
]
| 58 | 2015-04-20T01:17:37.000Z | 2022-03-31T10:55:13.000Z | tests/test_obj.py | runapp/M2Crypto | bc3b54758fe73dce86304663084b40fa5d6973c0 | [
"MIT"
]
| 7 | 2015-07-08T21:59:37.000Z | 2021-04-18T12:27:41.000Z | tests/test_obj.py | runapp/M2Crypto | bc3b54758fe73dce86304663084b40fa5d6973c0 | [
"MIT"
]
| 29 | 2015-02-23T17:46:31.000Z | 2022-03-15T09:57:46.000Z | #!/usr/bin/env python
"""Unit tests for M2Crypto.m2 obj_* functions.
"""
from M2Crypto import ASN1, BIO, Rand, X509, m2, six
from tests import unittest
"""
These functions must be cleaned up and moved to some python module
Taken from CA management code
"""
def x509_name2list(name):
for i in range(0, name.entry_count()):
yield X509.X509_Name_Entry(m2.x509_name_get_entry(name._ptr(), i),
_pyfree=0)
def x509_name_entry2tuple(entry):
bio = BIO.MemoryBuffer()
m2.asn1_string_print(bio._ptr(), m2.x509_name_entry_get_data(entry._ptr()))
return (
six.ensure_text(m2.obj_obj2txt(
m2.x509_name_entry_get_object(entry._ptr()), 0)),
six.ensure_text(bio.getvalue()))
def tuple2x509_name_entry(tup):
obj, data = tup
# TODO This is evil, isn't it? Shouldn't we use only official API?
# Something like X509.X509_Name.add_entry_by_txt()
_x509_ne = m2.x509_name_entry_create_by_txt(None, six.ensure_str(obj),
ASN1.MBSTRING_ASC,
six.ensure_str(data), len(data))
if not _x509_ne:
        raise ValueError("Invalid object identifier: %s" % obj)
return X509.X509_Name_Entry(_x509_ne, _pyfree=1) # Prevent memory leaks
class ObjectsTestCase(unittest.TestCase):
def callback(self, *args):
pass
def test_obj2txt(self):
self.assertEqual(m2.obj_obj2txt(m2.obj_txt2obj("commonName", 0), 1),
b"2.5.4.3", b"2.5.4.3")
self.assertEqual(m2.obj_obj2txt(m2.obj_txt2obj("commonName", 0), 0),
b"commonName", b"commonName")
def test_nid(self):
self.assertEqual(m2.obj_ln2nid("commonName"),
m2.obj_txt2nid("2.5.4.3"),
"ln2nid and txt2nid mismatch")
self.assertEqual(m2.obj_ln2nid("CN"),
0, "ln2nid on sn")
self.assertEqual(m2.obj_sn2nid("CN"),
m2.obj_ln2nid("commonName"),
"ln2nid and sn2nid mismatch")
self.assertEqual(m2.obj_sn2nid("CN"),
m2.obj_obj2nid(m2.obj_txt2obj("CN", 0)), "obj2nid")
self.assertEqual(m2.obj_txt2nid("__unknown"),
0, "__unknown")
def test_tuple2tuple(self):
tup = ("CN", "someCommonName")
tup1 = x509_name_entry2tuple(tuple2x509_name_entry(tup))
# tup1[0] is 'commonName', not 'CN'
self.assertEqual(tup1[1], tup[1], tup1)
self.assertEqual(x509_name_entry2tuple(tuple2x509_name_entry(tup1)),
tup1, tup1)
def test_unknown(self):
with self.assertRaises(ValueError):
tuple2x509_name_entry(("__unknown", "_"))
def test_x509_name(self):
n = X509.X509_Name()
# It seems this actually needs to be a real 2 letter country code
n.C = b'US'
n.SP = b'State or Province'
n.L = b'locality name'
        n.O = b'organization name'
n.OU = b'org unit'
n.CN = b'common name'
n.Email = b'[email protected]'
n.serialNumber = b'1234'
n.SN = b'surname'
n.GN = b'given name'
n.givenName = b'name given'
self.assertEqual(len(n), 11, len(n))
# Thierry: this call to list seems extraneous...
tl = [x509_name_entry2tuple(x) for x in x509_name2list(n)]
self.assertEqual(len(tl), len(n), len(tl))
x509_n = m2.x509_name_new()
for o in [tuple2x509_name_entry(x) for x in tl]:
m2.x509_name_add_entry(x509_n, o._ptr(), -1, 0)
o._pyfree = 0 # Take care of underlying object
n1 = X509.X509_Name(x509_n)
self.assertEqual(n.as_text(), n1.as_text(), n1.as_text())
# Detailed OpenSSL error message is visible in Python error message:
def test_detailed_error_message(self):
from M2Crypto import SMIME, X509
s = SMIME.SMIME()
x509 = X509.load_cert('tests/recipient.pem')
sk = X509.X509_Stack()
sk.push(x509)
s.set_x509_stack(sk)
st = X509.X509_Store()
st.load_info('tests/recipient.pem')
s.set_x509_store(st)
p7, data = SMIME.smime_load_pkcs7('tests/sample-p7.pem')
self.assertIsInstance(p7, SMIME.PKCS7, p7)
try:
s.verify(p7, data)
except SMIME.PKCS7_Error as e:
six.assertRegex(self, str(e),
"unable to get local issuer certificate",
"Not received expected error message")
def suite():
t_suite = unittest.TestSuite()
t_suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ObjectsTestCase))
return t_suite
if __name__ == '__main__':
Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
| 35.070922 | 81 | 0.591304 | 3,329 | 0.673205 | 189 | 0.03822 | 0 | 0 | 0 | 0 | 1,172 | 0.237007 |
825ed7b070e5aaac9e764b86a1c9c4bdbe9ea988 | 4,656 | py | Python | new_scraper.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
]
| null | null | null | new_scraper.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
]
| null | null | null | new_scraper.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
]
| null | null | null | #!/bin/python
# -*- coding: utf-8 -*-
# Droplet Name: ubuntu-512mb-sfo2-01
# IP Address: 138.68.252.152
# Username: root
# Password: fbe29a96430704766b5054c4d9
# New Password: Rowing525
# https://medium.com/@hoppy/how-to-test-or-scrape-javascript-rendered-websites-with-python-selenium-a-beginner-step-by-c137892216aa
from time import sleep
from random import randint
from selenium import webdriver
from pyvirtualdisplay import Display
class RealTassaSpider():
def __init__(self):
self.url_to_crawl = "https://app.realtaasa.com/homes"
self.url_login = "http://app.realtaasa.com/signin"
self.all_items = []
# Open headless chromedriver
def start_driver(self):
print 'starting driver...'
self.display = Display(visible=0, size=(800, 600))
self.display.start()
self.driver = webdriver.Chrome()
sleep(4)
# Close chromedriver
def close_driver(self):
print 'closing driver...'
self.display.stop()
self.driver.quit()
print 'closed!'
# Tell the browser to get a page
def get_page(self, url):
print 'getting page...'
self.driver.get(url)
sleep(randint(2,3))
# <button type="submit" class="input mbs button--primary">Continue</button>
# Getting past login
def login(self):
print 'getting pass the gate page...'
try:
form = self.driver.find_element_by_xpath('//*[@id="signInForm"]')
form.find_element_by_xpath('.//*[@id="email"]').send_keys('[email protected]')
form.find_element_by_xpath('.//*[@id="password"]').send_keys('Rowing525')
form.find_element_by_xpath('.//*[@class="input.mbs.button--primary"]').click()
sleep(randint(3,5))
except Exception:
pass
    def get_login_then_homes(self, url):
        print 'logging in...'
        self.driver.get(url)
print 'getting pass the gate page...'
try:
form = self.driver.find_element_by_xpath('//*[@id="signInForm"]')
form.find_element_by_xpath('.//*[@id="email"]').send_keys('[email protected]')
form.find_element_by_xpath('.//*[@id="password"]').send_keys('Rowing525')
form.find_element_by_xpath('.//*[@class="input.mbs.button--primary"]').click()
sleep(randint(3,5))
except Exception:
pass
home_button = self.driver.find_element_by_xpath('//*[@id="nav-homes"]')
home_button.click()
# <div class="desk--ten-twelfths push--desk--one-twelfth">
    # url for mlax
    # address 1
    # address 2
    # Neighborhood 1
    # Building type
    # BedBath
    # Price
    # Coowners
    # Monthly cost
    # Tax savings
    # Down payment
    # Description
    # div#content --> main content area for all content
    # div.grid__item one-whole > span.grid__item > a#more-photos
    # div.one-whole > div.one-whole > div.prop-info > div.grid_item
    #     h1.alpha --> address
    #     div.beta --> address
    #     div.beta --> neighborhood
    # div.grid__item.desk--one-third.lap--one-third.one-whole.pln --> select all
    # div.delta.mbn.tc-cove.fw-500 --> select all
    # div.delta.mbs --> select all
    # div.grid__item.one-whole.pln --> select all
def grab_a_tags(self):
print 'grabbing list of items...'
for a in self.driver.find_elements_by_xpath('//*[@class="desk--ten-twelfths push--desk--one-twelfth"]//a'):
data = self.process_elements(a)
if data:
self.all_items.append(data)
else:
pass
def process_elements(self, a):
url = ''
address_1 = ''
address_2 = ''
prd_price = ''
neighborhood = ''
building_type = ''
bedbath =''
price = ''
coowners = ''
monthly_cost = ''
tax_savings = ''
down_payment = ''
description = ''
try:
url = a.find_element_by_xpath('.//*[@id="more-photos"]').get_attribute('href')
address_1 = a.find_element_by_xpath('.//*[@class="alpha mbn fw-500"]').text
            address_2 = a.find_element_by_xpath('.//*[@class="beta fw-300"]').text
prd_price = a.find_element_by_xpath('.//*[@class="price ng-scope ng-binding"]').text
except Exception:
pass
        if url and address_1 and prd_price:
            single_item_info = {
                'url': url.encode('UTF-8'),
                'address_1': address_1.encode('UTF-8'),
                'address_2': address_2.encode('UTF-8'),
                'price': prd_price.encode('UTF-8')
            }
return single_item_info
else:
return False
def parse(self):
self.start_driver()
self.get_page(self.url_login)
self.login()
self.grab_a_tags()
self.close_driver()
if self.all_items:
return self.all_items
else:
return False
# Run spider
RealTassa = RealTassaSpider()
items_list = RealTassa.parse()
# Do something with the data touched
if items_list:
    for item in items_list:
        print item
| 27.550296 | 131 | 0.649055 | 4,064 | 0.872852 | 0 | 0 | 0 | 0 | 0 | 0 | 1,447 | 0.310782 |
825ff6c34b7f590f5f9226ffd0a964d853a9a998 | 532 | py | Python | gdsfactory/simulation/gmeep/__init__.py | gdsfactory/gdsfactory | ee761ae0b4429fbec7035bbea5d1e5206c66bea7 | [
"MIT"
]
| 42 | 2020-05-25T09:33:45.000Z | 2022-03-29T03:41:19.000Z | gdsfactory/simulation/gmeep/__init__.py | gdsfactory/gdsfactory | ee761ae0b4429fbec7035bbea5d1e5206c66bea7 | [
"MIT"
]
| 133 | 2020-05-28T18:29:04.000Z | 2022-03-31T22:21:42.000Z | gdsfactory/simulation/gmeep/__init__.py | gdsfactory/gdsfactory | ee761ae0b4429fbec7035bbea5d1e5206c66bea7 | [
"MIT"
]
| 17 | 2020-06-30T07:07:50.000Z | 2022-03-17T15:45:27.000Z | from gdsfactory.simulation.gmeep.add_monitors import add_monitors
from gdsfactory.simulation.gmeep.get_simulation import get_simulation
from gdsfactory.simulation.gmeep.get_transmission_2ports import (
get_transmission_2ports,
plot2D,
plot3D,
)
from gdsfactory.simulation.gmeep.plot_xsection import plot_xsection
__all__ = [
"add_monitors",
"get_simulation",
"get_sparameters1x2",
"get_transmission_2ports",
"plot2D",
"plot3D",
"plot_xsection",
"plot_eigenmode",
]
__version__ = "0.0.2"
| 25.333333 | 69 | 0.757519 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.242481 |
82602d4942f676f159704b220ea884a45a9e0b4a | 11,212 | py | Python | coreos-ostree-importer/coreos_ostree_importer.py | dustymabe/fedora-coreos-releng-automation | 654a3505f3cc0795fa192c7503858e6fc95a9093 | [
"MIT"
]
| null | null | null | coreos-ostree-importer/coreos_ostree_importer.py | dustymabe/fedora-coreos-releng-automation | 654a3505f3cc0795fa192c7503858e6fc95a9093 | [
"MIT"
]
| null | null | null | coreos-ostree-importer/coreos_ostree_importer.py | dustymabe/fedora-coreos-releng-automation | 654a3505f3cc0795fa192c7503858e6fc95a9093 | [
"MIT"
]
| null | null | null | #!/usr/bin/python3
import boto3
import botocore
import fedora_messaging
import fedora_messaging.api
import hashlib
import json
import logging
import os
import subprocess
import sys
import tarfile
import tempfile
import traceback
# Set local logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
FEDORA_MESSAGING_TOPIC_LISTEN = (
"org.fedoraproject.prod.coreos.build.request.ostree-import"
)
FEDORA_MESSAGING_TOPIC_RESPOND = FEDORA_MESSAGING_TOPIC_LISTEN + ".finished"
# We are processing the org.fedoraproject.prod.coreos.build.request.ostree-import topic
# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.ostree-import&delta=100000
# The schema was originally designed in:
# https://github.com/coreos/fedora-coreos-tracker/issues/198#issuecomment-513944390
EXAMPLE_MESSAGE_BODY = json.loads("""
{
"build_id": "30.20190905.0",
"stream": "testing",
"basearch": "x86_64",
"commit": "s3://fcos-builds/prod/streams/testing/builds/30.20190905.0/x86_64/ostree-commit.tar",
"checksum": "sha256:d01db6939e7387afa2492ac8e2591c53697fc21cf16785585f7f1ac0de692863",
"ostree_ref": "fedora/x86_64/coreos/testing",
"ostree_checksum": "b4beca154dab3696fd04f32ddab818102caa9247ec3192403adb9aaecc991bd9",
"target_repo": "prod"
}
"""
)
KNOWN_OSTREE_REPOS = {
"prod": "/mnt/koji/ostree/repo",
"compose": "/mnt/koji/compose/ostree/repo",
}
# Listen for ostree-import requests from fedora-messaging and import the
# referenced OSTree commit into the requested target repo (prod or compose)
class Consumer(object):
def __init__(self):
# Check the possible repos to make sure they exist
for path in KNOWN_OSTREE_REPOS.values():
if not ostree_repo_exists(path):
raise Exception(f"OSTree repo does not exist at {path}")
logger.info(
"Processing messages with topic: %s" % FEDORA_MESSAGING_TOPIC_LISTEN
)
def __call__(self, message: fedora_messaging.api.Message):
# Catch any exceptions and don't raise them further because
# it will cause /usr/bin/fedora-messaging to crash and we'll
# lose the traceback logs from the container
try:
self.process(message)
logger.info("Sending SUCCESS message")
send_message(msg=message.body, status="SUCCESS")
except Exception as e:
logger.error("Caught Exception!")
logger.error("###################################")
traceback.print_exc()
logger.error("###################################")
logger.error("Replying with a FAILURE message...")
send_message(msg=message.body, status="FAILURE")
logger.error("\t continuing...")
pass
def process(self, message: fedora_messaging.api.Message):
logger.debug(message.topic)
logger.debug(message.body)
# Grab the raw message body and parse out pieces
msg = message.body
basearch = msg["basearch"]
build_id = msg["build_id"]
checksum = msg["checksum"]
commit_url = msg["commit"]
ostree_checksum = msg["ostree_checksum"]
ostree_ref = msg["ostree_ref"]
stream = msg["stream"]
target_repo = msg["target_repo"]
# Qualify arguments
if not checksum.startswith("sha256:"):
raise Exception("checksum value must start with sha256:")
if target_repo not in KNOWN_OSTREE_REPOS.keys():
raise Exception(f"Provided target repo is unknown: {target_repo}")
sha256sum = checksum[7:]
target_repo_path = KNOWN_OSTREE_REPOS[target_repo]
source_repo_path = None
# Detect if the commit already exists in the target repo
# NOTE: We assume here that an import won't be requested twice for
# the same commit (i.e. someone adds detached metadata and
# then does a second import request).
if ostree_commit_exists(target_repo_path, ostree_checksum):
logger.info(
f"Commit {ostree_checksum} already exists in the target repo. "
"Skipping import"
)
return
# Import the OSTree commit to the specified repo. We'll use
# a temporary directory to untar the repo into.
with tempfile.TemporaryDirectory() as tmpdir:
# If the target repo is the prod repo the commit could
# already have been imported into the compose repo. If it
# is already in the compose repo then let's just pull-local
# from there to save downloading all from the net again.
if target_repo == "prod" and ostree_commit_exists(
repo=KNOWN_OSTREE_REPOS["compose"], commit=ostree_checksum
):
logger.info("Commit exists in compose repo. Importing from there")
source_repo_path = KNOWN_OSTREE_REPOS["compose"]
else:
# Grab the file from s3 and then pull local
untar_file_from_s3(url=commit_url, tmpdir=tmpdir, sha256sum=sha256sum)
source_repo_path = tmpdir
# one more sanity check: make sure buildid == version
assert_commit_has_version(
repo=source_repo_path, commit=ostree_checksum, version=build_id
)
# Import the commit into the target repo
ostree_pull_local(
commit=ostree_checksum,
dstrepo=target_repo_path,
srcrepo=source_repo_path,
branch=ostree_ref,
)
def runcmd(cmd: list, **kwargs: int) -> subprocess.CompletedProcess:
try:
# default args to pass to subprocess.run
pargs = {"check": True, "capture_output": True}
logger.debug(f"Running command: {cmd}")
pargs.update(kwargs)
cp = subprocess.run(cmd, **pargs)
except subprocess.CalledProcessError as e:
logger.error("Command returned bad exitcode")
logger.error(f"COMMAND: {cmd}")
logger.error(f" STDOUT: {e.stdout.decode()}")
logger.error(f" STDERR: {e.stderr.decode()}")
raise e
return cp # subprocess.CompletedProcess
def send_message(msg: dict, status: str):
# Send back a message with all the original message body
# along with an additional `status:` header with either
# `SUCCESS` or `FAILURE`.
fedora_messaging.api.publish(
fedora_messaging.message.Message(
topic=FEDORA_MESSAGING_TOPIC_RESPOND, body={"status": status, **msg}
)
)
# https://stackoverflow.com/a/55542529
def get_sha256sum(filepath: str) -> str:
h = hashlib.sha256()
with open(filepath, "rb") as file:
while True:
# Reading is buffered, so we can read smaller chunks.
chunk = file.read(h.block_size)
if not chunk:
break
h.update(chunk)
return h.hexdigest()
def parse_s3_url(url: str) -> tuple:
if not url.startswith("s3://"):
raise Exception(f"Unable to parse the s3 url: {url}")
# Chop off s3:// and break into bucket / key
bucket, key = url[5:].split("/", 1)
return (bucket, key)
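# For example (editor's note), the commit URL from EXAMPLE_MESSAGE_BODY above,
# "s3://fcos-builds/prod/streams/testing/builds/30.20190905.0/x86_64/ostree-commit.tar",
# parses to bucket "fcos-builds" and key
# "prod/streams/testing/builds/30.20190905.0/x86_64/ostree-commit.tar".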
def untar_file_from_s3(url: str, tmpdir: str, sha256sum: str):
filename = "ostree.tar"
filepath = os.path.join(tmpdir, filename)
# Grab file from s3
logger.info(f"Downloading object from s3: {url}")
s3 = boto3.client("s3")
bucket, key = parse_s3_url(url)
s3.download_file(bucket, key, filepath)
# Verify file has correct checksum
calcuatedsum = get_sha256sum(filepath)
if sha256sum != calcuatedsum:
raise Exception("Checksums do not match: " f"{sha256sum} != {calcuatedsum}")
# Untar the file into the temporary directory
with tarfile.open(filepath) as tar:
tar.extractall(path=tmpdir)
def ostree_pull_local(srcrepo: str, dstrepo: str, branch: str, commit: str):
# verify the parent commit of the new commit is in the destination repo
# and also that the current branch in the repo points to it
branch_exists = ostree_branch_exists(repo=dstrepo, branch=branch)
parent = ostree_get_parent_commit(repo=srcrepo, commit=commit)
if branch_exists:
assert_branch_points_to_commit(repo=dstrepo, branch=branch, commit=parent)
# pull content
logger.info("Running ostree pull-local to perform import")
cmd = ["ostree", f"--repo={dstrepo}", "pull-local", srcrepo, commit]
runcmd(cmd)
# update branch
if branch_exists:
cmd = ["ostree", f"--repo={dstrepo}", "reset", branch, commit]
else:
cmd = ["ostree", f"--repo={dstrepo}", "refs", f"--create={branch}", commit]
logger.info(f"Updating branch {branch} -> {commit} in {dstrepo}")
runcmd(cmd)
# update summary file
logger.info("Updating summary file")
cmd = ["ostree", f"--repo={dstrepo}", "summary", "-u"]
runcmd(cmd)
def ostree_repo_exists(repo: str) -> bool:
if not os.path.exists(repo):
return False
cmd = ["ostree", f"--repo={repo}", "refs"]
if runcmd(cmd, check=False).returncode != 0:
logger.debug(f"OSTree repo does not exist at {repo}")
return False
return True
def ostree_commit_exists(repo: str, commit: str) -> bool:
cmd = ["ostree", f"--repo={repo}", "show", commit]
return runcmd(cmd, check=False).returncode == 0
def ostree_branch_exists(repo: str, branch: str) -> bool:
cmd = ["ostree", f"--repo={repo}", "rev-parse", branch]
return runcmd(cmd, check=False).returncode == 0
def ostree_get_parent_commit(repo: str, commit: str) -> str:
cmd = ["ostree", f"--repo={repo}", "rev-parse", f"{commit}^"]
return runcmd(cmd, check=True).stdout.strip().decode()
def assert_branch_points_to_commit(repo: str, branch: str, commit: str):
cmd = ["ostree", f"--repo={repo}", "rev-parse", branch]
cp = runcmd(cmd, check=True)
detected = cp.stdout.strip().decode()
logger.debug(f"{branch} points to {detected}")
if commit != detected:
raise Exception(f"{branch} points to {detected}. Expected {commit}")
def assert_commit_has_version(repo: str, commit: str, version: str):
cmd = ["ostree", f"--repo={repo}", "show", commit, "--print-metadata-key=version"]
cp = runcmd(cmd, check=True)
embeddedversion = cp.stdout.replace(b"'", b"").strip().decode()
if version != embeddedversion:
raise Exception(
"Embedded commit version does not match buildid "
f"{version} != {embeddedversion}"
)
# The code in this file is expected to be run through fedora messaging
# However, you can run the script directly for testing purposes. The
# below code allows us to do that and also fake feeding data to the
# call by updating the json text below.
if __name__ == "__main__":
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(name)s - %(message)s")
)
logger.addHandler(sh)
m = fedora_messaging.api.Message(
topic="org.fedoraproject.prod.coreos.build.request.ostree-import",
body=EXAMPLE_MESSAGE_BODY,
)
c = Consumer()
c.__call__(m)
| 37.249169 | 125 | 0.646539 | 4,078 | 0.363717 | 0 | 0 | 0 | 0 | 0 | 0 | 4,693 | 0.418569 |
8261781129ea227c5b055b630da103ca621c0fbe | 1,837 | py | Python | deepscm/datasets/medical/ukbb.py | mobarakol/deepscm | ffa5f0208c98b1f31e300f28c07c7d51090eda4a | [
"MIT"
]
| null | null | null | deepscm/datasets/medical/ukbb.py | mobarakol/deepscm | ffa5f0208c98b1f31e300f28c07c7d51090eda4a | [
"MIT"
]
| null | null | null | deepscm/datasets/medical/ukbb.py | mobarakol/deepscm | ffa5f0208c98b1f31e300f28c07c7d51090eda4a | [
"MIT"
]
| null | null | null | from torch.utils.data.dataset import Dataset
import numpy as np
import pandas as pd
import os
import nibabel as nib
from nilearn.image import resample_img
import torch
class UKBBDataset(Dataset):
def __init__(self, csv_path, base_path='/vol/biobank/12579/brain/rigid_to_mni/images', crop_type=None, crop_size=(64, 64, 64), downsample: float = 2.5):#(64, 64, 64)
super().__init__()
self.csv_path = csv_path
df = pd.read_csv(csv_path)
self.num_items = len(df)
self.metrics = {col: torch.as_tensor(df[col]).float() for col in df.columns}
self.base_path = base_path
self.filename = 'T1_unbiased_brain_rigid_to_mni.nii.gz'
self.crop_size = np.array(crop_size)
self.downsample = downsample
def __len__(self):
return self.num_items
def __getitem__(self, index):
item = {col: values[index] for col, values in self.metrics.items()}
mri_path = os.path.join(self.base_path,str(int(item['eid'])),self.filename)
try:
img = nib.load(mri_path)#.get_data()
        except Exception:
            # if this scan is missing or unreadable, fall back to the next item
            index += 1
item = {col: values[index] for col, values in self.metrics.items()}
mri_path = os.path.join(self.base_path,str(int(item['eid'])),self.filename)
img = nib.load(mri_path)#.get_data()
downsampled_nii = resample_img(img, target_affine=np.eye(3)*self.downsample, interpolation='linear')
img = downsampled_nii.dataobj
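        # Centre-crop `crop_size` voxels around the middle of the downsampled volume,
        # then min-max normalise intensities (computed over the whole volume) to [0, 1]
        # before adding a channel axis.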
init_pos = np.round(np.array(img.shape)/2-self.crop_size/2).astype(int)
end_pos = init_pos+self.crop_size
min_ = np.min(img)
max_ = np.max(img)
img = (img - min_) / (max_ - min_)
item['image'] = np.expand_dims(img[init_pos[0]:end_pos[0],init_pos[1]:end_pos[1],init_pos[2]:end_pos[2]], axis=0)
return item
| 41.75 | 169 | 0.645073 | 1,667 | 0.907458 | 0 | 0 | 0 | 0 | 0 | 0 | 147 | 0.080022 |
82629e49973e0be1f008350e2ac5d3d75aff0200 | 4,493 | py | Python | external/mmdetection/detection_tasks/extension/utils/pipelines.py | bes-dev/training_extensions | 7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb | ["Apache-2.0"] | 44 | 2018-10-27T15:28:19.000Z | 2019-02-26T12:50:39.000Z | external/mmdetection/detection_tasks/extension/utils/pipelines.py | bes-dev/training_extensions | 7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb | ["Apache-2.0"] | 31 | 2018-11-09T20:33:47.000Z | 2019-02-28T09:58:22.000Z | external/mmdetection/detection_tasks/extension/utils/pipelines.py | bes-dev/training_extensions | 7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb | ["Apache-2.0"] | 27 | 2018-11-05T21:59:34.000Z | 2019-02-28T14:28:50.000Z | # Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import copy
import numpy as np
from mmdet.datasets.builder import PIPELINES
from ..datasets import get_annotation_mmdet_format
@PIPELINES.register_module()
class LoadImageFromOTEDataset:
"""
Pipeline element that loads an image from a OTE Dataset on the fly. Can do conversion to float 32 if needed.
Expected entries in the 'results' dict that should be passed to this pipeline element are:
results['dataset_item']: dataset_item from which to load the image
results['dataset_id']: id of the dataset to which the item belongs
results['index']: index of the item in the dataset
:param to_float32: optional bool, True to convert images to fp32. defaults to False
"""
def __init__(self, to_float32: bool = False):
self.to_float32 = to_float32
def __call__(self, results):
dataset_item = results['dataset_item']
img = dataset_item.numpy
shape = img.shape
assert img.shape[0] == results['height'], f"{img.shape[0]} != {results['height']}"
assert img.shape[1] == results['width'], f"{img.shape[1]} != {results['width']}"
filename = f"Dataset item index {results['index']}"
results['filename'] = filename
results['ori_filename'] = filename
results['img'] = img
results['img_shape'] = shape
results['ori_shape'] = shape
# Set initial values for default meta_keys
results['pad_shape'] = shape
num_channels = 1 if len(shape) < 3 else shape[2]
results['img_norm_cfg'] = dict(
mean=np.zeros(num_channels, dtype=np.float32),
std=np.ones(num_channels, dtype=np.float32),
to_rgb=False)
results['img_fields'] = ['img']
if self.to_float32:
results['img'] = results['img'].astype(np.float32)
return results
@PIPELINES.register_module()
class LoadAnnotationFromOTEDataset:
"""
Pipeline element that loads an annotation from a OTE Dataset on the fly.
Expected entries in the 'results' dict that should be passed to this pipeline element are:
results['dataset_item']: dataset_item from which to load the annotation
results['ann_info']['label_list']: list of all labels in the project
"""
def __init__(self, min_size : int, with_bbox: bool = True, with_label: bool = True, with_mask: bool = False, with_seg: bool = False,
poly2mask: bool = True, with_text: bool = False, domain=None):
self.with_bbox = with_bbox
self.with_label = with_label
self.with_mask = with_mask
self.with_seg = with_seg
self.poly2mask = poly2mask
self.with_text = with_text
self.domain = domain
self.min_size = min_size
@staticmethod
def _load_bboxes(results, ann_info):
results['bbox_fields'].append('gt_bboxes')
results['gt_bboxes'] = copy.deepcopy(ann_info['bboxes'])
return results
@staticmethod
def _load_labels(results, ann_info):
results['gt_labels'] = copy.deepcopy(ann_info['labels'])
return results
@staticmethod
def _load_masks(results, ann_info):
results['mask_fields'].append('gt_masks')
results['gt_masks'] = copy.deepcopy(ann_info['masks'])
return results
def __call__(self, results):
dataset_item = results['dataset_item']
label_list = results['ann_info']['label_list']
ann_info = get_annotation_mmdet_format(dataset_item, label_list, self.domain, self.min_size)
if self.with_bbox:
results = self._load_bboxes(results, ann_info)
if results is None or len(results['gt_bboxes']) == 0:
return None
if self.with_label:
results = self._load_labels(results, ann_info)
if self.with_mask:
results = self._load_masks(results, ann_info)
return results
| 37.132231 | 136 | 0.665702 | 3,717 | 0.827287 | 0 | 0 | 3,775 | 0.840196 | 0 | 0 | 1,870 | 0.416203 |
826343a77ca38151d0a290d5ea759c030e820e04 | 846 | py | Python | Leetcode/Competition/180_1.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | ["BSD-3-Clause"] | 1 | 2019-11-26T11:52:25.000Z | 2019-11-26T11:52:25.000Z | Leetcode/Competition/180_1.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | ["BSD-3-Clause"] | null | null | null | Leetcode/Competition/180_1.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | ["BSD-3-Clause"] | null | null | null | from typing import List
class Solution:
def luckyNumbers (self, matrix: List[List[int]]) -> List[int]:
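        # A "lucky" number is the minimum element of its row that is also the
        # maximum element of its column.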
m = len(matrix)
n = len(matrix[0])
ans = []
for i in range(m):
row_min_index = 0
row_min = 10**5+1
for j in range(n):
if matrix[i][j] < row_min:
row_min = matrix[i][j]
row_min_index = j
is_max = True
for k in range(m):
if matrix[k][row_min_index] > row_min:
is_max = False
break
if is_max:
ans.append(row_min)
return ans
print(Solution().luckyNumbers([[3,7,8],[9,11,13],[15,16,17]]))
print(Solution().luckyNumbers([[1,10,4,2],[9,3,8,7],[15,16,17,12]]))
print(Solution().luckyNumbers([[7,8],[1,2]])) | 33.84 | 68 | 0.478723 | 643 | 0.760047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8264bba891a9dc8d15b49f3c1fc314e278831022 | 3,282 | py | Python | GUITests/UC10.sikuli/UC10.py | gabrielganzer/EZGas | cc21dadb0001405e96a8fe298c2bbccf61d5d5a2 | ["BSD-3-Clause"] | null | null | null | GUITests/UC10.sikuli/UC10.py | gabrielganzer/EZGas | cc21dadb0001405e96a8fe298c2bbccf61d5d5a2 | ["BSD-3-Clause"] | null | null | null | GUITests/UC10.sikuli/UC10.py | gabrielganzer/EZGas | cc21dadb0001405e96a8fe298c2bbccf61d5d5a2 | ["BSD-3-Clause"] | 1 | 2021-04-06T14:31:08.000Z | 2021-04-06T14:31:08.000Z | # UC10 - Evaluate price
#
# User U exists and has valid account
# We create two Users, User1_UC10, User2_UC10 and one new gasStation GasStationUC10
#
# Registered on a 1920x1080p, Google Chrome 100% zoom
### SETUP
#User1
click("1590678880209.png")
click("1590678953637.png")
wait(2)
type("1590829373120.png", "User1_UC10" + Key.TAB + "[email protected]" + Key.TAB + "user1")
click("1590679157604.png")
click("1590788841790.png")
wait(2)
# User2
click("1590678880209.png")
wait(2)
click("1590678953637.png")
wait(2)
type("1590829373120.png", "User2_UC10" + Key.TAB + "[email protected]" + Key.TAB + "user2")
click("1590679157604.png")
click("1590788841790.png")
# Admin creates a new GasStation
click("1590678880209-1.png")
wait(3)
type("1590829943940.png", "[email protected]" + Key.TAB + "admin" )
click("1590784293656.png")
wait(2)
click("1590784369122.png")
wait(2)
wheel(WHEEL_DOWN, 6)
wait(2)
type("1590830169812.png", "GasStation_UC10" + Key.TAB + "Torino, corso duca")
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
type("1590830389386.png", Key.DOWN + Key.DOWN + Key.ENTER)
click("1590830256446.png")
click("1590830265272.png")
wait(2)
click("1590785166092.png")
wait(3)
type(Key.HOME)
click("1590788397797.png")
wait(2)
click("1590828906996.png")
wait(2)
click("1590788458524.png")
# User1 searches the gasStation
click("1590678880209.png")
wait(3)
type("1590829943940.png", "[email protected]" + Key.TAB + "user1" )
click("1590784293656.png")
wait(2)
wheel(WHEEL_DOWN, 6)
type("1590931278631.png" , "Torino, corso duca" )
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
wait(2)
click("1590922172004.png")
wait(2)
wheel(WHEEL_DOWN, 4)
wait(2)
click(Pattern("1590922374562.png").targetOffset(543,-4))
wheel(WHEEL_DOWN, 4)
wait(2)
click(Pattern("1590930530512.png").targetOffset(73,1))
type("1.5")
click(Pattern("1590930568512.png").targetOffset(73,0))
type("1.4")
click("1590834482526.png")
wait(3)
type(Key.HOME)
wait(3)
click("1590788458524.png")
# User2 login and evaluate prices
wait(2)
click("1590678880209.png")
wait(3)
type("1590829943940.png", "[email protected]" + Key.TAB + "user2" )
click("1590784293656.png")
wait(2)
wheel(WHEEL_DOWN, 4)
wait(2)
type("1590918242822-1.png" , "Torino, corso duca" )
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
wait(2)
click("1590918499196.png")
wheel(WHEEL_DOWN, 3)
click(Pattern("1591638408351.png").targetOffset(1068,-3))
# User2 clicks on the green button if the price is correct, otherwise clicks on the red button
# If User clicks the green button, the User1 trustlevel increases +1, otherwise it decreases -1
#
wait(3)
type(Key.HOME)
click("1590788458524.png")
wait(2)
# Admin deletes users and gasStation
click("1590678880209-1.png")
wait(3)
type("1590829943940.png", "[email protected]" + Key.TAB + "admin" )
click("1590784293656.png")
wait(2)
click("1590784369122.png")
wait(2)
wheel(WHEEL_DOWN, 10)
wait(2)
click(Pattern("1590931822851.png").targetOffset(905,-27))
wait(2)
wheel(WHEEL_UP, 15)
wait(2)
click(Pattern("1590931876805.png").targetOffset(560,-4))
wait(2)
click(Pattern("1590931914901.png").targetOffset(556,-10))
wait(2)
click("1590788397797.png")
wait(2)
click("1590828906996.png")
wait(2)
click("1590788458524.png")
wait(2)
| 23.442857 | 95 | 0.720902 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,819 | 0.554235 |
82661285c2d18985678122dfb06c00248935e316 | 540 | py | Python | basic/migrations/0003_entrypoint_entry_function.py | kgdunn/django-peer-review-system | 8d013961e00d189fbbade5283128e956a27954f8 | [
"BSD-2-Clause"
]
| null | null | null | basic/migrations/0003_entrypoint_entry_function.py | kgdunn/django-peer-review-system | 8d013961e00d189fbbade5283128e956a27954f8 | [
"BSD-2-Clause"
]
| 2 | 2020-03-20T11:50:04.000Z | 2020-03-20T11:50:06.000Z | basic/migrations/0003_entrypoint_entry_function.py | kgdunn/django-peer-review-system | 8d013961e00d189fbbade5283128e956a27954f8 | [
"BSD-2-Clause"
]
| null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 16:14
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('basic', '0002_auto_20170727_1741'),
]
operations = [
migrations.AddField(
model_name='entrypoint',
name='entry_function',
field=models.CharField(default='', help_text='Django function, with syntax: "app_name.function_name"', max_length=100),
),
]
| 25.714286 | 131 | 0.644444 | 382 | 0.707407 | 0 | 0 | 0 | 0 | 0 | 0 | 189 | 0.35 |
826680004e25570292d08da9b3737a70f6cd33e6 | 29,337 | py | Python | test_net_with_srgan.py | jasonlai777/Faster-R-CNN | b5c0c18a9b5faabd4b6ef23346aff85104df7356 | [
"MIT"
]
| null | null | null | test_net_with_srgan.py | jasonlai777/Faster-R-CNN | b5c0c18a9b5faabd4b6ef23346aff85104df7356 | [
"MIT"
]
| null | null | null | test_net_with_srgan.py | jasonlai777/Faster-R-CNN | b5c0c18a9b5faabd4b6ef23346aff85104df7356 | [
"MIT"
]
| null | null | null | # --------------------------------------------------------
# Pytorch Multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
from PIL import Image
from torchvision.utils import save_image
import cv2
from torch.utils.data import DataLoader
from srgan_datasets import *
from srgan import *
import torch.nn.functional as F
from datasets.voc_eval import parse_rec
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
classes = ('__background__', # always index 0
'A.bes(H)','A.bes(T)','A.bes','A.bic(H)','A.bic(T)','A.bic',
'A.fuj(H)','A.fuj(T)','A.fuj','B.xyl(H)','B.xyl(T)','B.xyl',
'C.ele(H)','C.ele(T)','C.ele','M.ent(H)','M.ent(T)','M.ent',
'M.gra(H)','M.gra(T)','M.gra','M.inc(H)','M.inc(T)','M.inc',
'P.cof(H)','P.cof(T)','P.cof','P.vul(H)','P.vul(T)','P.vul',
'P.spe(H)','P.spe(T)','P.spe','H.sp(H)','H.sp(T)','H.sp',
'M.ams(H)' ,'M.ams(T)','M.ams'
)###################
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='cfgs/res101.yml', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res50, res101, res152',
default='res101', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--load_dir', dest='load_dir',
help='directory to load models', default="models",
type=str)
parser.add_argument('--cuda', dest='cuda',
help='whether use CUDA',
action='store_true')
parser.add_argument('--ls', dest='large_scale',
help='whether use large imag scale',
action='store_true')
parser.add_argument('--mGPUs', dest='mGPUs',
help='whether use multiple GPUs',
action='store_true')
parser.add_argument('--cag', dest='class_agnostic',
help='whether perform class_agnostic bbox regression',
action='store_true')
parser.add_argument('--parallel_type', dest='parallel_type',
help='which part of model to parallel, 0: all, 1: model before roi pooling',
default=0, type=int)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load network',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load network',
default=10021, type=int)
parser.add_argument('--vis', dest='vis',
help='visualization mode',
action='store_true')
args = parser.parse_args()
return args
def parse_args_for_srgan():
os.makedirs("srgan/images", exist_ok=True)
os.makedirs("srgan/saved_models", exist_ok=True)
parser = argparse.ArgumentParser()
parser.add_argument("--epoch", type=int, default=500 , help="epoch to start training from")
parser.add_argument("--n_epochs", type=int, default=501, help="number of epochs of training")
parser.add_argument("--dataset_name", type=str, default="img_align_celeba", help="name of the dataset")
parser.add_argument("--batch_size", type=int, default=1, help="size of the batches")
parser.add_argument("--lr", type=float, default=0.00001, help="adam: learning rate")
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of second order momentum of gradient")
parser.add_argument("--decay_epoch", type=int, default=100, help="epoch from which to start lr decay")
#parser.add_argument("--n_cpu", type=int, default=8, help="number of cpu threads to use during batch generation")
parser.add_argument("--hr_height", type=int, default=1024, help="high res. image height")
parser.add_argument("--hr_width", type=int, default=1024, help="high res. image width")
parser.add_argument("--channels", type=int, default=3, help="number of image channels")
parser.add_argument("--sample_interval", type=int, default=50, help="interval between saving image samples")
parser.add_argument("--checkpoint_interval", type=int, default=100, help="interval between model checkpoints")
opt = parser.parse_args([])
return opt
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
def load_gt_box(annopath,
imagesetfile,
classname,
cachedir):
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, '%s_annots.pkl' % imagesetfile)
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annotations
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath.format(imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
try:
recs = pickle.load(f)
except:
recs = pickle.load(f, encoding='bytes')
#print(recs)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
#print(class_recs)
#print(len(class_recs))
return class_recs
def iou(bb1, bb2):#########################
""" check if overlap"""
#assert bb1[0] < bb1[2]
#assert bb1[1] < bb1[3]
#assert bb2[0] < bb2[2]
#assert bb2[1] < bb2[3]
# determine the coordinates of the intersection rectangle
#print(bb1[0], bb2[0])
x_left = max(bb1[0], bb2[0])
y_top = max(bb1[1], bb2[1])
x_right = min(bb1[2], bb2[2])
y_bottom = min(bb1[3], bb2[3])
iw = x_right - x_left
ih = y_bottom - y_top
inters = iw * ih
# union
uni = ((bb1[2]-bb1[0])*(bb1[3]-bb1[1]) + (bb2[2]-bb2[0])*(bb2[3]-bb2[1]) - inters)
overlaps = inters / uni
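    # Worked example with two hypothetical boxes: iou([0, 0, 10, 10], [5, 5, 15, 15])
    # gives intersection 5*5 = 25, union 100 + 100 - 25 = 175, overlap = 25/175 ~ 0.143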
return overlaps
def Area(vertex):
width = vertex[2] - vertex[0]
height = vertex[3] - vertex[1]
area = width*height
return area
if __name__ == '__main__':
args = parse_args()
args_sr = parse_args_for_srgan()
print('Called with args:')
print(args)
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
np.random.seed(cfg.RNG_SEED)
if args.dataset == "pascal_voc":
args.imdb_name = "voc_2007_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "pascal_voc_0712":
args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
args.imdbval_name = "voc_2007_test"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "coco":
args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
args.imdbval_name = "coco_2014_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "imagenet":
args.imdb_name = "imagenet_train"
args.imdbval_name = "imagenet_val"
args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
elif args.dataset == "vg":
args.imdb_name = "vg_150-50-50_minitrain"
args.imdbval_name = "vg_150-50-50_minival"
args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
print('Using config:')
pprint.pprint(cfg)
cfg.TRAIN.USE_FLIPPED = False
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
imdb.competition_mode(on=True)
print('{:d} roidb entries'.format(len(roidb)))
input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
if not os.path.exists(input_dir):
raise Exception('There is no input directory for loading network from ' + input_dir)
load_name = os.path.join(input_dir,
'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))
# initilize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
print("load checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print('load model successfully!')
# initilize the tensor holder here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
if args.cuda:
fasterRCNN.cuda()
start = time.time()
max_per_image = 100
vis = args.vis
if vis:
thresh = 0.0
else:
thresh = 0.0
save_name = 'faster_rcnn_10'
num_images = len(imdb.image_index)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(imdb.num_classes)]
output_dir = get_output_dir(imdb, save_name)
dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
imdb.num_classes, training=False, normalize = False)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
shuffle=False, num_workers=0,
pin_memory=True)
data_iter = iter(dataloader)
_t = {'im_detect': time.time(), 'misc': time.time()}
det_file = os.path.join(output_dir, 'detections.pkl')
fasterRCNN.eval()
empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
for i in range(num_images):
data = next(data_iter)
with torch.no_grad():
im_data.resize_(data[0].size()).copy_(data[0])
im_info.resize_(data[1].size()).copy_(data[1])
gt_boxes.resize_(data[2].size()).copy_(data[2])
num_boxes.resize_(data[3].size()).copy_(data[3])
#print(im_data.shape)
#print(im_info.shape)
#print(gt_boxes)
#print(num_boxes)
det_tic = time.time()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
scores = cls_prob.data
boxes = rois.data[:, :, 1:5]
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4)
else:
box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
pred_boxes /= data[1][0][2].item()
scores = scores.squeeze()
pred_boxes = pred_boxes.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
#print(scores[:,1:3].shape)
#print(pred_boxes[:,4:12].shape)
############################## decline head-tail overlapping
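        # For each of the 13 species the head and tail detections are pooled and run
        # through NMS with an IoU threshold of 0.2, so a head box and a tail box of the
        # same species cannot overlap; the full-length class is copied over unchanged.
        # (Indices >= 300 in `keep` come from the tail half of the pooled tensor.)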
new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()
new_scores = torch.cuda.FloatTensor(300,40).zero_()
for k in range(13):
b = torch.cat((pred_boxes[:,12*k+4:12*k+8],pred_boxes[:,12*k+8:12*k+12]),0)
s = torch.cat((scores[:,3*k+1],scores[:,3*k+2]),0)
keep = nms(b, s, 0.2)
#new head class
idx = [g for g in range(len(keep)) if keep[g] <300]
new_pred_boxes[:len(keep[idx]),12*k+4:12*k+8] = b[keep[idx]]
new_scores[:len(keep[idx]),3*k+1] = s[keep[idx]]
#new tail class
idx = [g for g in range(len(keep)) if keep[g] >=300]
new_pred_boxes[:len(keep[idx]),12*k+8:12*k+12] = b[keep[idx]]
new_scores[:len(keep[idx]),3*k+2] = s[keep[idx]]
#new full length class = original
new_pred_boxes[:,12*k+12:12*k+16] = pred_boxes[:,12*k+12:12*k+16]
new_scores[:,3*k+3] = scores[:,3*k+3]
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
for j in range(1, imdb.num_classes):
inds = torch.nonzero(new_scores[:,j]>thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores = new_scores[:,j][inds]
_, order = torch.sort(cls_scores, 0, True)
if args.class_agnostic:
cls_boxes = new_pred_boxes[inds, :]
else:
cls_boxes = new_pred_boxes[inds][:, j * 4:(j + 1) * 4]
#print(cls_boxes.shape)
#print(cls_scores.unsqueeze(1).shape)
cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
# cls_dets = torch.cat((cls_boxes, cls_scores), 1)
cls_dets = cls_dets[order]
keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
cls_dets = cls_dets[keep.view(-1).long()]
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes[j][i] = cls_dets.cpu().numpy()
else:
all_boxes[j][i] = empty_array
#print(exist_classes)
#for k, j in enumerate(exist_classes):
# all_boxes[j][i] = exist_dets[k]
#print(all_boxes)
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
#print(all_boxes[3][i][:,-1])
image_scores = np.hstack([all_boxes[j][i][:,-1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in xrange(1, imdb.num_classes):
keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
all_boxes[j][i] = all_boxes[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, num_images, detect_time, nms_time))
sys.stdout.flush()
if vis:
cv2.imwrite('result.png', im2show)
pdb.set_trace()
#cv2.imshow('test', im2show)
#cv2.waitKey(0)
#print(all_boxes[1][0][0])
print(torch.cuda.current_device())
with torch.cuda.device(torch.cuda.current_device()):
torch.cuda.empty_cache()
#################################### filter imgs need to do SRGAN-preprocessing
annopath = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/Annotations/{:s}.xml'
imagesetfile = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt'
cachedir = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/annotations_cache'
image_file = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/JPEGImages'
f = open(imagesetfile)
new_indexes = []
img_ids = []
new_gt_boxes = []
for line in f:
img_ids.append(line.splitlines())
img_ids = np.squeeze(img_ids)
for i in range(num_images):
for j in range(1, imdb.num_classes):
gt_boxes_1 = load_gt_box(annopath,imagesetfile,classes[j],cachedir)
if not np.any(all_boxes[j][i]):
continue
if len(gt_boxes_1[img_ids[i]]['bbox']) == 0:
continue
else:# 1 GT box in single image for a single class
gt_b = gt_boxes_1[img_ids[i]]['bbox']
#print(gt_b)
z = 0
for m in range(len(all_boxes[j][i])):
for n in range(len(gt_b)):
det_b = [int(l) for l in all_boxes[j][i][m][:4]]
#print(all_boxes[j][i][m][4], iou(det_b, gt_b[n]), imdb.image_index[j])
if all_boxes[j][i][m][4] > 0.5 and all_boxes[j][i][m][4] < 0.8 \
and iou(det_b, gt_b[n]) > 0.5 and classes[j][-1]==")":
print("srgan beginning......")
new_indexes.append(img_ids[i]+"_"+classes[j]+"_"+str(z))
print(len(new_indexes))#, all_boxes[j][i][m][4], iou(det_b, gt_b[n]))
img_path = os.path.join(image_file, img_ids[i]+".JPG")
img = Image.open(img_path)
img = np.asarray(img)
quaterx = int(img.shape[1]*1/4)
quatery = int(img.shape[0]*1/4)
x1_padding = 0
y1_padding = 0
x2_padding = 0
y2_padding = 0
print(img.shape)
if Area(det_b) >= Area(gt_b[n]):
x1, y1, x2, y2 = det_b
print("det_b: " + str(det_b))
if x1 > quaterx:
x1-=quaterx
x1_padding = quaterx
else:
x1 = 0
x1_padding = x1
if x2 < img.shape[0]-quaterx:
x2+= quaterx
x2_padding = quaterx
else:
x2 = img.shape[0]-1
x2_padding = img.shape[0] - x2-1
if y1 > quatery:
y1 -=quatery
y1_padding = quatery
else:
y1 = 0
y1_padding = y1
if y2 < img.shape[1]-quatery:
y2+=quatery
y2_padding = quatery
else:
y2= img.shape[1]-1
y2_padding = img.shape[1] - y2-1
else:
x1, y1, x2, y2 = gt_b[n]
print("gt_b: "+str(gt_b))
if x1 > quaterx:
x1-=quaterx
x1_padding = quaterx
else:
x1 = 0
x1_padding = x1
if x2 < img.shape[0]-quaterx:
x2+= quaterx
x2_padding = quaterx
else:
x2 = img.shape[0]-1
x2_padding = img.shape[0] - x2-1
if y1 > quatery:
y1 -=quatery
y1_padding = quatery
else:
y1 = 0
y1_padding = y1
if y2 < img.shape[1]-quatery:
y2+=quatery
y2_padding = quatery
else:
y2= img.shape[1]-1
y2_padding = img.shape[1] - y2-1
x1, y1, x2, y2= int(x1),int(y1),int(x2), int(y2)
new_gt_boxes.append([x1_padding, y1_padding, x2-x1-x1_padding-x2_padding, \
y2-y1-y1_padding-y2_padding])# whole photo
srgan_in = img[y1:y2 ,x1:x2 ,:]
srgan_in = srgan_in[...,::-1]#rgb->bgr
print(x1,y1,x2,y2,srgan_in.shape)
cv2.imwrite(os.path.join("srgan/srgan_input", img_ids[i]+"_"+classes[j]+"_"+str(z)+".JPG"), srgan_in)
print("save input: %s" %(img_ids[i]+"_"+classes[j]+"_"+str(z)))
z+=1
all_boxes[j][i][m] = np.append(gt_b[n], 1.0)# turn original pred box to gt box
with torch.cuda.device(torch.cuda.current_device()):
torch.cuda.empty_cache()
dataloader = DataLoader(
ImageDataset("srgan/srgan_input", hr_shape=(1024,1024)),
batch_size=1,
shuffle=True,
num_workers=0,
)
#gan_output = srgan(args_sr, dataloader)
srgan(args_sr, dataloader)
#print("length of data: %d"%len(gan_output))
print("srgan finish......")
with torch.cuda.device(torch.cuda.current_device()):
torch.cuda.empty_cache()
# re-test srgan output
dataloader1 = DataLoader(
ImageDataset("srgan/srgan_output", hr_shape=(1024,1024)),
batch_size=1,
shuffle=True,
num_workers=0,
)
all_boxes_1 = [[[] for _ in range(len(dataloader1))]
for _ in range(imdb.num_classes)]
for i, gan_img in enumerate(dataloader1):
#for i in range(len(dataloader1)):
#gan_img = gan_output[i]
#print(gan_img)
arr = np.append(gan_img["origin_size"][0][0].numpy(), gan_img["origin_size"][1][0].numpy())
gan_img_os = F.interpolate(gan_img['hr'], size=(arr[0],arr[1]), mode='bilinear')
r = 600 / gan_img_os.shape[2]
gan_info = np.array([[gan_img_os.shape[2], gan_img_os.shape[3], r]])
with torch.no_grad():
gan_img_600 = F.interpolate(gan_img_os, scale_factor=r, mode="bilinear").cuda()
gan_info = torch.from_numpy(gan_info).cuda()
gt_boxes
num_boxes
#print(gan_img.shape)
#print(gan_info.shape)
#print(gt_boxes)
#print(num_boxes)
det_tic = time.time()
rois_1, cls_prob_1, bbox_pred_1, \
rpn_loss_cls_1, rpn_loss_box_1, \
RCNN_loss_cls_1, RCNN_loss_bbox_1, \
rois_label_1 = fasterRCNN(gan_img_600, gan_info, gt_boxes, num_boxes)
scores_1 = cls_prob_1.data
boxes_1 = rois_1.data[:, :, 1:5]
#print(data)
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = bbox_pred_1.data
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Optionally normalize targets by a precomputed mean and stdev
if args.class_agnostic:
box_deltas_1 = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas_1 = box_deltas.view(1, -1, 4)
else:
box_deltas_1 = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
+ torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
box_deltas_1 = box_deltas.view(1, -1, 4 * len(imdb.classes))
pred_boxes_1 = bbox_transform_inv(boxes, box_deltas_1, 1)
pred_boxes_1 = clip_boxes(pred_boxes_1, gan_info.data, 1)
else:
# Simply repeat the boxes, once for each class
pred_boxes_1 = np.tile(boxes_1, (1, scores.shape[1]))
pred_boxes_1 /= data[1][0][2].item()
scores_1 = scores_1.squeeze()
pred_boxes_1 = pred_boxes_1.squeeze()
det_toc = time.time()
detect_time = det_toc - det_tic
misc_tic = time.time()
############################## decline head-tail overlapping
new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()
new_scores = torch.cuda.FloatTensor(300,40).zero_()
for k in range(13):
b = torch.cat((pred_boxes_1[:,12*k+4:12*k+8],pred_boxes_1[:,12*k+8:12*k+12]),0)
s = torch.cat((scores_1[:,3*k+1],scores_1[:,3*k+2]),0)
keep = nms(b, s, 0.2)
#new head class
idx = [g for g in range(len(keep)) if keep[g] <300]
new_pred_boxes[:len(keep[idx]),12*k+4:12*k+8] = b[keep[idx]]
new_scores[:len(keep[idx]),3*k+1] = s[keep[idx]]
#new tail class
idx = [g for g in range(len(keep)) if keep[g] >=300]
new_pred_boxes[:len(keep[idx]),12*k+8:12*k+12] = b[keep[idx]]
new_scores[:len(keep[idx]),3*k+2] = s[keep[idx]]
#new full length class = original
new_pred_boxes[:,12*k+12:12*k+16] = pred_boxes[:,12*k+12:12*k+16]
new_scores[:,3*k+3] = scores[:,3*k+3]
if vis:
im = cv2.imread(imdb.image_path_at(i))
im2show = np.copy(im)
for j in range(1, imdb.num_classes):
inds = torch.nonzero(new_scores[:,j]>thresh).view(-1)
# if there is det
if inds.numel() > 0:
cls_scores_1 = new_scores[:,j][inds]
_, order = torch.sort(cls_scores_1, 0, True)
if args.class_agnostic:
cls_boxes_1 = new_pred_boxes[inds, :]
else:
cls_boxes_1 = new_pred_boxes[inds][:, j * 4:(j + 1) * 4]
cls_dets_1 = torch.cat((cls_boxes_1, cls_scores_1.unsqueeze(1)), 1)
cls_dets_1 = cls_dets_1[order]
keep = nms(cls_boxes_1[order, :], cls_scores_1[order], cfg.TEST.NMS)
cls_dets_1 = cls_dets_1[keep.view(-1).long()]
if vis:
im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
all_boxes_1[j][i] = cls_dets.cpu().numpy()
else:
all_boxes_1[j][i] = empty_array
# Limit to max_per_image detections *over all classes*
if max_per_image > 0:
#print(all_boxes[3][i][:,-1])
image_scores = np.hstack([all_boxes_1[j][i][:,-1]
for j in range(1, imdb.num_classes)])
if len(image_scores) > max_per_image:
image_thresh = np.sort(image_scores)[-max_per_image]
for j in range(1, imdb.num_classes):
keep = np.where(all_boxes_1[j][i][:, -1] >= image_thresh)[0]
all_boxes_1[j][i] = all_boxes_1[j][i][keep, :]
misc_toc = time.time()
nms_time = misc_toc - misc_tic
sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
.format(i + 1, len(dataloader1), detect_time, nms_time))
sys.stdout.flush()
torch.cuda.empty_cache()
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
end = time.time()
#print(len(all_boxes))
#print(len(all_boxes_1[0]))
for a in range(len(all_boxes)):
all_boxes[a].extend(all_boxes_1[a])
print(len(all_boxes[a]))
print(new_indexes)
#print(new_gt_boxes)
imdb.evaluate_detections(all_boxes, output_dir, new_indexes, new_gt_boxes)
print("test time: %0.4fs" % (end - start))
| 39.325737 | 115 | 0.590449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,445 | 0.219688 |
8267a45960a2743e88617d4dc273ba1a2f8b4aea | 1,231 | py | Python | app.py | iio1989/oshite | dd95eced2630929705670aaf23be5f35df3b9737 | ["OLDAP-2.3"] | null | null | null | app.py | iio1989/oshite | dd95eced2630929705670aaf23be5f35df3b9737 | ["OLDAP-2.3"] | 1 | 2020-09-24T05:15:00.000Z | 2020-09-24T05:17:06.000Z | app.py | iio1989/oshite | dd95eced2630929705670aaf23be5f35df3b9737 | ["OLDAP-2.3"] | null | null | null | from flask import Flask, render_template, request, redirect, url_for, Markup
import app_helper as apHelp
app = Flask(__name__)
@app.route('/')
def root():
return render_template('home.html')
# click convetBtn. get HttpParam.
@app.route('/post', methods=['GET', 'POST'])
def post():
if request.method == 'POST':
input_kana = request.form['input_kana']
converted_input_list = apHelp.getConvetedStr_kanaToOshite(input_kana)
# rendering for home.html.
return render_template('home.html',
input_kana=input_kana,
converted_input_list=converted_input_list,
fileType= apHelp.FILE_TYPE)
else: # error redirect.
return redirect(url_for('home'))
# click homeBtn from header.
@app.route('/home', methods=['GET', 'POST'])
def home():
return render_template('home.html')
# click aboutBtn from header.
@app.route('/about', methods=['GET', 'POST'])
def about():
return render_template('about.html')
# click historyBtn from header.
@app.route('/history', methods=['GET', 'POST'])
def history():
return render_template('history.html')
if __name__ == '__main__':
app.run(debug=True) | 30.775 | 77 | 0.645004 | 0 | 0 | 0 | 0 | 917 | 0.744923 | 0 | 0 | 336 | 0.272949 |
826808ff1bd0ba43f535ae2091908373eab637e4 | 1,322 | py | Python | build/cls/tp/slices.py | amunoz1/mines | 106f852fe4e64ee132d74290c1a57ea716312376 | ["MIT"] | 1 | 2016-07-19T08:50:54.000Z | 2016-07-19T08:50:54.000Z | src/tp/slices.py | amunoz1/mines | 106f852fe4e64ee132d74290c1a57ea716312376 | ["MIT"] | null | null | null | src/tp/slices.py | amunoz1/mines | 106f852fe4e64ee132d74290c1a57ea716312376 | ["MIT"] | null | null | null | """
Makes subdirectories with slices of seismic time or depth images.
For example, the directory with name "s3_84" contains a constant-i3
slice, where i3 = 84.
"""
from tputils import *
#setupForSubset("subz_401_4_600")
setupForSubset("subt_251_4_500")
seismicDir = getSeismicDir()
#############################################################################
def main(args):
#makeSlice3Z(96)
makeSlice3T(84)
makeSlice3T(73)
def makeSlice3T(i3):
subDir = "s3_"+str(i3)+"/"
File(seismicDir+subDir).mkdir()
for name in ["tpst"]:
x = readImage(name)
writeImage(subDir+name,x[i3])
display(x[i3])
def makeSlice3Z(i3):
subDir = "s3_"+str(i3)+"/"
File(seismicDir+subDir).mkdir()
for name in ["tpsz","tpgv","tpgd","tpgg","tpgp"]:
x = readImage(name)
writeImage(subDir+name,x[i3])
def display(s,g=None,cmin=0,cmax=0):
sp = SimplePlot(SimplePlot.Origin.UPPER_LEFT)
sp.addColorBar()
sp.getPlotPanel().setColorBarWidthMinimum(80)
pv = sp.addPixels(s)
pv.setInterpolation(PixelsView.Interpolation.NEAREST)
if g!=None:
pv = sp.addPixels(g)
pv.setInterpolation(PixelsView.Interpolation.NEAREST)
pv.setColorModel(ColorMap.getJet(0.3))
if cmin!=cmax:
pv.setClips(cmin,cmax)
#############################################################################
run(main)
| 28.12766 | 77 | 0.618759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 434 | 0.32829 |
826871d70c21ce76b15c27edf3c9b2a76149c4a5 | 4,442 | py | Python | lldb/examples/summaries/cocoa/NSException.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | ["Apache-2.0"] | 427 | 2018-05-29T14:21:02.000Z | 2022-03-16T03:17:54.000Z | SymbolExtractorAndRenamer/lldb/examples/summaries/cocoa/NSException.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | ["Apache-2.0"] | 25 | 2018-07-23T08:34:15.000Z | 2021-11-05T07:13:36.000Z | SymbolExtractorAndRenamer/lldb/examples/summaries/cocoa/NSException.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | ["Apache-2.0"] | 52 | 2018-07-19T19:57:32.000Z | 2022-03-11T16:05:38.000Z | """
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for class NSException
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import CFString
import lldb
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
class NSKnownException_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
if not (self.sys_params.types_cache.id):
self.sys_params.types_cache.id = self.valobj.GetType(
).GetBasicType(lldb.eBasicTypeObjCID)
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def offset_name(self):
logger = lldb.formatters.Logger.Logger()
return self.sys_params.pointer_size
def offset_reason(self):
logger = lldb.formatters.Logger.Logger()
return 2 * self.sys_params.pointer_size
def description(self):
logger = lldb.formatters.Logger.Logger()
name_ptr = self.valobj.CreateChildAtOffset(
"name", self.offset_name(), self.sys_params.types_cache.id)
reason_ptr = self.valobj.CreateChildAtOffset(
"reason", self.offset_reason(), self.sys_params.types_cache.id)
return 'name:' + CFString.CFString_SummaryProvider(
name_ptr, None) + ' reason:' + CFString.CFString_SummaryProvider(reason_ptr, None)
class NSUnknownException_SummaryProvider:
def adjust_for_architecture(self):
pass
def __init__(self, valobj, params):
logger = lldb.formatters.Logger.Logger()
self.valobj = valobj
self.sys_params = params
self.update()
def update(self):
logger = lldb.formatters.Logger.Logger()
self.adjust_for_architecture()
def description(self):
logger = lldb.formatters.Logger.Logger()
stream = lldb.SBStream()
self.valobj.GetExpressionPath(stream)
name_vo = self.valobj.CreateValueFromExpression(
"name", "(NSString*)[" + stream.GetData() + " name]")
reason_vo = self.valobj.CreateValueFromExpression(
"reason", "(NSString*)[" + stream.GetData() + " reason]")
if name_vo.IsValid() and reason_vo.IsValid():
return CFString.CFString_SummaryProvider(
name_vo, None) + ' ' + CFString.CFString_SummaryProvider(reason_vo, None)
return '<variable is not NSException>'
def GetSummary_Impl(valobj):
logger = lldb.formatters.Logger.Logger()
global statistics
class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
valobj, statistics)
if wrapper:
return wrapper
name_string = class_data.class_name()
logger >> "class name is: " + str(name_string)
if name_string == 'NSException':
wrapper = NSKnownException_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit('code_notrun', valobj)
else:
wrapper = NSUnknownException_SummaryProvider(
valobj, class_data.sys_params)
statistics.metric_hit(
'unknown_class',
valobj.GetName() +
" seen as " +
name_string)
return wrapper
def NSException_SummaryProvider(valobj, dict):
logger = lldb.formatters.Logger.Logger()
provider = GetSummary_Impl(valobj)
if provider is not None:
if isinstance(
provider,
lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
return provider.message()
try:
summary = provider.description()
except:
summary = None
logger >> "got summary " + str(summary)
if summary is None:
summary = '<variable is not NSException>'
return str(summary)
return 'Summary Unavailable'
def __lldb_init_module(debugger, dict):
debugger.HandleCommand(
"type summary add -F NSException.NSException_SummaryProvider NSException")
| 32.903704 | 94 | 0.670419 | 2,308 | 0.519586 | 0 | 0 | 0 | 0 | 0 | 0 | 611 | 0.137551 |
8268e3ff708fceac06d057f89101a1b211a8db3a | 364 | py | Python | pacman-arch/test/pacman/tests/upgrade084.py | Maxython/pacman-for-termux | 3b208eb9274cbfc7a27fca673ea8a58f09ebad47 | ["MIT"] | 23 | 2021-05-21T19:11:06.000Z | 2022-03-31T18:14:20.000Z | source/pacman-6.0.1/test/pacman/tests/upgrade084.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | ["Unlicense"] | 11 | 2021-05-21T12:08:44.000Z | 2021-12-21T08:30:08.000Z | source/pacman-6.0.1/test/pacman/tests/upgrade084.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | ["Unlicense"] | 1 | 2021-09-26T08:44:40.000Z | 2021-09-26T08:44:40.000Z | self.description = "Install a package ('any' architecture)"
p = pmpkg("dummy")
p.files = ["bin/dummy",
"usr/man/man1/dummy.1"]
p.arch = 'any'
self.addpkg(p)
self.option["Architecture"] = ['auto']
self.args = "-U %s" % p.filename()
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=dummy")
for f in p.files:
self.addrule("FILE_EXIST=%s" % f)
| 21.411765 | 59 | 0.648352 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.445055 |
826924435f780ad15c0f8fe2ba2c459917504e1d | 7,995 | py | Python | Examples And Benchmarks/HTTP over Raw Connection/SERVERS/http_server__http_tools.py | FI-Mihej/FI-ASockIOCore | 82ee94e94f692511afee5aeb0d75bb7366eb9b96 | [
"Apache-2.0"
]
| null | null | null | Examples And Benchmarks/HTTP over Raw Connection/SERVERS/http_server__http_tools.py | FI-Mihej/FI-ASockIOCore | 82ee94e94f692511afee5aeb0d75bb7366eb9b96 | [
"Apache-2.0"
]
| null | null | null | Examples And Benchmarks/HTTP over Raw Connection/SERVERS/http_server__http_tools.py | FI-Mihej/FI-ASockIOCore | 82ee94e94f692511afee5aeb0d75bb7366eb9b96 | [
"Apache-2.0"
]
| null | null | null | from simple_network.tcp_app_server import *
import httptools
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = 'ButenkoMS <[email protected]>'
# ======================================================================
# ===================GLOBAL SETTINGS FOR ALL TESTS======================
#
SERVER_KEYWORD = b'http server inline'
SERVER_ADDRESS = ('localhost', 25000)
BSC__USE_READ_WITH_FIXED_BUFFER = True # "Optimized for speed". Good for Named Clients.
# BSC__USE_READ_WITH_FIXED_BUFFER = False # "Optimized for memory". Good for big amount of Unknown Clients (raw,
# http, etc.) if you have small server.
BSC__SOCKET_READ_FIXED_BUFFER_SIZE = 1024 ** 2
BSC__USE_NODELAY_INET = True
BSC__REUSE_GATE_ADDR = True
BSC__REUSE_GATE_PORT = True
LINE_TRACE_ALLOWED = True
#
# ===================GLOBAL SETTINGS FOR ALL TESTS======================
# ======================================================================
class RawClientCheckerAllRaw(CheckIsRawConnection):
def __call__(self, app_server: ASockIOCore, client_info: Connection):
return True
def run_http_server():
io_iteration_timeout = 0.5
# ADD SERVER GATE CONNECTIONS
set_of_tcp_settings = set()
tcp_settings = ConnectionSettings(ConnectionDirectionRole.server, SERVER_ADDRESS, SERVER_KEYWORD)
set_of_tcp_settings.add(tcp_settings)
# CREATE SERVER
http_server = ASockIOCore(set_of_tcp_settings)
# SET SERVER SETTINGS
http_server.raw_checker_for_new_incoming_connections = RawClientCheckerAllRaw()
http_server.unknown_clients_are_allowed = True
http_server.should_get_client_addr_info_on_connection = False
http_server.use_speed_optimized_socket_read = BSC__USE_READ_WITH_FIXED_BUFFER
http_server.socket_read_fixed_buffer_size.result = BSC__SOCKET_READ_FIXED_BUFFER_SIZE
http_server.use_nodelay_inet = BSC__USE_NODELAY_INET
http_server.reuse_gate_addr = BSC__REUSE_GATE_ADDR
http_server.reuse_gate_port = BSC__REUSE_GATE_PORT
# START SERVER
with asock_io_core_connect(http_server, True, backlog=1000) as server:
http_server.need_to_auto_check_incoming_raw_connection = True
clients_per_transport_id = dict()
# RUN SERVER LOOP
while True:
io_iteration_result = server.io_iteration(io_iteration_timeout)
# CLIENT CONNECTED
for another_client_id in io_iteration_result.newly_connected_unknown_clients:
clients_per_transport_id[another_client_id] = HttpClientData(another_client_id, server)
# CLIENT HAVE DATA TO READ
for another_client_id in io_iteration_result.clients_have_data_to_read:
clients_per_transport_id[another_client_id].data_received()
# CLIENT CLOSED
for another_client_id in io_iteration_result.clients_with_disconnected_connection:
if clients_per_transport_id[another_client_id].socket_error():
del clients_per_transport_id[another_client_id]
print('Server had been Shut Down.')
# ==============================================================================================================
# !!!!! IMPORTANT !!!!!
# NEXT CODE SHOULD BE EQUIVALENT TO ASYNCIO HTTP SERVER'S CODE FROM "https://github.com/MagicStack/vmbench" PROJECT
# (BENCHMARKING TOOL FROM 'UVLOOP' DEVELOPERS) FOR FAIR COMPARISON, SO IT'S SO DIRTY.
# (IT'S ALMOST EQUIVALENT: IT DOES NOT HAVE FEW CRITICAL vmbench's BUGS)
_RESP_CACHE = {}
class HttpRequest:
__slots__ = ('_protocol', '_url', '_headers', '_version')
def __init__(self, protocol, url, headers, version):
self._protocol = protocol
self._url = url
self._headers = headers
self._version = version
class HttpResponse:
__slots__ = ('_protocol', '_request', '_headers_sent')
def __init__(self, protocol, request: HttpRequest):
self._protocol = protocol
self._request = request
self._headers_sent = False
def write(self, data):
self._protocol.output_list.append(b''.join([
'HTTP/{} 200 OK\r\n'.format(
self._request._version).encode('latin-1'),
b'Content-Type: text/plain\r\n',
'Content-Length: {}\r\n'.format(len(data)).encode('latin-1'),
b'\r\n',
data
]))
class HttpClientData:
__slots__ = ('server', 'output_list', 'transport_id',
'_current_request', '_current_parser',
'_current_url', '_current_headers', '_last_piece_of_data',
'_previous_piece_of_data')
def __init__(self, transport_id, server: ASockIOCore):
self.server = server
self.transport_id = transport_id
self.output_list = list()
self._current_parser = httptools.HttpRequestParser(self)
self._current_headers = list()
self._current_request = None
self._current_url = None
self._last_piece_of_data = None
self._previous_piece_of_data = None
def data_received(self):
try:
for message in self.server.get_messages_from_client(self.transport_id):
# print('IN {}: {}'.format(self.transport_id, bytes(message)))
self._current_parser.feed_data(message)
self.server.send_messages_to_client(self.transport_id, self.output_list)
except Exception as err:
print('EXCEPTION:', err)
self.server.mark_client_connection_as_should_be_closed_immediately(self.transport_id, False)
# raise err
del self.output_list[:]
# self.output_list.clear()
def socket_error(self):
self._current_request = self._current_parser = None
self.server.remove_client(self.transport_id)
return True
# =============================================
# ==== BEGIN of HttpRequestParser methods: ====
# def on_message_begin(self):
# pass
def on_url(self, url):
if self._current_url:
self._current_url += url
else:
self._current_url = url
# def on_status(self, data):
# pass
def on_header(self, name, value):
self._current_headers.append((name, value))
def on_headers_complete(self):
try:
self._current_request = HttpRequest(
self, self._current_url, self._current_headers,
self._current_parser.get_http_version())
self.handle(self._current_request, HttpResponse(self, self._current_request))
except:
print('ON HEADERS COMPLETE. ID: {}. Last: {}. Previous : {}.'.format(
self.transport_id, self._last_piece_of_data, self._previous_piece_of_data))
raise
# def on_body(self, data):
# pass
# def on_message_complete(self):
# pass
# def on_chunk_header(self):
# pass
# def on_chunk_complete(self):
# pass
# ==== END of HttpRequestParser methods====
# =========================================
def handle(self, request, response: HttpResponse):
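        # The request path encodes the payload size to return (e.g. "/1024" -> 1024
        # bytes); the response body is that many b'X' bytes, cached per size in
        # _RESP_CACHE, mirroring the vmbench behaviour described above.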
parsed_url = httptools.parse_url(self._current_url)
payload_size = parsed_url.path.decode('ascii')[1:]
if not payload_size:
payload_size = 1024
else:
payload_size = int(payload_size)
resp = _RESP_CACHE.get(payload_size)
if resp is None:
resp = b'X' * payload_size
_RESP_CACHE[payload_size] = resp
response.write(resp)
self._current_request = None
self._current_url = None
self._current_headers = list()
# print('KEEP ALIVE:', self._current_parser.should_keep_alive())
if not self._current_parser.should_keep_alive():
self.server.mark_client_connection_as_ready_to_be_closed(self.transport_id, False)
if __name__ == '__main__':
run_http_server()
| 34.313305 | 115 | 0.635897 | 4,512 | 0.564353 | 0 | 0 | 0 | 0 | 0 | 0 | 2,250 | 0.281426 |
826a0a34399ac3cae5194033717af23bcc5eca24 | 1,066 | py | Python | bamboomba_description/launch/robot_state_publisher.launch.py | RIF-Robotics/bamboomba | 3fd725be9b0fdf33d3e46c37d20d8cbecea7d15d | ["BSD-3-Clause"] | null | null | null | bamboomba_description/launch/robot_state_publisher.launch.py | RIF-Robotics/bamboomba | 3fd725be9b0fdf33d3e46c37d20d8cbecea7d15d | ["BSD-3-Clause"] | null | null | null | bamboomba_description/launch/robot_state_publisher.launch.py | RIF-Robotics/bamboomba | 3fd725be9b0fdf33d3e46c37d20d8cbecea7d15d | ["BSD-3-Clause"] | null | null | null | from os import path
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from launch_ros.actions import Node
from launch.actions import DeclareLaunchArgument
from launch.substitutions import Command, LaunchConfiguration
def generate_launch_description():
use_sim_time = LaunchConfiguration('use_sim_time')
xacro_path = path.join(get_package_share_directory('bamboomba_description'),
'urdf', 'bamboomba.urdf.xacro')
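    # The URDF passed to robot_state_publisher is generated by expanding the xacro
    # file at launch time via the `xacro` command substitution below.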
robot_description = {'robot_description' : Command(['xacro', ' ', xacro_path])}
return LaunchDescription([
DeclareLaunchArgument(
'use_sim_time',
default_value='False',
description='Use simulation clock if true'),
Node(package='robot_state_publisher',
name='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
parameters=[{'use_sim_time': use_sim_time},
robot_description]
),
])
| 35.533333 | 83 | 0.674484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 236 | 0.221388 |
826b06577ba553bf4320966964956912f51ba613 | 1,211 | py | Python | tests/test__event.py | alpha-health-ai/pyformance | 3dcf6556a070e89c783f30ddfff03c986e7a5582 | [
"Apache-2.0"
]
| 4 | 2019-11-13T11:11:43.000Z | 2021-06-20T11:01:27.000Z | tests/test__event.py | alpha-health-ai/pyformance | 3dcf6556a070e89c783f30ddfff03c986e7a5582 | [
"Apache-2.0"
]
| 5 | 2019-01-14T14:59:44.000Z | 2020-12-13T17:04:27.000Z | tests/test__event.py | alpha-health-ai/pyformance | 3dcf6556a070e89c783f30ddfff03c986e7a5582 | [
"Apache-2.0"
]
| 6 | 2019-04-17T21:07:40.000Z | 2022-01-18T16:11:51.000Z | from pyformance.meters import Event, EventPoint
from tests import TimedTestCase
class EventTestCase(TimedTestCase):
def setUp(self):
super(EventTestCase, self).setUp()
self.event = Event(
clock=TimedTestCase.clock,
key="test_event",
tags={"name", "value"}
)
def tearDown(self):
super(EventTestCase, self).tearDown()
def test_add_event_and_read_it(self):
mock_values = {"value": 1}
self.event.add(mock_values)
events = self.event.get_events()
self.assertEqual(events, [EventPoint(
time=self.clock.time(),
values=mock_values
)])
def test_clear_event_clears_events(self):
self.event.add({"value": 1})
self.event.clear()
self.assertEqual(len(self.event.get_events()), 0)
def test_get_event_returns_shallow_copy(self):
mock_values = {"value": 1}
self.event.add(mock_values)
events = self.event.get_events()
self.assertEqual(len(events), 1)
# make sure the returned object is not a reference(important for thread safety)
self.event.clear()
self.assertEqual(len(events), 1)
| 26.911111 | 87 | 0.620974 | 1,128 | 0.931462 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.10322 |
826c1e777daa6347910fda6447c0f31e1cd72324 | 4,894 | py | Python | problem_#43_30032019.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | ["MIT"] | null | null | null | problem_#43_30032019.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | ["MIT"] | null | null | null | problem_#43_30032019.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | ["MIT"] | null | null | null | """
Good morning! Here's your coding interview problem for today.
This problem was asked by Amazon.
Implement a stack that has the following methods:
push(val), which pushes an element onto the stack
pop(), which pops off and returns the topmost element of the stack. If there are no elements in the stack, then it should throw an error or return null.
max(), which returns the maximum value in the stack currently. If there are no elements in the stack, then it should throw an error or return null.
Each method should run in constant time.
https://www.geeksforgeeks.org/design-a-stack-that-supports-getmin-in-o1-time-and-o1-extra-space/
https://www.geeksforgeeks.org/design-and-implement-special-stack-data-structure/
"""
# Class to make a Node
class Node:
    # Constructor which assigns the argument to the node's value
def __init__(self, value):
self.value = value
self.next = None
# This method returns the string representation of the object
def __str__(self):
return "Node({})".format(self.value)
# __repr__ is same as __str__
__repr__ = __str__
class Stack:
    # Stack constructor: initialises the top of the stack, the min/max trackers and the counter
def __init__(self):
self.top = None
self.maximum = None
self.count = 0
self.minimum = None
# This method returns the string representation of the object (stack).
def __str__(self):
temp = self.top
out = []
while temp:
out.append(str(temp.value))
temp = temp.next
out = '\n'.join(out)
return ('Top {} \n\nStack :\n{}'.format(self.top, out))
# __repr__ is same as __str__
__repr__ = __str__
# This method is used to get minimum element of stack
def getMin(self):
if self.top is None:
return "Stack is Empty"
else:
print("Minimum element in the stack is: {}".format(self.minimum.value))
    # This method is used to get maximum element of stack
def getMax(self):
if self.top is None:
return "Stack is Empty"
else:
print("Maximum element in the stack is: {}".format(self.maximum.value))
# Method to check if stack is Empty or not
def isEmpty(self):
        # If top is None then the stack is empty
if self.top == None:
return True
else:
            # If top is not None then the stack is not empty
return False
def push(self, value):
if self.top is None:
self.top = Node(value)
self.top.value = value
self.minimum = Node(value)
self.minimum.value = value
self.maximum = Node(value)
self.maximum.value = value
        elif value < self.minimum.value:
            # New value is the smallest so far: it becomes the new minimum,
            # while the previous maximum is carried over unchanged.
            new_node = Node(value)
            new_node_min = Node(value)
            new_node_max = Node(self.maximum.value)
            new_node.next = self.top
            new_node_max.next = self.maximum
            new_node_min.next = self.minimum
            self.top = new_node
            self.minimum = new_node_min
            self.maximum = new_node_max
        elif value > self.maximum.value:
            # New value is the largest so far: it becomes the new maximum,
            # while the previous minimum is carried over unchanged.
            new_node = Node(value)
            new_node_max = Node(value)
            new_node_min = Node(self.minimum.value)
            new_node.next = self.top
            new_node_max.next = self.maximum
            new_node_min.next = self.minimum
            self.top = new_node
            self.maximum = new_node_max
            self.minimum = new_node_min
        else:
            # Value lies between the current minimum and maximum: both running
            # extremes are simply carried over.
            new_node = Node(value)
            new_node_max = Node(self.maximum.value)
            new_node_min = Node(self.minimum.value)
            new_node.next = self.top
            new_node_max.next = self.maximum
            new_node_min.next = self.minimum
            self.top = new_node
            self.maximum = new_node_max
            self.minimum = new_node_min
print("Number Inserted: {}".format(value))
# This method is used to pop top of stack
def pop(self):
if self.top is None:
print("Stack is empty")
else:
removedNode = self.top.value
self.top = self.top.next
self.minimum = self.minimum.next
self.maximum = self.maximum.next
print("Top Most Element Removed : {}".format(removedNode))
stack = Stack()
stack.push(3)
stack.push(5)
stack.getMin()
stack.getMax()
stack.push(2)
stack.push(1)
stack.getMin()
stack.getMax()
stack.pop()
stack.getMin()
stack.getMax()
stack.pop()
| 31.574194 | 152 | 0.603801 | 3,948 | 0.806702 | 0 | 0 | 0 | 0 | 0 | 0 | 1,535 | 0.313649 |
826c762278c3cdf3123d44a03bc4ba2f8e259b59 | 1,161 | py | Python | scrapy/contrib/linkextractors/lxmlhtml.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
]
| 1 | 2015-09-03T18:30:10.000Z | 2015-09-03T18:30:10.000Z | scrapy/contrib/linkextractors/lxmlhtml.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
]
| 2 | 2021-12-13T20:51:32.000Z | 2022-02-11T03:47:35.000Z | scrapy/contrib/linkextractors/lxmlhtml.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
]
| 1 | 2017-11-09T20:33:59.000Z | 2017-11-09T20:33:59.000Z | """
Link extractor based on lxml.html
"""
import lxml.html
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
class LxmlParserLinkExtractor(object):
def __init__(self, tag="a", attr="href", process=None, unique=False):
self.scan_tag = tag if callable(tag) else lambda t: t == tag
self.scan_attr = attr if callable(attr) else lambda a: a == attr
self.process_attr = process if callable(process) else lambda v: v
self.unique = unique
self.links = []
def _extract_links(self, response_text, response_url):
html = lxml.html.fromstring(response_text)
html.make_links_absolute(response_url)
for e, a, l, p in html.iterlinks():
if self.scan_tag(e.tag):
if self.scan_attr(a):
link = Link(self.process_attr(l), text=e.text)
self.links.append(link)
links = unique_list(self.links, key=lambda link: link.url) \
if self.unique else self.links
return links
def extract_links(self, response):
return self._extract_links(response.body, response.url)
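# Hypothetical usage sketch (the `response` object is assumed here, not
# defined in this module): given a downloaded page, links could be pulled
# out with
#   extractor = LxmlParserLinkExtractor(tag="a", attr="href", unique=True)
#   links = extractor.extract_links(response)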
| 31.378378 | 73 | 0.639966 | 1,013 | 0.872524 | 0 | 0 | 0 | 0 | 0 | 0 | 50 | 0.043066 |
826cefe38aada7c6d25cf287d99f1f536d838887 | 97 | py | Python | src/meetings/admin.py | Yalnyra/office-meeting-reservation | 52f558ec11a9b5d69c28acb60de132d70b0a789b | [
"bzip2-1.0.6"
]
| null | null | null | src/meetings/admin.py | Yalnyra/office-meeting-reservation | 52f558ec11a9b5d69c28acb60de132d70b0a789b | [
"bzip2-1.0.6"
]
| null | null | null | src/meetings/admin.py | Yalnyra/office-meeting-reservation | 52f558ec11a9b5d69c28acb60de132d70b0a789b | [
"bzip2-1.0.6"
]
| null | null | null | from django.contrib import admin
from .models import Meeting
admin.site.register(Meeting)
| 16.166667 | 33 | 0.773196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
826e60c1e9dd5f09bf2a2eb0580dbe4ef233f970 | 578 | py | Python | app/templates/init.py | arudmin/generator-flask-heroku | 12ecd9d37b732bf5d59912c4874f1dbc6cfa63b1 | [
"MIT"
]
| null | null | null | app/templates/init.py | arudmin/generator-flask-heroku | 12ecd9d37b732bf5d59912c4874f1dbc6cfa63b1 | [
"MIT"
]
| null | null | null | app/templates/init.py | arudmin/generator-flask-heroku | 12ecd9d37b732bf5d59912c4874f1dbc6cfa63b1 | [
"MIT"
]
| null | null | null | from flask import Flask, url_for
import os
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'SECRET_KEY_CH1ng3me'
# Determines the destination of the build. Only usefull if you're using Frozen-Flask
app.config['FREEZER_DESTINATION'] = os.path.dirname(os.path.abspath(__file__))+'/../build'
# Function to easily find your assets
# In your template use <link rel=stylesheet href="{{ static('filename') }}">
<%= appName %>.jinja_env.globals['static'] = (
lambda filename: url_for('static', filename = filename)
)
from <%= appName %> import views
| 32.111111 | 90 | 0.723183 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 285 | 0.49308 |
826e7e8ce0638e411f4ad1445cfe2c06fdbae9c6 | 936 | py | Python | sigmod2021-exdra-p523/experiments/code/other/l2svm.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
]
| 4 | 2021-12-10T17:20:26.000Z | 2021-12-27T14:38:40.000Z | sigmod2021-exdra-p523/experiments/code/other/l2svm.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
]
| null | null | null | sigmod2021-exdra-p523/experiments/code/other/l2svm.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
]
| null | null | null |
import numpy as np
import argparse
from sklearn.svm import LinearSVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_regression
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
X = np.load(args.datapath, allow_pickle=True)
y = np.load(args.labels, allow_pickle=True)
# http://scikit-learn.sourceforge.net/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC
regr = make_pipeline(StandardScaler(),
LinearSVR(verbose=args.verbose, tol = 1e-5, max_iter = 30))
regr.fit(X,y)
np.savetxt(args.outputpath, regr.named_steps['linearsvr'].coef_, delimiter=",")
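# Example invocation (file names are placeholders, not part of the experiment
# scripts); the features and labels are expected as NumPy .npy arrays:
#   python l2svm.py -x features.npy -y labels.npy -o coefficients.csv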
| 36 | 111 | 0.766026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 188 | 0.200855 |
826ee5078415354fd8746cf24ad960817241f697 | 4,797 | py | Python | libs/fm_mission_planner/python/fm_mission_planner/target_viz.py | ethz-asl/mav_findmine | 2835995ace0a20a30f20812437b1b066428253a9 | [
"MIT"
]
| 3 | 2021-06-25T03:38:38.000Z | 2022-01-13T08:39:48.000Z | libs/fm_mission_planner/python/fm_mission_planner/target_viz.py | ethz-asl/mav_findmine | 2835995ace0a20a30f20812437b1b066428253a9 | [
"MIT"
]
| null | null | null | libs/fm_mission_planner/python/fm_mission_planner/target_viz.py | ethz-asl/mav_findmine | 2835995ace0a20a30f20812437b1b066428253a9 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
# MIT License
#
# Copyright (c) 2020 Rik Baehnemann, ASL, ETH Zurich, Switzerland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import rospy
import rospkg
import pandas as pd
import pymap3d as pm
import os
import numpy as np
from matplotlib import cm
from matplotlib import colors
from sensor_msgs.msg import NavSatFix
from visualization_msgs.msg import Marker, MarkerArray
# Load target list from CSV, receive home point from ROS msgs and publish target points to RVIZ.
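# The CSV is read with pandas and is expected to provide at least 'lat',
# 'lon' and 'type' columns (column names taken from the code below; the
# values here are made-up examples):
#   lat,lon,type
#   47.3769,8.5417,1
#   47.3771,8.5420,2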
class TargetViz():
def __init__(self):
self.df_targets = None
self.loadRosParameters()
self.subscribeToTopics()
self.advertiseTopics()
self.loadTargetTable()
self.main()
def loadRosParameters(self):
rospack = rospkg.RosPack()
default_target_path = os.path.join(rospack.get_path('fm_mission_planner'), 'cfg/target_table.csv')
self.target_path = rospy.get_param("~target_table", default_target_path)
self.frame_id = rospy.get_param("~frame_id", 'enu')
def subscribeToTopics(self):
self.home_point_sub = rospy.Subscriber('home_point', NavSatFix, self.homePointCallback)
def advertiseTopics(self):
self.target_pub = rospy.Publisher('~targets', MarkerArray, latch=True)
def homePointCallback(self, msg):
self.lat0 = msg.latitude
self.lon0 = msg.longitude
self.alt0 = msg.altitude
rospy.loginfo_throttle(10.0, 'Received home point lat0: ' + str(self.lat0) + ' lon0: ' + str(self.lon0) + ' alt0: ' + str(self.alt0))
if self.df_targets is not None and len(self.df_targets):
self.convertToENU()
self.createColors()
self.createMarkerArray()
self.target_pub.publish(self.marker_array)
def loadTargetTable(self):
self.df_targets = pd.read_csv(self.target_path, sep=",")
rospy.loginfo('Loading ' + str(len(self.df_targets)) + ' target points.')
def convertToENU(self):
lat = self.df_targets['lat'].values
lon = self.df_targets['lon'].values
alt = np.squeeze(np.zeros((len(self.df_targets), 1)))
        print(lat)
        print(lat.size)
if lat.size == 1 and lon.size == 1 and alt.size == 1:
lat = np.array([lat])
lon = np.array([lon])
alt = np.array([alt])
self.east = []
self.north = []
self.up = []
for i in range(0, len(self.df_targets)):
east, north, up = pm.geodetic2enu(lat[i], lon[i], alt[i], self.lat0, self.lon0, self.alt0)
self.east.append(east)
self.north.append(north)
self.up.append(up)
def createColors(self):
types = self.df_targets['type'].values
color_map = cm.get_cmap('Set1')
norm = colors.Normalize(vmin=min(types), vmax=max(types))
self.colors = color_map(norm(types))
def createMarkerArray(self):
self.marker_array = MarkerArray()
for i in range(0, len(self.df_targets)):
marker = Marker()
marker.type = marker.SPHERE
marker.action = marker.ADD
marker.scale.x = 0.4
marker.scale.y = 0.4
marker.scale.z = 0.4
marker.color.r = self.colors[i, 0]
marker.color.g = self.colors[i, 1]
marker.color.b = self.colors[i, 2]
marker.color.a = self.colors[i, 3]
marker.pose.position.x = self.east[i]
marker.pose.position.y = self.north[i]
marker.pose.position.z = self.up[i]
marker.pose.orientation.w = 1.0
marker.header.frame_id = self.frame_id
marker.id = i
self.marker_array.markers.append(marker)
def main(self):
rospy.spin()
| 36.9 | 141 | 0.64999 | 3,288 | 0.685428 | 0 | 0 | 0 | 0 | 0 | 0 | 1,428 | 0.297686 |
826f63c5c39c287a99f63c5f1236dbd56acc410a | 20,198 | py | Python | capirca/lib/gcp_hf.py | PhillSimonds/capirca | 26c5f4f7d3bfc29841f5e6d3cdf07be9923c2c70 | [
"Apache-2.0"
]
| null | null | null | capirca/lib/gcp_hf.py | PhillSimonds/capirca | 26c5f4f7d3bfc29841f5e6d3cdf07be9923c2c70 | [
"Apache-2.0"
]
| null | null | null | capirca/lib/gcp_hf.py | PhillSimonds/capirca | 26c5f4f7d3bfc29841f5e6d3cdf07be9923c2c70 | [
"Apache-2.0"
]
| 1 | 2022-02-14T03:22:18.000Z | 2022-02-14T03:22:18.000Z | """Google Cloud Hierarchical Firewall Generator.
Hierarchical Firewalls (HF) are represented in a SecurityPolicy GCP resource.
"""
import copy
import re
from typing import Dict, Any
from absl import logging
from capirca.lib import gcp
from capirca.lib import nacaddr
class ExceededCostError(gcp.Error):
"""Raised when the total cost of a policy is above the maximum."""
class DifferentPolicyNameError(gcp.Error):
"""Raised when headers in the same policy have a different policy name."""
class ApiVersionSyntaxMap:
"""Defines the syntax changes between different API versions.
http://cloud/compute/docs/reference/rest/v1/firewallPolicies/addRule
http://cloud/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
"""
SYNTAX_MAP = {
'beta': {
'display_name': 'displayName',
'dest_ip_range': 'destIpRanges',
'src_ip_range': 'srcIpRanges',
'layer_4_config': 'layer4Configs'
},
'ga': {
'display_name': 'shortName',
'dest_ip_range': 'destIpRanges',
'src_ip_range': 'srcIpRanges',
'layer_4_config': 'layer4Configs'
}
}
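# For example, the only difference captured above is the display-name key:
#   ApiVersionSyntaxMap.SYNTAX_MAP['beta']['display_name']  -> 'displayName'
#   ApiVersionSyntaxMap.SYNTAX_MAP['ga']['display_name']    -> 'shortName'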
class Term(gcp.Term):
"""Used to create an individual term."""
ACTION_MAP = {'accept': 'allow', 'next': 'goto_next'}
_MAX_TERM_COMMENT_LENGTH = 64
_TARGET_RESOURCE_FORMAT = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}'
_TERM_ADDRESS_LIMIT = 256
_TERM_TARGET_RESOURCES_LIMIT = 256
_TERM_DESTINATION_PORTS_LIMIT = 256
def __init__(self,
term,
address_family='inet',
policy_inet_version='inet',
api_version='beta'):
super().__init__(term)
self.address_family = address_family
self.term = term
self.skip = False
self._ValidateTerm()
self.api_version = api_version
# This is to handle mixed, where the policy_inet_version is mixed,
# but the term inet version is either inet/inet6.
# This is only useful for term name and priority.
self.policy_inet_version = policy_inet_version
def _ValidateTerm(self):
if self.term.destination_tag or self.term.source_tag:
raise gcp.TermError('Hierarchical Firewall does not support tags')
if len(self.term.target_resources) > self._TERM_TARGET_RESOURCES_LIMIT:
raise gcp.TermError(
'Term: %s target_resources field contains %s resources. It should not contain more than "%s".'
% (self.term.name, str(len(
self.term.target_resources)), self._TERM_TARGET_RESOURCES_LIMIT))
for proj, vpc in self.term.target_resources:
if not gcp.IsProjectIDValid(proj):
raise gcp.TermError(
'Project ID "%s" must be 6 to 30 lowercase letters, digits, or hyphens.'
' It must start with a letter. Trailing hyphens are prohibited.' %
proj)
if not gcp.IsVPCNameValid(vpc):
raise gcp.TermError('VPC name "%s" must start with a lowercase letter '
'followed by up to 62 lowercase letters, numbers, '
'or hyphens, and cannot end with a hyphen.' % vpc)
if self.term.source_port:
raise gcp.TermError('Hierarchical firewall does not support source port '
'restrictions.')
if self.term.option:
raise gcp.TermError('Hierarchical firewall does not support the '
'TCP_ESTABLISHED option.')
if len(self.term.destination_port) > self._TERM_DESTINATION_PORTS_LIMIT:
raise gcp.TermError(
'Term: %s destination_port field contains %s ports. It should not contain more than "%s".'
% (self.term.name, str(len(
self.term.destination_port)), self._TERM_DESTINATION_PORTS_LIMIT))
# Since policy_inet_version is used to handle 'mixed'.
# We should error out if the individual term's inet version (address_family)
# is anything other than inet/inet6, since this should never happen
# naturally. Something has gone horribly wrong if you encounter this error.
if self.address_family == 'mixed':
raise gcp.TermError(
'Hierarchical firewall rule has incorrect inet_version for rule: %s' %
self.term.name)
def ConvertToDict(self, priority_index):
"""Converts term to dict representation of SecurityPolicy.Rule JSON format.
Takes all of the attributes associated with a term (match, action, etc) and
converts them into a dictionary which most closely represents
the SecurityPolicy.Rule JSON format.
Args:
priority_index: An integer priority value assigned to the term.
Returns:
A dict term.
"""
if self.skip:
return {}
rules = []
# Identify if this is inet6 processing for a term under a mixed policy.
mixed_policy_inet6_term = False
if self.policy_inet_version == 'mixed' and self.address_family == 'inet6':
mixed_policy_inet6_term = True
term_dict = {
'action': self.ACTION_MAP.get(self.term.action[0], self.term.action[0]),
'direction': self.term.direction,
'priority': priority_index
}
# Get the correct syntax for API versions.
src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['src_ip_range']
dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['dest_ip_range']
layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[
self.api_version]['layer_4_config']
target_resources = []
for proj, vpc in self.term.target_resources:
target_resources.append(self._TARGET_RESOURCE_FORMAT.format(proj, vpc))
if target_resources: # Only set when non-empty.
term_dict['targetResources'] = target_resources
term_dict['enableLogging'] = self._GetLoggingSetting()
# This combo provides ability to identify the rule.
term_name = self.term.name
if mixed_policy_inet6_term:
term_name = gcp.GetIpv6TermName(term_name)
raw_description = term_name + ': ' + ' '.join(self.term.comment)
term_dict['description'] = gcp.TruncateString(raw_description,
self._MAX_TERM_COMMENT_LENGTH)
filtered_protocols = []
for proto in self.term.protocol:
# ICMP filtering by inet_version
# Since each term has inet_version, 'mixed' is correctly processed here.
if proto == 'icmp' and self.address_family == 'inet6':
logging.warning(
'WARNING: Term %s is being rendered for inet6, ICMP '
'protocol will not be rendered.', self.term.name)
continue
if proto == 'icmpv6' and self.address_family == 'inet':
logging.warning(
'WARNING: Term %s is being rendered for inet, ICMPv6 '
'protocol will not be rendered.', self.term.name)
continue
if proto == 'igmp' and self.address_family == 'inet6':
logging.warning(
'WARNING: Term %s is being rendered for inet6, IGMP '
'protocol will not be rendered.', self.term.name)
continue
filtered_protocols.append(proto)
# If there is no protocol left after ICMP/IGMP filtering, drop this term.
# But only do this for terms that originally had protocols.
# Otherwise you end up dropping the default-deny.
if self.term.protocol and not filtered_protocols:
return {}
protocols_and_ports = []
if not self.term.protocol:
# Empty protocol list means any protocol, but any protocol in HF is
# represented as "all"
protocols_and_ports = [{'ipProtocol': 'all'}]
else:
for proto in filtered_protocols:
# If the protocol name is not supported, use the protocol number.
if proto not in self._ALLOW_PROTO_NAME:
proto = str(self.PROTO_MAP[proto])
logging.info('INFO: Term %s is being rendered using protocol number',
self.term.name)
proto_ports = {'ipProtocol': proto}
if self.term.destination_port:
ports = self._GetPorts()
if ports: # Only set when non-empty.
proto_ports['ports'] = ports
protocols_and_ports.append(proto_ports)
if self.api_version == 'ga':
term_dict['match'] = {layer_4_config: protocols_and_ports}
else:
term_dict['match'] = {'config': {layer_4_config: protocols_and_ports}}
# match needs a field called versionedExpr with value FIREWALL
# See documentation:
# https://cloud.google.com/compute/docs/reference/rest/beta/organizationSecurityPolicies/addRule
term_dict['match']['versionedExpr'] = 'FIREWALL'
ip_version = self.AF_MAP[self.address_family]
if ip_version == 4:
any_ip = [nacaddr.IP('0.0.0.0/0')]
else:
any_ip = [nacaddr.IPv6('::/0')]
if self.term.direction == 'EGRESS':
daddrs = self.term.GetAddressOfVersion('destination_address', ip_version)
# If the address got filtered out and is empty due to address family, we
# don't render the term. At this point of term processing, the direction
# has already been validated, so we can just log and return empty rule.
if self.term.destination_address and not daddrs:
logging.warning(
'WARNING: Term %s is not being rendered for %s, '
'because there are no addresses of that family.', self.term.name,
self.address_family)
return []
# This should only happen if there were no addresses set originally.
if not daddrs:
daddrs = any_ip
destination_address_chunks = [
daddrs[x:x + self._TERM_ADDRESS_LIMIT]
for x in range(0, len(daddrs), self._TERM_ADDRESS_LIMIT)
]
for daddr_chunk in destination_address_chunks:
rule = copy.deepcopy(term_dict)
if self.api_version == 'ga':
rule['match'][dest_ip_range] = [
daddr.with_prefixlen for daddr in daddr_chunk
]
else:
rule['match']['config'][dest_ip_range] = [
daddr.with_prefixlen for daddr in daddr_chunk
]
rule['priority'] = priority_index
rules.append(rule)
priority_index += 1
else:
saddrs = self.term.GetAddressOfVersion('source_address', ip_version)
# If the address got filtered out and is empty due to address family, we
# don't render the term. At this point of term processing, the direction
# has already been validated, so we can just log and return empty rule.
if self.term.source_address and not saddrs:
logging.warning(
'WARNING: Term %s is not being rendered for %s, '
'because there are no addresses of that family.', self.term.name,
self.address_family)
return []
# This should only happen if there were no addresses set originally.
if not saddrs:
saddrs = any_ip
source_address_chunks = [
saddrs[x:x + self._TERM_ADDRESS_LIMIT]
for x in range(0, len(saddrs), self._TERM_ADDRESS_LIMIT)
]
for saddr_chunk in source_address_chunks:
rule = copy.deepcopy(term_dict)
if self.api_version == 'ga':
rule['match'][src_ip_range] = [
saddr.with_prefixlen for saddr in saddr_chunk
]
else:
rule['match']['config'][src_ip_range] = [
saddr.with_prefixlen for saddr in saddr_chunk
]
rule['priority'] = priority_index
rules.append(rule)
priority_index += 1
return rules
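  # Sketch of one emitted beta-API rule, with illustrative values only:
  #   {'action': 'allow', 'direction': 'INGRESS', 'priority': 1,
  #    'enableLogging': False, 'description': 'term-name: comment text',
  #    'match': {'config': {'layer4Configs': [{'ipProtocol': 'tcp',
  #                                            'ports': ['22']}],
  #                         'srcIpRanges': ['10.0.0.0/8']},
  #              'versionedExpr': 'FIREWALL'}}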
def __str__(self):
return ''
class HierarchicalFirewall(gcp.GCP):
"""A GCP Hierarchical Firewall policy."""
SUFFIX = '.gcphf'
_ANY_IP = {
'inet': nacaddr.IP('0.0.0.0/0'),
'inet6': nacaddr.IP('::/0'),
}
_PLATFORM = 'gcp_hf'
_SUPPORTED_AF = frozenset(['inet', 'inet6', 'mixed'])
# Beta is the default API version. GA supports IPv6 (inet6/mixed).
_SUPPORTED_API_VERSION = frozenset(['beta', 'ga'])
_DEFAULT_MAXIMUM_COST = 100
def _BuildTokens(self):
"""Build supported tokens for platform.
Returns:
Tuple containing both supported tokens and sub tokens.
"""
supported_tokens, _ = super()._BuildTokens()
supported_tokens |= {
'destination_tag', 'expiration', 'source_tag', 'translated',
'target_resources', 'logging'
}
supported_tokens -= {
'destination_address_exclude', 'expiration', 'icmp_type',
'source_address_exclude', 'verbatim'
}
supported_sub_tokens = {'action': {'accept', 'deny', 'next'}}
return supported_tokens, supported_sub_tokens
def _TranslatePolicy(self, pol, exp_info):
"""Translates a Capirca policy into a HF-specific data structure.
Takes in a POL file, parses each term and populates the policy
dict. Each term in this list is a dictionary formatted according to
HF's rule API specification. Additionally, checks for its quota.
Args:
pol: A Policy() object representing a given POL file.
exp_info: An int that specifies number of weeks until policy expiry.
Raises:
ExceededCostError: Raised when the cost of a policy exceeds the default
maximum cost.
HeaderError: Raised when the header cannot be parsed or a header option is
invalid.
DifferentPolicyNameError: Raised when a header policy name differs from
other in the same policy.
"""
self.policies = []
policy = {
'rules': [],
'type': 'FIREWALL'
}
is_policy_modified = False
counter = 1
total_cost = 0
for header, terms in pol.filters:
if self._PLATFORM not in header.platforms:
continue
filter_options = header.FilterOptions(self._PLATFORM)
is_policy_modified = True
# Get term direction if set.
direction = 'INGRESS'
for i in self._GOOD_DIRECTION:
if i in filter_options:
direction = i
filter_options.remove(i)
# Get the address family if set.
address_family = 'inet'
for i in self._SUPPORTED_AF:
if i in filter_options:
address_family = i
filter_options.remove(i)
# Get the compute API version if set.
api_version = 'beta'
for i in self._SUPPORTED_API_VERSION:
if i in filter_options:
api_version = i
filter_options.remove(i)
break
# Find the default maximum cost of a policy, an integer, if specified.
max_cost = self._DEFAULT_MAXIMUM_COST
for opt in filter_options:
try:
max_cost = int(opt)
filter_options.remove(opt)
break
except ValueError:
continue
if max_cost > 65536:
raise gcp.HeaderError(
'Default maximum cost cannot be higher than 65536')
display_name = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['display_name']
# Get policy name and validate it to meet displayName requirements.
policy_name = header.FilterName(self._PLATFORM)
if not policy_name:
raise gcp.HeaderError(
'Policy name was not specified in header')
filter_options.remove(policy_name)
if len(policy_name) > 63:
raise gcp.HeaderError(
'Policy name "%s" is too long; the maximum number of characters '
'allowed is 63' % (policy_name))
if not bool(re.match('^[a-z]([-a-z0-9]*[a-z0-9])?$', policy_name)):
raise gcp.HeaderError(
'Invalid string for displayName, "%s"; the first character must be '
'a lowercase letter, and all following characters must be a dash, '
'lowercase letter, or digit, except the last character, which '
'cannot be a dash.' % (policy_name))
if display_name in policy and policy[display_name] != policy_name:
raise DifferentPolicyNameError(
'Policy names that are from the same policy are expected to be '
'equal, but %s is different to %s' %
(policy[display_name], policy_name))
policy[display_name] = policy_name
# If there are remaining options, they are unknown/unsupported options.
if filter_options:
raise gcp.HeaderError(
'Unsupported or unknown filter options %s in policy %s ' %
(str(filter_options), policy_name))
      # Handle mixed for each individual term as inet and inet6.
# inet/inet6 are treated the same.
term_address_families = []
if address_family == 'mixed':
term_address_families = ['inet', 'inet6']
else:
term_address_families = [address_family]
for term in terms:
if term.stateless_reply:
continue
if gcp.IsDefaultDeny(term):
if direction == 'EGRESS':
if address_family != 'mixed':
# Default deny also gets processed as part of terms processing.
# The name and priority get updated there.
term.destination_address = [self._ANY_IP[address_family]]
else:
term.destination_address = [
self._ANY_IP['inet'], self._ANY_IP['inet6']
]
else:
if address_family != 'mixed':
term.source_address = [self._ANY_IP[address_family]]
else:
term.source_address = [
self._ANY_IP['inet'], self._ANY_IP['inet6']
]
term.name = self.FixTermLength(term.name)
term.direction = direction
# Only generate the term if it's for the appropriate platform
if term.platform:
if self._PLATFORM not in term.platform:
continue
if term.platform_exclude:
if self._PLATFORM in term.platform_exclude:
continue
for term_af in term_address_families:
rules = Term(
term,
address_family=term_af,
policy_inet_version=address_family,
api_version=api_version).ConvertToDict(priority_index=counter)
if not rules:
continue
for dict_term in rules:
total_cost += GetRuleTupleCount(dict_term, api_version)
if total_cost > max_cost:
raise ExceededCostError(
'Policy cost (%d) for %s reached the '
'maximum (%d)' %
(total_cost, policy[display_name], max_cost))
policy['rules'].append(dict_term)
counter += len(rules)
self.policies.append(policy)
    # Do not render an empty rule list if no policies have been evaluated.
if not is_policy_modified:
self.policies = []
if total_cost > 0:
logging.info('Policy %s quota cost: %d',
policy[display_name], total_cost)
def GetRuleTupleCount(dict_term: Dict[str, Any], api_version):
"""Calculate the tuple count of a rule in its dictionary form.
Quota is charged based on how complex the rules are rather than simply
limiting the number of rules.
The cost of a rule is the number of distinct protocol:port combinations plus
the number of IP addresses plus the number of targets.
Note: The goal of this function is not to determine if a rule is valid, but
to calculate its tuple count regardless of correctness.
Args:
dict_term: A dict object.
api_version: A string indicating the api version.
Returns:
int: The tuple count of the rule.
"""
layer4_count = 0
layer_4_config = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['layer_4_config']
dest_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['dest_ip_range']
src_ip_range = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]['src_ip_range']
targets_count = len(dict_term.get('targetResources', []))
if api_version == 'ga':
config = dict_term.get('match', {})
else:
config = dict_term.get('match', {}).get('config', {})
addresses_count = len(
config.get(dest_ip_range, []) + config.get(src_ip_range, []))
for l4config in config.get(layer_4_config, []):
for _ in l4config.get('ports', []):
layer4_count += 1
if l4config.get('ipProtocol'):
      layer4_count += 1
return addresses_count + layer4_count + targets_count
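# Worked example for a hypothetical rule: a single 'tcp' layer-4 config with
# ports ['22', '443'] contributes 1 + 2 = 3, two source IP ranges add 2 and
# one target resource adds 1, so GetRuleTupleCount returns 6.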
| 36.392793 | 105 | 0.645608 | 18,476 | 0.914744 | 0 | 0 | 0 | 0 | 0 | 0 | 8,121 | 0.40207 |
82701369685666a9616bf5da9dc3d5c258f51242 | 196 | py | Python | src/caracara/_kits.py | LaudateCorpus1/caracara | 1200f56891617f15dd48616d7198c45a1e0cbe26 | [
"Unlicense"
]
| 1 | 2021-12-28T05:12:33.000Z | 2021-12-28T05:12:33.000Z | src/caracara/_kits.py | CrowdStrike/caracara | 0cfc12447ee299f69e23a5d5210eab5fce8e033e | [
"Unlicense"
]
| 1 | 2021-11-26T08:53:25.000Z | 2021-11-26T08:53:25.000Z | src/caracara/_kits.py | LaudateCorpus1/caracara | 1200f56891617f15dd48616d7198c45a1e0cbe26 | [
"Unlicense"
]
| 2 | 2022-02-22T07:32:20.000Z | 2022-02-26T03:05:57.000Z | """Kits class defines the available Toolboxes."""
from enum import Enum
class Kits(Enum):
"""Enumerator for toolbox class name lookups."""
HOSTS = "HostsToolbox"
RTR = "RTRToolbox"
| 19.6 | 52 | 0.683673 | 121 | 0.617347 | 0 | 0 | 0 | 0 | 0 | 0 | 123 | 0.627551 |
827202efc4734328b2680b6cfa71b26432695ccb | 559 | py | Python | hackerrank/algorithms/time_conversion.py | ontana/mystudy | 8158550da3cdbaaa81660be73f2dfad869aae466 | [
"MIT"
]
| null | null | null | hackerrank/algorithms/time_conversion.py | ontana/mystudy | 8158550da3cdbaaa81660be73f2dfad869aae466 | [
"MIT"
]
| null | null | null | hackerrank/algorithms/time_conversion.py | ontana/mystudy | 8158550da3cdbaaa81660be73f2dfad869aae466 | [
"MIT"
]
| null | null | null | #!/bin/python3
# https://www.hackerrank.com/challenges/time-conversion
import sys
def timeConversion(s):
# Complete this function
ar = s.split(':')
tail = ar[-1][-2:].lower()
addition_hours = 0
if (tail == 'pm' and ar[0] != '12') or (tail == 'am' and ar[0] == '12'):
addition_hours = 12
hh = int((int(ar[0]) + addition_hours) % 24)
new_time = "%02d:" % hh
new_time += ':'.join(ar[1:])
new_time = new_time[:-2]
return new_time
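# Worked examples (checked by hand against the logic above):
#   timeConversion("07:05:45PM") -> "19:05:45"
#   timeConversion("12:00:00AM") -> "00:00:00"
#   timeConversion("12:01:00PM") -> "12:01:00"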
s = input().strip()
result = timeConversion(s)
print(result) | 26.619048 | 77 | 0.563506 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 125 | 0.223614 |
8272095e27dabd69813e44dde70f50afb8a04d0c | 5,296 | py | Python | supplychainpy/demand/economic_order_quantity.py | supplybi/supplychainpy | 821ea21adb684abba7f9a7b26eaf218f44b45ced | [
"BSD-3-Clause"
]
| 5 | 2018-09-07T09:00:31.000Z | 2022-01-02T05:20:46.000Z | supplychainpy/demand/economic_order_quantity.py | supplybi/supplychainpy | 821ea21adb684abba7f9a7b26eaf218f44b45ced | [
"BSD-3-Clause"
]
| null | null | null | supplychainpy/demand/economic_order_quantity.py | supplybi/supplychainpy | 821ea21adb684abba7f9a7b26eaf218f44b45ced | [
"BSD-3-Clause"
]
| 3 | 2018-02-14T14:04:45.000Z | 2021-04-15T04:31:42.000Z | from decimal import Decimal, getcontext, ROUND_HALF_UP
from supplychainpy.demand import analyse_uncertain_demand
from supplychainpy.demand.eoq import minimum_variable_cost, economic_order_quantity
class EconomicOrderQuantity(analyse_uncertain_demand.UncertainDemand):
__economic_order_quantity = Decimal(0)
analyse_uncertain_demand.UncertainDemand.__reorder_cost = Decimal(0)
__holding_cost = Decimal(0)
__min_variable_cost = Decimal(0)
__reorder_quantity = Decimal(0)
__unit_cost = 0.00
@property
def minimum_variable_cost(self) -> Decimal:
return self.__min_variable_cost
@property
def economic_order_quantity(self) -> Decimal:
return self.__economic_order_quantity
def __init__(self, reorder_quantity: float, holding_cost: float, reorder_cost: float, average_orders: float,
unit_cost: float, total_orders: float):
getcontext().prec = 2
getcontext().rounding = ROUND_HALF_UP
self.__reorder_quantity = Decimal(reorder_quantity)
self.__holding_cost = holding_cost
self.__reorder_cost = reorder_cost
self.__unit_cost = unit_cost
self.__min_variable_cost = minimum_variable_cost(total_orders, reorder_cost, unit_cost, holding_cost)
        self.__economic_order_quantity = economic_order_quantity(total_orders, reorder_cost, unit_cost, holding_cost, reorder_quantity)
def _minimum_variable_cost(self, average_orders, reorder_cost, unit_cost) -> Decimal:
getcontext().prec = 2
getcontext().rounding = ROUND_HALF_UP
holding_cost = self.__holding_cost
step = float(0.2)
previous_eoq_variable_cost = Decimal(0)
order_factor = float(0.002)
vc = 0.00
counter = 0
order_size = 0
while previous_eoq_variable_cost >= Decimal(vc):
previous_eoq_variable_cost = Decimal(vc)
            # variable cost = (reorder cost * average demand) / order size + (unit cost * order size * holding cost)
if counter < 1:
order_size = self._order_size(average_orders=average_orders, reorder_cost=reorder_cost,
unit_cost=unit_cost, holding_cost=holding_cost,
order_factor=order_factor)
vc = self._variable_cost(float(average_orders), float(reorder_cost), float(order_size), float(unit_cost), float(holding_cost))
order_size += int(float(order_size) * step)
if counter < 1:
previous_eoq_variable_cost = Decimal(vc)
while counter == 0:
counter += 1
return Decimal(previous_eoq_variable_cost)
# probably missing the addition
def _economic_order_quantity(self, average_orders: float, reorder_cost: float, unit_cost: float) -> Decimal:
getcontext().prec = 2
getcontext().rounding = ROUND_HALF_UP
holding_cost = self.__holding_cost
reorder_quantity = int(self.__reorder_quantity)
step = float(0.2)
previous_eoq_variable_cost = Decimal(0)
        order_factor = float(0.002)
        vc = 0.00
        counter = 0
        order_size = 0
while previous_eoq_variable_cost >= Decimal(vc):
previous_eoq_variable_cost = Decimal(vc)
            # variable cost = (reorder cost * average demand) / order size + (unit cost * order size * holding cost)
if counter < 1:
order_size = self._order_size(average_orders=average_orders, reorder_cost=reorder_cost,
unit_cost=unit_cost, holding_cost=holding_cost,
order_factor=order_factor)
vc = self._variable_cost(float(average_orders), float(reorder_cost), float(order_size), float(unit_cost), float(holding_cost))
order_size += int(float(order_size) * step)
if counter < 1:
previous_eoq_variable_cost = Decimal(vc)
while counter == 0:
counter += 1
return Decimal(order_size)
@staticmethod
def _variable_cost(average_orders: float, reorder_cost: float, order_size: float, unit_cost: float,
holding_cost: float) -> float:
rc = lambda x, y, z: (x * y) / z
hc = lambda x, y, z: x * y * z
vc = rc(float(average_orders), float(reorder_cost), float(order_size)) + hc(float(unit_cost),
float(order_size),
float(holding_cost))
return vc
@staticmethod
def _order_size(average_orders: float, reorder_cost: float, unit_cost: float, holding_cost: float,
order_factor: float) -> float:
order_size_calc = lambda x, y, z, i, j: int(
(float(x) * float(y) * 2) / (float(z) * float(i)) * float(j) * float(0.5))
order_size = order_size_calc(average_orders, reorder_cost, unit_cost, holding_cost, order_factor)
return order_size
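# Minimal usage sketch (all numbers are illustrative, not from any dataset):
#   eoq = EconomicOrderQuantity(reorder_quantity=75, holding_cost=0.25,
#                               reorder_cost=45.0, average_orders=50.0,
#                               unit_cost=30.0, total_orders=600.0)
#   eoq.economic_order_quantity  # order size computed by the eoq module
#   eoq.minimum_variable_cost    # minimum total variable cost as a Decimal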
| 42.368 | 138 | 0.624434 | 5,096 | 0.962236 | 0 | 0 | 1,224 | 0.231118 | 0 | 0 | 207 | 0.039086 |
8273cb7b96dbba2d80d2ff7f28ed04bc72f420f5 | 2,182 | py | Python | tests/test_arraymisc.py | XinYangDong/mmcv-0.2.10 | 527388ea7c5daf7149a88b3dc833373d5a5fb850 | [
"Apache-2.0"
]
| 1 | 2019-04-04T07:07:55.000Z | 2019-04-04T07:07:55.000Z | tests/test_arraymisc.py | XinYangDong/mmcv-0.2.10 | 527388ea7c5daf7149a88b3dc833373d5a5fb850 | [
"Apache-2.0"
]
| null | null | null | tests/test_arraymisc.py | XinYangDong/mmcv-0.2.10 | 527388ea7c5daf7149a88b3dc833373d5a5fb850 | [
"Apache-2.0"
]
| null | null | null | from __future__ import division
import mmcv
import numpy as np
import pytest
def test_quantize():
arr = np.random.randn(10, 10)
levels = 20
qarr = mmcv.quantize(arr, -1, 1, levels)
assert qarr.shape == arr.shape
assert qarr.dtype == np.dtype('int64')
for i in range(arr.shape[0]):
for j in range(arr.shape[1]):
ref = min(levels - 1,
int(np.floor(10 * (1 + max(min(arr[i, j], 1), -1)))))
assert qarr[i, j] == ref
qarr = mmcv.quantize(arr, -1, 1, 20, dtype=np.uint8)
assert qarr.shape == arr.shape
assert qarr.dtype == np.dtype('uint8')
with pytest.raises(ValueError):
mmcv.quantize(arr, -1, 1, levels=0)
with pytest.raises(ValueError):
mmcv.quantize(arr, -1, 1, levels=10.0)
with pytest.raises(ValueError):
mmcv.quantize(arr, 2, 1, levels)
def test_dequantize():
levels = 20
qarr = np.random.randint(levels, size=(10, 10))
arr = mmcv.dequantize(qarr, -1, 1, levels)
assert arr.shape == qarr.shape
assert arr.dtype == np.dtype('float64')
for i in range(qarr.shape[0]):
for j in range(qarr.shape[1]):
assert arr[i, j] == (qarr[i, j] + 0.5) / 10 - 1
arr = mmcv.dequantize(qarr, -1, 1, levels, dtype=np.float32)
assert arr.shape == qarr.shape
assert arr.dtype == np.dtype('float32')
with pytest.raises(ValueError):
mmcv.dequantize(arr, -1, 1, levels=0)
with pytest.raises(ValueError):
mmcv.dequantize(arr, -1, 1, levels=10.0)
with pytest.raises(ValueError):
mmcv.dequantize(arr, 2, 1, levels)
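# Worked round trip for the 20-level tests above: with the range [-1, 1] a
# value of 0.0 lands in bin 10, and dequantizing gives (10 + 0.5) / 10 - 1,
# i.e. 0.05, which is within half a bin width of the original value.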
def test_joint():
arr = np.random.randn(100, 100)
levels = 1000
qarr = mmcv.quantize(arr, -1, 1, levels)
recover = mmcv.dequantize(qarr, -1, 1, levels)
assert np.abs(recover[arr < -1] + 0.999).max() < 1e-6
assert np.abs(recover[arr > 1] - 0.999).max() < 1e-6
assert np.abs((recover - arr)[(arr >= -1) & (arr <= 1)]).max() <= 1e-3
arr = np.clip(np.random.randn(100) / 1000, -0.01, 0.01)
levels = 99
qarr = mmcv.quantize(arr, -1, 1, levels)
recover = mmcv.dequantize(qarr, -1, 1, levels)
assert np.all(recover == 0)
| 31.171429 | 75 | 0.593951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.014665 |
82740abbd8d90d8a1d79663896c17644f40508b8 | 2,373 | py | Python | train.py | jmribeiro/NumPyNeuralNetworkFromScratch | 19c4360ef4eec91cd17142ced9fde35773d795b5 | [
"Apache-2.0"
]
| 1 | 2020-07-06T18:15:34.000Z | 2020-07-06T18:15:34.000Z | train.py | jmribeiro/NumPyNeuralNetworkFromScratch | 19c4360ef4eec91cd17142ced9fde35773d795b5 | [
"Apache-2.0"
]
| null | null | null | train.py | jmribeiro/NumPyNeuralNetworkFromScratch | 19c4360ef4eec91cd17142ced9fde35773d795b5 | [
"Apache-2.0"
]
| null | null | null | #!/usr/bin/env python
"""
Author: João Ribeiro
"""
import argparse
import numpy as np
from model import FeedForwardNetwork
from utils import load_ocr_dataset, plot
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Hyperparams
parser.add_argument('-epochs', default=20, type=int, help="Number of training epochs.")
parser.add_argument('-num_layers', default=2, type=int, help="Number of hidden layers.")
parser.add_argument('-hidden_size', default=64, type=int, help="Number of units per hidden layer.")
parser.add_argument('-activation', default="relu", type=str, help="Activation function for the hidden layers.")
parser.add_argument('-learning_rate', default=0.1, type=float, help="Learning rate for SGD optimizer.")
parser.add_argument('-l2_penalty', default=0.0, type=float, help="L2 penalty for SGD optimizer.")
parser.add_argument('-batch_size', default=32, type=int, help="Number of datapoints per SGD step.")
# Misc.
parser.add_argument('-data', default='ocr_dataset/letter.data', help="Path to letter.data OCR dataset.")
parser.add_argument('-save_plot', action="store_true", help="Whether or not to save the generated accuracies plot.")
opt = parser.parse_args()
# ############ #
# Load Dataset #
# ############ #
print("Loading OCR Dataset", end="", flush=True)
data = load_ocr_dataset(opt.data)
X_train, y_train = data["train"]
X_val, y_val = data["dev"]
X_test, y_test = data["test"]
num_features = X_train.shape[1]
num_classes = np.unique(y_train).size
print(" [Done]", flush=True)
# ########### #
# Setup Model #
# ########### #
print("Deploying model", end="", flush=True)
model = FeedForwardNetwork(
num_features, num_classes,
opt.num_layers, opt.hidden_size, opt.activation,
opt.learning_rate, opt.l2_penalty, opt.batch_size
)
print(" [Done]", flush=True)
# ################ #
# Train & Evaluate #
# ################ #
print("Training model", flush=True)
validation_accuracies, final_test_accuracy = model.fit(X_train, y_train, X_val, y_val, X_test, y_test, opt.epochs)
# #### #
# Plot #
# #### #
print("Plotting", end="", flush=True)
plot(opt.epochs, validation_accuracies, opt.save_plot)
print(" [Done]\nGoodbye.", flush=True)
| 31.64 | 120 | 0.645175 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 858 | 0.361415 |
8274f0f41fe5e911ea0767aa342d89364c6cbf67 | 3,935 | py | Python | varify/samples/views.py | chop-dbhi/varify | 5dc721e49ed9bd3582f4b117785fdd1a8b6ba777 | [
"BSD-2-Clause"
]
| 6 | 2015-01-16T14:35:29.000Z | 2017-06-18T05:56:15.000Z | varify/samples/views.py | solvebio/varify | 5dc721e49ed9bd3582f4b117785fdd1a8b6ba777 | [
"BSD-2-Clause"
]
| null | null | null | varify/samples/views.py | solvebio/varify | 5dc721e49ed9bd3582f4b117785fdd1a8b6ba777 | [
"BSD-2-Clause"
]
| 3 | 2015-05-27T15:03:17.000Z | 2020-03-11T08:42:46.000Z | from guardian.shortcuts import get_objects_for_user
from django.http import Http404, HttpResponseRedirect
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from vdw.samples.models import Sample, Project, Batch, Cohort
from .forms import CohortForm
def registry(request):
projects = get_objects_for_user(request.user, 'samples.view_project')
batch_count = Count('batches', distinct=True)
sample_count = Count('samples', distinct=True)
# Distinct count on batch necessary since the join inflates the numbers
projects = projects.annotate(sample_count=sample_count,
batch_count=batch_count)
staged_samples = \
Sample.objects.filter(published=False, project__in=projects) \
.select_related('batch', 'project')
return render(request, 'samples/registry.html', {
'projects': list(projects),
'staged_samples': list(staged_samples),
})
def project_registry(request, pk):
projects = get_objects_for_user(request.user, 'samples.view_project')
batch_count = Count('batches', distinct=True)
sample_count = Count('samples', distinct=True)
# Distinct count on batch necessary since the join inflates the numbers
try:
project = projects.annotate(sample_count=sample_count,
batch_count=batch_count).get(pk=pk)
except Project.DoesNotExist:
raise Http404
batches = Batch.objects.filter(project=project) \
.annotate(sample_count=Count('samples'))
return render(request, 'samples/project.html', {
'project': project,
'batches': batches,
})
def batch_registry(request, pk):
projects = get_objects_for_user(request.user, 'samples.view_project')
sample_count = Count('samples', distinct=True)
try:
batch = Batch.objects.annotate(sample_count=sample_count) \
.filter(project__in=projects).select_related('project').get(pk=pk)
except Batch.DoesNotExist:
raise Http404
samples = Sample.objects.filter(batch=batch)
return render(request, 'samples/batch.html', {
'batch': batch,
'project': batch.project,
'samples': samples,
})
def sample_registry(request, pk):
projects = get_objects_for_user(request.user, 'samples.view_project')
try:
sample = Sample.objects.filter(project__in=projects) \
.select_related('batch', 'project').get(pk=pk)
except Sample.DoesNotExist:
raise Http404
return render(request, 'samples/sample.html', {
'sample': sample,
'batch': sample.batch,
'project': sample.project,
})
def cohort_form(request, pk=None):
if request.user.has_perm('samples.change_cohort'):
cohorts = Cohort.objects.all()
cohort = get_object_or_404(Cohort, pk=pk) if pk else None
else:
cohorts = Cohort.objects.filter(user=request.user)
cohort = \
get_object_or_404(Cohort, pk=pk, user=request.user) if pk else None
# Apply permissions..
samples = Sample.objects.all()
if request.method == 'POST':
form = CohortForm(samples, data=request.POST, instance=cohort,
initial={'user': request.user})
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('cohorts'))
else:
form = CohortForm(samples, instance=cohort)
return render(request, 'samples/cohort-form.html', {
'form': form,
'cohort': cohort,
'cohorts': cohorts,
})
def cohort_delete(request, pk):
if request.user.has_perm('samples.change_cohort'):
cohort = get_object_or_404(Cohort, pk=pk)
else:
cohort = get_object_or_404(Cohort, pk=pk, user=request.user)
cohort.delete()
return HttpResponseRedirect(reverse('cohorts'))
| 31.48 | 79 | 0.665565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 650 | 0.165184 |
8275090a0a26b9725fd053645507a75767690bfa | 6,656 | py | Python | dumbai.py | CapKenway/dumbai | affa89663c980177d6c1e0fef9bda7978032da4d | [
"Unlicense"
]
| null | null | null | dumbai.py | CapKenway/dumbai | affa89663c980177d6c1e0fef9bda7978032da4d | [
"Unlicense"
]
| null | null | null | dumbai.py | CapKenway/dumbai | affa89663c980177d6c1e0fef9bda7978032da4d | [
"Unlicense"
]
| null | null | null | import sys
from pprint import pprint
import os
#--------------------------------------------------------------------------#
class CsPP():
def __init__(self, domains):
self.domains = domains
self.maindict = {}
self.keyitems = []
pass
def check_if(self):
emptylist = []
for domainkey in list(self.domains.keys()):
if not domainkey in list(self.maindict.keys()):
emptylist.append(domainkey)
for listitem in emptylist:
self.maindict[listitem] = list(self.domains.values())[1]
pass
def not_belonging(self, key, lister):
templist = []
maindomain = self.domains[key]
for item in maindomain:
if not item in lister:
templist.append(item)
self.maindict[key] = templist
pass
def belonging(self, key, lister):
self.maindict.__setitem__(key, lister)
pass
def get_one_up(self, values):
self.keyitems.insert(self.keyitems.index(values[0]), values[1])
def get_one_down(self, values):
self.keyitems.reverse()
self.keyitems.insert(self.keyitems.index(values[1]), values[0])
self.keyitems.reverse()
def not_working_together(self, first, second):
firstlist = self.maindict[first]
secondlist = self.maindict[second]
for item in firstlist:
if item in secondlist:
firstlist.remove(item)
self.maindict[first] = firstlist
def backtrack(self, maindict, what_want = '', conditions = [], starter = ''):
csp_back = CsPP_Backend(domains = maindict, what_want = what_want, conditions = conditions, starter = starter)
return csp_back._backtrack()
pass
def left_to_right(self, maindict, path):
to_do = []
pathkeys = list(path.keys())
pathvalues = list(path.values())
mainkeys = list(maindict.keys())
mainvalues = list(maindict.values())
keylist = []
for key, values in zip(pathkeys, pathvalues):
keylist.append(key)
if len(values) > 1:
to_do.append(values[1:])
if len(to_do) != 0:
for i in range(0, len(to_do)):
popped = to_do.pop(i)
keylist.append(popped)
for item in keylist:
if keylist.count(item) > 1:
keylist.remove(item)
if type(item) == list:
keylist.remove(item)
valuestodict = []
for key in keylist:
if type(key) != list:
valuestodict.append(maindict[key])
else:
keylist.remove(key)
returndict = dict((key, values) for key, values in zip(keylist, valuestodict))
forprune = CsPP_Backend()
pruned = forprune._prune(returndict)
return pruned
def right_to_left(self, maindict, path):
tempkeys = list(path.keys())
tempvalues = list(path.values())
tempvalues.reverse()
tempkeys.reverse()
i = 0
flag = False
templist = []
removeditems = []
indexes = []
i = 0
templist.append(tempkeys[0])
for key in tempkeys:
for n in range(i, len(tempvalues)):
flag = False
for u in range(0, len(tempvalues[n])):
if len(tempvalues)!= 0 and key == tempvalues[n][u]:
i = n
templist.append(tempkeys[n])
flag = True
break
if flag:
break
for item in templist:
if templist.count(item) > 1:
templist.remove(item)
dictvalues = []
for tempval in templist:
dictvalues.append(maindict[tempval])
availdict = dict((key, val) for key, val in zip(templist, dictvalues))
removedvalues = []
for key in list(maindict.keys()):
if not key in list(availdict.keys()):
removeditems.append(key)
removedvalues.append(maindict[key])
removeddict = dict((key, val) for key, val in zip(removeditems, removedvalues))
forprune = CsPP_Backend()
pruned = forprune._prune(availdict)
for key in list(removeddict.keys()):
pruned[key] = []
return pruned
pass
#--------------------------------------------------------------------------#
class CsPP_Backend():
def __init__(self, *args, **kwargs):
self.domains = kwargs.get('domains')
self.conditions = kwargs.get('conditions')
self.what_want = kwargs.get('what_want')
self.starter = kwargs.get('starter')
pass
def _backtrack(self):
if self.what_want == 'mrv':
return self._highest_constraint(self.domains, self.starter)
elif self.what_want == 'lcv':
return self._minimum_constraint(self.domains, self.starter)
else:
return self.domains
def _minimum_constraint(self, domains, starter = ''):
low_constraint = None
if starter != '':
yet_lowest = len(domains[starter])
else:
yet_lowest = len(domains[list(domains.keys())[0]])
for key, val in zip(list(domains.keys()), list(domains.values())):
if yet_lowest > len(val):
yet_lowest = len(val)
low_constraint = key
return low_constraint
pass
def _highest_constraint(self, domains, starter = ''):
high_constraint = None
if starter != '':
yet_highest = len(domains[starter])
else:
yet_highest = len(domains[list(domains.keys())[0]])
for key, val in zip(list(domains.keys()), list(domains.values())):
if yet_highest < len(val):
yet_highest = len(val)
high_constraint = key
return high_constraint
pass
def _prune(self, domains):
emptydict = {}
pruneditems = []
for key, value in zip(list(domains.keys()), list(domains.values())):
for val in value:
if val in pruneditems:
continue
emptydict.__setitem__(key, val)
pruneditems.append(val)
break
for key in list(domains.keys()):
if not key in list(emptydict.keys()):
emptydict.__setitem__(key, [])
return emptydict
#--------------------------------------------------------------------------# | 35.404255 | 118 | 0.526292 | 6,371 | 0.957181 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.04372 |
82758db2cce35fc271964b68ef46df023933f752 | 6,079 | py | Python | napari_plugin_test/widgets/warp_image_volume.py | krentzd/napari-test | 97673f9408eab7d2cc01f4562a3deeeee7fd8bcb | [
"MIT"
]
| null | null | null | napari_plugin_test/widgets/warp_image_volume.py | krentzd/napari-test | 97673f9408eab7d2cc01f4562a3deeeee7fd8bcb | [
"MIT"
]
| null | null | null | napari_plugin_test/widgets/warp_image_volume.py | krentzd/napari-test | 97673f9408eab7d2cc01f4562a3deeeee7fd8bcb | [
"MIT"
]
| null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Adapted from: https://github.com/zpincus/celltool/blob/master/celltool/numerics/image_warp.py
from scipy import ndimage
import numpy as np
from probreg import bcpd
import tifffile
import matplotlib.pyplot as plt
import napari
from magicgui import magic_factory, widgets
from napari.types import PointsData, ImageData
from typing_extensions import Annotated
def _make_inverse_warp(from_points, to_points, output_region, approximate_grid):
x_min, y_min, z_min, x_max, y_max, z_max = output_region
if approximate_grid is None: approximate_grid = 1
x_steps = (x_max - x_min) // approximate_grid
y_steps = (y_max - y_min) // approximate_grid
z_steps = (z_max - z_min) // approximate_grid
x, y, z = np.mgrid[x_min:x_max:x_steps*1j, y_min:y_max:y_steps*1j, z_min:z_max:z_steps*1j]
transform = _make_warp(to_points, from_points, x, y, z)
if approximate_grid != 1:
# linearly interpolate the zoomed transform grid
new_x, new_y, new_z = np.mgrid[x_min:x_max+1, y_min:y_max+1, z_min:z_max+1]
x_fracs, x_indices = np.modf((x_steps-1)*(new_x-x_min)/float(x_max-x_min))
y_fracs, y_indices = np.modf((y_steps-1)*(new_y-y_min)/float(y_max-y_min))
z_fracs, z_indices = np.modf((z_steps-1)*(new_z-z_min)/float(z_max-z_min))
x_indices = x_indices.astype(int)
y_indices = y_indices.astype(int)
z_indices = z_indices.astype(int)
x1 = 1 - x_fracs
y1 = 1 - y_fracs
z1 = 1 - z_fracs
ix1 = (x_indices+1).clip(0, x_steps-1)
iy1 = (y_indices+1).clip(0, y_steps-1)
iz1 = (z_indices+1).clip(0, z_steps-1)
transform_x = _trilinear_interpolation(0, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform_y = _trilinear_interpolation(1, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform_z = _trilinear_interpolation(2, transform, x1, y1, z1, x_fracs, y_fracs, z_fracs, x_indices, y_indices, z_indices, ix1, iy1, iz1)
transform = [transform_x, transform_y, transform_z]
return transform
def _trilinear_interpolation(d, t, x0, y0, z0, x1, y1, z1, ix0, iy0, iz0, ix1, iy1, iz1):
t000 = t[d][(ix0, iy0, iz0)]
t001 = t[d][(ix0, iy0, iz1)]
t010 = t[d][(ix0, iy1, iz0)]
t100 = t[d][(ix1, iy0, iz0)]
t011 = t[d][(ix0, iy1, iz1)]
t101 = t[d][(ix1, iy0, iz1)]
t110 = t[d][(ix1, iy1, iz0)]
t111 = t[d][(ix1, iy1, iz1)]
return t000*x0*y0*z0 + t001*x0*y0*z1 + t010*x0*y1*z0 + t100*x1*y0*z0 + t011*x0*y1*z1 + t101*x1*y0*z1 + t110*x1*y1*z0 + t111*x1*y1*z1
def _U(x):
_small = 1e-100
return (x**2) * np.where(x<_small, 0, np.log(x))
def _interpoint_distances(points):
xd = np.subtract.outer(points[:,0], points[:,0])
yd = np.subtract.outer(points[:,1], points[:,1])
zd = np.subtract.outer(points[:,2], points[:,2])
return np.sqrt(xd**2 + yd**2 + zd**2)
def _make_L_matrix(points):
n = len(points)
K = _U(_interpoint_distances(points))
P = np.ones((n, 4))
P[:,1:] = points
O = np.zeros((4, 4))
L = np.asarray(np.bmat([[K, P],[P.transpose(), O]]))
return L
def _calculate_f(coeffs, points, x, y, z):
w = coeffs[:-3]
a1, ax, ay, az = coeffs[-4:]
summation = np.zeros(x.shape)
for wi, Pi in zip(w, points):
summation += wi * _U(np.sqrt((x-Pi[0])**2 + (y-Pi[1])**2 + (z-Pi[2])**2))
return a1 + ax*x + ay*y +az*z + summation
def _make_warp(from_points, to_points, x_vals, y_vals, z_vals):
from_points, to_points = np.asarray(from_points), np.asarray(to_points)
err = np.seterr(divide='ignore')
L = _make_L_matrix(from_points)
V = np.resize(to_points, (len(to_points)+4, 3))
V[-3:, :] = 0
coeffs = np.dot(np.linalg.pinv(L), V)
print('L, V, coeffs', L.shape, V.shape, coeffs.shape)
x_warp = _calculate_f(coeffs[:,0], from_points, x_vals, y_vals, z_vals)
y_warp = _calculate_f(coeffs[:,1], from_points, x_vals, y_vals, z_vals)
z_warp = _calculate_f(coeffs[:,2], from_points, x_vals, y_vals, z_vals)
np.seterr(**err)
return [x_warp, y_warp, z_warp]
@magic_factory
def make_image_warping(
viewer: "napari.viewer.Viewer",
moving_image: ImageData,
fixed_image: ImageData,
moving_points: PointsData,
transformed_points: PointsData,
interpolation_order: Annotated[int, {"min": 0, "max": 10, "step": 1}]=1,
approximate_grid: Annotated[int, {"min": 1, "max": 10, "step": 1}]=1
):
from napari.qt import thread_worker
pbar = widgets.ProgressBar()
pbar.range = (0, 0) # unknown duration
make_image_warping.insert(0, pbar) # add progress bar to the top of widget
# this function will be called after we return
def _add_data(return_value, self=make_image_warping):
data, kwargs = return_value
viewer.add_image(data, **kwargs)
self.pop(0).hide() # remove the progress bar
@thread_worker(connect={"returned": _add_data})
def _warp_images(from_points, to_points, image, output_region, interpolation_order=5, approximate_grid=10):
print('Entered warp_images')
transform = _make_inverse_warp(from_points, to_points, output_region, approximate_grid)
warped_image = ndimage.map_coordinates(np.asarray(image), transform, order=interpolation_order)
kwargs = dict(
name='warped_image'
)
return (warped_image, kwargs)
print('Warping image volume')
assert len(moving_points) == len(transformed_points), 'Moving and transformed points must be of same length.'
output_region = (0, 0, 0, int(fixed_image.shape[0] / 1), int(fixed_image.shape[1] / 1), int(fixed_image.shape[2] / 1))
print(output_region)
_warp_images(from_points=moving_points,
to_points=transformed_points,
image=moving_image,
output_region=output_region,
interpolation_order=interpolation_order,
approximate_grid=approximate_grid)
| 40.798658 | 147 | 0.659154 | 0 | 0 | 0 | 0 | 1,874 | 0.308274 | 0 | 0 | 506 | 0.083237 |
8275c53b65c622b02c7389ff31c415c23a469b50 | 6,235 | py | Python | main/tests/test_celery.py | OpenHumans/oh-23andme-source | 2580b0177b9caad079826305c7455ea7fb116a76 | [
"MIT"
]
| null | null | null | main/tests/test_celery.py | OpenHumans/oh-23andme-source | 2580b0177b9caad079826305c7455ea7fb116a76 | [
"MIT"
]
| 7 | 2018-03-26T02:09:43.000Z | 2021-06-10T17:42:26.000Z | main/tests/test_celery.py | OpenHumans/oh-23andme-source | 2580b0177b9caad079826305c7455ea7fb116a76 | [
"MIT"
]
| 2 | 2018-03-29T12:51:28.000Z | 2018-12-27T18:44:35.000Z | from django.test import TestCase, RequestFactory
import vcr
from django.conf import settings
from django.core.management import call_command
from open_humans.models import OpenHumansMember
from main.celery import read_reference, clean_raw_23andme
from main.celery_helper import vcf_header
import os
import tempfile
import requests
import requests_mock
from main.celery import process_file
class ParsingTestCase(TestCase):
"""
test that files are parsed correctly
"""
def setUp(self):
"""
Set up the app for following tests
"""
settings.DEBUG = True
call_command('init_proj_config')
self.factory = RequestFactory()
data = {"access_token": 'foo',
"refresh_token": 'bar',
"expires_in": 36000}
self.oh_member = OpenHumansMember.create(oh_id='12345678',
data=data)
self.oh_member.save()
self.user = self.oh_member.user
self.user.set_password('foobar')
self.user.save()
def test_read_reference(self):
"""
Test function to read the reference file.
"""
REF_23ANDME_FILE = os.path.join(os.path.dirname(__file__),
'fixtures/test_reference.txt')
ref = read_reference(REF_23ANDME_FILE)
self.assertEqual(ref, {'1': {'82154': 'A', '752566': 'G'}})
def test_vcf_header(self):
"""
Test function to create a VCF header
"""
hd = vcf_header(
source='23andme',
reference='http://example.com',
format_info=['<ID=GT,Number=1,Type=String,Description="GT">'])
self.assertEqual(len(hd), 6)
expected_header_fields = ["##fileformat",
"##fileDate",
'##source',
'##reference',
'##FORMAT',
'#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER' +
'\tINFO\tFORMAT\t23ANDME_DATA']
self.assertEqual([i.split("=")[0] for i in hd], expected_header_fields)
def test_23andme_cleaning(self):
"""
Test that cleanup works as expected
"""
with requests_mock.Mocker() as m:
get_url = 'http://example.com/23andme_file.txt'
closed_input_file = os.path.join(os.path.dirname(__file__),
'fixtures/23andme_invalid.txt')
            with open(closed_input_file, "rb") as fhandle:
                content = fhandle.read()
m.register_uri('GET',
get_url,
content=content,
status_code=200)
tf_in = tempfile.NamedTemporaryFile(suffix=".txt")
tf_in.write(requests.get(get_url).content)
tf_in.flush()
cleaned_input = clean_raw_23andme(tf_in)
cleaned_input.seek(0)
lines = cleaned_input.read()
self.assertEqual(lines.find('John Doe'), -1)
self.assertNotEqual(lines.find('data file generated'), -1)
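    # The tests below replay pre-recorded HTTP interactions from the fixture
    # cassettes (record_mode='none' tells vcrpy to play back only and never
    # record), so process_file is exercised without contacting real servers;
    # the download URLs and AWS signatures in the test data are dummies.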
@vcr.use_cassette('main/tests/fixtures/process_file.yaml',
record_mode='none')
def test_process_file(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_bz2.yaml',
record_mode='none')
def test_process_file_bz2(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt.bz2',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt.bz2?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_gz.yaml',
record_mode='none')
def test_process_file_gz(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.txt.gz',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.txt.gz?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
@vcr.use_cassette('main/tests/fixtures/process_file_zip.yaml',
record_mode='none')
def test_process_file_zip(self):
"""
test process_file celery task
"""
member = {"project_member_id": "1234"}
dfile = {'id': 34567,
'basename': '23andme_valid.zip',
'created': '2018-03-30T00:09:36.563486Z',
'download_url': 'https://myawslink.com/member-files/direct-sharing-1337/1234/23andme_valid.zip?Signature=nope&Expires=1522390374&AWSAccessKeyId=nope',
'metadata': {'tags': ['bar'], 'description': 'foo'},
'source': 'direct-sharing-1337'}
process_file(dfile, 'myaccesstoken', member, dfile['metadata'])
| 40.225806 | 171 | 0.563432 | 5,843 | 0.937129 | 0 | 0 | 3,008 | 0.482438 | 0 | 0 | 2,517 | 0.403689 |
82763f4b601df981afd52e2acd04c501b896a5f2 | 168 | py | Python | apps/tracking/admin.py | Codeidea/budget-tracker | e07e8d6bb49b0a3de428942a57f090912c191d3e | [
"MIT"
]
| null | null | null | apps/tracking/admin.py | Codeidea/budget-tracker | e07e8d6bb49b0a3de428942a57f090912c191d3e | [
"MIT"
]
| null | null | null | apps/tracking/admin.py | Codeidea/budget-tracker | e07e8d6bb49b0a3de428942a57f090912c191d3e | [
"MIT"
]
| null | null | null | from django.contrib import admin
from .models import LogCategory, BudgetLog
# Register your models here.
admin.site.register(LogCategory)
admin.site.register(BudgetLog) | 33.6 | 42 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.166667 |
8276d754ae9e540d8e94f8e6e543d48ce3a9e8c7 | 2,060 | py | Python | nn_model/embedding_layer.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
]
| 4 | 2021-06-01T02:06:57.000Z | 2022-02-23T02:14:07.000Z | nn_model/embedding_layer.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
]
| null | null | null | nn_model/embedding_layer.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
]
| 2 | 2021-01-28T05:48:20.000Z | 2022-01-24T11:59:13.000Z | # -*- coding: UTF-8 -*-
#!/usr/bin/python3
"""
Embedding Layer
"""
#************************************************************
# Imported Libraries
#************************************************************
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models import KeyedVectors
import pdb
class EmbeddingLayer(nn.Module):
def __init__(self, params, vocab, pretrained_emb_path = None):
super(EmbeddingLayer, self).__init__()
# embedding layer
self.lang = vocab.lang
self.vocab = vocab
self.emb_dim = params.emb_dim
self.embeddings = nn.Embedding(vocab.vocab_size, self.emb_dim, padding_idx = vocab.PAD_ID)
self.init_emb(self.embeddings, pretrained_emb_path, vocab)
# ijcai dropout, p = 0.2
self.emb_do = nn.Dropout(p = params.emb_do)
self.use_cuda = params.cuda
def init_emb(self, embeddings, pretrained_emb_path, vocab):
if pretrained_emb_path is not None:
self.load_pretrained(pretrained_emb_path, embeddings, vocab)
else:
"""
Initialize embedding weight like word2vec.
The u_embedding is a uniform distribution in [-0.5/emb_dim, 0.5/emb_dim],
"""
initrange = 0.5 / self.emb_dim
embeddings.weight.data.uniform_(-initrange, initrange)
embeddings.weight.data[vocab.PAD_ID] = 0
def load_pretrained(self, pretrained_emb_path, embeddings, vocab):
print('loading {} embeddings for {}'.format(pretrained_emb_path, self.lang))
try:
pre_emb = KeyedVectors.load_word2vec_format(pretrained_emb_path, binary = False)
        except Exception:
            print('Did not find {} embeddings for {}'.format(pretrained_emb_path, self.lang))
return
# ignore only pad
for i in range(1, len(vocab.idx2word)):
try:
embeddings.weight.data[i] = torch.from_numpy(pre_emb[vocab.idx2word[i]])
            except Exception:  # word missing from the pretrained vectors; keep the default init
continue
def forward(self, batch_input):
input_word_embs = self.embeddings(batch_input)
input_word_embs = self.emb_do(input_word_embs)
return input_word_embs
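# A minimal usage sketch (the `params` and `vocab` objects here are
# hypothetical stand-ins for the ones provided by the surrounding project):
#
#   params: needs .emb_dim, .emb_do and .cuda attributes
#   vocab:  needs .lang, .vocab_size, .PAD_ID and .idx2word
#
#   layer = EmbeddingLayer(params, vocab, pretrained_emb_path=None)
#   embs = layer(batch_ids)  # (batch, seq_len) ids -> (batch, seq_len, emb_dim)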
| 30.294118 | 94 | 0.662136 | 1,706 | 0.828155 | 0 | 0 | 0 | 0 | 0 | 0 | 472 | 0.229126 |
827728c1d9e6e7856cb8f43e7465659d4d505df0 | 17,810 | py | Python | grpclib/server.py | panaetov/grpclib | 3c7d6ec3479cde417e748bc9b0cf0e9188d0f42a | [
"BSD-3-Clause"
]
| null | null | null | grpclib/server.py | panaetov/grpclib | 3c7d6ec3479cde417e748bc9b0cf0e9188d0f42a | [
"BSD-3-Clause"
]
| null | null | null | grpclib/server.py | panaetov/grpclib | 3c7d6ec3479cde417e748bc9b0cf0e9188d0f42a | [
"BSD-3-Clause"
]
| null | null | null | import abc
import socket
import logging
import asyncio
import warnings
import h2.config
import h2.exceptions
from .utils import DeadlineWrapper
from .const import Status
from .stream import send_message, recv_message
from .stream import StreamIterator
from .metadata import Metadata, Deadline
from .protocol import H2Protocol, AbstractHandler
from .exceptions import GRPCError, ProtocolError
from .encoding.base import GRPC_CONTENT_TYPE
from .encoding.proto import ProtoCodec
log = logging.getLogger(__name__)
class Stream(StreamIterator):
"""
    Represents a gRPC method call (an HTTP/2 request/stream) and everything you
    need to communicate with the client in order to handle this request.
    Every method handler accepts a single positional argument -
    stream:
.. code-block:: python
async def MakeLatte(self, stream: grpclib.server.Stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
This is true for every gRPC method type.
"""
# stream state
_send_initial_metadata_done = False
_send_message_count = 0
_send_trailing_metadata_done = False
_cancel_done = False
def __init__(self, stream, cardinality, codec, recv_type, send_type,
*, metadata, deadline=None):
self._stream = stream
self._cardinality = cardinality
self._codec = codec
self._recv_type = recv_type
self._send_type = send_type
self.metadata = metadata
self.deadline = deadline
async def recv_message(self):
"""Coroutine to receive incoming message from the client.
If client sends UNARY request, then you can call this coroutine
only once. If client sends STREAM request, then you should call this
coroutine several times, until it returns None. To simplify your code
in this case, :py:class:`Stream` class implements async iteration
protocol, so you can use it like this:
.. code-block:: python
            async for message in stream:
do_smth_with(message)
or even like this:
.. code-block:: python
messages = [msg async for msg in stream]
        HTTP/2 has a flow-control mechanism, so the server will acknowledge
        received DATA frames as a message only after the user consumes this
        coroutine.
:returns: message
"""
return await recv_message(self._stream, self._codec, self._recv_type)
async def send_initial_metadata(self):
"""Coroutine to send headers with initial metadata to the client.
In gRPC you can send initial metadata as soon as possible, because
gRPC doesn't use `:status` pseudo header to indicate success or failure
of the current request. gRPC uses trailers for this purpose, and
trailers are sent during :py:meth:`send_trailing_metadata` call, which
should be called in the end.
.. note:: This coroutine will be called implicitly during first
:py:meth:`send_message` coroutine call, if not called before
explicitly.
"""
if self._send_initial_metadata_done:
raise ProtocolError('Initial metadata was already sent')
await self._stream.send_headers([
(':status', '200'),
('content-type', (GRPC_CONTENT_TYPE + '+'
+ self._codec.__content_subtype__)),
])
self._send_initial_metadata_done = True
async def send_message(self, message, **kwargs):
"""Coroutine to send message to the client.
        If the server sends a UNARY response, you should call this coroutine only
        once. If the server sends a STREAM response, you can call this coroutine
as many times as you need.
:param message: message object
"""
if 'end' in kwargs:
warnings.warn('"end" argument is deprecated, use '
'"stream.send_trailing_metadata" explicitly',
stacklevel=2)
end = kwargs.pop('end', False)
assert not kwargs, kwargs
if not self._send_initial_metadata_done:
await self.send_initial_metadata()
if not self._cardinality.server_streaming:
if self._send_message_count:
raise ProtocolError('Server should send exactly one message '
'in response')
await send_message(self._stream, self._codec, message, self._send_type)
self._send_message_count += 1
if end:
await self.send_trailing_metadata()
async def send_trailing_metadata(self, *, status=Status.OK,
status_message=None):
"""Coroutine to send trailers with trailing metadata to the client.
        This coroutine allows sending trailers-only responses in case some
        failure condition occurred while handling the current request, i.e. when
``status is not OK``.
.. note:: This coroutine will be called implicitly at exit from
request handler, with appropriate status code, if not called
explicitly during handler execution.
:param status: resulting status of this coroutine call
:param status_message: description for a status
"""
if self._send_trailing_metadata_done:
raise ProtocolError('Trailing metadata was already sent')
if not self._send_message_count and status is Status.OK:
raise ProtocolError('{!r} requires non-empty response'
.format(status))
if self._send_initial_metadata_done:
headers = []
else:
# trailers-only response
headers = [(':status', '200')]
headers.append(('grpc-status', str(status.value)))
if status_message is not None:
headers.append(('grpc-message', status_message))
await self._stream.send_headers(headers, end_stream=True)
self._send_trailing_metadata_done = True
if status != Status.OK and self._stream.closable:
self._stream.reset_nowait()
async def cancel(self):
"""Coroutine to cancel this request/stream.
Server will send RST_STREAM frame to the client, so it will be
explicitly informed that there is nothing to expect from the server
regarding this request/stream.
"""
if self._cancel_done:
raise ProtocolError('Stream was already cancelled')
await self._stream.reset() # TODO: specify error code
self._cancel_done = True
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if (
self._send_trailing_metadata_done
or self._cancel_done
or self._stream._transport.is_closing()
):
# to suppress exception propagation
return True
if exc_val is not None:
if isinstance(exc_val, GRPCError):
status = exc_val.status
status_message = exc_val.message
elif isinstance(exc_val, Exception):
status = Status.UNKNOWN
status_message = 'Internal Server Error'
else:
# propagate exception
return
elif not self._send_message_count:
status = Status.UNKNOWN
status_message = 'Empty response'
else:
status = Status.OK
status_message = None
try:
await self.send_trailing_metadata(status=status,
status_message=status_message)
except h2.exceptions.StreamClosedError:
pass
# to suppress exception propagation
return True
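# request_handler() below is the glue between the HTTP/2 layer and user code:
# it rejects non-POST requests, requests without a gRPC content-type or the
# required "te: trailers" header, and unknown ":path" values, then wraps the
# matched method in a Stream, enforces the optional grpc-timeout deadline, and
# finally releases the underlying HTTP/2 stream.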
async def request_handler(mapping, _stream, headers, codec, release_stream):
try:
headers_map = dict(headers)
if headers_map[':method'] != 'POST':
await _stream.send_headers([
(':status', '405'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
content_type = headers_map.get('content-type')
if content_type is None:
await _stream.send_headers([
(':status', '415'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Missing content-type header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
base_content_type, _, sub_type = content_type.partition('+')
sub_type = sub_type or ProtoCodec.__content_subtype__
if (
base_content_type != GRPC_CONTENT_TYPE
or sub_type != codec.__content_subtype__
):
await _stream.send_headers([
(':status', '415'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Unacceptable content-type header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
if headers_map.get('te') != 'trailers':
await _stream.send_headers([
(':status', '400'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Required "te: trailers" header is missing'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
h2_path = headers_map[':path']
method = mapping.get(h2_path)
if method is None:
await _stream.send_headers([
(':status', '200'),
('grpc-status', str(Status.UNIMPLEMENTED.value)),
('grpc-message', 'Method not found'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
metadata = Metadata.from_headers(headers)
try:
deadline = Deadline.from_metadata(metadata)
except ValueError:
await _stream.send_headers([
(':status', '200'),
('grpc-status', str(Status.UNKNOWN.value)),
('grpc-message', 'Invalid grpc-timeout header'),
], end_stream=True)
if _stream.closable:
_stream.reset_nowait()
return
async with Stream(_stream, method.cardinality, codec,
method.request_type, method.reply_type,
metadata=metadata, deadline=deadline) as stream:
deadline_wrapper = None
try:
if deadline:
deadline_wrapper = DeadlineWrapper()
with deadline_wrapper.start(deadline):
with deadline_wrapper:
await method.func(stream)
else:
await method.func(stream)
except asyncio.TimeoutError:
if deadline_wrapper and deadline_wrapper.cancelled:
log.exception('Deadline exceeded')
raise GRPCError(Status.DEADLINE_EXCEEDED)
else:
log.exception('Timeout occurred')
raise
except asyncio.CancelledError:
log.exception('Request was cancelled')
raise
except Exception:
log.exception('Application error')
raise
except Exception:
log.exception('Server error')
finally:
release_stream()
class _GC(abc.ABC):
_gc_counter = 0
@property
@abc.abstractmethod
def __gc_interval__(self):
raise NotImplementedError
@abc.abstractmethod
def __gc_collect__(self):
pass
def __gc_step__(self):
self._gc_counter += 1
if not (self._gc_counter % self.__gc_interval__):
self.__gc_collect__()
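# _GC gives its subclasses cheap housekeeping: every __gc_interval__ calls to
# __gc_step__ trigger __gc_collect__, which drops references to finished
# tasks/handlers instead of scanning them on every request.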
class Handler(_GC, AbstractHandler):
__gc_interval__ = 10
closing = False
def __init__(self, mapping, codec, *, loop):
self.mapping = mapping
self.codec = codec
self.loop = loop
self._tasks = {}
self._cancelled = set()
def __gc_collect__(self):
self._tasks = {s: t for s, t in self._tasks.items()
if not t.done()}
self._cancelled = {t for t in self._cancelled
if not t.done()}
def accept(self, stream, headers, release_stream):
self.__gc_step__()
self._tasks[stream] = self.loop.create_task(
request_handler(self.mapping, stream, headers, self.codec,
release_stream)
)
def cancel(self, stream):
task = self._tasks.pop(stream)
task.cancel()
self._cancelled.add(task)
def close(self):
for task in self._tasks.values():
task.cancel()
self._cancelled.update(self._tasks.values())
self.closing = True
async def wait_closed(self):
if self._cancelled:
await asyncio.wait(self._cancelled, loop=self.loop)
def check_closed(self):
self.__gc_collect__()
return not self._tasks and not self._cancelled
class Server(_GC, asyncio.AbstractServer):
"""
HTTP/2 server, which uses gRPC service handlers to handle requests.
Handler is a subclass of the abstract base class, which was generated
from .proto file:
.. code-block:: python
class CoffeeMachine(cafe_grpc.CoffeeMachineBase):
async def MakeLatte(self, stream):
task: cafe_pb2.LatteOrder = await stream.recv_message()
...
await stream.send_message(empty_pb2.Empty())
server = Server([CoffeeMachine()], loop=loop)
"""
__gc_interval__ = 10
def __init__(self, handlers, *, loop, codec=None):
"""
:param handlers: list of handlers
:param loop: asyncio-compatible event loop
"""
mapping = {}
for handler in handlers:
mapping.update(handler.__mapping__())
self._mapping = mapping
self._loop = loop
self._codec = codec or ProtoCodec()
self._config = h2.config.H2Configuration(
client_side=False,
header_encoding='utf-8',
)
self._tcp_server = None
self._handlers = set()
def __gc_collect__(self):
self._handlers = {h for h in self._handlers
if not (h.closing and h.check_closed())}
def _protocol_factory(self):
self.__gc_step__()
handler = Handler(self._mapping, self._codec, loop=self._loop)
self._handlers.add(handler)
return H2Protocol(handler, self._config, loop=self._loop)
async def start(self, host=None, port=None, *,
family=socket.AF_UNSPEC, flags=socket.AI_PASSIVE,
sock=None, backlog=100, ssl=None, reuse_address=None,
reuse_port=None):
"""Coroutine to start the server.
:param host: can be a string, containing IPv4/v6 address or domain name.
If host is None, server will be bound to all available interfaces.
:param port: port number.
:param family: can be set to either :py:data:`python:socket.AF_INET` or
:py:data:`python:socket.AF_INET6` to force the socket to use IPv4 or
IPv6. If not set it will be determined from host.
:param flags: is a bitmask for
:py:meth:`~python:asyncio.AbstractEventLoop.getaddrinfo`.
:param sock: sock can optionally be specified in order to use a
preexisting socket object. If specified, host and port should be
omitted (must be None).
:param backlog: is the maximum number of queued connections passed to
listen().
:param ssl: can be set to an :py:class:`~python:ssl.SSLContext`
to enable SSL over the accepted connections.
:param reuse_address: tells the kernel to reuse a local socket in
TIME_WAIT state, without waiting for its natural timeout to expire.
:param reuse_port: tells the kernel to allow this endpoint to be bound
to the same port as other existing endpoints are bound to,
so long as they all set this flag when being created.
"""
if self._tcp_server is not None:
raise RuntimeError('Server is already started')
self._tcp_server = await self._loop.create_server(
self._protocol_factory, host, port,
family=family, flags=flags, sock=sock, backlog=backlog, ssl=ssl,
reuse_address=reuse_address, reuse_port=reuse_port
)
def close(self):
"""Stops accepting new connections, cancels all currently running
requests. Request handlers are able to handle `CancelledError` and
exit properly.
"""
if self._tcp_server is None:
raise RuntimeError('Server is not started')
self._tcp_server.close()
for handler in self._handlers:
handler.close()
async def wait_closed(self):
"""Coroutine to wait until all existing request handlers will exit
properly.
"""
if self._tcp_server is None:
raise RuntimeError('Server is not started')
await self._tcp_server.wait_closed()
if self._handlers:
await asyncio.wait({h.wait_closed() for h in self._handlers},
loop=self._loop)
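# A minimal serving sketch (hypothetical handler class and address, error
# handling omitted; CoffeeMachine is the generated handler from the class
# docstring above):
#
#   async def serve(loop):
#       server = Server([CoffeeMachine()], loop=loop)
#       await server.start('127.0.0.1', 50051)
#       try:
#           await asyncio.sleep(3600)  # or wait for some shutdown signal
#       finally:
#           server.close()
#           await server.wait_closed()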
| 34.921569 | 80 | 0.600842 | 13,407 | 0.752695 | 0 | 0 | 160 | 0.008983 | 12,665 | 0.711038 | 6,465 | 0.362958 |
82789f2ad5480b27da525091f877dbbf7fb5f30c | 3,923 | py | Python | examples/vector_dot.py | Wheest/EVA | 6d19da1d454f398f0ade297d3a76a4ee9e773929 | [
"MIT"
]
| null | null | null | examples/vector_dot.py | Wheest/EVA | 6d19da1d454f398f0ade297d3a76a4ee9e773929 | [
"MIT"
]
| null | null | null | examples/vector_dot.py | Wheest/EVA | 6d19da1d454f398f0ade297d3a76a4ee9e773929 | [
"MIT"
]
| null | null | null | #!/usr/bin/env python
import argparse
from eva import EvaProgram, Input, Output
from eva.ckks import CKKSCompiler
from eva.seal import generate_keys
import numpy as np
import time
from eva.std.numeric import horizontal_sum
def dot(x, y):
return np.dot(x, y)
def generate_inputs_naive(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
for n in range(size):
# each element is a list (i.e. a vector of size 1)
inputs[f"{label}_{n}"] = [i]
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot_naive(size):
"""Vector dot product with vector size of 1"""
fhe_dot = EvaProgram("fhe_dot", vec_size=1)
with fhe_dot:
a = np.array([Input(f"x_{n}") for n in range(size)]).reshape(1, size)
b = np.array([Input(f"w_{k}") for k in range(size)]).reshape(size, 1)
out = dot(a, b)
Output("y", out[0][0])
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
def generate_inputs(size, label="x"):
inputs = dict()
inputs_np = np.zeros((size))
i = 0
# all data is stored in a single list of size `size`
inputs[label] = list(range(size))
for n in range(size):
inputs_np[n] = i
i += 1
return inputs, inputs_np
def generate_vector_dot(size):
"""Vector dot product with CKKS vector size equal to the size"""
fhe_dot = EvaProgram("fhe_dot", vec_size=size)
with fhe_dot:
a = np.array([Input("x")])
b = np.array([Input(f"w")])
out = dot(a, b)
Output("y", horizontal_sum(out))
fhe_dot.set_input_scales(32)
fhe_dot.set_output_ranges(32)
return fhe_dot
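# The two generators above differ only in how data is packed into ciphertexts:
# the naive version uses one vec_size-1 input per element, so the dot product
# needs `size` separate encrypted multiplications, while the SIMD version packs
# the whole vector into the slots of a single input and reduces the elementwise
# product with horizontal_sum. benchmark_vector_dot below compares both against
# the plaintext numpy dot product.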
def benchmark_vector_dot(size, mode="SIMD"):
if mode == "SIMD":
# generate program with SIMD-style
inputs, inputs_np = generate_inputs(size, label="x")
weights, weights_np = generate_inputs(size, label="w")
fhe_dot = generate_vector_dot(size)
else:
# generate program with vector size = 1
inputs, inputs_np = generate_inputs_naive(size, label="x")
weights, weights_np = generate_inputs_naive(size, label="w")
fhe_dot = generate_vector_dot_naive(size)
# compiling program
data = {**weights, **inputs}
compiler = CKKSCompiler(config={"security_level": "128", "warn_vec_size": "false"})
compiled, params, signature = compiler.compile(fhe_dot)
public_ctx, secret_ctx = generate_keys(params)
enc_inputs = public_ctx.encrypt(data, signature)
# Running program
start = time.time()
enc_outputs = public_ctx.execute(compiled, enc_inputs)
end = time.time()
run_time = end - start
# decrypt the output
outputs = secret_ctx.decrypt(enc_outputs, signature)
y = np.array(outputs["y"])
# get time for plaintext dot product
start = time.time()
true_y = inputs_np.dot(weights_np)
end = time.time()
plain_run_time = end - start
# verifying correctness of output
np.testing.assert_allclose(y, true_y)
return run_time, plain_run_time
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run a dot product program")
parser.add_argument(
"--mode",
default="SIMD",
choices=["SIMD", "naive"],
)
args = parser.parse_args()
results_cipher = dict()
results_plain = dict()
if args.mode == "SIMD":
print("Generating code in SIMD style")
else:
print("Generating code in naive style")
for size in [4, 8, 16, 32, 64, 128, 256, 512, 1024]:
time_cipher, time_plain = benchmark_vector_dot(size, args.mode)
results_cipher[f"{size}"] = time_cipher
results_plain[f"{size}"] = time_plain
print(f"Done vector size {size}, CKKS time: {time_cipher}")
print("Done")
print("CKKS times:", results_cipher)
print("Plain text times:", results_plain)
| 28.845588 | 87 | 0.640836 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 809 | 0.20622 |
8278ddae26be1d01817d1cce51811bee77e0e097 | 5,903 | py | Python | ramcache.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
]
| null | null | null | ramcache.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
]
| null | null | null | ramcache.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
]
| null | null | null | from __future__ import division
from __future__ import print_function
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module manages a distributed RAM cache as a global python dictionary in
each AppEngine instance. AppEngine can spin up new instances or kill old ones
at any time. Each instance's RAM cache is independent and might not have the
same entries as found in the RAM caches of other instances.
Each instance will do the work needed to compute a given RAM cache entry
itself. The values computed in a given instance will speed up future requests
made to that instance only.
When the user edits something in the app, the updated entity is stored in
datastore. Also, the singleton SharedInvalidate entity is updated with the
timestamp of the change. Every request handler must start processing a request
by first calling SharedInvalidate.check_for_distributed_invalidation() which
checks for any needed invalidations and clears RAM cache entries in
that instance if needed.
For now, there is only a single RAM cache per instance and when anything is
invalidated, that entire RAM cache is completely cleared. In the future,
invalidations could be compartmentalized by RAM cache type, or even specific
entity IDs. Monorail uses that approach, but existing ChromeStatus code does
not need it.
Calling code must not mutate any value that is passed into set() or returned
from get(). If calling code needs to mutate such objects, it should call
copy.copy() or copy.deepcopy() to avoid unintentional cumulative mutations.
Unlike memcache, expiration here is handled lazily: entries given a ``time``
in set() are only purged when they are next looked up. So, whenever a cached
value becomes invalid before it expires, it must be explicitly invalidated.
"""
import logging
import time as time_module
from google.appengine.ext import db
global_cache = {}
expires = {}
# Whenever the cache would have more than this many items, some
# random item is dropped, or the entire cache is cleared.
# If our instances are killed by appengine for exceeding memory limits,
# we can configure larger instances and/or reduce this value.
MAX_CACHE_SIZE = 10000
def set(key, value, time=None):
"""Emulate the memcache.set() method using a RAM cache."""
if len(global_cache) + 1 > MAX_CACHE_SIZE:
popped_item = global_cache.popitem()
if popped_item[0] in expires:
del expires[popped_item[0]]
global_cache[key] = value
if time:
expires[key] = int(time_module.time()) + time
def _check_expired(keys):
now = int(time_module.time())
for key in keys:
if key in expires and expires[key] < now:
del expires[key]
del global_cache[key]
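# Expiration is lazy: _check_expired() only runs from get() and get_multi(),
# so an expired entry lingers in memory (but is never returned) until it is
# looked up again or the whole cache is cleared.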
def get(key):
"""Emulate the memcache.get() method using a RAM cache."""
_check_expired([key])
verb = 'hit' if key in global_cache else 'miss'
logging.info('cache %s for %r', verb, key)
return global_cache.get(key)
def get_multi(keys):
"""Emulate the memcache.get_multi() method using a RAM cache."""
_check_expired(keys)
return {
key: global_cache[key]
for key in keys
if key in global_cache
}
def set_multi(entries):
"""Emulate the memcache.set_multi() method using a RAM cache."""
if len(global_cache) + len(entries) > MAX_CACHE_SIZE:
global_cache.clear()
expires.clear()
global_cache.update(entries)
def delete(key):
"""Emulate the memcache.delete() method using a RAM cache."""
if key in global_cache:
del global_cache[key]
flush_all() # Note: this is wasteful but infrequent in our app.
def flush_all():
"""Emulate the memcache.flush_all() method using a RAM cache.
This does not clear the RAM cache in this instance. That happens
at the start of the next request when the request handler calls
SharedInvalidate.check_for_distributed_invalidation().
"""
SharedInvalidate.invalidate()
class SharedInvalidateParent(db.Model):
pass
class SharedInvalidate(db.Model):
PARENT_ENTITY_ID = 123
PARENT_KEY = db.Key.from_path('SharedInvalidateParent', PARENT_ENTITY_ID)
SINGLETON_ENTITY_ID = 456
SINGLETON_KEY = db.Key.from_path(
'SharedInvalidateParent', PARENT_ENTITY_ID,
'SharedInvalidate', SINGLETON_ENTITY_ID)
last_processed_timestamp = None
updated = db.DateTimeProperty(auto_now=True)
@classmethod
def invalidate(cls):
"""Tell this and other appengine instances to invalidate their caches."""
singleton = cls.get(cls.SINGLETON_KEY)
if not singleton:
singleton = SharedInvalidate(key=cls.SINGLETON_KEY)
singleton.put() # automatically sets singleton.updated to now.
# The cache in each instance (including this one) will be
# cleared on the next call to check_for_distributed_invalidation()
# which should happen at the start of request processing.
@classmethod
def check_for_distributed_invalidation(cls):
"""Check if any appengine instance has invlidated the cache."""
singleton = cls.get(cls.SINGLETON_KEY, read_policy=db.STRONG_CONSISTENCY)
if not singleton:
return # No news is good news
if (cls.last_processed_timestamp is None or
singleton.updated > cls.last_processed_timestamp):
global_cache.clear()
expires.clear()
cls.last_processed_timestamp = singleton.updated
def check_for_distributed_invalidation():
"""Just a shorthand way to call the class method."""
SharedInvalidate.check_for_distributed_invalidation()
| 34.723529 | 79 | 0.750127 | 1,406 | 0.238184 | 0 | 0 | 974 | 0.165001 | 0 | 0 | 3,563 | 0.603591 |
8279df0466383aeaceedee24127f9a8045b9a674 | 401 | py | Python | src/sales/migrations/0029_auto_20191025_1058.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
]
| null | null | null | src/sales/migrations/0029_auto_20191025_1058.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
]
| 18 | 2020-02-12T00:41:40.000Z | 2022-02-10T12:00:03.000Z | src/sales/migrations/0029_auto_20191025_1058.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
]
| null | null | null | # Generated by Django 2.2 on 2019-10-25 10:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sales', '0028_auto_20191024_1736'),
]
operations = [
migrations.AlterField(
model_name='interaction',
name='result',
field=models.CharField(blank=True, max_length=1000),
),
]
| 21.105263 | 64 | 0.608479 | 310 | 0.773067 | 0 | 0 | 0 | 0 | 0 | 0 | 98 | 0.244389 |
8279e33b741621fbcfe10f065044f83eff6d9a93 | 41,238 | py | Python | maintenance/pymelControlPanel.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | [
"BSD-3-Clause"
]
| null | null | null | maintenance/pymelControlPanel.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | [
"BSD-3-Clause"
]
| null | null | null | maintenance/pymelControlPanel.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | [
"BSD-3-Clause"
]
| null | null | null | """
UI for controlling how api classes and mel commands are combined into pymel classes.
This UI modifies factories.apiToMelData which is pickled out to apiMelBridge.
It controls:
which mel methods correspond to api methods
disabling of api methods
preference for overloaded methods (since currently only one overloaded method is supported)
renaming of apiMethod
"""
import inspect, re, os
import pymel.core as pm
import pymel.internal.factories as factories
import logging
logger = logging.getLogger(__name__)
if logger.level == logging.NOTSET:
logger.setLevel(logging.INFO)
FRAME_WIDTH = 800
VERBOSE = True
class PymelControlPanel(object):
def __init__(self):
# key is a tuple of (class, method)
self.classList = sorted( list( set( [ key[0] for key in factories.apiToMelData.keys()] ) ) )
self.classFrames={}
self.processClassFrames()
self.buildUI()
def buildUI(self):
_notifySavingDisabled()
self.win = pm.window(title='Pymel Control Panel')
self.win.show()
with pm.paneLayout(configuration='vertical3', paneSize=([1,20,100], [3,20,100]) ) as self.pane:
# Lef Column: Api Classes
self.classScrollList = pm.textScrollList('apiClassList')
# Center Column: Api Methods
# Would LIKE to do it like this, but there is currently a bug with
# objectType UI, such that even if
# layout('window4|paneLayout5', q=1, exists=1) == True
# when you run:
# objectTypeUI('window4|paneLayout5')
# you will get an error:
# RuntimeError: objectTypeUI: Object 'window4|paneLayout5' not found.
# with formLayout() as apiForm:
# #with scrollLayout() as scroll:
# with tabLayout('apiMethodCol') as self.apiMethodCol:
# pass
# status = helpLine(h=60)
# So, instead, we do it old-school...
apiForm = pm.formLayout()
self.apiMethodCol = pm.tabLayout('apiMethodCol')
pm.setParent(apiForm)
status = pm.cmds.helpLine(h=60)
pm.setParent(self.pane)
apiForm.attachForm( self.apiMethodCol, 'top', 5 )
apiForm.attachForm( self.apiMethodCol, 'left', 5 )
apiForm.attachForm( self.apiMethodCol, 'right', 5 )
apiForm.attachControl( self.apiMethodCol, 'bottom', 5, status )
apiForm.attachPosition( status, 'bottom', 5, 20 )
apiForm.attachForm( status, 'bottom', 5 )
apiForm.attachForm( status, 'left', 5 )
apiForm.attachForm( status, 'right', 5 )
# Right Column: Mel Methods
melForm = pm.formLayout()
label1 = pm.text( label='Unassigned Mel Methods' )
self.unassignedMelMethodLister = pm.textScrollList()
label2 = pm.text( label='Assigned Mel Methods' )
self.assignedMelMethodLister = pm.textScrollList()
label3 = pm.text( label='Disabled Mel Methods' )
self.disabledMelMethodLister = pm.textScrollList()
pm.setParent(self.pane)
melForm.attachForm( label1, 'top', 5 )
melForm.attachForm( label1, 'left', 5 )
melForm.attachForm( label1, 'right', 5 )
melForm.attachControl( self.unassignedMelMethodLister, 'top', 0, label1 )
melForm.attachForm( self.unassignedMelMethodLister, 'left', 5 )
melForm.attachForm( self.unassignedMelMethodLister, 'right', 5 )
melForm.attachPosition( self.unassignedMelMethodLister, 'bottom', 5, 33 )
melForm.attachControl( label2, 'top', 5, self.unassignedMelMethodLister)
melForm.attachForm( label2, 'left', 5 )
melForm.attachForm( label2, 'right', 5 )
melForm.attachControl( self.assignedMelMethodLister, 'top', 0, label2 )
melForm.attachForm( self.assignedMelMethodLister, 'left', 5 )
melForm.attachForm( self.assignedMelMethodLister, 'right', 5 )
melForm.attachPosition( self.assignedMelMethodLister, 'bottom', 5, 66 )
melForm.attachControl( label3, 'top', 5, self.assignedMelMethodLister)
melForm.attachForm( label3, 'left', 5 )
melForm.attachForm( label3, 'right', 5 )
melForm.attachControl( self.disabledMelMethodLister, 'top', 0, label3 )
melForm.attachForm( self.disabledMelMethodLister, 'left', 5 )
melForm.attachForm( self.disabledMelMethodLister, 'right', 5 )
melForm.attachForm( self.disabledMelMethodLister, 'bottom', 5 )
pm.setParent('..')
pm.popupMenu(parent=self.unassignedMelMethodLister, button=3 )
pm.menuItem(l='disable', c=pm.Callback( PymelControlPanel.disableMelMethod, self, self.unassignedMelMethodLister ) )
pm.popupMenu(parent=self.assignedMelMethodLister, button=3 )
pm.menuItem(l='disable', c=pm.Callback( PymelControlPanel.disableMelMethod, self, self.assignedMelMethodLister ) )
pm.popupMenu(parent=self.disabledMelMethodLister, button=3 )
pm.menuItem(l='enable', c=pm.Callback( PymelControlPanel.enableMelMethod))
self.classScrollList.extend( self.classList )
self.classScrollList.selectCommand( lambda: self.apiClassList_selectCB() )
pm.scriptJob(uiDeleted=[str(self.win),cacheResults])
self.win.show()
def disableMelMethod(self, menu):
msel = menu.getSelectItem()
csel = self.classScrollList.getSelectItem()
if msel and csel:
method = msel[0]
clsname = csel[0]
menu.removeItem(method)
self.disabledMelMethodLister.append( method )
#print clsname, method, factories.apiToMelData[ (clsname, method) ]
factories.apiToMelData[ (clsname, method) ]['melEnabled'] = False
def enableMelMethod(self):
menu = self.disabledMelMethodLister
msel = menu.getSelectItem()
csel = self.classScrollList.getSelectItem()
if msel and csel:
method = msel[0]
clsname = csel[0]
menu.removeItem(method)
self.unassignedMelMethodLister.append( method )
#print clsname, method, factories.apiToMelData[ (clsname, method) ]
factories.apiToMelData[ (clsname, method) ].pop('melEnabled')
@staticmethod
def getMelMethods(className):
"""get all mel-derived methods for this class"""
import maintenance.build
if not factories.classToMelMap.keys():
# force factories.classToMelMap to be populated
list(maintenance.build.iterPyNodeText())
assert factories.classToMelMap
reg = re.compile('(.*[a-z])([XYZ])$')
newlist = []
origlist = factories.classToMelMap.get(className, [])
for method in origlist:
m = reg.search(method)
if m:
# strip off the XYZ component and replace with *
newname = m.group(1) + '*'
if newname not in newlist:
newlist.append(newname)
else:
newlist.append(method)
return sorted(newlist)
def apiClassList_selectCB(self, *args):
sel = self.classScrollList.getSelectItem()
if sel:
self.buildClassColumn(sel[0])
def assignMelMethod(self, method):
#print "method %s is now assigned" % method
if method in pm.util.listForNone( self.unassignedMelMethodLister.getAllItems() ):
self.unassignedMelMethodLister.removeItem(method)
self.assignedMelMethodLister.append( method )
def unassignMelMethod(self, method):
#print "method %s is now unassigned" % method
if method in pm.util.listForNone( self.assignedMelMethodLister.getAllItems() ):
self.assignedMelMethodLister.removeItem(method)
self.unassignedMelMethodLister.append( method )
def processClassFrames(self):
"""
This triggers the generation of all the defaults for `factories.apiToMelData`, but it does
not create any UI elements. It creates `ClassFrame` instances, which in turn create
`MethodRow` instances, but the creation of UI elements is delayed until a particular
configuration is requested via `buildClassColumn`.
"""
logger.info( 'processing all classes...' )
for className in self.classList:
melMethods = self.getMelMethods(className)
logger.debug( '%s: mel methods: %s' % (className, melMethods) )
for clsName, apiClsName in getClassHierarchy(className):
if apiClsName and apiClsName not in ['list']:
if clsName not in self.classFrames:
frame = ClassFrame( self, clsName, apiClsName)
self.classFrames[clsName] = frame
# temporarily disable the melName updating until we figure out how to deal
# with base classes that are the parents of many others, and which therefore end up with
# methods derived from many different mel commands, which are only applicable for the inherited classes
# not for the base class on its own. ( see ObjectSet and Character, for an example, specifically 'getIntersection' method )
#self.classFrames[clsName].updateMelNames( melMethods )
logger.info( 'done processing classes' )
def buildClassColumn(self, className ):
"""
Build an info column for a class. This column will include processed `ClassFrame`s for it and its parent classes
"""
pm.setParent(self.apiMethodCol)
self.apiMethodCol.clear()
self.unassignedMelMethodLister.removeAll()
self.assignedMelMethodLister.removeAll()
self.disabledMelMethodLister.removeAll()
melMethods = self.getMelMethods(className)
for method in melMethods:
# fix
if (className, method) in factories.apiToMelData and factories.apiToMelData[ (className, method) ] == {'enabled':False}:
d = factories.apiToMelData.pop( (className, method) )
d.pop('enabled')
d['melEnabled'] = False
if (className, method) in factories.apiToMelData and factories.apiToMelData[(className, method)].get('melEnabled',True) == False:
self.disabledMelMethodLister.append( method )
else:
self.unassignedMelMethodLister.append( method )
#filter = set( ['double', 'MVector'] )
filter = []
count = 0
for clsName, apiClsName in getClassHierarchy(className):
if apiClsName:
#print cls
if clsName in self.classFrames:
logger.debug( "building UI for %s", clsName )
frame = self.classFrames[clsName].buildUI(filter)
self.apiMethodCol.setTabLabel( [frame, clsName] )
count+=1
#frame.setVisible(False)
#if i != len(mro)-1:
# frame.setCollapse(True)
else:
logger.debug( "skipping %s", clsName )
self.apiMethodCol.setSelectTabIndex(count)
#self.classFrames[className].frame.setCollapse(False)
class ClassFrame(object):
def __init__(self, parent, className, apiClassName ):
self.parent = parent
self.className = className
self.apiClassName = apiClassName
self.rows = {}
self.classInfo = factories.apiClassInfo[apiClassName]['methods']
for method in self.classInfo.keys():
row = MethodRow( self, self.className, self.apiClassName, method, self.classInfo[method] )
self.rows[method] = row
def updateMelNames(self, melMethods):
logger.debug( '%s: updating melNames' % self.className )
for rowName, row in self.rows.items():
row.updateMelNames( melMethods )
def buildUI(self, filter=None):
count = 0
#self.form = formLayout()
with pm.frameLayout(collapsable=False, label='%s (%s)' % (self.className, self.apiClassName),
width = FRAME_WIDTH) as self.frame:
#labelAlign='top')
with pm.tabLayout() as tab:
invertibles = factories.apiClassInfo[self.apiClassName].get('invertibles', [])
usedMethods = []
with pm.formLayout() as pairdForm:
tab.setTabLabel( [pairdForm, 'Paired'] )
with pm.scrollLayout() as pairedScroll:
with pm.columnLayout(visible=False, adjustableColumn=True) as pairedCol:
for setMethod, getMethod in invertibles:
pm.setParent(pairedCol) # column
frame = pm.frameLayout(label = '%s / %s' % (setMethod, getMethod),
labelVisible=True, collapsable=True,
collapse=True, width = FRAME_WIDTH)
col2 = pm.columnLayout()
pairCount = 0
pairCount += self.rows[setMethod].buildUI(filter)
pairCount += self.rows[getMethod].buildUI(filter)
usedMethods += [setMethod, getMethod]
if pairCount == 0:
#deleteUI(col2)
frame.setVisible(False)
frame.setHeight(1)
count += pairCount
pairedCol.setVisible(True)
pairdForm.attachForm( pairedScroll, 'top', 5 )
pairdForm.attachForm( pairedScroll, 'left', 5 )
pairdForm.attachForm( pairedScroll, 'right', 5 )
pairdForm.attachForm( pairedScroll, 'bottom', 5 )
with pm.formLayout() as unpairedForm:
tab.setTabLabel( [unpairedForm, 'Unpaired'] )
with pm.scrollLayout() as unpairedScroll:
with pm.columnLayout(visible=False ) as unpairedCol:
# For some reason, on linux, the unpairedCol height is wrong...
# track + set it ourselves
unpairedHeight = 10 # a little extra buffer...
#rowSpace = unpairedCol.getRowSpacing()
for methodName in sorted( self.classInfo.keys() ):
pm.setParent(unpairedCol)
if methodName not in usedMethods:
frame = pm.frameLayout(label = methodName,
labelVisible=True, collapsable=True,
collapse=True, width = FRAME_WIDTH)
col2 = pm.columnLayout()
count += self.rows[methodName].buildUI(filter)
unpairedHeight += self.rows[methodName].frame.getHeight()# + rowSpace
unpairedCol.setHeight(unpairedHeight)
#self.form.attachForm( self.frame, 'left', 2)
#self.form.attachForm( self.frame, 'right', 2)
#self.form.attachForm( self.frame, 'top', 2)
#self.form.attachForm( self.frame, 'bottom', 2)
unpairedCol.setVisible(True)
unpairedForm.attachForm( unpairedScroll, 'top', 5 )
unpairedForm.attachForm( unpairedScroll, 'left', 5 )
unpairedForm.attachForm( unpairedScroll, 'right', 5 )
unpairedForm.attachForm( unpairedScroll, 'bottom', 5 )
return self.frame
class MethodRow(object):
def __init__(self, parent, className, apiClassName, apiMethodName,
methodInfoList):
self.parent = parent
self.className = className
self.methodName = factories.apiClassInfo[apiClassName].get('pymelMethods', {}).get(apiMethodName, apiMethodName)
self.apiClassName = apiClassName
self.apiMethodName = apiMethodName
self.methodInfoList = methodInfoList
self.data = factories._getApiOverrideData(self.className, self.methodName)
self.classInfo = factories.apiClassInfo[self.apiClassName]['methods'][self.apiMethodName]
try:
enabledArray = self.getEnabledArray()
except:
print self.apiClassName, self.apiMethodName
raise
# DEFAULT VALUES
# correct old values
# we no longer store positive values, only negative -- meaning methods will be enabled by default
# if 'enabled' in self.data and ( self.data['enabled'] == True or sum(enabledArray) == 0 ):
# logger.debug( '%s.%s: enabled array: %s' % ( self.className, self.methodName, enabledArray ) )
# logger.debug( '%s.%s: removing enabled entry' % ( self.className, self.methodName) )
# self.data.pop('enabled', None)
# enabled
# if not self.data.has_key( 'enabled' ):
# self.data['enabled'] = True
if self.methodName in factories.EXCLUDE_METHODS : # or sum(enabledArray) == 0:
self.data['enabled'] = False
# useName mode
if not self.data.has_key( 'useName' ):
self.data['useName'] = 'API'
else:
# correct old values
useNameVal = self.data['useName']
if useNameVal == True:
self.data['useName'] = 'API'
elif useNameVal == False:
self.data['useName'] = 'MEL'
elif useNameVal not in ['MEL', 'API']:
self.data['useName'] = str(useNameVal)
# correct old values
if self.data.has_key('overloadPrecedence'):
self.data['overloadIndex'] = self.data.pop('overloadPrecedence')
# correct old values
if self.data.has_key('melName'):
#logger.debug( "correcting melName %s %s %s" % (self.className, self.methodName, str(self.data['melName']) ) )
self.data['melName'] = str(self.data['melName'])
overloadId = self.data.get('overloadIndex', 0)
if overloadId is None:
# in a previous test, it was determined there were no wrappable overload methods,
# but there may be now. try again.
overloadId = 0
# ensure we don't use a value that is not valid
for i in range(overloadId, len(enabledArray)+1):
try:
                if enabledArray[i]:
                    overloadId = i  # advance to the first wrappable overload
                    break
except IndexError: # went too far, so none are valid
overloadId = None
# if val is None:
# # nothing valid
# self.data.pop('overloadIndex', None)
# else:
self.data['overloadIndex'] = overloadId
def crossReference(self, melName):
""" create an entry for the melName which points to the data being tracked for the api name"""
factories.apiToMelData[ (self.className, melName ) ] = self.data
def uncrossReference(self, melName):
factories.apiToMelData.pop( (self.className, melName ) )
def updateMelNames(self, melMethods):
# melName
if not self.data.has_key( 'melName' ):
match = None
for method in melMethods:
methreg = re.compile(method.replace('*', '.{0,1}') + '$')
#print self.methodName, methreg
if methreg.match( self.methodName ):
match = str(method)
break
if match:
logger.debug( "%s.%s: adding melName %s" % ( self.className, self.methodName, match ) )
self.data['melName'] = match
self.crossReference( match )
def buildUI(self, filter=None):
if filter:
match = False
for i, info in enumerate( self.methodInfoList):
argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
if filter.intersection( argUtil.getInputTypes() + argUtil.getOutputTypes() ):
match = True
break
if match == False:
return False
self.layout = { 'columnAlign' : [1,'right'],
'columnAttach' : [1,'right',8] }
#print className, self.methodName, melMethods
isOverloaded = len(self.methodInfoList)>1
self.frame = pm.frameLayout( w=FRAME_WIDTH, labelVisible=False, collapsable=False)
logger.debug("building row for %s - %s" % (self.methodName, self.frame))
col = pm.columnLayout()
enabledArray = []
self.rows = []
self.overloadPrecedenceColl = None
self.enabledChBx = pm.checkBox(label=self.methodName,
changeCommand=pm.CallbackWithArgs( MethodRow.enableCB, self ) )
if isOverloaded:
self.overloadPrecedenceColl = pm.radioCollection()
for i in range( len(self.methodInfoList) ) :
self.createMethodInstance(i)
else:
#row = rowLayout( self.methodName + '_rowMain', nc=2, cw2=[200, 400] )
#self.enabledChBx = checkBox(label=self.methodName, changeCommand=CallbackWithArgs( MethodRow.enableCB, self ) )
#text(label='')
self.createMethodInstance(0)
#setParent('..')
pm.setParent(col)
pm.separator(w=800, h=6)
#self.row = rowLayout( self.methodName + '_rowSettings', nc=4, cw4=[200, 160, 180, 160] )
#self.rows.append(row)
self.row = pm.rowLayout( self.methodName + '_rowSettings', nc=2, cw2=[200, 220], **self.layout )
self.rows.append(self.row)
# create ui elements
pm.text(label='Mel Equivalent')
self.melNameTextField = pm.textField(w=170, editable=False)
self.melNameOptMenu = pm.popupMenu(parent=self.melNameTextField,
button=1,
postMenuCommand=pm.Callback( MethodRow.populateMelNameMenu, self ) )
pm.setParent('..')
self.row2 = pm.rowLayout( self.methodName + '_rowSettings2', nc=3, cw3=[200, 180, 240], **self.layout )
self.rows.append(self.row2)
pm.text(label='Use Name')
self.nameMode = pm.radioButtonGrp(label='', nrb=3, cw4=[1,50,50,50], labelArray3=['api', 'mel', 'other'] )
self.altNameText = pm.textField(w=170, enable=False)
self.altNameText.changeCommand( pm.CallbackWithArgs( MethodRow.alternateNameCB, self ) )
self.nameMode.onCommand( pm.Callback( MethodRow.nameTypeCB, self ) )
isEnabled = self.data.get('enabled', True)
# UI SETUP
melName = self.data.get('melName', '')
try:
#self.melNameOptMenu.setValue( melName )
self.melNameTextField.setText(melName)
if melName != '':
self.parent.parent.assignMelMethod( melName )
except RuntimeError:
# it is possible for a method name to be listed here that was set from a different view,
# where this class was a super class and more mel commands were available. expand the option list,
# and make this frame read-only
pm.menuItem( label=melName, parent=self.melNameOptMenu )
self.melNameOptMenu.setValue( melName )
logger.debug( "making %s frame read-only" % self.methodName )
self.frame.setEnable(False)
self.enabledChBx.setValue( isEnabled )
self.row.setEnable( isEnabled )
self.row2.setEnable( isEnabled )
name = self.data['useName']
if name == 'API' :
self.nameMode.setSelect( 1 )
self.altNameText.setEnable(False)
elif name == 'MEL' :
self.nameMode.setSelect( 2 )
self.altNameText.setEnable(False)
else :
self.nameMode.setSelect( 3 )
self.altNameText.setText(name)
self.altNameText.setEnable(True)
if self.overloadPrecedenceColl:
items = self.overloadPrecedenceColl.getCollectionItemArray()
try:
val = self.data.get('overloadIndex', 0)
if val is None:
logger.info( "no wrappable options for method %s" % self.methodName )
self.frame.setEnable( False )
else:
self.overloadPrecedenceColl.setSelect( items[ val ] )
except:
pass
# # ensure we don't use a value that is not valid
# for val in range(val, len(enabledArray)+1):
# try:
# if enabledArray[val]:
# break
# except IndexError:
# val = None
# if val is not None:
# self.overloadPrecedenceColl.setSelect( items[ val ] )
pm.setParent('..')
pm.setParent('..') # frame
pm.setParent('..') # column
return True
def enableCB(self, *args ):
logger.debug( 'setting enabled to %s' % args[0] )
if args[0] == False:
self.data['enabled'] = False
else:
self.data.pop('enabled', None)
self.row.setEnable( args[0] )
def nameTypeCB(self ):
logger.info( 'setting name type' )
selected = self.nameMode.getSelect()
if selected == 1:
val = 'API'
self.altNameText.setEnable(False)
elif selected == 2:
val = 'MEL'
self.altNameText.setEnable(False)
else:
val = str(self.altNameText.getText())
self.altNameText.setEnable(True)
logger.debug( 'data %s' % self.data )
self.data['useName'] = val
def alternateNameCB(self, *args ):
self.data['useName'] = str(args[0])
# def formatAnnotation(self, apiClassName, methodName ):
# defs = []
# try:
# for methodInfo in factories.apiClassInfo[apiClassName]['methods'][methodName] :
# args = ', '.join( [ '%s %s' % (x[1],x[0]) for x in methodInfo['args'] ] )
# defs.append( '%s( %s )' % ( methodName, args ) )
# return '\n'.join( defs )
# except KeyError:
# print "could not find documentation for", apiClassName, methodName
def overloadPrecedenceCB(self, i):
logger.debug( 'overloadPrecedenceCB' )
self.data['overloadIndex'] = i
def melNameChangedCB(self, newMelName):
oldMelName = str(self.melNameTextField.getText())
if oldMelName:
self.uncrossReference( oldMelName )
if newMelName == '[None]':
print "removing melName"
self.data.pop('melName',None)
self.parent.parent.unassignMelMethod( oldMelName )
self.melNameTextField.setText('')
else:
print "adding melName", newMelName
self.crossReference( newMelName )
self.data['melName'] = newMelName
self.parent.parent.assignMelMethod( newMelName )
self.melNameTextField.setText(newMelName)
def populateMelNameMenu(self):
"""called to populate the popup menu for choosing the mel equivalent to an api method"""
self.melNameOptMenu.deleteAllItems()
pm.menuItem(parent=self.melNameOptMenu, label='[None]', command=pm.Callback( MethodRow.melNameChangedCB, self, '[None]' ))
# need to add a listForNone to this in windows
items = self.parent.parent.unassignedMelMethodLister.getAllItems()
if items:
for method in items:
pm.menuItem(parent=self.melNameOptMenu, label=method, command=pm.Callback( MethodRow.melNameChangedCB, self, str(method) ))
def getEnabledArray(self):
"""returns an array of booleans that correspond to each override method and whether they can be wrapped"""
array = []
for i, info in enumerate( self.methodInfoList ):
argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
array.append( argUtil.canBeWrapped() )
return array
def createMethodInstance(self, i ):
#setUITemplate('attributeEditorTemplate', pushTemplate=1)
rowSpacing = [30, 20, 400]
defs = []
#try:
argUtil = factories.ApiArgUtil( self.apiClassName, self.apiMethodName, i )
proto = argUtil.getPrototype( className=False, outputs=True, defaults=False )
enable = argUtil.canBeWrapped()
if argUtil.isDeprecated():
pm.text(l='DEPRECATED')
# main info row
row = pm.rowLayout( '%s_rowMain%s' % (self.methodName,i), nc=3, cw3=rowSpacing, enable=enable )
self.rows.append(row)
pm.text(label='')
if self.overloadPrecedenceColl is not None:
# toggle for overloaded methods
pm.radioButton(label='', collection=self.overloadPrecedenceColl,
enable = enable,
onCommand=pm.Callback( MethodRow.overloadPrecedenceCB, self, i ))
pm.text( l='', #l=proto,
annotation = self.methodInfoList[i]['doc'],
enable = enable)
pm.setParent('..')
try:
argList = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][i]['args']
except (KeyError, IndexError):
argList = self.methodInfoList[i]['args']
returnType = self.methodInfoList[i]['returnType']
types = self.methodInfoList[i]['types']
args = []
for arg , type, direction in argList:
type = str(types[arg])
assert arg != 'return'
self._makeArgRow( i, type, arg, direction, self.methodInfoList[i]['argInfo'][arg]['doc'] )
if returnType:
self._makeArgRow( i, returnType, 'return', 'return', self.methodInfoList[i]['returnInfo']['doc'] )
pm.separator(w=800, h=14)
return enable
# methodInfo = factories.apiClassInfo[self.apiClassName]['methods'][self.apiMethodName][overloadNum]
# args = ', '.join( [ '%s %s' % (x[1],x[0]) for x in methodInfo['args'] ] )
# return '( %s ) --> ' % ( args )
#except:
# print "could not find documentation for", apiClassName, methodName
def setUnitType(self, methodIndex, argName, unitType ):
if self.apiClassName not in factories.apiClassOverrides:
factories.apiClassOverrides[self.apiClassName] = { 'methods' : {} }
methodOverrides = factories.apiClassOverrides[self.apiClassName]['methods']
if self.apiMethodName not in methodOverrides:
methodOverrides[self.apiMethodName] = {}
if argName == 'return':
if methodIndex not in methodOverrides[self.apiMethodName]:
methodOverrides[self.apiMethodName][methodIndex] = { 'returnInfo' : {} }
methodOverrides[self.apiMethodName][methodIndex]['returnInfo']['unitType'] = unitType
else:
if methodIndex not in methodOverrides[self.apiMethodName]:
methodOverrides[self.apiMethodName][methodIndex] = { 'argInfo' : {} }
if argName not in methodOverrides[self.apiMethodName][methodIndex]['argInfo']:
methodOverrides[self.apiMethodName][methodIndex]['argInfo'][argName] = {}
methodOverrides[self.apiMethodName][methodIndex]['argInfo'][argName]['unitType'] = unitType
def setDirection(self, methodIndex, argName, direction ):
if self.apiClassName not in factories.apiClassOverrides:
factories.apiClassOverrides[self.apiClassName] = { 'methods' : {} }
methodOverrides = factories.apiClassOverrides[self.apiClassName]['methods']
if self.apiMethodName not in methodOverrides:
methodOverrides[self.apiMethodName] = {}
if methodIndex not in methodOverrides[self.apiMethodName]:
methodOverrides[self.apiMethodName][methodIndex] = { }
try:
argList = methodOverrides[self.apiMethodName][methodIndex]['args']
except KeyError:
argList = self.methodInfoList[methodIndex]['args']
newArgList = []
inArgs = []
outArgs = []
for i_argName, i_argType, i_direction in argList:
if i_argName == argName:
argInfo = ( i_argName, i_argType, direction )
else:
argInfo = ( i_argName, i_argType, i_direction )
if argInfo[2] == 'in':
inArgs.append( i_argName )
else:
outArgs.append( i_argName )
newArgList.append( argInfo )
methodOverrides[self.apiMethodName][methodIndex] = { }
methodOverrides[self.apiMethodName][methodIndex]['args'] = newArgList
methodOverrides[self.apiMethodName][methodIndex]['inArgs'] = inArgs
methodOverrides[self.apiMethodName][methodIndex]['outArgs'] = outArgs
def _makeArgRow(self, methodIndex, type, argName, direction, annotation=''):
COL1_WIDTH = 260
COL2_WIDTH = 120
pm.rowLayout( nc=4, cw4=[COL1_WIDTH,COL2_WIDTH, 70, 150], **self.layout )
label = str(type)
pm.text( l=label, ann=annotation )
pm.text( l=argName, ann=annotation )
if direction == 'return':
pm.text( l='(result)' )
else:
direction_om = pm.optionMenu(l='', w=60, ann=annotation, cc=pm.CallbackWithArgs( MethodRow.setDirection, self, methodIndex, argName ) )
for unit in ['in', 'out']:
pm.menuItem(l=unit)
direction_om.setValue(direction)
if self._isPotentialUnitType(type) :
om = pm.optionMenu(l='', ann=annotation, cc=pm.CallbackWithArgs( MethodRow.setUnitType, self, methodIndex, argName ) )
for unit in ['unitless', 'linear', 'angular', 'time']:
pm.menuItem(l=unit)
if argName == 'return':
try:
value = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][methodIndex]['returnInfo']['unitType']
except KeyError:
pass
else:
try:
value = factories.apiClassOverrides[self.apiClassName]['methods'][self.apiMethodName][methodIndex]['argInfo'][argName]['unitType']
except KeyError:
pass
try:
om.setValue(value)
except: pass
else:
pm.text( l='', ann=annotation )
pm.setParent('..')
def _isPotentialUnitType(self, type):
type = str(type)
return type == 'MVector' or type.startswith('double')
def _getClass(className):
for module in [pm.nodetypes, pm.datatypes, pm.general]:
try:
pymelClass = getattr(module, className)
return pymelClass
except AttributeError:
pass
def getApiClassName( className ):
pymelClass = _getClass(className)
if pymelClass:
apiClass = None
apiClassName = None
#if cls.__name__ not in ['object']:
try:
apiClass = pymelClass.__dict__[ '__apicls__']
apiClassName = apiClass.__name__
except KeyError:
try:
apiClass = pymelClass.__dict__[ 'apicls']
apiClassName = apiClass.__name__
except KeyError:
#print "could not determine api class for", cls.__name__
apiClassName = None
return apiClassName
else:
logger.warning( "could not find class %s" % (className) )
def getClassHierarchy( className ):
pymelClass = _getClass(className)
if pymelClass:
mro = list( inspect.getmro(pymelClass) )
mro.reverse()
for i, cls in enumerate(mro):
#if cls.__name__ not in ['object']:
try:
apiClass = cls.__dict__[ '__apicls__']
apiClassName = apiClass.__name__
except KeyError:
try:
apiClass = cls.__dict__[ 'apicls']
apiClassName = apiClass.__name__
except KeyError:
#print "could not determine api class for", cls.__name__
apiClassName = None
yield cls.__name__, apiClassName
else:
logger.warning( "could not find class %s" % (className) )
def setManualDefaults():
# set some defaults
# TODO : allow these defaults to be controlled via the UI
pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setScalePivot', 0, 'defaults', 'balance' ), True )
pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setRotatePivot', 0, 'defaults', 'balance' ), True )
pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnTransform', 'methods', 'setRotateOrientation', 0, 'defaults', 'balance' ), True )
pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnSet', 'methods', 'getMembers', 0, 'defaults', 'flatten' ), False )
pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnDagNode', 'methods', 'instanceCount', 0, 'defaults', 'total' ), True )
pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MFnMesh', 'methods', 'createColorSetWithName', 1, 'defaults', 'modifier' ), None )
# add some manual invertibles: THESE MUST BE THE API NAMES
invertibles = [ ('MPlug', 0, 'setCaching', 'isCachingFlagSet') ,
('MPlug', 0, 'setChannelBox', 'isChannelBoxFlagSet'),
('MFnTransform', 0, 'enableLimit', 'isLimited'),
('MFnTransform', 0, 'setLimit', 'limitValue'),
('MFnTransform', 0, 'set', 'transformation'),
('MFnRadialField', 0, 'setType', 'radialType')
]
for className, methodIndex, setter, getter in invertibles:
# append to the class-level invertibles list
curr = pm.util.getCascadingDictItem( factories.apiClassInfo, (className, 'invertibles' ), [] )
pair = (setter, getter)
if pair not in curr:
curr.append( pair )
pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'invertibles'), curr )
# add the individual method entries
pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', setter, methodIndex, 'inverse' ), (getter, True) )
pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', getter, methodIndex, 'inverse' ), (setter, False) )
nonInvertibles = [ ( 'MFnMesh', 0, 'setFaceVertexNormals', 'getFaceVertexNormals' ),
( 'MFnMesh', 0, 'setFaceVertexNormal', 'getFaceVertexNormal' ) ]
for className, methodIndex, setter, getter in nonInvertibles:
pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', setter, methodIndex, 'inverse' ), None )
pm.util.setCascadingDictItem( factories.apiClassOverrides, (className, 'methods', getter, methodIndex, 'inverse' ), None )
fixSpace()
def fixSpace():
"fix the Space enumerator"
enum = pm.util.getCascadingDictItem( factories.apiClassInfo, ('MSpace', 'pymelEnums', 'Space') )
keys = enum._keys.copy()
#print keys
val = keys.pop('postTransform', None)
if val is not None:
keys['object'] = val
newEnum = pm.util.Enum( 'Space', keys )
pm.util.setCascadingDictItem( factories.apiClassOverrides, ('MSpace', 'pymelEnums', 'Space'), newEnum )
else:
logger.warning( "could not fix Space")
def _notifySavingDisabled():
pm.confirmDialog(title='Saving Disabled',
message='Saving using this UI has been disabled until it'
' can be updated. Changes will not be saved.')
def cacheResults():
_notifySavingDisabled()
return
# res = pm.confirmDialog( title='Cache Results?',
# message="Would you like to write your changes to disk? If you choose 'No' your changes will be lost when you restart Maya.",
# button=['Yes','No'],
# cancelButton='No',
# defaultButton='Yes')
# print res
# if res == 'Yes':
# doCacheResults()
# def doCacheResults():
# print "---"
# print "adding manual defaults"
# setManualDefaults()
# print "merging dictionaries"
# # update apiClasIfno with the sparse data stored in apiClassOverrides
# factories.mergeApiClassOverrides()
# print "saving api cache"
# factories.saveApiCache()
# print "saving bridge"
# factories.saveApiMelBridgeCache()
# print "---"
| 42.295385 | 151 | 0.591299 | 34,512 | 0.836898 | 802 | 0.019448 | 837 | 0.020297 | 0 | 0 | 10,561 | 0.256099 |
827aae5cfb257b722009084fcdc82d1efd26e382 | 35 | py | Python | src/test/data/pa3/sample/list_get_element_oob_3.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 4606be0531b3de77411572aae98f73169f46b3b9 | [
"BSD-2-Clause"
]
| 7 | 2021-08-28T18:20:45.000Z | 2022-02-01T07:35:59.000Z | src/test/data/pa3/sample/list_get_element_oob_3.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 4606be0531b3de77411572aae98f73169f46b3b9 | [
"BSD-2-Clause"
]
| 4 | 2020-05-18T01:06:15.000Z | 2020-06-12T19:33:14.000Z | src/test/data/pa3/sample/list_get_element_oob_3.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 4606be0531b3de77411572aae98f73169f46b3b9 | [
"BSD-2-Clause"
]
| 5 | 2019-11-27T05:11:05.000Z | 2021-06-29T14:31:14.000Z | x:[int] = None
x = []
print(x[0])
| 7 | 14 | 0.457143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
827ac159b8342adeb18a832d9a86cfcb0600fb29 | 62 | py | Python | modules/vqvc/__init__.py | reppy4620/VCon | cac3441443cb9b28ffbaa0646ed1826d71cb16e0 | [
"MIT"
]
| 4 | 2021-05-22T03:14:44.000Z | 2022-01-03T04:32:54.000Z | modules/vqvc/__init__.py | reppy4620/VCon | cac3441443cb9b28ffbaa0646ed1826d71cb16e0 | [
"MIT"
]
| null | null | null | modules/vqvc/__init__.py | reppy4620/VCon | cac3441443cb9b28ffbaa0646ed1826d71cb16e0 | [
"MIT"
]
| null | null | null | from .model import VQVCModel
from .pl_model import VQVCModule
| 20.666667 | 32 | 0.83871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
827b053defe8919cad9935212546496cfc58073c | 1,040 | py | Python | kornia/constants.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | [
"ECL-2.0",
"Apache-2.0"
]
| 1 | 2020-06-17T16:57:14.000Z | 2020-06-17T16:57:14.000Z | kornia/constants.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | [
"ECL-2.0",
"Apache-2.0"
]
| null | null | null | kornia/constants.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | [
"ECL-2.0",
"Apache-2.0"
]
| 1 | 2022-01-26T13:39:34.000Z | 2022-01-26T13:39:34.000Z | from typing import Union, TypeVar
from enum import Enum
import torch
pi = torch.tensor(3.14159265358979323846)
T = TypeVar('T', bound='Resample')
U = TypeVar('U', bound='BorderType')
class Resample(Enum):
NEAREST = 0
BILINEAR = 1
BICUBIC = 2
@classmethod
def get(cls, value: Union[str, int, T]) -> T: # type: ignore
if type(value) == str:
return cls[value.upper()] # type: ignore
if type(value) == int:
return cls(value) # type: ignore
if type(value) == cls:
return value # type: ignore
raise TypeError()
class BorderType(Enum):
CONSTANT = 0
REFLECT = 1
REPLICATE = 2
CIRCULAR = 3
@classmethod
def get(cls, value: Union[str, int, U]) -> U: # type: ignore
if type(value) == str:
return cls[value.upper()] # type: ignore
if type(value) == int:
return cls(value) # type: ignore
if type(value) == cls:
return value # type: ignore
raise TypeError()
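# Minimal usage sketch (follows directly from the classmethods above):
#   Resample.get("bilinear") is Resample.BILINEAR
#   BorderType.get(2) is BorderType.REPLICATE
#   Resample.get(Resample.NEAREST) is Resample.NEAREST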
| 24.761905 | 65 | 0.567308 | 849 | 0.816346 | 0 | 0 | 676 | 0.65 | 0 | 0 | 140 | 0.134615 |
827beb02ef352cf0444f8df3acec604c0da03a1c | 1,731 | py | Python | intrepyd/tests/test_openplc.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
]
| 2 | 2021-04-25T17:38:03.000Z | 2022-03-20T20:48:50.000Z | intrepyd/tests/test_openplc.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
]
| 1 | 2016-11-30T22:25:00.000Z | 2017-01-16T22:43:39.000Z | intrepyd/tests/test_openplc.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
]
| null | null | null | import intrepyd
from intrepyd.iec611312py.plcopen import parse_plc_open_file
from intrepyd.iec611312py.stmtprinter import StmtPrinter
import unittest
from . import from_fixture_path
class TestOpenPLC(unittest.TestCase):
def test_simple_1(self):
pous = parse_plc_open_file(from_fixture_path('openplc/simple1.xml'))
self.assertEqual(1, len(pous))
printer = StmtPrinter()
printer.processStatements(pous[0].statements)
self.assertEqual('output1 := (local1 + input1);', printer.result)
def test_datatype_1(self):
pous = parse_plc_open_file(from_fixture_path('openplc/datatype1.xml'))
self.assertEqual(1, len(pous))
def test_if_1(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if1.xml'))
self.assertEqual(1, len(pous))
def test_if_2(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if2.xml'))
self.assertEqual(1, len(pous))
def test_if_3(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if3.xml'))
self.assertEqual(1, len(pous))
def test_if_4(self):
pous = parse_plc_open_file(from_fixture_path('openplc/if4.xml'))
self.assertEqual(1, len(pous))
printer = StmtPrinter()
printer.processStatements(pous[0].statements)
self.assertEqual('IF (100 < (UDINT_TO_DINT((CONST_IN.Tolerance_Max / 100)) * UnitDelay_2_DSTATE)) THEN overInfusion := 1; END_IF;',
printer.result)
# It is slow, as expected
# def test_infusion_pump(self):
# pous = parsePlcOpenFile('tests/openplc/GPCA_SW_Functional_subst.xml')
# self.assertEqual(1, len(pous))
if __name__ == "__main__":
unittest.main()
| 37.630435 | 139 | 0.688619 | 1,499 | 0.865973 | 0 | 0 | 0 | 0 | 0 | 0 | 433 | 0.250144 |
827d64df2b74d446113ab304669eb3fd477b0e80 | 3,506 | py | Python | reveal_graph_embedding/datautil/asu_datautil/asu_read_data.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
]
| 31 | 2015-07-14T16:21:25.000Z | 2021-06-30T14:10:44.000Z | reveal_graph_embedding/datautil/asu_datautil/asu_read_data.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
]
| null | null | null | reveal_graph_embedding/datautil/asu_datautil/asu_read_data.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
]
| 11 | 2016-08-21T03:07:20.000Z | 2020-03-07T03:17:05.000Z | __author__ = 'Georgios Rizos ([email protected])'
import numpy as np
import scipy.sparse as sparse
from reveal_graph_embedding.common import get_file_row_generator
def read_adjacency_matrix(file_path, separator):
"""
Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.
Inputs: - file_path: The path where the adjacency matrix is stored.
- separator: The delimiter among values (e.g. ",", "\t", " ")
Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format.
"""
# Open file
file_row_generator = get_file_row_generator(file_path, separator)
# Initialize lists for row and column sparse matrix arguments
row = list()
col = list()
append_row = row.append
append_col = col.append
# Read all file rows
for file_row in file_row_generator:
source_node = np.int64(file_row[0])
target_node = np.int64(file_row[1])
# Add edge
append_row(source_node)
append_col(target_node)
# Since this is an undirected network also add the reciprocal edge
append_row(target_node)
append_col(source_node)
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
number_of_nodes = np.max(row) # I assume that there are no missing nodes at the end.
# Array count should start from 0.
row -= 1
col -= 1
# Form sparse adjacency matrix
adjacency_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
return adjacency_matrix
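# Usage sketch (hypothetical file name): the edge list is expected to hold 1-based
# "source<sep>target" pairs, one undirected edge per row.
#   adjacency = read_adjacency_matrix("edges.csv", ",")
#   adjacency = adjacency.tocsr()  # convert once if repeated row slicing is needed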
def read_node_label_matrix(file_path, separator, number_of_nodes):
"""
Reads node-label pairs in csv format and returns a list of tuples and a node-label matrix.
Inputs: - file_path: The path where the node-label matrix is stored.
- separator: The delimiter among values (e.g. ",", "\t", " ")
- number_of_nodes: The number of nodes of the full graph. It is possible that not all nodes are labelled.
Outputs: - node_label_matrix: The node-label associations in a NumPy array of tuples format.
- number_of_categories: The number of categories/classes the nodes may belong to.
- labelled_node_indices: A NumPy array containing the labelled node indices.
"""
# Open file
file_row_generator = get_file_row_generator(file_path, separator)
# Initialize lists for row and column sparse matrix arguments
row = list()
col = list()
append_row = row.append
append_col = col.append
# Populate the arrays
for file_row in file_row_generator:
node = np.int64(file_row[0])
label = np.int64(file_row[1])
# Add label
append_row(node)
append_col(label)
number_of_categories = len(set(col)) # I assume that there are no missing labels. There may be missing nodes.
labelled_node_indices = np.array(list(set(row)))
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
# Array count should start from 0.
row -= 1
col -= 1
labelled_node_indices -= 1
# Form sparse adjacency matrix
node_label_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_categories))
node_label_matrix = node_label_matrix.tocsr()
return node_label_matrix, number_of_categories, labelled_node_indices
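# Usage sketch (hypothetical file name), reusing the node count of the full graph:
#   labels, k, labelled = read_node_label_matrix("labels.csv", ",", adjacency.shape[0])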
| 33.711538 | 118 | 0.688534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,576 | 0.449515 |
827fbde3f6b49a475e21f72342cbd95940e44a4d | 1,255 | py | Python | create_tweet_classes.py | jmcguinness11/StockPredictor | 9dd545a11ca9beab6e108d5b8f001f69501af606 | [
"MIT"
]
| null | null | null | create_tweet_classes.py | jmcguinness11/StockPredictor | 9dd545a11ca9beab6e108d5b8f001f69501af606 | [
"MIT"
]
| null | null | null | create_tweet_classes.py | jmcguinness11/StockPredictor | 9dd545a11ca9beab6e108d5b8f001f69501af606 | [
"MIT"
]
| null | null | null | # create_tweet_classes.py
# this assumes the existence of a get_class(day, hour, ticker) function
# that returns the class (0, 1, or -1) for a given hour and ticker
import collections
import json
import random
refined_tweets = collections.defaultdict(list)
#returns label for company and time
def getLabel(ticker, month, day, hour):
return random.randint(-1,1)
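#NOTE: getLabel is a random placeholder; per the header comment it would delegate to a
#real get_class lookup once available, e.g. (sketch): return get_class(day, hour, ticker)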
#parses individual json file
def parseJSON(data, month, day, hour):
results = []
for tweet in data.itervalues():
text = tweet['text']
label = getLabel(tweet['company'], month, day, hour)
results.append([text,label])
return results
def loadData(months, days):
hours = [10, 11, 12, 13, 14]
minutes = [0, 15, 30, 45]
output_data = []
for month in months:
for day in days:
for hour in hours:
for minute in minutes:
filename = 'tweets_{}_{}_{}_{}.dat'.format(month, day, hour, minute)
with open(filename, 'r') as f:
try:
data = json.load(f)
except ValueError as err:
print filename
exit(1)
output_data += parseJSON(data, month, day, hour)
f.close()
print len(output_data)
print output_data[0:10]
return output_data
def main():
days = [9,10,11,12,13,16,17]
loadData([4], days)
if __name__=='__main__':
main()
| 24.607843 | 73 | 0.6749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.220717 |
827fdac046ac07902d8fa5e1aeb478e27e40e24c | 11,538 | py | Python | integration_tests/test_router.py | madfish-solutions/quipuswap-token2token-core | 41fd4293029e2094a564141fb389fd9a1ef19185 | [
"MIT"
]
| null | null | null | integration_tests/test_router.py | madfish-solutions/quipuswap-token2token-core | 41fd4293029e2094a564141fb389fd9a1ef19185 | [
"MIT"
]
| null | null | null | integration_tests/test_router.py | madfish-solutions/quipuswap-token2token-core | 41fd4293029e2094a564141fb389fd9a1ef19185 | [
"MIT"
]
| null | null | null | from unittest import TestCase
import json
from helpers import *
from pytezos import ContractInterface, pytezos, MichelsonRuntimeError
from pytezos.context.mixin import ExecutionContext
token_a = "KT1AxaBxkFLCUi3f8rdDAAxBKHfzY8LfKDRA"
token_b = "KT1PgHxzUXruWG5XAahQzJAjkk4c2sPcM3Ca"
token_c = "KT1RJ6PbjHpwc3M5rw5s2Nbmefwbuwbdxton"
token_d = "KT1Wz32jY2WEwWq8ZaA2C6cYFHGchFYVVczC"
pair_ab = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
}
pair_bc = {
"token_a_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_ac = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_cd = {
"token_a_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
},
"token_b_type" : {
"fa2": {
"token_address": token_d,
"token_id": 3
}
}
}
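# Each pair couples token_a_type with token_b_type. The fa2 variant identifies a token
# by (token_address, token_id) inside a multi-asset contract; the fa12 variant used in
# test_tt_router_mixed_fa2_fa12 below is just a contract address.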
class TokenToTokenRouterTest(TestCase):
@classmethod
def setUpClass(cls):
cls.maxDiff = None
dex_code = open("./integration_tests/compiled/Dex.tz", 'r').read()
cls.dex = ContractInterface.from_michelson(dex_code)
initial_storage_michelson = json.load(open("./integration_tests/compiled/storage.json", 'r'))
cls.init_storage = cls.dex.storage.decode(initial_storage_michelson)
def test_tt_token_to_token_router(self):
amount_in=10_000
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : amount_in,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
self.assertEqual(contract_in["token_address"], token_a)
self.assertEqual(contract_in["amount"], 10_000)
routed_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["token_address"], token_c)
# same swap but one by one
res = chain.interpret(self.dex.swap(
swaps=[{
"pair_id": 0,
"operation": "a_to_b",
}],
amount_in=amount_in,
min_amount_out=1,
receiver=julian,
deadline=100_000
))
transfers = parse_token_transfers(res)
token_b_out = next(v for v in transfers if v["destination"] == julian)
res = chain.interpret(self.dex.swap(
swaps=[{
"pair_id": 1,
"operation": "a_to_b",
}],
amount_in=token_b_out["amount"],
min_amount_out=1,
receiver=julian,
deadline=100_000,
))
transfers = parse_token_transfers(res)
token_c_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["amount"], token_c_out["amount"])
def test_tt_router_triangle(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))
res = chain.execute(self.dex.addPair(pair_bc, 100_000_000_000, 100_000_000_000))
res = chain.execute(self.dex.addPair(pair_ac, 100_000_000_000, 100_000_000_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
},
{
"pair_id": 2,
"operation": "b_to_a",
}
],
"amount_in" : 10_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_c_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_c_out["amount"], 9909) # ~ 9910 by compound interest formula
def test_tt_router_ab_ba(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000_000_000, 100_000_000_000))
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 0,
"operation": "b_to_a",
}
],
"amount_in" : 10_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_out["amount"], 9939)
def test_tt_router_impossible_path(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 1111, 3333))
res = chain.execute(self.dex.addPair(pair_cd, 5555, 7777))
# can't find path
with self.assertRaises(MichelsonRuntimeError):
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : 334,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
with self.assertRaises(MichelsonRuntimeError):
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 0,
"operation": "a_to_b",
}
],
"amount_in" : 334,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
def test_tt_router_cant_overbuy(self):
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 100_000))
res = chain.execute(self.dex.addPair(pair_bc, 10_000, 10_000))
res = chain.execute(self.dex.addPair(pair_ac, 1_000_000, 1_000_000))
# overbuy at the very beginning
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
}
],
"amount_in" : 100_000_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(token_out["amount"], 99_999)
# overbuy at the end
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : 100_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertLess(token_out["amount"], 9_999)
# overbuy in the middle
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "a_to_b",
},
{
"pair_id": 1,
"operation": "a_to_b",
},
{
"pair_id": 2,
"operation": "b_to_a",
}
],
"amount_in" : 10_000_000_000,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
token_out = next(v for v in transfers if v["destination"] == julian)
self.assertLess(token_out["amount"], 9_999)
def test_tt_router_mixed_fa2_fa12(self):
pair_ab = {
"token_a_type" : {
"fa12": token_b,
},
"token_b_type": {
"fa2": {
"token_address": token_a,
"token_id": 1
}
},
}
pair_bc = {
"token_a_type" : {
"fa12": token_b,
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
amount_in=10_000
chain = LocalChain(storage=self.init_storage)
res = chain.execute(self.dex.addPair(pair_ab, 100_000, 300_000))
res = chain.execute(self.dex.addPair(pair_bc, 500_000, 700_000))
# interpret the call without applying it
res = chain.interpret(self.dex.swap({
"swaps" : [
{
"pair_id": 0,
"operation": "b_to_a",
},
{
"pair_id": 1,
"operation": "a_to_b",
}
],
"amount_in" : amount_in,
"min_amount_out" : 1,
"receiver" : julian,
"deadline": 100_000
}))
transfers = parse_token_transfers(res)
contract_in = next(v for v in transfers if v["destination"] == contract_self_address)
self.assertEqual(contract_in["token_address"], token_a)
self.assertEqual(contract_in["amount"], 10_000)
routed_out = next(v for v in transfers if v["destination"] == julian)
self.assertEqual(routed_out["token_address"], token_c)
| 30.68617 | 101 | 0.477639 | 10,128 | 0.877795 | 0 | 0 | 381 | 0.033021 | 0 | 0 | 2,349 | 0.203588 |
8281775b2d035df5a898df5ea5c2730c011f5d85 | 109 | py | Python | practice/practice_perfect/ex7.py | recursivelycurious/wordnik-repl | 9d9e96a8ebc79b95f135d5bc871602b65d2d2b79 | [
"MIT"
]
| null | null | null | practice/practice_perfect/ex7.py | recursivelycurious/wordnik-repl | 9d9e96a8ebc79b95f135d5bc871602b65d2d2b79 | [
"MIT"
]
| 8 | 2017-12-29T21:19:00.000Z | 2018-04-01T05:05:05.000Z | practice/practice_perfect/ex7.py | recursivelycurious/wordnik-repl | 9d9e96a8ebc79b95f135d5bc871602b65d2d2b79 | [
"MIT"
]
| 1 | 2017-12-27T23:45:58.000Z | 2017-12-27T23:45:58.000Z | def remove_duplicates(lst):
new = []
for x in lst:
if x not in new:
new.append(x)
return new
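# Example: remove_duplicates([1, 2, 2, 3, 1]) returns [1, 2, 3] (first occurrences kept, order preserved)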
| 15.571429 | 27 | 0.59633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
8281bdb342d8804d5733ed2d9e90e2c325ef1463 | 4,013 | py | Python | run.py | evilspyboy/twitch-relay-monitor | 45c9c2f02b67f73b5baea53813d8818d673d93ba | [
"MIT"
]
| 1 | 2021-03-19T15:02:38.000Z | 2021-03-19T15:02:38.000Z | run.py | evilspyboy/twitch-relay-monitor | 45c9c2f02b67f73b5baea53813d8818d673d93ba | [
"MIT"
]
| null | null | null | run.py | evilspyboy/twitch-relay-monitor | 45c9c2f02b67f73b5baea53813d8818d673d93ba | [
"MIT"
]
| 1 | 2021-03-19T15:02:38.000Z | 2021-03-19T15:02:38.000Z | import datetime
from datetime import timedelta
import pprint
from config import *
from helper import *
import time
import logging
from logging.handlers import RotatingFileHandler
logger = logging.getLogger('Twitch Relay Monitor')
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('/home/pi/twitch_relay_monitor/logs/app.log', maxBytes=200000, backupCount=2)
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
def print_verbose(comment):
if verbose_mode==1:
#print to screen
print(comment)
else:
logger.info(comment)
#First, start by getting token to access Twitch api
r=get_token(client_id,client_secret,grant_type,scope)
if r == False:
# if there is a problem, end the program
logger.error("Can't Auth user")
exit(1)
# since streamer username is given we need to get its broadcaster id for other requests
broadcaster=get_broadcaster_id(client_id,username)
if broadcaster==False:
# if there is a problem, end the program
logger.error("Can not get broadcster id")
exit(1)
if "access_token" not in r:
# if there is a problem, end the program
logger.error("Access token is missing " + str(r))
exit(1)
access_token=r['access_token'];
expires_in=r['expires_in']
# Fresh token interval will keep track of the time we need to validate the token
fresh_token_interval=token_validate_interval
skip_count=0
while True:
wait_time=online_user_wait_time
# refresh token if expired
if fresh_token_interval <30:
#confirm the token is valid
if is_valid_token(access_token) ==False:
r=get_token(client_id,client_secret,grant_type,scope)
if r ==False:
skip_count=skip_count+1
logger.info("Fresh Token Skip get token , skip:" + str(skip_count))
time.sleep(skip_wait_time)
continue
access_token=r['access_token'];
expires_in=r['expires_in']
fresh_token_interval=token_validate_interval
if is_user_live(client_id,access_token,username):
print_verbose("User ["+username+"] online")
set_stream(1)
user_streaming_flag=1
else:
print_verbose("User ["+username+"] offline")
set_hypetrain(0)
set_follow(0)
set_stream(0)
user_streaming_flag=0
wait_time=user_offline_wait_time
last_hype_train_action=get_last_hype_train_action(client_id,access_token,broadcaster["_id"])
if last_hype_train_action ==False:
skip_count=skip_count+1
logger.info("Hype Train Skip get token , skip:" + str(skip_count))
time.sleep(skip_wait_time)
continue
#retrieve most recent follow event
last_follow_action=get_last_follow_action(client_id,access_token,broadcaster["_id"])
if last_follow_action ==False:
skip_count=skip_count+1
logger.info("Follow Skip get token , skip:" + str(skip_count))
time.sleep(skip_wait_time)
continue
#mark follow if last follow event is < event notification time from current time
if user_streaming_flag==1:
subscribe_time=last_follow_action["data"][0]["followed_at"]
subscribe_time=datetime.datetime.strptime(subscribe_time,'%Y-%m-%dT%H:%M:%SZ')
if datetime.datetime.utcnow() < subscribe_time + timedelta(seconds=event_notification_delay):
print_verbose("Relay Function - Follow Event Active")
set_follow(1)
else:
set_follow(0)
#set hype train state
if(is_train_active(last_hype_train_action["data"])):
print_verbose("Train Active at level " + str(last_hype_train_action["data"][0]["event_data"]['level']))
level=last_hype_train_action["data"][0]["event_data"]['level']
if 1 <= level <= 5:
if user_streaming_flag==1:
logger.info("Relay Function - Hype Train Event")
set_hypetrain(level)
wait_time=5 # active hype train wait time in seconds
else:
print_verbose("Train not active")
set_hypetrain(0)
wait_time=online_user_wait_time
fresh_token_interval=fresh_token_interval-wait_time
if skip_count == max_skip_count:
logger.error("Skip count limit reached")
exit(1)
time.sleep(wait_time)
    #reset skip_count if one request executes without issue within max_skip_count
skip_count=0 | 31.351563 | 107 | 0.76601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,300 | 0.323947 |
8283520b4c3b447717f6ff9b6cc63100ada20554 | 793 | py | Python | aiassistants/assistants/ptype/src/Config.py | wrattler/wrattler | 25d2a178e5efc289b40fa3725b23334636bd349d | [
"MIT"
]
| 56 | 2018-03-21T07:04:44.000Z | 2021-12-26T15:01:42.000Z | aiassistants/assistants/ptype/src/Config.py | wrattler/wrattler | 25d2a178e5efc289b40fa3725b23334636bd349d | [
"MIT"
]
| 181 | 2018-06-07T10:35:23.000Z | 2022-02-26T10:22:54.000Z | aiassistants/assistants/ptype/src/Config.py | wrattler/wrattler | 25d2a178e5efc289b40fa3725b23334636bd349d | [
"MIT"
]
| 9 | 2018-05-04T10:04:49.000Z | 2019-10-07T15:53:50.000Z | class Config:
    """Helps to store settings for an experiment."""
def __init__(self, _experiments_folder_path='experiments', _dataset_name='dataset', _column_names='unknown',
_types={1:'integer', 2:'string', 3:'float', 4:'boolean', 5:'gender', 6:'unknown', 7:'date-iso-8601', 8:'date-eu', 9:'date-non-std-subtype', 10:'date-non-std',
11:'positive integer', 12:'positive float'}):
self.main_experiments_folder = _experiments_folder_path
self.dataset_name = _dataset_name
self.column_names = _column_names
self.types = _types
self.types_as_list = list(_types.values())
columns = ['missing', 'catch-all',]
for key in _types:
columns.append(_types[key])
self.columns = columns | 49.5625 | 175 | 0.630517 | 793 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 239 | 0.301387 |
828565457c47cac1020f3188fe499892855af43c | 12,928 | py | Python | rbc/tests/test_omnisci_array.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
]
| null | null | null | rbc/tests/test_omnisci_array.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
]
| null | null | null | rbc/tests/test_omnisci_array.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
]
| null | null | null | import os
from collections import defaultdict
from rbc.omnisci_backend import Array
from rbc.errors import OmnisciServerError
from numba import types as nb_types
import pytest
rbc_omnisci = pytest.importorskip('rbc.omniscidb')
available_version, reason = rbc_omnisci.is_available()
pytestmark = pytest.mark.skipif(not available_version, reason=reason)
@pytest.fixture(scope='module')
def omnisci():
# TODO: use omnisci_fixture from rbc/tests/__init__.py
config = rbc_omnisci.get_client_config(debug=not True)
m = rbc_omnisci.RemoteOmnisci(**config)
table_name = os.path.splitext(os.path.basename(__file__))[0]
m.sql_execute(f'DROP TABLE IF EXISTS {table_name}')
sqltypes = ['FLOAT[]', 'DOUBLE[]',
'TINYINT[]', 'SMALLINT[]', 'INT[]', 'BIGINT[]',
'BOOLEAN[]']
# todo: TEXT ENCODING DICT, TEXT ENCODING NONE, TIMESTAMP, TIME,
# DATE, DECIMAL/NUMERIC, GEOMETRY: POINT, LINESTRING, POLYGON,
# MULTIPOLYGON, See
# https://www.omnisci.com/docs/latest/5_datatypes.html
colnames = ['f4', 'f8', 'i1', 'i2', 'i4', 'i8', 'b']
table_defn = ',\n'.join('%s %s' % (n, t)
for t, n in zip(sqltypes, colnames))
m.sql_execute(f'CREATE TABLE IF NOT EXISTS {table_name} ({table_defn});')
data = defaultdict(list)
for i in range(5):
for j, n in enumerate(colnames):
if n == 'b':
data[n].append([_i % 2 == 0 for _i in range(-3, 3)])
elif n.startswith('f'):
data[n].append([i * 10 + _i + 0.5 for _i in range(-3, 3)])
else:
data[n].append([i * 10 + _i for _i in range(-3, 3)])
m.load_table_columnar(table_name, **data)
m.table_name = table_name
yield m
try:
m.sql_execute(f'DROP TABLE IF EXISTS {table_name}')
except Exception as msg:
        print('%s in teardown' % (type(msg)))
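# The fixture above creates one table with FLOAT/DOUBLE/TINYINT/SMALLINT/INT/BIGINT/BOOLEAN
# array columns, loads five rows of 6-element arrays, and yields the connected client.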
@pytest.mark.parametrize('c_name', ['int8_t i1', 'int16_t i2', 'int32_t i4', 'int64_t i8',
'float f4', 'double f8'])
@pytest.mark.parametrize('device', ['cpu', 'gpu'])
def test_ptr(omnisci, c_name, device):
omnisci.reset()
if not omnisci.has_cuda and device == 'gpu':
pytest.skip('test requires CUDA-enabled omniscidb server')
from rbc.external import external
if omnisci.compiler is None:
pytest.skip('test requires clang C/C++ compiler')
ctype, cname = c_name.split()
c_code = f'''
#include <stdint.h>
#ifdef __cplusplus
extern "C" {{
#endif
{ctype} mysum_impl({ctype}* x, int n) {{
{ctype} r = 0;
for (int i=0; i < n; i++) {{
r += x[i];
}}
return r;
}}
{ctype} myval_impl({ctype}* x) {{
return *x;
}}
#ifdef __cplusplus
}}
#endif
'''
omnisci.user_defined_llvm_ir[device] = omnisci.compiler(c_code)
mysum_impl = external(f'{ctype} mysum_impl({ctype}*, int32_t)')
myval_impl = external(f'{ctype} myval_impl({ctype}*)')
@omnisci(f'{ctype}({ctype}[])', devices=[device])
def mysum_ptr(x):
return mysum_impl(x.ptr(), len(x))
@omnisci(f'{ctype}({ctype}[], int32_t)', devices=[device])
def myval_ptr(x, i):
return myval_impl(x.ptr(i))
desrc, result = omnisci.sql_execute(
f'select {cname}, mysum_ptr({cname}) from {omnisci.table_name}')
for a, r in result:
if cname == 'i1':
assert sum(a) % 256 == r % 256
else:
assert sum(a) == r
desrc, result = omnisci.sql_execute(
f'select {cname}, myval_ptr({cname}, 0), myval_ptr({cname}, 2) from {omnisci.table_name}')
for a, r0, r2 in result:
assert a[0] == r0
assert a[2] == r2
def test_len_i32(omnisci):
omnisci.reset()
@omnisci('int64(int32[])')
def array_sz_int32(x):
return len(x)
desrc, result = omnisci.sql_execute(
f'select i4, array_sz_int32(i4) from {omnisci.table_name}')
for a, sz in result:
assert len(a) == sz
def test_len_f64(omnisci):
omnisci.reset()
@omnisci('int64(float64[])')
def array_sz_double(x):
return len(x)
desrc, result = omnisci.sql_execute(
f'select f8, array_sz_double(f8) from {omnisci.table_name}')
for a, sz in result:
assert len(a) == sz
@pytest.mark.skipif(available_version[:2] == (5, 1),
reason="skip due to a bug in omniscidb 5.1 (got %s)" % (
available_version,))
def test_getitem_bool(omnisci):
omnisci.reset()
@omnisci('bool(bool[], int64)')
def array_getitem_bool(x, i):
return x[i]
query = f'select b, array_getitem_bool(b, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_i8(omnisci):
omnisci.reset()
@omnisci('int8(int8[], int32)')
def array_getitem_int8(x, i):
return x[i]
query = f'select i1, array_getitem_int8(i1, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_i32(omnisci):
omnisci.reset()
@omnisci('int32(int32[], int32)')
def array_getitem_int32(x, i):
return x[i]
query = f'select i4, array_getitem_int32(i4, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_i64(omnisci):
omnisci.reset()
@omnisci('int64(int64[], int64)')
def array_getitem_int64(x, i):
return x[i]
query = f'select i8, array_getitem_int64(i8, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
def test_getitem_float(omnisci):
omnisci.reset()
@omnisci('double(double[], int32)')
def array_getitem_double(x, i):
return x[i]
query = f'select f8, array_getitem_double(f8, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
assert type(a[2]) == type(item)
@omnisci('float(float[], int64)')
def array_getitem_float(x, i):
return x[i]
query = f'select f4, array_getitem_float(f4, 2) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, item in result:
assert a[2] == item
assert type(a[2]) == type(item)
def test_sum(omnisci):
omnisci.reset()
@omnisci('int32(int32[])')
def array_sum_int32(x):
r = 0
n = len(x)
for i in range(n):
r = r + x[i]
return r
query = f'select i4, array_sum_int32(i4) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for a, s in result:
assert sum(a) == s
@pytest.mark.skipif(available_version[:2] == (5, 1),
reason="skip due to a bug in omniscidb 5.1 (got %s)" % (
available_version,))
def test_even_sum(omnisci):
omnisci.reset()
@omnisci('int32(bool[], int32[])')
def array_even_sum_int32(b, x):
r = 0
n = len(x)
for i in range(n):
if b[i]:
r = r + x[i]
return r
query = f'select b, i4, array_even_sum_int32(b, i4) from {omnisci.table_name}'
desrc, result = omnisci.sql_execute(query)
for b, i4, s in result:
assert sum([i_ for b_, i_ in zip(b, i4) if b_]) == s
def test_array_setitem(omnisci):
omnisci.reset()
@omnisci('double(double[], int32)')
def array_setitem_sum(b, c):
n = len(b)
s = 0
for i in range(n):
b[i] = b[i] * c # changes the value inplace
s += b[i]
b[i] = b[i] / c
return s
query = f'select f8, array_setitem_sum(f8, 4) from {omnisci.table_name}'
_, result = omnisci.sql_execute(query)
for f8, s in result:
assert sum(f8) * 4 == s
def test_array_constructor_noreturn(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
from numba import types
@omnisci('float64(int32)')
def array_noreturn(size):
a = Array(size, types.float64)
b = Array(size, types.float64)
c = Array(size, types.float64)
for i in range(size):
a[i] = b[i] = c[i] = i + 3.0
s = 0.0
for i in range(size):
s += a[i] + b[i] + c[i] - a[i] * b[i]
return s
query = 'select array_noreturn(10)'
_, result = omnisci.sql_execute(query)
r = list(result)[0]
assert (r == (-420.0,))
def test_array_constructor_return(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
from numba import types
from rbc.externals.stdio import printf
@omnisci('float64[](int32)')
def array_return(size):
printf("entering array_return(%i)\n", size)
a = Array(size, types.float64)
b = Array(size, types.float64)
for i in range(size):
a[i] = float(i)
b[i] = float(size - i - 1)
if size % 2:
c = a
else:
c = b
printf("returning array with length %i\n", len(c))
return c
query = 'select array_return(9), array_return(10)'
_, result = omnisci.sql_execute(query)
r = list(result)[0]
assert r == (list(map(float, range(9))),
list(map(float, reversed(range(10)))))
def test_array_constructor_len(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
from numba import types
@omnisci('int64(int32)')
def array_len(size):
a = Array(size, types.float64)
return len(a)
query = 'select array_len(30)'
_, result = omnisci.sql_execute(query)
assert list(result)[0] == (30,)
def test_array_constructor_getitem(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
import numpy as np
@omnisci('double(int32, int32)')
def array_ptr(size, pos):
a = Array(size, np.double)
for i in range(size):
a[i] = i + 0.0
return a[pos]
query = 'select array_ptr(5, 3)'
_, result = omnisci.sql_execute(query)
assert list(result)[0] == (3.0,)
def test_array_constructor_is_null(omnisci):
omnisci.reset()
from rbc.omnisci_backend import Array
@omnisci('int8(int64)')
def array_is_null(size):
a = Array(size, 'double')
return a.is_null()
query = 'select array_is_null(3);'
_, result = omnisci.sql_execute(query)
assert list(result)[0] == (0,)
inps = [('int32', 'i4', 'trunc'), ('int32', 'i4', 'sext'),
('int32', 'i4', 'zext'), ('float', 'f4', 'fptrunc'),
('double', 'f8', 'fpext')]
@pytest.mark.parametrize("typ, col, suffix", inps,
ids=[item[-1] for item in inps])
def test_issue197(omnisci, typ, col, suffix):
omnisci.reset()
import rbc.omnisci_backend as np
from numba import types
cast = dict(
trunc=types.int64,
sext=types.int8,
zext=types.uint8,
fptrunc=types.float64,
fpext=types.float32)[suffix]
def fn_issue197(x):
y = np.zeros_like(x)
for i in range(len(x)):
y[i] = cast(x[i] + 3)
return y
fn_name = f"fn_issue197_{typ}_{suffix}"
fn_issue197.__name__ = fn_name
omnisci(f'{typ}[]({typ}[])')(fn_issue197)
_, result = omnisci.sql_execute(
f'SELECT {col}, {fn_name}({col}) FROM {omnisci.table_name};'
)
column, ret = list(result)[0]
for x, y in zip(column, ret):
assert y == x + 3
def test_issue197_bool(omnisci):
omnisci.reset()
import rbc.omnisci_backend as np
@omnisci('bool[](bool[])')
def fn_issue197_bool(x):
y = np.zeros_like(x)
for i in range(len(x)):
y[i] = bool(x[i])
return y
col = 'b'
fn_name = 'fn_issue197_bool'
_, result = omnisci.sql_execute(
f'SELECT {col}, {fn_name}({col}) FROM {omnisci.table_name};'
)
column, ret = list(result)[0]
for x, y in zip(column, ret):
assert bool(x) == bool(y)
def test_issue109(omnisci):
@omnisci('double[](int32)')
def issue109(size):
a = Array(5, 'double')
for i in range(5):
a[i] = nb_types.double(i)
return a
_, result = omnisci.sql_execute('select issue109(3);')
assert list(result) == [([0.0, 1.0, 2.0, 3.0, 4.0],)]
def test_issue77(omnisci):
@omnisci('int64[]()')
def issue77():
a = Array(5, 'int64')
a.fill(1)
return a
if omnisci.version[:2] >= (5, 8):
_, result = omnisci.sql_execute('select issue77();')
assert list(result)[0][0] == [1, 1, 1, 1, 1]
else:
with pytest.raises(OmnisciServerError) as exc:
_, result = omnisci.sql_execute('select issue77();')
assert exc.match('Could not bind issue77()')
| 27.274262 | 98 | 0.586556 | 0 | 0 | 1,513 | 0.117033 | 7,942 | 0.614325 | 0 | 0 | 3,075 | 0.237856 |
82857e9a33dbe718b0d5cc5a60fda6fb7a1add58 | 4,666 | py | Python | env/lib/python3.8/site-packages/unidecode/x093.py | avdhari/enigma | b7e965a91ca5f0e929c4c719d695f15ccb8b5a2c | [
"MIT"
]
| 48 | 2021-11-20T08:17:53.000Z | 2022-03-19T13:57:15.000Z | venv/lib/python3.6/site-packages/unidecode/x093.py | mrsaicharan1/iiita-updates | a22a0157b90d29b946d0f020e5f76744f73a6bff | [
"Apache-2.0"
]
| 392 | 2015-07-30T14:37:05.000Z | 2022-03-21T16:56:09.000Z | venv/lib/python3.6/site-packages/unidecode/x093.py | mrsaicharan1/iiita-updates | a22a0157b90d29b946d0f020e5f76744f73a6bff | [
"Apache-2.0"
]
| 15 | 2015-10-01T21:31:08.000Z | 2020-05-05T00:03:27.000Z | data = (
'Lun ', # 0x00
'Kua ', # 0x01
'Ling ', # 0x02
'Bei ', # 0x03
'Lu ', # 0x04
'Li ', # 0x05
'Qiang ', # 0x06
'Pou ', # 0x07
'Juan ', # 0x08
'Min ', # 0x09
'Zui ', # 0x0a
'Peng ', # 0x0b
'An ', # 0x0c
'Pi ', # 0x0d
'Xian ', # 0x0e
'Ya ', # 0x0f
'Zhui ', # 0x10
'Lei ', # 0x11
'A ', # 0x12
'Kong ', # 0x13
'Ta ', # 0x14
'Kun ', # 0x15
'Du ', # 0x16
'Wei ', # 0x17
'Chui ', # 0x18
'Zi ', # 0x19
'Zheng ', # 0x1a
'Ben ', # 0x1b
'Nie ', # 0x1c
'Cong ', # 0x1d
'Qun ', # 0x1e
'Tan ', # 0x1f
'Ding ', # 0x20
'Qi ', # 0x21
'Qian ', # 0x22
'Zhuo ', # 0x23
'Qi ', # 0x24
'Yu ', # 0x25
'Jin ', # 0x26
'Guan ', # 0x27
'Mao ', # 0x28
'Chang ', # 0x29
'Tian ', # 0x2a
'Xi ', # 0x2b
'Lian ', # 0x2c
'Tao ', # 0x2d
'Gu ', # 0x2e
'Cuo ', # 0x2f
'Shu ', # 0x30
'Zhen ', # 0x31
'Lu ', # 0x32
'Meng ', # 0x33
'Lu ', # 0x34
'Hua ', # 0x35
'Biao ', # 0x36
'Ga ', # 0x37
'Lai ', # 0x38
'Ken ', # 0x39
'Kazari ', # 0x3a
'Bu ', # 0x3b
'Nai ', # 0x3c
'Wan ', # 0x3d
'Zan ', # 0x3e
'[?] ', # 0x3f
'De ', # 0x40
'Xian ', # 0x41
'[?] ', # 0x42
'Huo ', # 0x43
'Liang ', # 0x44
'[?] ', # 0x45
'Men ', # 0x46
'Kai ', # 0x47
'Ying ', # 0x48
'Di ', # 0x49
'Lian ', # 0x4a
'Guo ', # 0x4b
'Xian ', # 0x4c
'Du ', # 0x4d
'Tu ', # 0x4e
'Wei ', # 0x4f
'Cong ', # 0x50
'Fu ', # 0x51
'Rou ', # 0x52
'Ji ', # 0x53
'E ', # 0x54
'Rou ', # 0x55
'Chen ', # 0x56
'Ti ', # 0x57
'Zha ', # 0x58
'Hong ', # 0x59
'Yang ', # 0x5a
'Duan ', # 0x5b
'Xia ', # 0x5c
'Yu ', # 0x5d
'Keng ', # 0x5e
'Xing ', # 0x5f
'Huang ', # 0x60
'Wei ', # 0x61
'Fu ', # 0x62
'Zhao ', # 0x63
'Cha ', # 0x64
'Qie ', # 0x65
'She ', # 0x66
'Hong ', # 0x67
'Kui ', # 0x68
'Tian ', # 0x69
'Mou ', # 0x6a
'Qiao ', # 0x6b
'Qiao ', # 0x6c
'Hou ', # 0x6d
'Tou ', # 0x6e
'Cong ', # 0x6f
'Huan ', # 0x70
'Ye ', # 0x71
'Min ', # 0x72
'Jian ', # 0x73
'Duan ', # 0x74
'Jian ', # 0x75
'Song ', # 0x76
'Kui ', # 0x77
'Hu ', # 0x78
'Xuan ', # 0x79
'Duo ', # 0x7a
'Jie ', # 0x7b
'Zhen ', # 0x7c
'Bian ', # 0x7d
'Zhong ', # 0x7e
'Zi ', # 0x7f
'Xiu ', # 0x80
'Ye ', # 0x81
'Mei ', # 0x82
'Pai ', # 0x83
'Ai ', # 0x84
'Jie ', # 0x85
'[?] ', # 0x86
'Mei ', # 0x87
'Chuo ', # 0x88
'Ta ', # 0x89
'Bang ', # 0x8a
'Xia ', # 0x8b
'Lian ', # 0x8c
'Suo ', # 0x8d
'Xi ', # 0x8e
'Liu ', # 0x8f
'Zu ', # 0x90
'Ye ', # 0x91
'Nou ', # 0x92
'Weng ', # 0x93
'Rong ', # 0x94
'Tang ', # 0x95
'Suo ', # 0x96
'Qiang ', # 0x97
'Ge ', # 0x98
'Shuo ', # 0x99
'Chui ', # 0x9a
'Bo ', # 0x9b
'Pan ', # 0x9c
'Sa ', # 0x9d
'Bi ', # 0x9e
'Sang ', # 0x9f
'Gang ', # 0xa0
'Zi ', # 0xa1
'Wu ', # 0xa2
'Ying ', # 0xa3
'Huang ', # 0xa4
'Tiao ', # 0xa5
'Liu ', # 0xa6
'Kai ', # 0xa7
'Sun ', # 0xa8
'Sha ', # 0xa9
'Sou ', # 0xaa
'Wan ', # 0xab
'Hao ', # 0xac
'Zhen ', # 0xad
'Zhen ', # 0xae
'Luo ', # 0xaf
'Yi ', # 0xb0
'Yuan ', # 0xb1
'Tang ', # 0xb2
'Nie ', # 0xb3
'Xi ', # 0xb4
'Jia ', # 0xb5
'Ge ', # 0xb6
'Ma ', # 0xb7
'Juan ', # 0xb8
'Kasugai ', # 0xb9
'Habaki ', # 0xba
'Suo ', # 0xbb
'[?] ', # 0xbc
'[?] ', # 0xbd
'[?] ', # 0xbe
'Na ', # 0xbf
'Lu ', # 0xc0
'Suo ', # 0xc1
'Ou ', # 0xc2
'Zu ', # 0xc3
'Tuan ', # 0xc4
'Xiu ', # 0xc5
'Guan ', # 0xc6
'Xuan ', # 0xc7
'Lian ', # 0xc8
'Shou ', # 0xc9
'Ao ', # 0xca
'Man ', # 0xcb
'Mo ', # 0xcc
'Luo ', # 0xcd
'Bi ', # 0xce
'Wei ', # 0xcf
'Liu ', # 0xd0
'Di ', # 0xd1
'Qiao ', # 0xd2
'Cong ', # 0xd3
'Yi ', # 0xd4
'Lu ', # 0xd5
'Ao ', # 0xd6
'Keng ', # 0xd7
'Qiang ', # 0xd8
'Cui ', # 0xd9
'Qi ', # 0xda
'Chang ', # 0xdb
'Tang ', # 0xdc
'Man ', # 0xdd
'Yong ', # 0xde
'Chan ', # 0xdf
'Feng ', # 0xe0
'Jing ', # 0xe1
'Biao ', # 0xe2
'Shu ', # 0xe3
'Lou ', # 0xe4
'Xiu ', # 0xe5
'Cong ', # 0xe6
'Long ', # 0xe7
'Zan ', # 0xe8
'Jian ', # 0xe9
'Cao ', # 0xea
'Li ', # 0xeb
'Xia ', # 0xec
'Xi ', # 0xed
'Kang ', # 0xee
'[?] ', # 0xef
'Beng ', # 0xf0
'[?] ', # 0xf1
'[?] ', # 0xf2
'Zheng ', # 0xf3
'Lu ', # 0xf4
'Hua ', # 0xf5
'Ji ', # 0xf6
'Pu ', # 0xf7
'Hui ', # 0xf8
'Qiang ', # 0xf9
'Po ', # 0xfa
'Lin ', # 0xfb
'Suo ', # 0xfc
'Xiu ', # 0xfd
'San ', # 0xfe
'Cheng ', # 0xff
)
| 18.015444 | 21 | 0.388556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,119 | 0.668453 |
82858d5820f148f4cd403dae133ec9b5dc1ebb08 | 1,785 | py | Python | src/core/default/commands/bucket/utils.py | cdev-framework/cdev-sdk | 06cd7b40936ab063d1d8fd1a7d9f6882750e8a96 | [
"BSD-3-Clause-Clear"
]
| 2 | 2022-02-28T02:51:59.000Z | 2022-03-24T15:23:18.000Z | src/core/default/commands/bucket/utils.py | cdev-framework/cdev-sdk | 06cd7b40936ab063d1d8fd1a7d9f6882750e8a96 | [
"BSD-3-Clause-Clear"
]
| null | null | null | src/core/default/commands/bucket/utils.py | cdev-framework/cdev-sdk | 06cd7b40936ab063d1d8fd1a7d9f6882750e8a96 | [
"BSD-3-Clause-Clear"
]
| null | null | null | from dataclasses import dataclass
import re
from tokenize import group
from core.constructs.resource import ResourceModel
from core.constructs.workspace import Workspace
RUUID = "cdev::simple::bucket"
def get_cloud_output_from_cdev_name(component_name: str, cdev_name: str) -> str:
try:
ws = Workspace.instance()
cloud_output = ws.get_backend().get_cloud_output_by_name(
ws.get_resource_state_uuid(), component_name, RUUID, cdev_name
)
return cloud_output
except Exception as e:
print(f"Could not find resource {component_name}:{RUUID}:{cdev_name}")
print(e)
return None
def get_resource_from_cdev_name(component_name: str, cdev_name: str) -> ResourceModel:
try:
ws = Workspace.instance()
resource = ws.get_backend().get_resource_by_name(
ws.get_resource_state_uuid(), component_name, RUUID, cdev_name
)
return resource
except Exception as e:
print(f"Could not find resource {component_name}:{RUUID}:{cdev_name}")
print(e)
return None
remote_name_regex = "bucket://([a-z,_]+).([a-z,_]+)/?(\S+)?"
compiled_regex = re.compile(remote_name_regex)
@dataclass
class remote_location:
component_name: str
cdev_bucket_name: str
path: str
def is_valid_remote(name: str) -> bool:
return True if compiled_regex.match(name) else False
def parse_remote_location(name: str) -> remote_location:
match = compiled_regex.match(name)
if not match:
raise Exception(
"provided name {name} does not match regex for a remote bucket object"
)
return remote_location(
component_name=match.group(1),
cdev_bucket_name=match.group(2),
path=match.group(3),
)
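# Illustrative example (made-up names):
#   parse_remote_location("bucket://comp.my_bucket/some/key") returns
#   remote_location(component_name="comp", cdev_bucket_name="my_bucket", path="some/key");
# the trailing path group is optional and comes back as None when absent.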
| 25.869565 | 86 | 0.678431 | 86 | 0.048179 | 0 | 0 | 97 | 0.054342 | 0 | 0 | 258 | 0.144538 |
8285973ff004a3b86ceb55d9c1d9f9899c59ee73 | 7,434 | py | Python | examples/blank_cylinders.py | reflectometry/osrefl | ddf55d542f2eab2a29fd6ffc862379820a06d5c7 | [
"BSD-3-Clause"
]
| 2 | 2015-05-21T15:16:46.000Z | 2015-10-23T17:47:36.000Z | examples/blank_cylinders.py | reflectometry/osrefl | ddf55d542f2eab2a29fd6ffc862379820a06d5c7 | [
"BSD-3-Clause"
]
| null | null | null | examples/blank_cylinders.py | reflectometry/osrefl | ddf55d542f2eab2a29fd6ffc862379820a06d5c7 | [
"BSD-3-Clause"
]
| null | null | null | from greens_thm_form import greens_form_line, greens_form_shape
from numpy import arange, linspace, float64, indices, zeros_like, ones_like, pi, sin, complex128, array, exp, newaxis, cumsum, sum, cos, sin, log, log10
from osrefl.theory.DWBAGISANS import dwbaWavefunction
class shape:
def __init__(self, name):
self.name = name
self.points = []
self.sld = 0.0
self.sldi = 0.0
def rectangle(x0, y0, dx, dy, sld=0.0, sldi=0.0):
#generate points for a rectangle
rect = shape('rectangle')
rect.points = [[x0,y0], [x0+dx, y0], [x0+dx, y0+dy], [x0, y0+dy]]
rect.sld = sld
rect.sldi = sldi
rect.area = dx * dy
return rect
def sawtooth(z, n=6, x_length=3000.0, base_width=500.0, height=300.0, sld=0.0, sldi=0.0, sld_front=0.0, sldi_front=0.0):
if z>height:
        return [], sld_front, sldi_front
width = (z / height) * base_width
front_width = base_width - width
rects = [rectangle(0, base_width*(i+0.5) - width/2.0, x_length, width, sld, sldi) for i in range(n)]
# now rectangles for the gaps between the sawtooths...
if (sld_front !=0.0 and sldi_front != 0.0):
front_rects = [rectangle(0, 0, x_length, front_width/2.0, sld_front, sldi_front)]
front_rects.extend([rectangle(0, base_width*(i+0.5)+width/2.0, x_length, front_width, sld_front, sldi_front) for i in range(1,n-1)])
front_rects.append(rectangle(0, base_width*(n-0.5)+width/2.0, x_length, front_width/2.0, sld_front, sldi_front))
rects.extend(front_rects)
# now calculate the average SLD (nuclear) for the layer
avg_sld = (width * sld + front_width * sld_front) / base_width
avg_sldi = (width * sldi + front_width * sldi_front) / base_width
return rects, avg_sld, avg_sldi
def arc(r, theta_start, theta_end, x_center, y_center, theta_step=1.0, close=True, sld=0.0, sldi=0.0, ):
a = shape('arc')
a.theta_start = theta_start
a.theta_end = theta_end
a.area = pi * r**2 * abs(theta_end - theta_start)/360.0
if close == True:
a.points.append([x_center, y_center]) # center point
numpoints = (theta_end - theta_start) / theta_step + 1
thetas = linspace(theta_start, theta_end, numpoints) * pi/180 # to radians
for th in thetas:
a.points.append([r*cos(th) + x_center, r*sin(th) + y_center])
a.sld = sld
a.sldi = sldi
return a
def limit_cyl(arc, xmin=0.0, xmax=0.0, ymin=0.0, ymax=0.0):
new_arc = shape('arc')
new_arc.sld = arc.sld
new_arc.sldi = arc.sldi
new_arc.theta_start = arc.theta_start
new_arc.theta_end = arc.theta_end
#new_arc.area = arc.area
for point in arc.points:
if (point[0] >= xmin) and (point[0] <= xmax) and (point[1] >=ymin) and (point[1] <= ymax):
new_arc.points.append(point)
if len(new_arc.points) < 3:
new_arc.area = 0.0
else:
new_arc.area = (len(new_arc.points) - 2) / 360.0 * arc.area
return new_arc
def conj(sld):
conjugate_sld = sld.copy()
conjugate_sld[:,2] *= -1
return conjugate_sld
# alternating SLD
wavelength = 1.24 # x-ray wavelength, Angstroms
spacing = 600.0 # distance between cylinder centers
radius = 200.0 # Angstroms, radius of cylinders
thickness = 300.0 # Angstrom, thickness of cylinder layer
sublayer_thickness = 200.0 # Angstrom, full layer of matrix below cylinders
matrix_sld = pi/(wavelength**2) * 2.0 * 1.0e-6 # substrate
matrix_sldi = pi/(wavelength**2) * 2.0 * 1.0e-7 # absorption in substrate
cyl_sld = 0.0
cyl_sldi = 0.0 # cylinders are holes in matrix
unit_dx = 2.0 * spacing
unit_dy = 1.0 * spacing
matrix = rectangle(0,0, 3000, 3000, matrix_sld, matrix_sldi)
cylinders = []
centers = []
for i in range(3):
for j in range(6):
x0 = i * 2.0 * spacing
y0 = j * spacing
x1 = x0 + spacing # basis
y1 = y0 + spacing/2.0
cylinders.append(arc(radius, 0.0, 360.0, x0, y0, sld=cyl_sld, sldi=cyl_sldi))
cylinders.append(arc(radius, 0.0, 360.0, x1, y1, sld=cyl_sld, sldi=cyl_sldi))
cyl_area = 0.0
for cyl in cylinders:
cyl_area += cyl.area
clipped_cylinders = [limit_cyl(cyl, xmin=0.0, xmax=3000.0, ymin=0.0, ymax=3000.0) for cyl in cylinders]
clipped_cyl_area = 0.0
for cyl in clipped_cylinders:
clipped_cyl_area += cyl.area
print "clipped_cyl_area / matrix.area = ", clipped_cyl_area / matrix.area
print "ratio should be 0.3491 for FCT planar array with a/b = 2 and r = a/6"
avg_sld = (matrix.area * matrix_sld + clipped_cyl_area * cyl_sld) / matrix.area
avg_sldi = (matrix.area * matrix_sldi + clipped_cyl_area * cyl_sldi) / matrix.area
front_sld = 0.0 # air
back_sld = pi/(wavelength**2) * 2.0 * 5.0e-6 # substrate
back_sldi = pi/(wavelength**2) * 2.0 * 7.0e-8 # absorption in substrate
qz = linspace(0.01, 0.21, 501)
qy = linspace(-0.1, 0.1, 500)
qx = ones_like(qy, dtype=complex128) * 1e-8
SLDArray = [ [0,0,0], # air
[avg_sld, thickness, avg_sldi], # sample
[matrix_sld, sublayer_thickness, matrix_sldi], # full matrix layer under cylinders
[back_sld, 0, back_sldi] ]
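# Build the in-plane structure factor FT(qx, qy) from Green's-theorem line integrals over each
# shape: the (empty) cylinders plus the matrix, minus the laterally averaged SLD so that only
# the contrast relative to the average layer remains.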
FT = zeros_like(qx, dtype=complex128)
for cyl in clipped_cylinders:
FT += greens_form_shape(cyl.points, qx, qy) * (cyl.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (matrix.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (-avg_sld)
SLDArray = array(SLDArray)
def calc_gisans(alpha_in, show_plot=True):
#alpha_in = 0.25 # incoming beam angle
kz_in_0 = 2*pi/wavelength * sin(alpha_in * pi/180.0)
    kz_out_0 = kz_in_0 - qz
wf_in = dwbaWavefunction(kz_in_0, SLDArray)
wf_out = dwbaWavefunction(-kz_out_0, conj(SLDArray))
kz_in_l = wf_in.kz_l
kz_out_l = -wf_out.kz_l
zs = cumsum(SLDArray[1:-1,1])
dz = SLDArray[1:-1,1][:,newaxis]
z_array = array(zs)[:,newaxis]
qrt_inside = kz_in_l[1] - kz_out_l[1]
qtt_inside = kz_in_l[1] + kz_out_l[1]
qtr_inside = -kz_in_l[1] + kz_out_l[1]
qrr_inside = -kz_in_l[1] - kz_out_l[1]
# the overlap is the forward-moving amplitude c in psi_in multiplied by
# the forward-moving amplitude in the time-reversed psi_out, which
# ends up being the backward-moving amplitude d in the non-time-reversed psi_out
# (which is calculated by the wavefunction calculator)
# ... and vice-verso for d and c in psi_in and psi_out
overlap = wf_out.d[1] * wf_in.c[1] / (1j * qtt_inside) * (exp(1j * qtt_inside * thickness) - 1.0)
overlap += wf_out.c[1] * wf_in.d[1] / (1j * qrr_inside) * (exp(1j * qrr_inside * thickness) - 1.0)
overlap += wf_out.d[1] * wf_in.d[1] / (1j * qtr_inside) * (exp(1j * qtr_inside * thickness) - 1.0)
overlap += wf_out.c[1] * wf_in.c[1] / (1j * qrt_inside) * (exp(1j * qrt_inside * thickness) - 1.0)
overlap_BA = 1.0 / (1j * qz) * (exp(1j * qz * thickness) - 1.0)
overlap_BA += 1.0 / (-1j * qz) * (exp(-1j * qz * thickness) - 1.0)
gisans = overlap[:,newaxis] * FT[newaxis, :]
gisans_BA = overlap_BA[:,newaxis] * FT[newaxis, :]
extent = [qy.min(), qy.max(), qz.min(), qz.max()]
if show_plot == True:
from pylab import imshow, figure, colorbar
figure()
imshow(log10(abs(gisans)**2), origin='lower', extent=extent, aspect='auto')
colorbar()
figure()
imshow(log10(abs(gisans_BA)**2), origin='lower', extent=extent, aspect='auto')
colorbar()
return gisans, gisans_BA
| 37.356784 | 152 | 0.644471 | 139 | 0.018698 | 0 | 0 | 0 | 0 | 0 | 0 | 1,071 | 0.144068 |
82866aa0a6c01bbd11d8219aa01d2f4e9089b2ed | 3,750 | py | Python | EXAMPLE/test_backtest/MACD_JCSC.py | evsteel/QUANTAXIS | 50e0116b2b52e6bbac6819d5f039608bf4a17367 | [
"MIT"
]
| 2 | 2018-10-29T12:01:55.000Z | 2021-03-05T10:28:59.000Z | EXAMPLE/test_backtest/MACD_JCSC.py | evsteel/QUANTAXIS | 50e0116b2b52e6bbac6819d5f039608bf4a17367 | [
"MIT"
]
| 1 | 2019-01-23T04:46:52.000Z | 2019-01-23T04:46:52.000Z | EXAMPLE/test_backtest/MACD_JCSC.py | evsteel/QUANTAXIS | 50e0116b2b52e6bbac6819d5f039608bf4a17367 | [
"MIT"
]
| 2 | 2018-11-30T07:52:14.000Z | 2021-05-28T23:00:20.000Z | # -*- coding: utf-8 -*-
# Demo: MACD strategy
# src: ./test_backtest/MACD_JCSC.py
# jupyter: ./test_backtest/QUANTAXIS回测分析全过程讲解.ipynb
# paper: ./test_backtest/QUANTAXIS回测分析全过程讲解.md
import QUANTAXIS as QA
import numpy as np
import pandas as pd
import datetime
st1=datetime.datetime.now()
# define the MACD strategy
def MACD_JCSC(dataframe, SHORT=12, LONG=26, M=9):
"""
    1. A DIF cross above DEA is a reference buy signal (golden cross).
    2. A DIF cross below DEA is a reference sell signal (dead cross).
"""
CLOSE = dataframe.close
DIFF = QA.EMA(CLOSE, SHORT) - QA.EMA(CLOSE, LONG)
DEA = QA.EMA(DIFF, M)
MACD = 2*(DIFF-DEA)
CROSS_JC = QA.CROSS(DIFF, DEA)
CROSS_SC = QA.CROSS(DEA, DIFF)
ZERO = 0
return pd.DataFrame({'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})
# create account
Account = QA.QA_Account()
Broker = QA.QA_BacktestBroker()
Account.reset_assets(1000000)
Account.account_cookie = 'macd_stock'
QA.QA_SU_save_strategy('MACD_JCSC','Indicator',Account.account_cookie)
# get data from mongodb
data = QA.QA_fetch_stock_day_adv(
['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')
data = data.to_qfq()
# add indicator
ind = data.add_func(MACD_JCSC)
# ind.xs('000001',level=1)['2018-01'].plot()
data_forbacktest=data.select_time('2018-01-01','2018-05-01')
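# walk forward one trading day at a time; within each day, loop over the securities and
# trade on the MACD golden cross (buy) / dead cross (sell) signals computed above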
for items in data_forbacktest.panel_gen:
for item in items.security_gen:
daily_ind=ind.loc[item.index]
if daily_ind.CROSS_JC.iloc[0]>0:
order=Account.send_order(
code=item.code[0],
time=item.date[0],
amount=1000,
towards=QA.ORDER_DIRECTION.BUY,
price=0,
order_model=QA.ORDER_MODEL.CLOSE,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
#print(item.to_json()[0])
Broker.receive_order(QA.QA_Event(order=order,market_data=item))
trade_mes=Broker.query_orders(Account.account_cookie,'filled')
res=trade_mes.loc[order.account_cookie,order.realorder_id]
order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
elif daily_ind.CROSS_SC.iloc[0]>0:
#print(item.code)
if Account.sell_available.get(item.code[0], 0)>0:
order=Account.send_order(
code=item.code[0],
time=item.date[0],
amount=Account.sell_available.get(item.code[0], 0),
towards=QA.ORDER_DIRECTION.SELL,
price=0,
order_model=QA.ORDER_MODEL.MARKET,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
#print
Broker.receive_order(QA.QA_Event(order=order,market_data=item))
trade_mes=Broker.query_orders(Account.account_cookie,'filled')
res=trade_mes.loc[order.account_cookie,order.realorder_id]
order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
Account.settle()
print('TIME -- {}'.format(datetime.datetime.now()-st1))
print(Account.history)
print(Account.history_table)
print(Account.daily_hold)
# create Risk analysis
Risk = QA.QA_Risk(Account)
Account.save()
Risk.save()
# print(Risk.message)
# print(Risk.assets)
# Risk.plot_assets_curve()
# plt=Risk.plot_dailyhold()
# plt.show()
# plt1=Risk.plot_signal()
# plt.show()
# performance=QA.QA_Performance(Account)
# plt=performance.plot_pnlmoney(performance.pnl_fifo)
# plt.show()
# Risk.assets.plot()
# Risk.benchmark_assets.plot()
# save result
#account_info = QA.QA_fetch_account({'account_cookie': 'user_admin_macd'})
#account = QA.QA_Account().from_message(account_info[0])
#print(account)
| 32.051282 | 123 | 0.6464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,156 | 0.301513 |
8286a3142dc78c8279d55cc22186d074f451c53d | 6,247 | py | Python | tests/zquantum/qaoa/ansatzes/farhi_ansatz_test.py | zapatacomputing/z-quantum-qaoa | a13a99939ee41c760fdfb302e5f4944e087a09a7 | [
"Apache-2.0"
]
| 3 | 2020-10-06T13:54:40.000Z | 2021-07-04T21:02:14.000Z | tests/zquantum/qaoa/ansatzes/farhi_ansatz_test.py | zapatacomputing/z-quantum-qaoa | a13a99939ee41c760fdfb302e5f4944e087a09a7 | [
"Apache-2.0"
]
| 34 | 2020-04-30T02:52:31.000Z | 2022-03-30T19:19:14.000Z | tests/zquantum/qaoa/ansatzes/farhi_ansatz_test.py | zapatacomputing/z-quantum-qaoa | a13a99939ee41c760fdfb302e5f4944e087a09a7 | [
"Apache-2.0"
]
| 5 | 2020-06-24T10:57:01.000Z | 2021-07-09T01:14:16.000Z | from zquantum.core.interfaces.ansatz_test import AnsatzTests
from zquantum.core.circuits import Circuit, H, RX, RZ
from zquantum.core.utils import compare_unitary
from zquantum.core.openfermion import change_operator_type
from zquantum.qaoa.ansatzes.farhi_ansatz import (
QAOAFarhiAnsatz,
create_farhi_qaoa_circuits,
create_all_x_mixer_hamiltonian,
)
from openfermion import QubitOperator, IsingOperator
import pytest
import numpy as np
import sympy
class TestQAOAFarhiAnsatz(AnsatzTests):
@pytest.fixture
def ansatz(self):
cost_hamiltonian = QubitOperator((0, "Z")) + QubitOperator((1, "Z"))
mixer_hamiltonian = QubitOperator((0, "X")) + QubitOperator((1, "X"))
return QAOAFarhiAnsatz(
number_of_layers=1,
cost_hamiltonian=cost_hamiltonian,
mixer_hamiltonian=mixer_hamiltonian,
)
@pytest.fixture
def beta(self):
return sympy.Symbol("beta_0")
@pytest.fixture
def gamma(self):
return sympy.Symbol("gamma_0")
@pytest.fixture
def symbols_map(self, beta, gamma):
return {beta: 0.5, gamma: 0.7}
@pytest.fixture
def target_unitary(self, beta, gamma, symbols_map):
target_circuit = Circuit()
target_circuit += H(0)
target_circuit += H(1)
target_circuit += RZ(2 * gamma)(0)
target_circuit += RZ(2 * gamma)(1)
target_circuit += RX(2 * beta)(0)
target_circuit += RX(2 * beta)(1)
return target_circuit.bind(symbols_map).to_unitary()
def test_set_cost_hamiltonian(self, ansatz):
# Given
new_cost_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))
# When
ansatz.cost_hamiltonian = new_cost_hamiltonian
# Then
assert ansatz._cost_hamiltonian == new_cost_hamiltonian
def test_set_cost_hamiltonian_invalidates_circuit(self, ansatz):
# Given
new_cost_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))
# When
ansatz.cost_hamiltonian = new_cost_hamiltonian
# Then
assert ansatz._parametrized_circuit is None
def test_set_mixer_hamiltonian(self, ansatz):
# Given
new_mixer_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))
# When
ansatz.mixer_hamiltonian = new_mixer_hamiltonian
# Then
ansatz._mixer_hamiltonian == new_mixer_hamiltonian
def test_set_mixer_hamiltonian_invalidates_circuit(self, ansatz):
# Given
new_mixer_hamiltonian = QubitOperator((0, "Z")) - QubitOperator((1, "Z"))
# When
ansatz.mixer_hamiltonian = new_mixer_hamiltonian
# Then
assert ansatz._parametrized_circuit is None
def test_get_number_of_qubits(self, ansatz):
# Given
new_cost_hamiltonian = (
QubitOperator((0, "Z")) + QubitOperator((1, "Z")) + QubitOperator((2, "Z"))
)
target_number_of_qubits = 3
# When
ansatz.cost_hamiltonian = new_cost_hamiltonian
# Then
assert ansatz.number_of_qubits == target_number_of_qubits
def test_get_number_of_qubits_with_ising_hamiltonian(self, ansatz):
# Given
new_cost_hamiltonian = (
QubitOperator((0, "Z")) + QubitOperator((1, "Z")) + QubitOperator((2, "Z"))
)
new_cost_hamiltonian = change_operator_type(new_cost_hamiltonian, IsingOperator)
target_number_of_qubits = 3
# When
ansatz.cost_hamiltonian = new_cost_hamiltonian
# Then
assert ansatz.number_of_qubits == target_number_of_qubits
def test_get_parametrizable_circuit(self, ansatz, beta, gamma):
# Then
assert ansatz.parametrized_circuit.free_symbols == [
gamma,
beta,
]
def test_generate_circuit(self, ansatz, symbols_map, target_unitary):
# When
parametrized_circuit = ansatz._generate_circuit()
evaluated_circuit = parametrized_circuit.bind(symbols_map)
final_unitary = evaluated_circuit.to_unitary()
# Then
assert compare_unitary(final_unitary, target_unitary, tol=1e-10)
def test_generate_circuit_with_ising_operator(
self, ansatz, symbols_map, target_unitary
):
# When
ansatz.cost_hamiltonian = change_operator_type(
ansatz.cost_hamiltonian, IsingOperator
)
parametrized_circuit = ansatz._generate_circuit()
evaluated_circuit = parametrized_circuit.bind(symbols_map)
final_unitary = evaluated_circuit.to_unitary()
# Then
assert compare_unitary(final_unitary, target_unitary, tol=1e-10)
def test_create_farhi_qaoa_circuits():
# Given
hamiltonians = [
QubitOperator("Z0 Z1"),
QubitOperator("Z0") + QubitOperator("Z1"),
]
number_of_layers = 2
# When
circuits = create_farhi_qaoa_circuits(hamiltonians, number_of_layers)
# Then
assert len(circuits) == len(hamiltonians)
for circuit in circuits:
assert isinstance(circuit, Circuit)
def test_create_farhi_qaoa_circuits_when_number_of_layers_is_list():
# Given
hamiltonians = [
QubitOperator("Z0 Z1"),
QubitOperator("Z0") + QubitOperator("Z1"),
]
number_of_layers = [2, 3]
# When
circuits = create_farhi_qaoa_circuits(hamiltonians, number_of_layers)
# Then
assert len(circuits) == len(hamiltonians)
for circuit in circuits:
assert isinstance(circuit, Circuit)
def test_create_farhi_qaoa_circuits_fails_when_length_of_inputs_is_not_equal():
# Given
hamiltonians = [
QubitOperator("Z0 Z1"),
QubitOperator("Z0") + QubitOperator("Z1"),
]
number_of_layers = [2]
# When
with pytest.raises(AssertionError):
create_farhi_qaoa_circuits(hamiltonians, number_of_layers)
def test_create_all_x_mixer_hamiltonian():
# Given
number_of_qubits = 4
target_operator = (
QubitOperator("X0")
+ QubitOperator("X1")
+ QubitOperator("X2")
+ QubitOperator("X3")
)
# When
operator = create_all_x_mixer_hamiltonian(number_of_qubits)
# Then
assert operator == target_operator
| 29.328638 | 88 | 0.664959 | 4,228 | 0.676805 | 0 | 0 | 1,003 | 0.160557 | 0 | 0 | 346 | 0.055387 |
8287b90a2bda6ffe57a0b5c598cfc436f9b7c2a3 | 3,297 | py | Python | utils/parser.py | scalar42/scholar-alerts-assistant | 5b674f3784d09ced8a6c17a653d9bdfa08947125 | [
"MIT"
]
| null | null | null | utils/parser.py | scalar42/scholar-alerts-assistant | 5b674f3784d09ced8a6c17a653d9bdfa08947125 | [
"MIT"
]
| null | null | null | utils/parser.py | scalar42/scholar-alerts-assistant | 5b674f3784d09ced8a6c17a653d9bdfa08947125 | [
"MIT"
]
| null | null | null | from html.parser import HTMLParser
class Paper():
def __init__(self):
self.title = ""
self.source_link = ""
self.authr_and_pub = ""
# self.publication = ""
self.abstract = ""
self.star_link = ""
def add_title(self, title):
self.title = title
return self.check_complete()
def add_source_link(self, source_link):
self.source_link = source_link
return self.check_complete()
def add_authr_and_pub(self, authr_and_pub):
self.authr_and_pub = authr_and_pub
return self.check_complete()
# def add_publication(self, publication):
# self.publication = publication
# return self.check_complete()
def add_abstract(self, abstract):
self.abstract += abstract
return self.check_complete()
def add_star_link(self, star_link):
self.star_link = star_link
return self.check_complete()
def check_complete(self):
if self.title == "" or self.source_link == "" or self.authr_and_pub == "" or self.abstract == "" or self.star_link == "":
return False
return True
def __str__(self):
return self.title + "\n" + self.source_link + "\n" + self.authr_and_pub + "\n" + self.abstract + "\n" + self.star_link
def __eq__(self, other):
return self.title == other.title
def __hash__(self):
return hash(self.title)
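# Stateful parser for Google Scholar alert e-mails: an <h3> opens a new paper (its <a> href is
# the source link), the following <div>s carry the authors/publication line and the abstract,
# and the next <a> href (the "save/star" link in the actions table) completes the record.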
class Parser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.is_title = False
self.is_authr_and_pub = False
self.is_abstract = False
self.is_table = False
self.papers = []
self.current_paper = Paper()
def move_to_next_paper(self):
self.papers.append(self.current_paper)
self.current_paper = Paper()
self.is_title = False
self.is_authr_and_pub = False
self.is_abstract = False
self.is_table = False
def handle_starttag(self, tag, attrs):
if tag == "h3":
self.is_title = True
elif tag == "a" and self.is_title:
for attr in attrs:
if attr[0].lower() == 'href':
self.current_paper.add_source_link(attr[1])
break
elif tag == "a" and self.is_table:
for attr in attrs:
if attr[0].lower() == 'href':
self.current_paper.add_star_link(attr[1])
self.is_table = False
self.move_to_next_paper()
break
def handle_data(self, data):
if self.is_title:
self.current_paper.add_title(data)
elif self.is_authr_and_pub:
self.current_paper.add_authr_and_pub(data)
elif self.is_abstract:
self.current_paper.add_abstract(data)
def handle_endtag(self, tag):
if tag == "h3":
self.is_title = False
self.is_authr_and_pub = True
elif tag == "div":
if self.is_authr_and_pub:
self.is_authr_and_pub = False
self.is_abstract = True
elif self.is_abstract:
self.is_abstract = False
self.is_table = True
def get_papers(self):
return self.papers
| 30.813084 | 129 | 0.578101 | 3,257 | 0.987868 | 0 | 0 | 0 | 0 | 0 | 0 | 201 | 0.060965 |
8288331b93be5bebcd8bf3d2c82ccd107597d65b | 1,067 | py | Python | ApendixI-Games/StacklessPSP-2.5.2_R1/pspsnd.py | MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb | 1f9ca7db685733a3df924db1269bd852acf27602 | [
"MIT"
]
| null | null | null | ApendixI-Games/StacklessPSP-2.5.2_R1/pspsnd.py | MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb | 1f9ca7db685733a3df924db1269bd852acf27602 | [
"MIT"
]
| 1 | 2019-08-16T12:59:01.000Z | 2019-08-18T06:36:47.000Z | ApendixI-Games/StacklessPSP-2.5.2_R1/pspsnd.py | MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb | 1f9ca7db685733a3df924db1269bd852acf27602 | [
"MIT"
]
| null | null | null | """Wrapper for pygame, which exports the PSP Python API on non-PSP systems."""
__author__ = "Per Olofsson, <[email protected]>"
import pygame
pygame.init()
_vol_music = 255
_vol_sound = 255
def setMusicVolume(vol):
global _vol_music
if vol >= 0 and vol <= 255:
_vol_music = vol
pygame.mixer.music.set_volume(_vol_music / 255.0)
def setSndFxVolume(vol):
global _vol_sound
if vol >= 0 and vol <= 255:
_vol_sound = vol
class Music:
def __init__(self, filename, maxchan=128, loop=False):
self._loop = loop
pygame.mixer.music.load(filename)
pygame.mixer.music.set_volume(_vol_music / 255.0)
def start(self):
if self._loop:
pygame.mixer.music.play(-1)
else:
pygame.mixer.music.play()
def stop(self):
pygame.mixer.music.stop()
class Sound:
def __init__(self, filename):
self._snd = pygame.mixer.Sound(filename)
def start(self):
self._snd.set_volume(_vol_sound / 255.0)
self._snd.play()
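# Example usage (a minimal sketch; assumes "bgm.ogg" and "hit.wav" exist next to the script):
# music = Music("bgm.ogg", loop=True)
# music.start()
# Sound("hit.wav").start()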
| 21.34 | 78 | 0.62418 | 592 | 0.554827 | 0 | 0 | 0 | 0 | 0 | 0 | 117 | 0.109653 |
82884c2f951413f34a94ee10615c1c83e1f50fe8 | 1,540 | py | Python | utility_parseCMUMovie.py | bipulkumar22/pyTextClassification | 7ed92949aa5648d3198588d0c5d6db89b48025ac | [
"Apache-2.0"
]
| 11 | 2016-09-16T10:38:19.000Z | 2021-12-13T19:38:24.000Z | utility_parseCMUMovie.py | tyiannak/pyTextClassification | 7ed92949aa5648d3198588d0c5d6db89b48025ac | [
"Apache-2.0"
]
| null | null | null | utility_parseCMUMovie.py | tyiannak/pyTextClassification | 7ed92949aa5648d3198588d0c5d6db89b48025ac | [
"Apache-2.0"
]
| 6 | 2016-11-19T15:35:13.000Z | 2020-03-29T17:09:22.000Z | import os
import csv
import ast
# used to generate folder-seperated corpus from CMUMovie dataset
# just type python utility_parseCMUMovie.py in a terminal and the data will be downloaded and split to subfolders in the moviePlots/ path
os.system("wget http://www.cs.cmu.edu/~ark/personas/data/MovieSummaries.tar.gz")
os.system("tar -xvzf MovieSummaries.tar.gz")
minRevenue = 20000000
movieMetadata = {}
with open('MovieSummaries/movie.metadata.tsv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
for row in reader:
rev = 0
if len(row[4])>1:
rev = int(row[4])
if (minRevenue < 0) or ( (minRevenue > 0) and (rev>minRevenue) ):
movieMetadata[row[0]] = {}
movieMetadata[row[0]]['title'] = row[2]
movieMetadata[row[0]]['genres'] = ast.literal_eval(row[8]).values()
print len(movieMetadata)
with open("MovieSummaries/plot_summaries.txt") as f:
content = f.readlines()
for c in content:
d = c.split("\t")
id = d[0]
plot = d[1]
if id in movieMetadata:
print id, movieMetadata[id]['title']
for g in movieMetadata[id]['genres']:
if not os.path.exists("moviePlots" + os.sep + g.replace("/","-")):
os.makedirs("moviePlots" + os.sep + g.replace("/","-"))
f = open("moviePlots" + os.sep + g.replace("/","-") + os.sep + id + "_" + movieMetadata[id]["title"].replace("/","-"), 'w')
f.write(plot)
f.close()
| 37.560976 | 143 | 0.595455 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.319481 |
8288f27e4f9f25b68044be3e4af91d23dfa24bd3 | 1,923 | py | Python | model.py | luqifeng/CVND---Image-Captioning-Project | 6564b72222d962f8e1acdcdcf3d8ac5874ad9ab8 | [
"MIT"
]
| null | null | null | model.py | luqifeng/CVND---Image-Captioning-Project | 6564b72222d962f8e1acdcdcf3d8ac5874ad9ab8 | [
"MIT"
]
| null | null | null | model.py | luqifeng/CVND---Image-Captioning-Project | 6564b72222d962f8e1acdcdcf3d8ac5874ad9ab8 | [
"MIT"
]
| null | null | null | import torch
import torch.nn as nn
import torchvision.models as models
import numpy as np
class EncoderCNN(nn.Module):
def __init__(self, embed_size):
super(EncoderCNN, self).__init__()
resnet = models.resnet50(pretrained=True)
for param in resnet.parameters():
param.requires_grad_(False)
modules = list(resnet.children())[:-1]
self.resnet = nn.Sequential(*modules)
self.embed = nn.Linear(resnet.fc.in_features, embed_size)
def forward(self, images):
features = self.resnet(images)
features = features.view(features.size(0), -1)
features = self.embed(features)
return features
class DecoderRNN(nn.Module):
def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
super(DecoderRNN, self).__init__()
self.lstm = nn.LSTM(embed_size,hidden_size,num_layers,batch_first=True)
self.embeddings = nn.Embedding(vocab_size, embed_size)
self.linear = nn.Linear(hidden_size, vocab_size)
def forward(self, features, captions):
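        # prepend the image feature vector as the first "word" of the sequence, run the LSTM,
        # and drop the final time-step prediction so the output length matches the captions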
captions = self.embeddings(captions)
embed = torch.cat((features.unsqueeze(1),captions),1)
r_out = self.lstm(embed)
output = self.linear(r_out[0])[:, :-1, :]
return output
def sample(self, inputs, states=None, max_len=20):
        """Accepts a pre-processed image tensor (inputs) and returns the predicted sentence (a list of tensor ids of length max_len)."""
output = []
for i in range(max_len):
hiddens, states = self.lstm(inputs, states)
mid = self.linear(hiddens.squeeze(1))
predicted = mid.max(1)[1]
output.append(predicted.tolist()[0])
inputs = self.embeddings(predicted)
inputs = inputs.unsqueeze(1)
#print(output)
#output = torch.cat(output, 1)
return output | 36.980769 | 126 | 0.631825 | 1,825 | 0.949038 | 0 | 0 | 0 | 0 | 0 | 0 | 167 | 0.086843 |
828aa1df6ebc3553389f760e5439ccc3f6c4765d | 981 | py | Python | App/items/models/items.py | fmgar/BlackMarker-API | a185d61d518ad505d2fd8882f0e8cd15474786cb | [
"MIT"
]
| null | null | null | App/items/models/items.py | fmgar/BlackMarker-API | a185d61d518ad505d2fd8882f0e8cd15474786cb | [
"MIT"
]
| null | null | null | App/items/models/items.py | fmgar/BlackMarker-API | a185d61d518ad505d2fd8882f0e8cd15474786cb | [
"MIT"
]
| null | null | null | """Items model. """
# Django
from django.db import models
# Utilities
from App.utils.models import BlackMarketModel
# Models
from .category import Category
from .unit import Unit
from .owner import Owner
class Item(BlackMarketModel):
    """Items model.
    A model for the items we are going to sell."""
name = models.CharField(max_length=100, unique=True, blank=False, null=False)
category = models.ForeignKey(Category, blank=True, on_delete=models.SET_NULL, null=True)
description = models.TextField(max_length=200, blank=True)
type_item = models.CharField(max_length=15, blank=True)
unit = models.ForeignKey(Unit, blank=True, on_delete=models.SET_NULL, null=True)
price = models.DecimalField(max_digits=5, decimal_places=2, blank=False, null=False)
owner = models.ForeignKey(Owner, blank=True, on_delete=models.SET_NULL, null=True)
is_active = models.BooleanField(default=True)
def __str__(self):
return 'name:{}'.format(self.name)
| 32.7 | 92 | 0.734964 | 771 | 0.785933 | 0 | 0 | 0 | 0 | 0 | 0 | 115 | 0.117227 |
828b34c1c1112e8cd47750832efbc80f1a49fc80 | 2,241 | py | Python | run_all.py | yuriisthebest/Advent-of-Code | 1a4b3d6e57b0751dec097ccfc83472c458605e37 | [
"MIT"
]
| null | null | null | run_all.py | yuriisthebest/Advent-of-Code | 1a4b3d6e57b0751dec097ccfc83472c458605e37 | [
"MIT"
]
| null | null | null | run_all.py | yuriisthebest/Advent-of-Code | 1a4b3d6e57b0751dec097ccfc83472c458605e37 | [
"MIT"
]
| null | null | null | import json
import time
from multiprocessing import Process
from utils.paths import PATHS
from years.AoC2021.tasks import TASKS2021
# Constants
PARALLEL_COMPUTATION = True
TASKS = {
2021: TASKS2021
}
def asses_task(task: type, answers: dict, year: int) -> None:
"""
Run a task 4 times (part 1 test, part 1 task, part 2 test, part 2 task)
Test if the answers of each run correspond to the correct answers
:param task: Task object able to run a task
:param answers: The correct answers of the given task
:param year: The year where this task was asked
"""
t = task()
pred = t.run_all()
true = answers[task.__name__]
assert pred[0][0] == true[0] or true[0] == 0, \
f"({year}, {task.__name__}) Part 1 has failed on the test data. Expected: {true[0]}, got: {pred[0][0]}"
assert pred[0][1] == true[1] or true[1] == 0, \
f"({year}, {task.__name__}) Part 1 has failed on the real data. Expected: {true[1]}, got: {pred[0][1]}"
assert pred[1][0] == true[2] or true[2] == 0, \
f"({year}, {task.__name__}) Part 2 has failed on the test data. Expected: {true[2]}, got: {pred[1][0]}"
assert pred[1][1] == true[3] or true[3] == 0, \
f"({year}, {task.__name__}) Part 2 has failed on the real data. Expected: {true[3]}, got: {pred[1][1]}"
if __name__ == "__main__":
start = time.perf_counter()
num_tests = 0
processes = []
for year_num in TASKS.keys():
# Find the answers of the current year
with open(f"{PATHS[year_num]}\\answers.json") as f:
year_answers = json.load(f)
        # Compute task results (answers stored as 0 are treated as unknown and skipped)
for i, current_task in enumerate(TASKS[year_num]):
num_tests += 1
if PARALLEL_COMPUTATION:
p = Process(target=asses_task, args=[current_task, year_answers, year_num])
p.start()
processes.append(p)
else:
asses_task(current_task, year_answers, year_num)
# Wait for processes to stop and report success
for process in processes:
process.join()
print(f"\n*** All {num_tests} tests completed successfully in {time.perf_counter() - start:.2f} sec***")
| 37.983051 | 111 | 0.617135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,024 | 0.456939 |
828cb262d3250d0e1b3f07edb7bc92fd873589c5 | 1,467 | py | Python | python/edl/tests/unittests/master_client_test.py | WEARE0/edl | f065ec02bb27a67c80466103e298bd6f37494048 | [
"Apache-2.0"
]
| 90 | 2020-04-21T01:46:10.000Z | 2022-02-10T09:09:34.000Z | python/edl/tests/unittests/master_client_test.py | WEARE0/edl | f065ec02bb27a67c80466103e298bd6f37494048 | [
"Apache-2.0"
]
| 37 | 2018-03-02T22:41:15.000Z | 2020-04-22T16:48:36.000Z | python/edl/tests/unittests/master_client_test.py | WEARE0/edl | f065ec02bb27a67c80466103e298bd6f37494048 | [
"Apache-2.0"
]
| 34 | 2018-03-02T23:28:25.000Z | 2020-03-25T08:50:29.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import paddle_edl.utils.master_pb2 as master_pb2
import unittest
from edl.utils.master_client import Client
from edl.utils.utils import get_file_list, get_logger
os.environ["https_proxy"] = ""
os.environ["http_proxy"] = ""
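# NOTE: these tests assume an EDL master service is reachable at 127.0.0.1:8080 and that
# ./test_file_list.txt exists in the working directory.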
class TestMasterClient(unittest.TestCase):
def setUp(self):
self._client = Client("127.0.0.1:8080")
def test_add_dataset(self):
dataset = master_pb2.DataSet()
dataset.name = "train"
for t in get_file_list("./test_file_list.txt"):
dataset.file_list.append(t[0])
res = self._client.add_dataset(dataset)
assert res is None or res.type == "", "must not any error"
res = self._client.add_dataset(dataset)
assert res.type == "DuplicateInitDataSet", "must error"
if __name__ == "__main__":
logger = get_logger(10)
unittest.main()
| 32.6 | 74 | 0.712338 | 542 | 0.369461 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.502386 |
828ccbf87f380dbc253cd5ac125a944fc9a7bd55 | 4,262 | py | Python | src/commercetools/services/types.py | BramKaashoek/commercetools-python-sdk | 4a4191d7816c921401b782d8ae37626cb32791a1 | [
"MIT"
]
| null | null | null | src/commercetools/services/types.py | BramKaashoek/commercetools-python-sdk | 4a4191d7816c921401b782d8ae37626cb32791a1 | [
"MIT"
]
| null | null | null | src/commercetools/services/types.py | BramKaashoek/commercetools-python-sdk | 4a4191d7816c921401b782d8ae37626cb32791a1 | [
"MIT"
]
| null | null | null | import typing
from commercetools import schemas, types
from commercetools.services import abstract
from commercetools.typing import OptionalListStr
__all__ = ["TypeService"]
class TypeDeleteSchema(abstract.AbstractDeleteSchema):
pass
class TypeQuerySchema(abstract.AbstractQuerySchema):
pass
class TypeService(abstract.AbstractService):
def get_by_id(self, id: str, expand: OptionalListStr = None) -> types.Type:
query_params = {}
if expand:
query_params["expand"] = expand
return self._client._get(f"types/{id}", query_params, schemas.TypeSchema)
def get_by_key(self, key: str, expand: OptionalListStr = None) -> types.Type:
query_params = {}
if expand:
query_params["expand"] = expand
return self._client._get(f"types/key={key}", query_params, schemas.TypeSchema)
def query(
self,
where: OptionalListStr = None,
sort: OptionalListStr = None,
expand: OptionalListStr = None,
limit: int = None,
offset: int = None,
) -> types.TypePagedQueryResponse:
params = TypeQuerySchema().dump(
{
"where": where,
"sort": sort,
"expand": expand,
"limit": limit,
"offset": offset,
}
)
return self._client._get("types", params, schemas.TypePagedQueryResponseSchema)
def create(
self, draft: types.TypeDraft, expand: OptionalListStr = None
) -> types.Type:
query_params = {}
if expand:
query_params["expand"] = expand
return self._client._post(
"types", query_params, draft, schemas.TypeDraftSchema, schemas.TypeSchema
)
def update_by_id(
self,
id: str,
version: int,
actions: typing.List[types.TypeUpdateAction],
expand: OptionalListStr = None,
*,
force_update: bool = False,
) -> types.Type:
query_params = {}
if expand:
query_params["expand"] = expand
update_action = types.TypeUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"types/{id}",
params=query_params,
data_object=update_action,
request_schema_cls=schemas.TypeUpdateSchema,
response_schema_cls=schemas.TypeSchema,
force_update=force_update,
)
def update_by_key(
self,
key: str,
version: int,
actions: typing.List[types.TypeUpdateAction],
expand: OptionalListStr = None,
*,
force_update: bool = False,
) -> types.Type:
query_params = {}
if expand:
query_params["expand"] = expand
update_action = types.TypeUpdate(version=version, actions=actions)
return self._client._post(
endpoint=f"types/key={key}",
params=query_params,
data_object=update_action,
request_schema_cls=schemas.TypeUpdateSchema,
response_schema_cls=schemas.TypeSchema,
force_update=force_update,
)
def delete_by_id(
self,
id: str,
version: int,
expand: OptionalListStr = None,
*,
force_delete: bool = False,
) -> types.Type:
params = {"version": version}
if expand:
params["expand"] = expand
query_params = TypeDeleteSchema().dump(params)
return self._client._delete(
endpoint=f"types/{id}",
params=query_params,
response_schema_cls=schemas.TypeSchema,
force_delete=force_delete,
)
def delete_by_key(
self,
key: str,
version: int,
expand: OptionalListStr = None,
*,
force_delete: bool = False,
) -> types.Type:
params = {"version": version}
if expand:
params["expand"] = expand
query_params = TypeDeleteSchema().dump(params)
return self._client._delete(
endpoint=f"types/key={key}",
params=query_params,
response_schema_cls=schemas.TypeSchema,
force_delete=force_delete,
)
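# Typical usage goes through the commercetools client facade rather than constructing this
# service directly (a sketch; the exact attribute name on the client is assumed):
# custom_type = client.types.get_by_key("my-type-key")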
| 30.22695 | 87 | 0.585171 | 4,077 | 0.956593 | 0 | 0 | 0 | 0 | 0 | 0 | 230 | 0.053965 |
828db07d0c0f0f1db466402e002749cf071a28f8 | 3,454 | py | Python | augraphy/augmentations/noisetexturize.py | RyonSayer/augraphy | be1e8dcf0f129ac3fc30ba1cad0d8de02443f67f | [
"MIT"
]
| 36 | 2021-06-25T02:17:57.000Z | 2022-03-29T02:36:09.000Z | augraphy/augmentations/noisetexturize.py | shaheryar1/augraphy | 5dd52fdd3b497312606c6d3afa4003f94a8cbcc4 | [
"MIT"
]
| 136 | 2021-06-25T07:39:46.000Z | 2022-03-31T13:00:30.000Z | augraphy/augmentations/noisetexturize.py | shaheryar1/augraphy | 5dd52fdd3b497312606c6d3afa4003f94a8cbcc4 | [
"MIT"
]
| 24 | 2021-06-27T21:15:11.000Z | 2022-03-08T03:28:17.000Z | import random
import cv2
import numpy as np
from augraphy.base.augmentation import Augmentation
class NoiseTexturize(Augmentation):
"""Creates a random noise based texture pattern to emulate paper textures.
Consequently applies noise patterns to the original image from big to small.
:param sigma_range: Defines bounds of noise fluctuations.
:type sigma_range: tuple, optional
:param turbulence_range: Defines how quickly big patterns will be
replaced with the small ones. The lower value -
the more iterations will be performed during texture generation.
:type turbulence_range: tuple, optional
:param p: The probability this Augmentation will be applied.
:type p: float, optional
"""
def __init__(
self,
sigma_range=(3, 10),
turbulence_range=(2, 5),
p=1,
):
"""Constructor method"""
super().__init__(p=p)
self.sigma_range = sigma_range
self.turbulence_range = turbulence_range
# Constructs a string representation of this Augmentation.
def __repr__(self):
return f"NoiseTexturize(sigma_range={self.sigma_range}, turbulence_range={self.turbulence_range}, p={self.p})"
# Applies the Augmentation to input data.
def __call__(self, image, layer=None, force=False):
if force or self.should_run():
image = image.copy()
sigma = random.randint(self.sigma_range[0], self.sigma_range[1])
turbulence = random.randint(
self.turbulence_range[0],
self.turbulence_range[1],
)
result = image.astype(float)
rows, cols = image.shape[:2]
if len(image.shape) > 2:
channel = image.shape[2]
else:
channel = 0
ratio = cols
while not ratio == 1:
result += self.noise(cols, rows, channel, ratio, sigma=sigma)
ratio = (ratio // turbulence) or 1
cut = np.clip(result, 0, 255)
cut = cut.astype(np.uint8)
return cut
    def noise(self, width, height, channel, ratio, sigma):
        """The function generates an image filled with gaussian noise. If ratio
parameter is specified, noise will be generated for a lesser image and
then it will be upscaled to the original size. In that case noise will
generate larger square patterns. To avoid multiple lines, the upscale
uses interpolation.
:param ratio: the size of generated noise "pixels"
:param sigma: defines bounds of noise fluctuations
"""
mean = 0
# assert width % ratio == 0, "Can't scale image with of size {} and ratio {}".format(width, ratio)
# assert height % ratio == 0, "Can't scale image with of size {} and ratio {}".format(height, ratio)
h = int(height / ratio)
w = int(width / ratio)
if h == 0:
h = 1
if w == 0:
w = 1
gaussian = np.vectorize(lambda x: random.gauss(mean, sigma))
result = gaussian(np.array((w, h)))
result = cv2.resize(
result,
dsize=(width, height),
interpolation=cv2.INTER_LINEAR,
)
# for multiple channels input, convert result to multiple channels
if channel:
result = np.stack([result, result, result], axis=2)
return result
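# Example usage (a minimal sketch; assumes "paper.png" is a readable image):
# image = cv2.imread("paper.png")
# textured = NoiseTexturize(sigma_range=(3, 10), turbulence_range=(2, 5))(image)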
| 33.533981 | 118 | 0.606543 | 3,353 | 0.970759 | 0 | 0 | 0 | 0 | 0 | 0 | 1,559 | 0.451361 |
828dc1f2bed1b15e7518a1fcf0598cc4397058a0 | 50,333 | py | Python | plugins/modules/bigip_sslo_config_ssl.py | kevingstewart/f5_sslo_ansible | 13001a8eab514b5f1ea374abdfc7dd2383655a86 | [
"Apache-2.0"
]
| 7 | 2021-06-25T15:39:49.000Z | 2022-02-28T10:58:53.000Z | plugins/modules/bigip_sslo_config_ssl.py | kevingstewart/f5_sslo_ansible | 13001a8eab514b5f1ea374abdfc7dd2383655a86 | [
"Apache-2.0"
]
| 6 | 2021-06-29T18:18:45.000Z | 2021-09-17T12:04:24.000Z | plugins/modules/bigip_sslo_config_ssl.py | kevingstewart/f5_sslo_ansible | 13001a8eab514b5f1ea374abdfc7dd2383655a86 | [
"Apache-2.0"
]
| 3 | 2021-06-28T23:25:38.000Z | 2022-02-28T10:57:32.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2021, kevin-dot-g-dot-stewart-at-gmail-dot-com
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Version: 1.0.1
#### Updates:
#### 1.0.1 - added 9.0 support
# - changed max version
# - added clientssl "alpn" proxy support
# - added clientssl logPublisher support
# - added serverssl logPublisher support
# - updated version and previousVersion keys to match target SSLO version
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_sslo_config_ssl
short_description: Manage an SSL Orchestrator SSL configuration
description:
- Manage an SSL Orchestrator SSL configuration
version_added: "1.0.0"
options:
name:
description:
      - Specifies the name of the SSL configuration. The configuration auto-prepends "ssloT_" to the name.
Service name should be less than 14 characters and not contain dashes "-".
type: str
required: True
clientSettings:
description:
- Specifies the client-side SSL settings
suboptions:
cipherType:
description:
- Defines the type of cipher used, either "string" (for cipher strings), or "group" (an existing cipher group).
type: str
choices:
- string
- group
default: string
cipher:
description:
- Defines the actual cipher string (ex. "DEFAULT"), or existing cipher group (ex. /Common/f5-default) to use.
type: str
default: DEFAULT
enableTLS1_3:
description:
- Defines whether or not to enable client-side TLSv1.3 support. When enabled, the cipherType must be "group" and cipher must indicate an existing cipher group.
type: bool
default: False
cert:
description:
          - Defines the certificate applied in the client side settings. For a forward proxy this is the template certificate (ex. /Common/default.crt). For a reverse proxy, this is the client-facing server certificate.
type: str
default: /Common/default.crt
key:
description:
          - Defines the private key applied in the client side settings. For a forward proxy this is the template key (ex. /Common/default.key). For a reverse proxy, this is the client-facing server private key.
type: str
default: /Common/default.key
chain:
description:
- Defines the certificate keychain in the client side settings.
type: str
default: None
caCert:
description:
- Defines the CA certificate applied in the client side settings. This is the signing/forging CA certificate used for forward proxy TLS handling. This setting is not applicable in reverse proxy SSL.
type: str
default: None
caKey:
description:
- Defines the CA private key applied in the client side settings. This is the signing/forging CA private key used for forward proxy TLS handling. This setting is not applicable in reverse proxy SSL.
type: str
default: None
caChain:
description:
          - Defines the CA certificate keychain in the client side settings. This should contain any subordinate CAs in the trust chain between the signing CA and the explicitly-trusted root certificate. If required, it should contain any intermediate CA certificates, up to but not including the self-signed root CA.
type: str
default: None
alpn:
description:
- Requires 9.0+. Enables or disables ALPN HTTP/2 full proxy in an outbound (forward proxy) topology.
type: bool
default: False
logPublisher:
description:
- Requires 9.0+. Defines a specific log publisher to use for client-side SSL-related events.
type: str
default: /Common/sys-ssl-publisher
serverSettings:
description:
- Specifies the server-side SSL settings
suboptions:
cipherType:
description:
- Defines the type of cipher used, either "string" (for cipher strings), or "group" (an existing cipher group).
type: str
choices:
- string
- group
default: string
cipher:
description:
- Defines the actual cipher string (ex. "DEFAULT"), or existing cipher group (ex. /Common/f5-default) to use.
type: str
default: DEFAULT
enableTLS1_3:
description:
- Defines whether or not to enable server-side TLSv1.3 support. When enabled, the cipherType must be "group" and cipher must indicate an existing cipher group.
type: bool
default: False
caBundle:
description:
          - Defines the certificate authority bundle used to validate remote server certificates. This setting is most applicable in the forward proxy use case to validate remote (Internet) server certificates.
type: str
default: /Common/ca-bundle.crt
blockExpired:
description:
- Defines the action to take if an expired remote server certificate is encountered. For forward proxy the default is to ignore expired certificates (False). For reverse proxy the default is to drop expired certificates (True).
type: bool
default: False
blockUntrusted:
description:
- Defines the action to take if an untrusted remote server certificate is encountered, based on the defined caBundle. For forward proxy the default is to ignore untrusted certificates (False). For reverse proxy the default is to drop untrusted certificates (True).
type: bool
default: False
ocsp:
description:
          - Defines an OCSP configuration to use to perform certificate revocation checking against remote server certificates.
type: str
default: None
crl:
description:
          - Defines a CRL configuration to use to perform certificate revocation checking against remote server certificates.
type: str
default: None
logPublisher:
description:
- Requires 9.0+. Defines a specific log publisher to use for server-side SSL-related events.
type: str
default: /Common/sys-ssl-publisher
bypassHandshakeFailure:
description:
          - Defines the action to take if a server side TLS handshake failure is detected. A value of False will cause the connection to fail. A value of True will shut down TLS decryption and allow the connection to proceed un-decrypted.
type: bool
default: False
bypassClientCertFailure:
description:
          - Defines the action to take if a server side TLS handshake client certificate request is detected. A value of False will cause the connection to fail. A value of True will shut down TLS decryption and allow the connection to proceed un-decrypted.
type: bool
default: False
mode:
description:
      - Defines how this task is handled. With the default setting of 'update', the module performs the tasks required to update the target resource. With the 'output' setting, the resulting JSON object blocks are returned without updating the target resource. This option is useful for debugging, and when subordinate objects (ex. SSL, services, service chains, policy, resolver) are created in the same playbook and their respective output JSON is referenced in a single Topology create task.
type: str
choices:
- update
- output
default: update
state:
description:
- Specifies the present/absent state required.
type: str
choices:
- absent
- present
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Kevin Stewart (kevin-dot-g-dot-stewart-at-gmail-dot-com)
'''
EXAMPLES = r'''
- name: Create SSLO SSL Forward Proxy Settings (simple)
hosts: localhost
gather_facts: False
connection: local
collections:
- kevingstewart.f5_sslo_ansible
vars:
provider:
server: 172.16.1.77
user: admin
password: admin
validate_certs: no
server_port: 443
tasks:
- name: SSLO SSL forward proxy settings
bigip_sslo_config_ssl:
provider: "{{ provider }}"
name: "demo_ssl"
clientSettings:
caCert: "/Common/subrsa.f5labs.com"
caKey: "/Common/subrsa.f5labs.com"
delegate_to: localhost
- name: Create SSLO SSL Forward Proxy Settings
hosts: localhost
gather_facts: False
connection: local
collections:
- kevingstewart.f5_sslo_ansible
vars:
provider:
server: 172.16.1.77
user: admin
password: admin
validate_certs: no
server_port: 443
tasks:
- name: SSLO SSL settings
bigip_sslo_config_ssl:
provider: "{{ provider }}"
name: "demo_ssl"
clientSettings:
cipherType: "group"
cipher: "/Common/f5-default"
enableTLS1_3: True
cert: "/Common/default.crt"
key: "/Common/default.key"
caCert: "/Common/subrsa.f5labs.com"
caKey: "/Common/subrsa.f5labs.com"
caChain: "/Common/my-ca-chain"
alpn: True
logPublisher: "/Common/my-ssl-publisher"
serverSettings:
cipherType: "group"
cipher: "/Common/f5-default"
enableTLS1_3: True
caBundle: "/Common/local-ca-bundle.crt"
blockExpired: False
blockUntrusted: False
ocsp: "/Common/my-ocsp"
crl: "/Common/my-crl"
logPublisher: "/Common/my-ssl-publisher"
bypassHandshakeFailure: True
bypassClientCertFailure: True
delegate_to: localhost
- name: Create SSLO SSL Reverse Proxy Settings (simple)
hosts: localhost
gather_facts: False
connection: local
collections:
- kevingstewart.f5_sslo_ansible
vars:
provider:
server: 172.16.1.77
user: admin
password: admin
validate_certs: no
server_port: 443
tasks:
- name: SSLO SSL settings
bigip_sslo_config_ssl:
provider: "{{ provider }}"
name: "demo_ssl"
clientSettings:
cert: "/Common/myserver.f5labs.com"
key: "/Common/myserver.f5labs.com"
delegate_to: localhost
- name: Create SSLO SSL Reverse Proxy Settings
hosts: localhost
gather_facts: False
connection: local
collections:
- kevingstewart.f5_sslo_ansible
vars:
provider:
server: 172.16.1.77
user: admin
password: admin
validate_certs: no
server_port: 443
tasks:
- name: SSLO SSL settings
bigip_sslo_config_ssl:
provider: "{{ provider }}"
name: "demo5"
clientSettings:
cipherType: "group"
cipher: "/Common/f5-default"
enableTLS1_3: True
cert: "/Common/myserver.f5labs.com"
key: "/Common/myserver.f5labs.com"
chain: "/Common/my-ca-chain"
serverSettings:
cipherType: "group"
cipher: "/Common/f5-default"
enableTLS1_3: True
caBundle: "/Common/local-ca-bundle.crt"
blockExpired: False
blockUntrusted: False
delegate_to: localhost
'''
RETURN = r'''
name:
description:
- Changed name of SSL configuration.
type: str
sample: demo_ssl
clientSettings:
description: client-side SSL settings
type: complex
contains:
cipherType:
description: defines "string" for cipher string, or "group" for cipher group
type: str
sample: string
cipher:
description: defines the cipher string or an existing cipher group
type: str
sample: DEFAULT or /Common/f5-default
enableTLS1_3:
description: enables or disables client-side TLSv1.3
type: bool
sample: True
cert:
description: defines the client-facing certificate. For forward proxy this is the template certificate. For reverse proxy this is the server certificate.
type: str
sample: /Common/default.crt
key:
description: defines the client-facing private key. For forward proxy this is the template key. For reverse proxy this is the server private key.
type: str
sample: /Common/default.key
chain:
description: defines the client-facing CA certificate chain. For reverse proxy this is the server certificate's CA chain.
type: str
sample: /Common/local-ca-chain.crt
caCert:
description: defines the issuing CA certificate for a forward proxy.
type: str
sample: /Common/default.crt
caKey:
description: defines the issuing CA private key for a forward proxy.
type: str
sample: /Common/default.key
caChain:
description: defines the CA certificate chain for the issuing CA in a forward proxy.
type: str
sample: /Common/local-ca-chain.crt
alpn:
description: requires 9.0+. Enables or disables ALPN HTTP/2 full proxy through a forward proxy topology.
type: bool
sample: True
logPublisher:
description: requires 9.0+. Defines a specific log publisher for client-side SSL-related events.
type: str
sample: /Common/sys-ssl-publisher
serverSettings:
  description: server-side SSL settings
type: complex
contains:
cipherType:
description: defines "string" for cipher string, or "group" for cipher group
type: str
sample: string
cipher:
description: defines the cipher string or an existing cipher group
type: str
sample: DEFAULT or /Common/f5-default
enableTLS1_3:
description: enables or disables server-side TLSv1.3
type: bool
sample: True
caBundle:
      description: defines a CA bundle used to validate remote server certificates.
type: str
sample: /Common/ca-bundle.crt
blockExpired:
description: defines the action to take on receiving an expired remote server certificate, True = block, False = ignore.
type: bool
sample: True
blockUntrusted:
description: defines the action to take on receiving an untrusted remote server certificate, True = block, False = ignore.
type: bool
sample: True
ocsp:
      description: defines an existing OCSP configuration to validate revocation of remote server certificates.
type: str
sample: /Common/my-ocsp
crl:
      description: defines an existing CRL configuration to validate revocation of remote server certificates.
type: str
sample: /Common/my-crl
logPublisher:
description: requires 9.0+. Defines a specific log publisher for server-side SSL-related events.
type: str
sample: /Common/sys-ssl-publisher
bypassHandshakeFailure:
description:
- Defines the action to take on receiving a TLS handshake alert from a server. True = bypass decryption and allow through, False = block
type: bool
sample: True
bypassClientCertFailure:
description:
- Defines the action to take on receiving a TLS handshake client certificate request from a server. True = bypass decryption and allow through, False = block
type: bool
sample: True
mode:
description: describes the action to take on the task.
type: str
sample: update
state:
description:
- Changed state.
type: str
sample: present
'''
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ansible_collections.f5networks.f5_modules.plugins.module_utils.bigip import (
F5RestClient
)
from ansible_collections.f5networks.f5_modules.plugins.module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name, f5_argument_spec
)
from ansible_collections.f5networks.f5_modules.plugins.module_utils.icontrol import (
tmos_version
)
from ipaddress import (
ip_network, ip_interface
)
import json, time, re
global print_output
global json_template
global obj_attempts
global min_version
global max_version
print_output = []
## define object creation attempts count (with a 1 second pause between each attempt)
obj_attempts = 20
## define minimum supported tmos version - min(SSLO 5.x)
min_version = 5.0
## define maximum supported tmos version - max(SSLO 9.x)
max_version = 9.0
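## base JSON block posted to the f5-ssl-orchestrator-gc iApp processor; the TEMPLATE_NAME
## placeholders and the TLS client/server settings are populated by update_json() when the
## module builds its deployment request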
json_template = {
"name":"f5-ssl-orchestrator-gc",
"inputProperties":[
{
"id":"f5-ssl-orchestrator-operation-context",
"type":"JSON",
"value":{
"operationType":"CREATE",
"deploymentType":"SSL_SETTINGS",
"deploymentName":"TEMPLATE_NAME",
"deploymentReference":"",
"partition":"Common",
"strictness":False
}
},
{
"id":"f5-ssl-orchestrator-tls",
"type":"JSON",
"value":{
"sslSettingsReference":"",
"sslSettingsName":"",
"description":"",
"previousVersion":"7.2",
"version":"7.2",
"generalSettings":{
"isForwardProxy":True,
"bypassHandshakeAlert":False,
"bypassClientCertFailure":False
},
"clientSettings":{
"ciphers":{
"isCipherString":True,
"cipherString":"DEFAULT",
"cipherGroup":"/Common/f5-default"
},
"certKeyChain":[
{
"cert":"/Common/default.crt",
"key":"/Common/default.key",
"chain":"",
"passphrase":"",
"name":"CERT_KEY_CHAIN_0"
}
],
"caCertKeyChain":[],
"forwardByPass":True,
"enabledSSLProcessingOptions":[]
},
"serverSettings":{
"ciphers":{
"isCipherString":True,
"cipherString":"DEFAULT",
"cipherGroup":"/Common/f5-default"
},
"caBundle":"/Common/ca-bundle.crt",
"expiredCertificates":False,
"untrustedCertificates":False,
"ocsp":"",
"crl":"",
"enabledSSLProcessingOptions":[]
},
"name":"TEMPLATE_NAME",
"advancedMode":"off",
"strictness":False,
"partition":"Common"
}
},
{
"id":"f5-ssl-orchestrator-topology",
"type":"JSON"
}
],
"configurationProcessorReference":{
"link":"https://localhost/mgmt/shared/iapp/processors/f5-iappslx-ssl-orchestrator-gc"
},
"configProcessorTimeoutSeconds": 120,
"statsProcessorTimeoutSeconds": 60,
"configProcessorAffinity": {
"processorPolicy": "LOCAL",
"affinityProcessorReference": {
"link": "https://localhost/mgmt/shared/iapp/affinity/local"
}
},
"state":"BINDING",
"presentationHtmlReference":{
"link":"https://localhost/iapps/f5-iappslx-ssl-orchestrator/sgc/sgcIndex.html"
},
"operation":"CREATE"
}
json_ca_cert_template = {
"cert":"/Common/default.crt",
"key":"/Common/defaut.key",
"chain":"",
"isCa":True,
"usage":"CA",
"port":"0",
"passphrase":"",
"certKeyChainMismatch":False,
"isDuplicateVal":False,
"name":"CA_CERT_KEY_CHAIN_0"
}
json_enable_tls13 = {
"name":"TLSv1.3",
"value":"TLSv1.3"
}
class Parameters(AnsibleF5Parameters):
api_map = {}
updatables = []
api_attributes = []
returnables = []
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
global print_output
@property
def name(self):
name = self._values['name']
name = "ssloT_" + name
return name
@property
def client_cipher_type(self):
try:
client_cipher_type = self._values['clientSettings']['cipherType']
if client_cipher_type is None:
return "string"
return client_cipher_type
except:
return "string"
@property
def client_cipher(self):
try:
client_cipher = self._values['clientSettings']['cipher']
if client_cipher is None:
return "DEFAULT"
return client_cipher
except:
return "DEFAULT"
@property
def client_enable_tls13(self):
try:
client_enable_tls13 = self._values['clientSettings']['enableTLS1_3']
if client_enable_tls13 is None:
return False
return client_enable_tls13
except:
return False
@property
def client_cert(self):
try:
client_cert = self._values['clientSettings']['cert']
if client_cert is None:
return "/Common/default.crt"
return client_cert
except:
return "/Common/default.crt"
@property
def client_key(self):
try:
client_key = self._values['clientSettings']['key']
if client_key is None:
return "/Common/default.key"
return client_key
except:
return "/Common/default.key"
@property
def client_chain(self):
try:
client_chain = self._values['clientSettings']['chain']
if client_chain is None:
return None
return client_chain
except:
return None
@property
def client_ca_cert(self):
try:
client_ca_cert = self._values['clientSettings']['caCert']
if client_ca_cert is None:
return None
return client_ca_cert
except:
return None
@property
def client_ca_key(self):
try:
client_ca_key = self._values['clientSettings']['caKey']
if client_ca_key is None:
return None
return client_ca_key
except:
return None
@property
def client_ca_chain(self):
try:
client_ca_chain = self._values['clientSettings']['caChain']
if client_ca_chain is None:
return None
return client_ca_chain
except:
return None
@property
def server_cipher_type(self):
try:
server_cipher_type = self._values['serverSettings']['cipherType']
if server_cipher_type is None:
return "string"
return server_cipher_type
except:
return "string"
@property
def server_cipher(self):
try:
server_cipher = self._values['serverSettings']['cipher']
if server_cipher is None:
return "DEFAULT"
return server_cipher
except:
return "DEFAULT"
@property
def server_enable_tls13(self):
try:
server_enable_tls13 = self._values['serverSettings']['enableTLS1_3']
if server_enable_tls13 is None:
return False
return server_enable_tls13
except:
return False
@property
def server_ca_bundle(self):
try:
server_ca_bundle = self._values['serverSettings']['caBundle']
if server_ca_bundle is None:
return "/Common/ca-bundle.crt"
return server_ca_bundle
except:
return "/Common/ca-bundle.crt"
@property
def server_block_expired(self):
try:
server_block_expired = self._values['serverSettings']['blockExpired']
if server_block_expired is None:
return None
return server_block_expired
except:
return None
@property
def server_block_untrusted(self):
try:
server_block_untrusted = self._values['serverSettings']['blockUntrusted']
if server_block_untrusted is None:
return None
return server_block_untrusted
except:
return None
@property
def server_ocsp(self):
try:
server_ocsp = self._values['serverSettings']['ocsp']
if server_ocsp is None:
return None
return server_ocsp
except:
return None
@property
def server_crl(self):
try:
server_crl = self._values['serverSettings']['crl']
if server_crl is None:
return None
return server_crl
except:
return None
@property
def bypass_handshake_failure(self):
bypass_handshake_failure = self._values['bypassHandshakeFailure']
if bypass_handshake_failure is None:
return False
return bypass_handshake_failure
@property
def bypass_clientcert_failure(self):
bypass_clientcert_failure = self._values['bypassClientCertFailure']
if bypass_clientcert_failure is None:
return False
return bypass_clientcert_failure
@property
def mode(self):
mode = self._values['mode']
return mode
@property
def client_alpn(self):
try:
client_alpn = self._values['clientSettings']['alpn']
if client_alpn is None:
return False
return client_alpn
except:
return False
@property
def client_log_publisher(self):
try:
client_log_publisher = self._values['clientSettings']['logPublisher']
if client_log_publisher is None:
return "/Common/sys-ssl-publisher"
return client_log_publisher
except:
return "/Common/sys-ssl-publisher"
@property
def server_log_publisher(self):
try:
            server_log_publisher = self._values['serverSettings']['logPublisher']
if server_log_publisher is None:
return "/Common/sys-ssl-publisher"
return server_log_publisher
except:
return "/Common/sys-ssl-publisher"
class ModuleManager(object):
global print_output
global json_template
global obj_attempts
global min_version
global max_version
def __init__(self, *args, **kwargs):
self.module = kwargs.pop('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
def getSsloVersion(self):
## use this method to get the SSLO version (first two digits (x.y))
uri = "https://{0}:{1}/mgmt/shared/iapp/installed-packages".format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
resp = self.client.api.get(uri).json()
for x in resp["items"]:
if x["appName"] == "f5-iappslx-ssl-orchestrator":
tmpversion = x["release"].split(".")
version = tmpversion[0] + "." + tmpversion[1]
return float(version)
except:
raise F5ModuleError("SSL Orchestrator package does not appear to be installed. Aborting.")
def deleteOperation(self, id):
## use this method to delete an operation that failed
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
id
)
resp = self.client.api.delete(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
else:
return False
def update_json(self, operation):
        ## use this method to create and return a modified copy of the JSON template
self.config = json_template
## get base name
self.local_name = re.sub('ssloT_', '', self.want.name)
## perform some input validation
## if TLS1.3 is enabled, the isCipherString value must be "false"
if self.want.client_enable_tls13 == True and self.want.client_cipher_type == "string":
raise F5ModuleError("Enabling client-side TLS 1.3 also requires a cipher group")
if self.want.server_enable_tls13 == True and self.want.server_cipher_type == "string":
raise F5ModuleError("Enabling server-side TLS 1.3 also requires a cipher group")
## =================================
## 1.0.1 general update: modify version and previousVersion values to match target BIG-IP version
## =================================
self.config["inputProperties"][0]["value"]["version"] = self.ssloVersion
self.config["inputProperties"][1]["value"]["version"] = self.ssloVersion
self.config["inputProperties"][1]["value"]["previousVersion"] = self.ssloVersion
## general json settings for all operations
self.config["inputProperties"][0]["value"]["deploymentName"] = self.want.name
self.config["inputProperties"][0]["value"]["operationType"] = operation
self.config["inputProperties"][1]["value"]["name"] = self.want.name
self.config["inputProperties"][1]["value"]["generalSettings"]["bypassHandshakeAlert"] = self.want.bypass_handshake_failure
self.config["inputProperties"][1]["value"]["generalSettings"]["bypassClientCertFailure"] = self.want.bypass_clientcert_failure
if self.want.client_enable_tls13 == False:
self.config["inputProperties"][1]["value"]["clientSettings"]["enabledSSLProcessingOptions"].append(json_enable_tls13)
if self.want.server_enable_tls13 == False:
self.config["inputProperties"][1]["value"]["serverSettings"]["enabledSSLProcessingOptions"].append(json_enable_tls13)
## generic client settings
self.config["inputProperties"][1]["value"]["clientSettings"]["certKeyChain"][0]["cert"] = self.want.client_cert
self.config["inputProperties"][1]["value"]["clientSettings"]["certKeyChain"][0]["key"] = self.want.client_key
if self.want.client_chain != None:
self.config["inputProperties"][1]["value"]["clientSettings"]["certKeyChain"][0]["chain"] = self.want.client_chain
if self.want.client_cipher_type == "string":
self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["isCipherString"] = True
self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["cipherString"] = self.want.client_cipher
elif self.want.client_cipher_type == "group":
self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["isCipherString"] = False
self.config["inputProperties"][1]["value"]["clientSettings"]["ciphers"]["cipherGroup"] = self.want.client_cipher
## generic server settings
self.config["inputProperties"][1]["value"]["serverSettings"]["caBundle"] = self.want.server_ca_bundle
if self.want.server_cipher_type == "string":
self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["isCipherString"] = True
self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["cipherString"] = self.want.server_cipher
elif self.want.server_cipher_type == "group":
self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["isCipherString"] = False
self.config["inputProperties"][1]["value"]["serverSettings"]["ciphers"]["cipherGroup"] = self.want.server_cipher
if self.want.server_ocsp != None:
self.config["inputProperties"][1]["value"]["serverSettings"]["ocsp"] = self.want.server_ocsp
if self.want.server_crl != None:
self.config["inputProperties"][1]["value"]["serverSettings"]["crl"] = self.want.server_crl
## Test if this is a forward or reverse proxy config, based on presence of client_ca_cert value
if self.want.client_ca_cert != None:
## assume this is a forward proxy
self.config["inputProperties"][1]["value"]["generalSettings"]["isForwardProxy"] = True
self.proxyType = "forward"
self.ca_cert_config = json_ca_cert_template
self.ca_cert_config["cert"] = self.want.client_ca_cert
self.ca_cert_config["key"] = self.want.client_ca_key
if self.want.client_ca_chain != None:
self.ca_cert_config["chain"] = self.want.client_ca_chain
self.config["inputProperties"][1]["value"]["clientSettings"]["caCertKeyChain"].append(self.ca_cert_config)
## client settings
self.config["inputProperties"][1]["value"]["clientSettings"]["forwardByPass"] = True
## server settings - set defaults if none specified
if self.want.server_block_untrusted == None:
                    ## for forward proxy default to True unless specified
self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = True
else:
self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = self.want.server_block_untrusted
if self.want.server_block_expired == None:
                    ## for forward proxy default to True unless specified
self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = True
else:
self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = self.want.server_block_expired
else:
## assume this is a reverse proxy
self.config["inputProperties"][1]["value"]["generalSettings"]["isForwardProxy"] = False
self.proxyType = "reverse"
## client settings
self.config["inputProperties"][1]["value"]["clientSettings"]["forwardByPass"] = False
## server settings - set defaults if none specified
if self.want.server_block_untrusted == None:
                    ## for reverse proxy default to False unless specified
self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = False
else:
self.config["inputProperties"][1]["value"]["serverSettings"]["untrustedCertificates"] = self.want.server_block_untrusted
if self.want.server_block_expired == None:
                    ## for reverse proxy default to False unless specified
self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = False
else:
self.config["inputProperties"][1]["value"]["serverSettings"]["expiredCertificates"] = self.want.server_block_expired
## ================================================
## updates: 9.0
## alpn - only available in 9.0+ and forward proxy
if self.ssloVersion >= 9.0 and self.proxyType == "forward":
self.config["inputProperties"][1]["value"]["clientSettings"]["alpn"] = self.want.client_alpn
## logPublisher - only available in 9.0+
if self.ssloVersion >= 9.0:
self.config["inputProperties"][1]["value"]["clientSettings"]["logPublisher"] = self.want.client_log_publisher
self.config["inputProperties"][1]["value"]["serverSettings"]["logPublisher"] = self.want.server_log_publisher
## ================================================
## create operation
if operation == "CREATE":
#### TO DO: update JSON code for CREATE operation
self.config["name"] = "sslo_obj_SSL_SETTINGS_CREATE_" + self.want.name
## modify/delete operations
elif operation in ["DELETE", "MODIFY"]:
self.config["name"] = "sslo_obj_SSL_SETTINGS_MODIFY_" + self.want.name
## get object ID and add to deploymentReference and existingBlockId values
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
query = "?$filter=name+eq+'{0}'&$select=id".format(self.want.name)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]:
raise F5ModuleError(resp.content)
try:
id = response["items"][0]['id']
self.config["inputProperties"][0]["value"]["deploymentReference"] = "https://localhost/mgmt/shared/iapp/blocks/" + id
self.config["inputProperties"][1]["value"]["existingBlockId"] = id
except:
raise F5ModuleError("Failure to create/modify - unable to fetch object ID")
if operation in ["MODIFY"]:
pass
#### TO DO: update JSON code for MODIFY operation
return self.config
def exec_module(self):
start = datetime.now().isoformat()
self.ssloVersion = self.getSsloVersion()
changed = False
result = dict()
state = self.want.state
## test for correct TMOS version
if self.ssloVersion < min_version or self.ssloVersion > max_version:
raise F5ModuleError("Unsupported SSL Orchestrator version, requires a version between min(" + str(min_version) + ") and max(" + str(max_version) + ")")
## enable/disable testdev to output to JSON only for testing (1) or push config to server (0)
testdev = 0
if testdev:
self.exists()
jsonstr = self.update_json("CREATE")
print_output.append("jsonstr = " + str(jsonstr))
else:
if state == 'present':
changed = self.update()
elif state == 'absent':
changed = self.absent()
result.update(dict(changed=changed))
print_output.append('changed=' + str(changed))
return result
def update(self):
if self.module.check_mode:
return True
## use this method to create the objects (if not exists) or modify (if exists)
if self.exists():
## MODIFY: object exists - perform modify - get modified json first
jsonstr = self.update_json("MODIFY")
if self.want.mode == "output":
print_output.append(jsonstr)
else:
## post the object modify json
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=jsonstr)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]:
raise F5ModuleError(resp.content)
## get operation id from last request and loop through check
self.operationId = str(response["id"])
attempts = 1
error = ""
while attempts <= obj_attempts:
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
query = "?$filter=id+eq+'{0}'".format(self.operationId)
resp = self.client.api.get(uri + query).json()
try:
if resp["items"][0]["state"] == "BOUND":
return True
elif resp["items"][0]["state"] == "ERROR":
error = str(resp["items"][0]["error"])
break
except:
time.sleep(1)
attempts += 1
if error != "":
## delete attempted configuration and raise error
self.deleteOperation(self.operationId)
raise F5ModuleError("Creation error: " + error)
else:
raise F5ModuleError("Object " + self.want.name + " create/modify operation timeout")
else:
## CREATE: object doesn't exist - perform create - get modified json first
jsonstr = self.update_json("CREATE")
if self.want.mode == "output":
print_output.append(jsonstr)
else:
## post the object create json
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=jsonstr)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]:
raise F5ModuleError(resp.content)
## get operation id from last request and loop through check
self.operationId = str(response["id"])
attempts = 1
error = ""
while attempts <= obj_attempts:
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
query = "?$filter=id+eq+'{0}'".format(self.operationId)
resp = self.client.api.get(uri + query).json()
try:
if resp["items"][0]["state"] == "BOUND":
return True
elif resp["items"][0]["state"] == "ERROR":
error = str(resp["items"][0]["error"])
break
except:
time.sleep(1)
attempts += 1
if error != "":
## delete attempted configuration and raise error
self.deleteOperation(self.operationId)
raise F5ModuleError("Creation error: " + self.operationId + ":" + error)
else:
raise F5ModuleError("Object " + self.want.name + " create/modify operation timeout")
def absent(self):
## use this method to delete the objects (if exists)
if self.exists():
if self.module.check_mode:
return True
            ## DELETE: object exists - perform delete - get modified json first
jsonstr = self.update_json("DELETE")
if self.want.mode == "output":
print_output.append(jsonstr)
else:
                ## post the object delete json
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=jsonstr)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status not in [200, 201, 202] or 'code' in response and response['code'] not in [200, 201, 202]:
raise F5ModuleError(resp.content)
## get operation id from last request and loop through check
self.operationId = str(response["id"])
attempts = 1
error = ""
while attempts <= obj_attempts:
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
query = "?$filter=id+eq+'{0}'".format(self.operationId)
resp = self.client.api.get(uri + query).json()
try:
if resp["items"][0]["state"] == "BOUND":
return True
elif resp["items"][0]["state"] == "ERROR":
error = str(resp["items"][0]["error"])
break
except:
time.sleep(1)
attempts += 1
if error != "":
## delete attempted configuration and raise error
self.deleteOperation(self.operationId)
raise F5ModuleError("Creation error: " + self.operationId + ":" + error)
else:
raise F5ModuleError("Object " + self.want.name + " create/modify operation timeout")
else:
            ## object doesn't exist - just exit (changed = False)
return False
def exists(self):
## use this method to see if the objects already exists - queries for the respective application service object
uri = "https://{0}:{1}/mgmt/shared/iapp/blocks/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
query = "?$filter=name+eq+'{0}'".format(self.want.name)
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
foundit = 0
for i in range(0, len(response["items"])):
try:
if str(response["items"][i]["name"]) == self.want.name:
foundit = 1
self.existing_config = response["items"][i]
break
except:
pass
if foundit == 1:
return True
else:
return False
else:
return False
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
clientSettings=dict(
required=True,
type='dict',
options=dict(
cipherType=dict(
choices=['string','group'],
default='string'
),
cipher=dict(default=None),
enableTLS1_3=dict(type='bool', default=False),
cert=dict(default='/Common/default.crt'),
key=dict(default='/Common/default.key'),
chain=dict(default=None),
caCert=dict(default=None),
caKey=dict(default=None),
caChain=dict(),
alpn=dict(type='bool', default=False),
logPublisher=dict(default='/Common/sys-ssl-publisher')
)
),
serverSettings=dict(
type='dict',
options=dict(
cipherType=dict(
choices=['string','group'],
default='string'
),
cipher=dict(default=None),
enableTLS1_3=dict(type='bool', default=False),
caBundle=dict(default='/Common/ca-bundle.crt'),
blockExpired=dict(type='bool'),
blockUntrusted=dict(type='bool'),
ocsp=dict(default=None),
crl=dict(default=None),
logPublisher=dict(default='/Common/sys-ssl-publisher')
)
),
bypassHandshakeFailure=dict(type='bool', default=False),
bypassClientCertFailure=dict(type='bool', default=False),
state=dict(
default='present',
choices=['absent','present']
),
mode=dict(
choices=["update","output"],
default="update"
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
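# A hedged illustration (not part of the original module): a made-up parameter dictionary
# of the shape this ArgumentSpec accepts, showing how ModuleParameters derives values;
# all values below are placeholders.
#
#   example_params = {
#       "name": "demo_ssl",
#       "clientSettings": {"cipherType": "string", "cipher": "DEFAULT",
#                          "cert": "/Common/default.crt", "key": "/Common/default.key"},
#       "serverSettings": {"caBundle": "/Common/ca-bundle.crt"},
#       "state": "present",
#       "mode": "output"
#   }
#   # ModuleParameters(params=example_params).name evaluates to "ssloT_demo_ssl"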
def main():
## start here
## define global print_output
global print_output
print_output = []
## define argumentspec
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
## send to exec_module, result contains output of tasks
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
result = dict(
print_output = print_output,
**results
)
module.exit_json(**result)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main() | 36.955213 | 492 | 0.580573 | 29,833 | 0.592713 | 0 | 0 | 6,308 | 0.125325 | 0 | 0 | 25,577 | 0.508156 |
828ee62b4ffbbb1d5dea12315eaccdf681093a66 | 52,854 | py | Python | nemo/pipelines.py | simonsobs/nemo | ab72fa1c5ea878fcb63eaf31642b3d7bdd6ac636 | ["BSD-3-Clause"] | 2 | 2021-01-11T13:10:27.000Z | 2022-03-09T16:31:48.000Z | nemo/pipelines.py | simonsobs/nemo | ab72fa1c5ea878fcb63eaf31642b3d7bdd6ac636 | ["BSD-3-Clause"] | 3 | 2020-11-11T10:44:47.000Z | 2022-01-05T07:28:58.000Z | nemo/pipelines.py | simonsobs/nemo | ab72fa1c5ea878fcb63eaf31642b3d7bdd6ac636 | ["BSD-3-Clause"] | 1 | 2021-03-05T18:31:00.000Z | 2021-03-05T18:31:00.000Z | """
This module defines pipelines - sets of tasks in nemo that we sometimes want to do on different inputs
(e.g., real data or simulated data).
"""
import os
import sys
import glob
import shutil
import time
import astropy.io.fits as pyfits
import astropy.table as atpy
from astLib import astWCS
import numpy as np
from scipy import ndimage, interpolate
import copy
from pixell import enmap
import nemo
from . import startUp
from . import filters
from . import photometry
from . import catalogs
from . import maps
from . import signals
from . import completeness
from . import MockSurvey
import nemoCython
#------------------------------------------------------------------------------------------------------------
def filterMapsAndMakeCatalogs(config, rootOutDir = None, copyFilters = False, measureFluxes = True,
invertMap = False, verbose = True, useCachedMaps = True):
"""Runs the map filtering and catalog construction steps according to the given configuration.
Args:
        config (:obj:`startup.NemoConfig`): Nemo configuration object.
rootOutDir (str): If None, use the default given by config. Otherwise, use this to override where the
output filtered maps and catalogs are written.
copyFilters (bool, optional): If True, and rootOutDir is given (not None), then filters will be
copied from the default output location (from a pre-existing nemo run) to the appropriate
directory under rootOutDir. This is used by, e.g., contamination tests based on sky sims, where
the same kernels as used on the real data are applied to simulated maps. If rootOutDir = None,
            setting copyFilters = True has no effect.
measureFluxes (bool, optional): If True, measure fluxes. If False, just extract S/N values for
detected objects.
invertMap (bool, optional): If True, multiply all maps by -1; needed by
            :meth:`maps.estimateContaminationFromInvertedMaps`.
Returns:
Optimal catalog (keeps the highest S/N detection when filtering at multiple scales).
Note:
See bin/nemo for how this pipeline is applied to real data, and maps.sourceInjectionTest
for how this is applied to source-free sims that are generated on the fly.
"""
if config.parDict['twoPass'] == False:
catalog=_filterMapsAndMakeCatalogs(config, rootOutDir = rootOutDir, copyFilters = copyFilters,
measureFluxes = measureFluxes, invertMap = invertMap,
verbose = verbose, useCachedMaps = useCachedMaps)
else:
# Two pass pipeline
# On 1st pass, find sources (and maybe clusters) with canned settings, masking nothing.
# On 2nd pass, the 1st pass catalog will be used to mask or subtract sources from maps used for
# noise estimation only.
# No point doing this if we're not using the map itself for the noise term in the filter
for f in config.parDict['mapFilters']:
for key in f.keys():
if key == 'noiseParams' and f['noiseParams']['method'] != 'dataMap':
raise Exception("There is no point running if filter noise method != 'dataMap'.")
# Pass 1 - find point sources, save nothing
# NOTE: We need to do this for each map in the list, if we have a multi-frequency filter
pass1PtSrcSettings={'label': "Beam",
'class': "BeamMatchedFilter",
'params': {'noiseParams': {'method': "model",
'noiseGridArcmin': 40.0,
'numNoiseBins': 2},
'saveFilteredMaps': False,
'outputUnits': 'uK',
'edgeTrimArcmin': 0.0}}
config.parDict['mapFilters']=[pass1PtSrcSettings]
config.parDict['photFilter']=None
config.parDict['maskPointSourcesFromCatalog']=[] # This is only applied on the 2nd pass
config.parDict['measureShapes']=True # Double-lobed extended source at f090 causes havoc in one tile
orig_unfilteredMapsDictList=list(config.unfilteredMapsDictList)
config.parDict['forcedPhotometryCatalog']=None # If in this mode, only wanted on 2nd pass
pass1CatalogsList=[]
surveyMasksList=[] # ok, these should all be the same, otherwise we have problems...
for mapDict in orig_unfilteredMapsDictList:
# We use whole tile area (i.e., don't trim overlaps) so that we get everything if under MPI
# Otherwise, powerful sources in overlap regions mess things up under MPI
# Serial mode doesn't have this issue as it can see the whole catalog over all tiles
            # But since we now use full area, we may double subtract overlap sources when in serial mode
# So the removeDuplicates call fixes that, and doesn't impact anything else here
surveyMasksList.append(mapDict['surveyMask'])
mapDict['surveyMask']=None
config.unfilteredMapsDictList=[mapDict]
catalog=_filterMapsAndMakeCatalogs(config, verbose = False, writeAreaMasks = False)
if len(catalog) > 0 :
catalog, numDuplicatesFound, names=catalogs.removeDuplicates(catalog)
pass1CatalogsList.append(catalog)
# Pass 2 - subtract point sources in the maps used for noise term in filter only
# To avoid ringing in the pass 2, we siphon off the super bright things found in pass 1
# We subtract those from the maps used in pass 2 - we then need to add them back at the end
config.restoreConfig()
config.parDict['measureShapes']=True # We'll keep this for pass 2 as well
siphonSNR=50
for mapDict, catalog, surveyMask in zip(orig_unfilteredMapsDictList, pass1CatalogsList, surveyMasksList):
#catalogs.catalog2DS9(catalog[catalog['SNR'] > siphonSNR], config.diagnosticsDir+os.path.sep+"pass1_highSNR_siphoned.reg")
mapDict['noiseMaskCatalog']=catalog[catalog['SNR'] < siphonSNR]
mapDict['subtractPointSourcesFromCatalog']=[catalog[catalog['SNR'] > siphonSNR]]
mapDict['maskSubtractedPointSources']=True
mapDict['surveyMask']=surveyMask
config.unfilteredMapsDictList=orig_unfilteredMapsDictList
catalog=_filterMapsAndMakeCatalogs(config, verbose = False)
# Merge back in the bright sources that were subtracted in pass 1
# (but we don't do that in forced photometry mode)
mergeList=[catalog]
if config.parDict['forcedPhotometryCatalog'] is None:
for pass1Catalog in pass1CatalogsList:
mergeList.append(pass1Catalog[pass1Catalog['SNR'] > siphonSNR])
catalog=atpy.vstack(mergeList)
return catalog
#------------------------------------------------------------------------------------------------------------
def _filterMapsAndMakeCatalogs(config, rootOutDir = None, copyFilters = False, measureFluxes = True,
invertMap = False, verbose = True, useCachedMaps = True,
writeAreaMasks = True):
"""Runs the map filtering and catalog construction steps according to the given configuration.
Args:
        config (:obj:`startup.NemoConfig`): Nemo configuration object.
rootOutDir (str): If None, use the default given by config. Otherwise, use this to override where the
output filtered maps and catalogs are written.
copyFilters (bool, optional): If True, and rootOutDir is given (not None), then filters will be
copied from the default output location (from a pre-existing nemo run) to the appropriate
directory under rootOutDir. This is used by, e.g., contamination tests based on sky sims, where
the same kernels as used on the real data are applied to simulated maps. If rootOutDir = None,
            setting copyFilters = True has no effect.
measureFluxes (bool, optional): If True, measure fluxes. If False, just extract S/N values for
detected objects.
invertMap (bool, optional): If True, multiply all maps by -1; needed by
            :meth:`maps.estimateContaminationFromInvertedMaps`.
Returns:
Optimal catalog (keeps the highest S/N detection when filtering at multiple scales).
Note:
See bin/nemo for how this pipeline is applied to real data, and maps.sourceInjectionTest
for how this is applied to source-free sims that are generated on the fly.
"""
# If running on sims (source-free or with injected sources), this ensures we use the same kernels for
# filtering the sim maps as was used on the real data, by copying kernels to the sims dir. The kernels
# will then be loaded automatically when filterMaps is called. Yes, this is a bit clunky...
if rootOutDir is not None:
filteredMapsDir=rootOutDir+os.path.sep+"filteredMaps"
diagnosticsDir=rootOutDir+os.path.sep+"diagnostics"
dirList=[rootOutDir, filteredMapsDir, diagnosticsDir]
for d in dirList:
os.makedirs(d, exist_ok = True)
if copyFilters == True:
for tileName in config.tileNames:
fileNames=glob.glob(config.diagnosticsDir+os.path.sep+tileName+os.path.sep+"filter*#%s*.fits" % (tileName))
if len(fileNames) == 0:
raise Exception("Could not find pre-computed filters to copy - you need to add 'saveFilter: True' to the filter params in the config file (this is essential for doing source injection sims quickly).")
kernelCopyDestDir=diagnosticsDir+os.path.sep+tileName
os.makedirs(kernelCopyDestDir, exist_ok = True)
for f in fileNames:
dest=kernelCopyDestDir+os.path.sep+os.path.split(f)[-1]
if os.path.exists(dest) == False:
shutil.copyfile(f, dest)
print("... copied filter %s to %s ..." % (f, dest))
else:
rootOutDir=config.rootOutDir
filteredMapsDir=config.filteredMapsDir
diagnosticsDir=config.diagnosticsDir
# We re-sort the filters list here - in case we have photFilter defined
photFilter=config.parDict['photFilter']
filtersList=[]
if photFilter is not None:
for f in config.parDict['mapFilters']:
if f['label'] == photFilter:
filtersList.append(f)
for f in config.parDict['mapFilters']:
if photFilter is not None:
if f['label'] == photFilter:
continue
filtersList.append(f)
if photFilter is not None:
assert(filtersList[0]['label'] == photFilter)
photFilteredMapDict=None
# Make filtered maps for each filter and tile
catalogDict={}
for tileName in config.tileNames:
# Now have per-tile directories (friendlier for Lustre)
tileFilteredMapsDir=filteredMapsDir+os.path.sep+tileName
tileDiagnosticsDir=diagnosticsDir+os.path.sep+tileName
for d in [tileFilteredMapsDir, tileDiagnosticsDir]:
os.makedirs(d, exist_ok = True)
if verbose == True: print(">>> Making filtered maps - tileName = %s ..." % (tileName))
# We could load the unfiltered map only once here?
# We could also cache 'dataMap' noise as it will always be the same
for f in filtersList:
label=f['label']+"#"+tileName
catalogDict[label]={}
if 'saveDS9Regions' in f['params'] and f['params']['saveDS9Regions'] == True:
DS9RegionsPath=config.filteredMapsDir+os.path.sep+tileName+os.path.sep+"%s_filteredMap.reg" % (label)
else:
DS9RegionsPath=None
filteredMapDict=filters.filterMaps(config.unfilteredMapsDictList, f, tileName,
filteredMapsDir = tileFilteredMapsDir,
diagnosticsDir = tileDiagnosticsDir, selFnDir = config.selFnDir,
verbose = True, undoPixelWindow = True,
useCachedMaps = useCachedMaps)
if f['label'] == photFilter:
photFilteredMapDict={}
photFilteredMapDict['SNMap']=filteredMapDict['SNMap']
photFilteredMapDict['data']=filteredMapDict['data']
# Forced photometry on user-supplied list of objects, or detect sources
if 'forcedPhotometryCatalog' in config.parDict.keys() and config.parDict['forcedPhotometryCatalog'] is not None:
catalog=photometry.makeForcedPhotometryCatalog(filteredMapDict,
config.parDict['forcedPhotometryCatalog'],
useInterpolator = config.parDict['useInterpolator'],
DS9RegionsPath = DS9RegionsPath)
else:
# Normal mode
catalog=photometry.findObjects(filteredMapDict, threshold = config.parDict['thresholdSigma'],
minObjPix = config.parDict['minObjPix'],
findCenterOfMass = config.parDict['findCenterOfMass'],
removeRings = config.parDict['removeRings'],
ringThresholdSigma = config.parDict['ringThresholdSigma'],
rejectBorder = config.parDict['rejectBorder'],
objIdent = config.parDict['objIdent'],
longNames = config.parDict['longNames'],
useInterpolator = config.parDict['useInterpolator'],
measureShapes = config.parDict['measureShapes'],
invertMap = invertMap,
DS9RegionsPath = DS9RegionsPath)
# We write area mask here, because it gets modified by findObjects if removing rings
# NOTE: condition added to stop writing tile maps again when running nemoMass in forced photometry mode
maskFileName=config.selFnDir+os.path.sep+"areaMask#%s.fits" % (tileName)
surveyMask=np.array(filteredMapDict['surveyMask'], dtype = int)
if writeAreaMasks == True:
if os.path.exists(maskFileName) == False and os.path.exists(config.selFnDir+os.path.sep+"areaMask.fits") == False:
maps.saveFITS(maskFileName, surveyMask, filteredMapDict['wcs'], compressed = True,
compressionType = 'PLIO_1')
if measureFluxes == True:
photometry.measureFluxes(catalog, filteredMapDict, config.diagnosticsDir,
photFilteredMapDict = photFilteredMapDict,
useInterpolator = config.parDict['useInterpolator'])
else:
# Get S/N only - if the reference (fixed) filter scale has been given
# This is (probably) only used by maps.estimateContaminationFromInvertedMaps
if photFilter is not None:
photometry.getSNRValues(catalog, photFilteredMapDict['SNMap'],
filteredMapDict['wcs'], prefix = 'fixed_',
useInterpolator = config.parDict['useInterpolator'],
invertMap = invertMap)
catalogDict[label]['catalog']=catalog
# Merged/optimal catalogs
optimalCatalog=catalogs.makeOptimalCatalog(catalogDict, constraintsList = config.parDict['catalogCuts'])
return optimalCatalog
#------------------------------------------------------------------------------------------------------------
def makeSelFnCollection(config, mockSurvey):
"""Makes a collection of selection function dictionaries (one per footprint specified in selFnFootprints
    in the config file, plus the full survey mask), each containing information on noise levels, area covered,
and completeness.
Returns a dictionary (keys: 'full' - corresponding to whole survey, plus other keys named by footprint).
"""
# Q varies across tiles
Q=signals.QFit(config)
# We only care about the filter used for fixed_ columns
photFilterLabel=config.parDict['photFilter']
for filterDict in config.parDict['mapFilters']:
if filterDict['label'] == photFilterLabel:
break
# We'll only calculate completeness for this given selection
SNRCut=config.parDict['selFnOptions']['fixedSNRCut']
# Handle any missing options for calcCompleteness (these aren't used by the default fast method anyway)
if 'numDraws' not in config.parDict['selFnOptions'].keys():
config.parDict['selFnOptions']['numDraws']=2000000
if 'numIterations' not in config.parDict['selFnOptions'].keys():
config.parDict['selFnOptions']['numIterations']=100
# We can calculate stats in different extra areas (e.g., inside optical survey footprints)
footprintsList=[]
if 'selFnFootprints' in config.parDict.keys():
footprintsList=footprintsList+config.parDict['selFnFootprints']
# Run the selection function calculation on each tile in turn
selFnCollection={'full': []}
for footprintDict in footprintsList:
if footprintDict['label'] not in selFnCollection.keys():
selFnCollection[footprintDict['label']]=[]
for tileName in config.tileNames:
RMSTab=completeness.getRMSTab(tileName, photFilterLabel, config.selFnDir)
compMz=completeness.calcCompleteness(RMSTab, SNRCut, tileName, mockSurvey, config.parDict['massOptions'], Q,
numDraws = config.parDict['selFnOptions']['numDraws'],
numIterations = config.parDict['selFnOptions']['numIterations'],
method = config.parDict['selFnOptions']['method'])
selFnDict={'tileName': tileName,
'RMSTab': RMSTab,
'tileAreaDeg2': RMSTab['areaDeg2'].sum(),
'compMz': compMz}
selFnCollection['full'].append(selFnDict)
# Generate footprint intersection masks (e.g., with HSC) and RMS tables, which are cached
# May as well do this bit here (in parallel) and assemble output later
for footprintDict in footprintsList:
completeness.makeIntersectionMask(tileName, config.selFnDir, footprintDict['label'], masksList = footprintDict['maskList'])
tileAreaDeg2=completeness.getTileTotalAreaDeg2(tileName, config.selFnDir, footprintLabel = footprintDict['label'])
if tileAreaDeg2 > 0:
RMSTab=completeness.getRMSTab(tileName, photFilterLabel, config.selFnDir,
footprintLabel = footprintDict['label'])
compMz=completeness.calcCompleteness(RMSTab, SNRCut, tileName, mockSurvey, config.parDict['massOptions'], Q,
numDraws = config.parDict['selFnOptions']['numDraws'],
numIterations = config.parDict['selFnOptions']['numIterations'],
method = config.parDict['selFnOptions']['method'])
selFnDict={'tileName': tileName,
'RMSTab': RMSTab,
'tileAreaDeg2': RMSTab['areaDeg2'].sum(),
'compMz': compMz}
selFnCollection[footprintDict['label']].append(selFnDict)
# Optional mass-limit maps
if 'massLimitMaps' in list(config.parDict['selFnOptions'].keys()):
for massLimitDict in config.parDict['selFnOptions']['massLimitMaps']:
completeness.makeMassLimitMap(SNRCut, massLimitDict['z'], tileName, photFilterLabel, mockSurvey,
config.parDict['massOptions'], Q, config.diagnosticsDir,
config.selFnDir)
return selFnCollection
#------------------------------------------------------------------------------------------------------------
def makeMockClusterCatalog(config, numMocksToMake = 1, combineMocks = False, writeCatalogs = True,
writeInfo = True, verbose = True):
"""Generate a mock cluster catalog using the given nemo config.
Returns:
List of catalogs (each is an astropy Table object)
"""
# Having changed nemoMock interface, we may need to make output dir
if os.path.exists(config.mocksDir) == False:
os.makedirs(config.mocksDir, exist_ok = True)
# Noise sources in mocks
if 'applyPoissonScatter' in config.parDict.keys():
applyPoissonScatter=config.parDict['applyPoissonScatter']
else:
applyPoissonScatter=True
if 'applyIntrinsicScatter' in config.parDict.keys():
applyIntrinsicScatter=config.parDict['applyIntrinsicScatter']
else:
applyIntrinsicScatter=True
if 'applyNoiseScatter' in config.parDict.keys():
applyNoiseScatter=config.parDict['applyNoiseScatter']
else:
applyNoiseScatter=True
if verbose: print(">>> Mock noise sources (Poisson, intrinsic, measurement noise) = (%s, %s, %s) ..." % (applyPoissonScatter, applyIntrinsicScatter, applyNoiseScatter))
# Q varies across tiles
Q=signals.QFit(config)
# We only care about the filter used for fixed_ columns
photFilterLabel=config.parDict['photFilter']
for filterDict in config.parDict['mapFilters']:
if filterDict['label'] == photFilterLabel:
break
# The same as was used for detecting objects
thresholdSigma=config.parDict['thresholdSigma']
# We need an assumed scaling relation for mock observations
scalingRelationDict=config.parDict['massOptions']
if verbose: print(">>> Setting up mock survey ...")
# NOTE: Sanity check is possible here: area in RMSTab should equal area from areaMask.fits
# If it isn't, there is a problem...
# Also, we're skipping the individual tile-loading routines here for speed
checkAreaConsistency=False
wcsDict={}
RMSMap=pyfits.open(config.selFnDir+os.path.sep+"RMSMap_%s.fits" % (photFilterLabel))
RMSTab=atpy.Table().read(config.selFnDir+os.path.sep+"RMSTab.fits")
count=0
totalAreaDeg2=0
RMSMapDict={}
areaDeg2Dict={}
if checkAreaConsistency == True:
areaMap=pyfits.open(config.selFnDir+os.path.sep+"areaMask.fits")
t0=time.time()
for tileName in config.tileNames:
count=count+1
if tileName == 'PRIMARY':
if tileName in RMSMap:
extName=tileName
data=RMSMap[extName].data
else:
data=None
if data is None:
for extName in RMSMap:
data=RMSMap[extName].data
if data is not None:
break
RMSMapDict[tileName]=RMSMap[extName].data
wcsDict[tileName]=astWCS.WCS(RMSMap[extName].header, mode = 'pyfits')
else:
RMSMapDict[tileName]=RMSMap[tileName].data
wcsDict[tileName]=astWCS.WCS(RMSMap[tileName].header, mode = 'pyfits')
# Area from RMS table
areaDeg2=RMSTab[RMSTab['tileName'] == tileName]['areaDeg2'].sum()
areaDeg2Dict[tileName]=areaDeg2
totalAreaDeg2=totalAreaDeg2+areaDeg2
# Area from map (slower)
if checkAreaConsistency == True:
areaMask, wcsDict[tileName]=completeness.loadAreaMask(tileName, config.selFnDir)
areaMask=areaMap[tileName].data
map_areaDeg2=(areaMask*maps.getPixelAreaArcmin2Map(areaMask.shape, wcsDict[tileName])).sum()/(60**2)
if abs(map_areaDeg2-areaDeg2) > 1e-4:
raise Exception("Area from areaMask.fits doesn't agree with area from RMSTab.fits")
RMSMap.close()
if checkAreaConsistency == True:
areaMap.close()
t1=time.time()
if verbose: print("... took %.3f sec ..." % (t1-t0))
# Useful for testing:
if 'seed' in config.parDict.keys():
seed=config.parDict['seed']
else:
seed=None
if seed is not None:
np.random.seed(seed)
# We're now using one MockSurvey object for the whole survey
massOptions=config.parDict['massOptions']
minMass=5e13
zMin=0.0
zMax=2.0
defCosmo={'H0': 70.0, 'Om0': 0.30, 'Ob0': 0.05, 'sigma8': 0.80, 'ns': 0.95, 'delta': 500, 'rhoType': 'critical'}
for key in defCosmo:
if key not in massOptions.keys():
massOptions[key]=defCosmo[key]
H0=massOptions['H0']
Om0=massOptions['Om0']
Ob0=massOptions['Ob0']
sigma8=massOptions['sigma8']
ns=massOptions['ns']
delta=massOptions['delta']
rhoType=massOptions['rhoType']
mockSurvey=MockSurvey.MockSurvey(minMass, totalAreaDeg2, zMin, zMax, H0, Om0, Ob0, sigma8, ns,
delta = delta, rhoType = rhoType, enableDrawSample = True)
print("... mock survey parameters:")
for key in defCosmo.keys():
print(" %s = %s" % (key, str(massOptions[key])))
for key in ['tenToA0', 'B0', 'Mpivot', 'sigma_int']:
print(" %s = %s" % (key, str(scalingRelationDict[key])))
print(" total area = %.1f square degrees" % (totalAreaDeg2))
print(" random seed = %s" % (str(seed)))
if verbose: print(">>> Making mock catalogs ...")
catList=[]
for i in range(numMocksToMake):
mockTabsList=[]
t0=time.time()
for tileName in config.tileNames:
# It's possible (depending on tiling) that blank tiles were included - so skip
# We may also have some tiles that are almost but not quite blank
if RMSMapDict[tileName].sum() == 0 or areaDeg2Dict[tileName] < 0.5:
continue
mockTab=mockSurvey.drawSample(RMSMapDict[tileName], scalingRelationDict, Q, wcs = wcsDict[tileName],
photFilterLabel = photFilterLabel, tileName = tileName, makeNames = True,
SNRLimit = thresholdSigma, applySNRCut = True,
areaDeg2 = areaDeg2Dict[tileName],
applyPoissonScatter = applyPoissonScatter,
applyIntrinsicScatter = applyIntrinsicScatter,
applyNoiseScatter = applyNoiseScatter)
if mockTab is not None:
mockTabsList.append(mockTab)
tab=atpy.vstack(mockTabsList)
catList.append(tab)
t1=time.time()
if verbose: print("... making mock catalog %d took %.3f sec ..." % (i+1, t1-t0))
# Write catalog and .reg file
if writeCatalogs == True:
#colNames=['name', 'RADeg', 'decDeg', 'template', 'redshift', 'redshiftErr', 'true_M500', 'true_fixed_y_c', 'fixed_SNR', 'fixed_y_c', 'fixed_err_y_c']
#colFmts =['%s', '%.6f', '%.6f', '%s', '%.3f', '%.3f', '%.3f', '%.3f', '%.1f', '%.3f', '%.3f']
mockCatalogFileName=config.mocksDir+os.path.sep+"mockCatalog_%d.csv" % (i+1)
catalogs.writeCatalog(tab, mockCatalogFileName)
catalogs.writeCatalog(tab, mockCatalogFileName.replace(".csv", ".fits"))
addInfo=[{'key': 'fixed_SNR', 'fmt': '%.1f'}]
catalogs.catalog2DS9(tab, mockCatalogFileName.replace(".csv", ".reg"), constraintsList = [],
addInfo = addInfo, color = "cyan")
if combineMocks == True:
tab=None
for i in range(numMocksToMake):
mockCatalogFileName=config.mocksDir+os.path.sep+"mockCatalog_%d.fits" % (i+1)
stackTab=atpy.Table().read(mockCatalogFileName)
            if tab is None:
tab=stackTab
else:
tab=atpy.vstack([tab, stackTab])
outFileName=config.mocksDir+os.path.sep+"mockCatalog_combined.fits"
tab.meta['NEMOVER']=nemo.__version__
tab.write(outFileName, overwrite = True)
# Write a small text file with the parameters used to generate the mocks into the mocks dir (easier than using headers)
if writeInfo == True:
mockKeys=['massOptions', 'makeMockCatalogs', 'applyPoissonScatter', 'applyIntrinsicScatter', 'applyNoiseScatter']
with open(config.mocksDir+os.path.sep+"mockParameters.txt", "w") as outFile:
for m in mockKeys:
if m in config.parDict.keys():
outFile.write("%s: %s\n" % (m, config.parDict[m]))
return catList
#------------------------------------------------------------------------------------------------------------
def extractSpec(config, tab, method = 'CAP', diskRadiusArcmin = 4.0, highPassFilter = False,
estimateErrors = True, saveFilteredMaps = False):
"""Returns a table containing the spectral energy distribution, extracted using either compensated
aperture photometry (CAP) at each object location in the input catalog, or using a matched filter.
    Maps at different frequencies will first be matched to the lowest resolution beam, using a PSF-matching
    convolution kernel.
For the CAP method, at each object location, the temperature fluctuation is measured within a disk of
radius diskRadiusArcmin, after subtracting the background measured in an annulus between
diskRadiusArcmin < r < sqrt(2) * diskRadiusArcmin (i.e., this should be similar to the method
described in Schaan et al. 2020).
For the matched filter method, the catalog must contain a `template` column, as produced by the main
`nemo` script, with template names in the format Arnaud_M2e14_z0p4 (for example). This will be used to
set the signal scale used for each object. All definitions of filters in the config will be ignored,
in favour of a filter using a simple CMB + white noise model. Identical filters will be used for all
maps (i.e., the method of Saro et al. 2014).
Args:
config (:obj:`startup.NemoConfig`): Nemo configuration object.
tab (:obj:`astropy.table.Table`): Catalog containing input object positions. Must contain columns
'name', 'RADeg', 'decDeg'.
        method (str, optional): Extraction method to use: either 'CAP' or 'matchedFilter'.
diskRadiusArcmin (float, optional): If using CAP method: disk aperture radius in arcmin, within
which the signal is measured. The background will be estimated in an annulus between
diskRadiusArcmin < r < sqrt(2) * diskRadiusArcmin.
highPassFilter (bool, optional): If using CAP method: if set, subtract the large scale
background using maps.subtractBackground, with the smoothing scale set to
2 * sqrt(2) * diskRadiusArcmin.
        estimateErrors (bool, optional): If using CAP method: if set, estimate uncertainties by placing
random apertures throughout the map. For now, this is done on a tile-by-tile basis, and
doesn't take into account inhomogeneous noise within a tile.
saveFilteredMaps (bool, optional): If using matchedFilter method: save the filtered maps under
the `nemoSpecCache` directory (which is created in the current working directory, if it
doesn't already exist).
Returns:
Catalog containing spectral energy distribution measurements for each object.
For the CAP method, units of extracted signals are uK arcmin^2.
For the matchedFilter method, extracted signals are deltaT CMB amplitude in uK.
"""
diagnosticsDir=config.diagnosticsDir
# Choose lowest resolution as the reference beam - we match to that
refBeam=None
refFWHMArcmin=0
refIndex=0
beams=[]
for i in range(len(config.unfilteredMapsDictList)):
mapDict=config.unfilteredMapsDictList[i]
beam=signals.BeamProfile(mapDict['beamFileName'])
if beam.FWHMArcmin > refFWHMArcmin:
refBeam=beam
refFWHMArcmin=beam.FWHMArcmin
refIndex=i
beams.append(beam)
# Sort the list of beams and maps so that the one with the reference beam is in index 0
config.unfilteredMapsDictList.insert(0, config.unfilteredMapsDictList.pop(refIndex))
beams.insert(0, beams.pop(refIndex))
# Figure out how much we need to Gaussian blur to match the reference beam
# NOTE: This was an alternative to proper PSF-matching that wasn't good enough for ACT beams
#for i in range(1, len(config.unfilteredMapsDictList)):
#mapDict=config.unfilteredMapsDictList[i]
#beam=beams[i]
#degPerPix=np.mean(np.diff(beam.rDeg))
#assert(abs(np.diff(beam.rDeg).max()-degPerPix) < 0.001)
#resMin=1e6
#smoothPix=0
#attFactor=1.0
#for j in range(1, 100):
#smoothProf=ndimage.gaussian_filter1d(beam.profile1d, j)
#smoothProf=smoothProf/smoothProf.max()
#res=np.sum(np.power(refBeam.profile1d-smoothProf, 2))
#if res < resMin:
#resMin=res
#smoothPix=j
#attFactor=1/smoothProf.max()
#smoothScaleDeg=smoothPix*degPerPix
#mapDict['smoothScaleDeg']=smoothScaleDeg
#mapDict['smoothAttenuationFactor']=1/ndimage.gaussian_filter1d(beam.profile1d, smoothPix).max()
# For testing on CMB maps here
refMapDict=config.unfilteredMapsDictList[0]
# PSF matching via a convolution kernel
kernelDict={} # keys: tile, obsFreqGHz
for tileName in config.tileNames:
if tileName not in kernelDict.keys():
kernelDict[tileName]={}
for i in range(1, len(config.unfilteredMapsDictList)):
mapDict=config.unfilteredMapsDictList[i]
beam=beams[i]
degPerPix=np.mean(np.diff(beam.rDeg))
assert(abs(np.diff(beam.rDeg).max()-degPerPix) < 0.001)
# Calculate convolution kernel
sizePix=beam.profile1d.shape[0]*2
if sizePix % 2 == 0:
sizePix=sizePix+1
symRDeg=np.linspace(-0.5, 0.5, sizePix)
assert((symRDeg == 0).sum())
symProf=interpolate.splev(abs(symRDeg), beam.tck)
symRefProf=interpolate.splev(abs(symRDeg), refBeam.tck)
fSymRef=np.fft.fft(np.fft.fftshift(symRefProf))
fSymBeam=np.fft.fft(np.fft.fftshift(symProf))
fSymConv=fSymRef/fSymBeam
fSymConv[fSymBeam < 1e-1]=0 # Was 1e-2; this value avoids ringing, smaller values do not
symMatched=np.fft.ifft(fSymBeam*fSymConv).real
symConv=np.fft.ifft(fSymConv).real
# This allows normalization in same way as Gaussian smooth method
symConv=symConv/symConv.sum()
convedProf=ndimage.convolve(symProf, np.fft.fftshift(symConv))
attenuationFactor=1/convedProf.max() # norm
# Make profile object
peakIndex=np.argmax(np.fft.fftshift(symConv))
convKernel=signals.BeamProfile(profile1d = np.fft.fftshift(symConv)[peakIndex:], rDeg = symRDeg[peakIndex:])
## Check plots
#import pylab as plt
#plt.figure(figsize=(10,8))
#plt.plot(abs(symRDeg*60), symRefProf, label = 'ref', lw = 3)
#plt.plot(abs(symRDeg*60), convedProf*attenuationFactor, label = 'kernel convolved')
#integralRatio=np.trapz(symRefProf)/np.trapz(convedProf*attenuationFactor)
#plt.title("%.3f" % (integralRatio))
#plt.semilogy()
#plt.legend()
#ratio=(convedProf*attenuationFactor)/symRefProf
#plt.figure(figsize=(10,8))
#plt.plot(abs(symRDeg*60), ratio, label = 'ratio')
#plt.plot(abs(symRDeg*60), [1.0]*len(symRDeg), 'r-')
#plt.legend()
# Fudging 2d kernel to match (fix properly later)
# NOTE: Now done at higher res but doesn't make much difference
# (but DOES blow up in some tiles if you use e.g. have the resolution)
wcs=astWCS.WCS(config.tileCoordsDict[tileName]['header'], mode = 'pyfits').copy()
wcs.header['CDELT1']=np.diff(refBeam.rDeg)[0]
wcs.header['CDELT2']=np.diff(refBeam.rDeg)[0]
wcs.header['NAXIS1']=int(np.ceil(2*refBeam.rDeg.max()/wcs.header['CDELT1']))
wcs.header['NAXIS2']=int(np.ceil(2*refBeam.rDeg.max()/wcs.header['CDELT2']))
wcs.updateFromHeader()
shape=(wcs.header['NAXIS2'], wcs.header['NAXIS1'])
degreesMap=np.ones([shape[0], shape[1]], dtype = float)*1e6
RADeg, decDeg=wcs.pix2wcs(int(degreesMap.shape[1]/2), int(degreesMap.shape[0]/2))
degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs, RADeg, decDeg, 1.0)
beamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, beam, amplitude = None)
refBeamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, refBeam, amplitude = None)
matchedBeamMap=maps.convolveMapWithBeam(beamMap*attenuationFactor, wcs, convKernel, maxDistDegrees = 1.0)
# Find and apply radial fudge factor
yRow=np.where(refBeamMap == refBeamMap.max())[0][0]
rowValid=np.logical_and(degreesMap[yRow] < refBeam.rDeg.max(), matchedBeamMap[yRow] != 0)
ratio=refBeamMap[yRow][rowValid]/matchedBeamMap[yRow][rowValid]
zeroIndex=np.argmin(degreesMap[yRow][rowValid])
assert(degreesMap[yRow][rowValid][zeroIndex] == 0)
tck=interpolate.splrep(degreesMap[yRow][rowValid][zeroIndex:], ratio[zeroIndex:])
fudge=interpolate.splev(convKernel.rDeg, tck)
#fudge[fudge < 0.5]=1.0
#fudge[fudge > 1.5]=1.0
fudgeKernel=signals.BeamProfile(profile1d = convKernel.profile1d*fudge, rDeg = convKernel.rDeg)
## Check plot
#import pylab as plt
#plt.figure(figsize=(10,8))
#plt.plot(convKernel.rDeg, fudge, lw = 3, label = 'fudge')
#plt.plot(convKernel.rDeg, [1.0]*len(fudge), 'r-')
#plt.title("fudge")
##plt.ylim(0, 2)
#plt.legend()
#plt.show()
# 2nd fudge factor - match integrals of 2d kernels
fudgeMatchedBeamMap=maps.convolveMapWithBeam(beamMap*attenuationFactor, wcs, fudgeKernel, maxDistDegrees = 1.0)
attenuationFactor=refBeamMap.sum()/fudgeMatchedBeamMap.sum()
# Check at map pixelization that is actually used
#shape=(config.tileCoordsDict[tileName]['header']['NAXIS2'],
#config.tileCoordsDict[tileName]['header']['NAXIS1'])
#wcs=astWCS.WCS(config.tileCoordsDict[tileName]['header'], mode = 'pyfits').copy()
#degreesMap=np.ones([shape[0], shape[1]], dtype = float)*1e6
#RADeg, decDeg=wcs.pix2wcs(int(degreesMap.shape[1]/2), int(degreesMap.shape[0]/2))
#degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs, RADeg, decDeg, 1.0)
#beamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, beam, amplitude = None)
#refBeamMap=signals.makeBeamModelSignalMap(degreesMap, wcs, refBeam, amplitude = None)
#fudgeMatchedBeamMap=maps.convolveMapWithBeam(beamMap*attenuationFactor, wcs, fudgeKernel, maxDistDegrees = 1.0)
## Check plot
#import pylab as plt
#yRow=np.where(refBeamMap == refBeamMap.max())[0][0]
#rowValid=np.logical_and(degreesMap[yRow] < refBeam.rDeg.max(), fudgeMatchedBeamMap[yRow] != 0)
#plt.figure(figsize=(10,8))
#plt.plot(degreesMap[yRow][rowValid]*60, refBeamMap[yRow][rowValid], lw = 3, label = 'ref')
#plt.plot(degreesMap[yRow][rowValid]*60, fudgeMatchedBeamMap[yRow][rowValid], label = 'fudged')
#integralRatio=np.trapz(fudgeMatchedBeamMap[yRow][rowValid])/np.trapz(refBeamMap[yRow][rowValid])
#plt.title("native map res - %.3f" % (integralRatio))
#plt.semilogy()
#plt.ylim(1e-5)
#plt.legend()
#plt.show()
#from astLib import astImages
#astImages.saveFITS("ref.fits", refBeamMap, wcs)
#astImages.saveFITS("fudgematched.fits", fudgeMatchedBeamMap, wcs)
#astImages.saveFITS("diff.fits", refBeamMap-fudgeMatchedBeamMap, wcs)
#import IPython
#IPython.embed()
#sys.exit()
# NOTE: If we're NOT passing in 2d kernels, don't need to organise by tile
kernelDict[tileName][mapDict['obsFreqGHz']]={'smoothKernel': fudgeKernel,
'smoothAttenuationFactor': attenuationFactor}
if method == 'CAP':
        catalog=_extractSpecCAP(config, tab, kernelDict, diskRadiusArcmin = diskRadiusArcmin,
                                highPassFilter = highPassFilter, estimateErrors = estimateErrors)
elif method == 'matchedFilter':
catalog=_extractSpecMatchedFilter(config, tab, kernelDict, saveFilteredMaps = saveFilteredMaps)
else:
raise Exception("'method' should be 'CAP' or 'matchedFilter'")
return catalog
#------------------------------------------------------------------------------------------------------------
def _extractSpecMatchedFilter(config, tab, kernelDict, saveFilteredMaps = False, noiseMethod = 'dataMap'):
"""See extractSpec.
"""
cacheDir="nemoSpecCache"+os.path.sep+os.path.basename(config.rootOutDir)
os.makedirs(cacheDir, exist_ok = True)
# Build filter configs
allFilters={'class': 'ArnaudModelMatchedFilter',
'params': {'noiseParams': {'method': noiseMethod, 'noiseGridArcmin': 40.0},
'saveFilteredMaps': False,
'saveRMSMap': False,
'savePlots': False,
'saveDS9Regions': False,
'saveFilter': False,
'outputUnits': 'yc',
'edgeTrimArcmin': 0.0,
'GNFWParams': 'default'}}
filtersList=[]
templatesUsed=np.unique(tab['template']).tolist()
for t in templatesUsed:
newDict=copy.deepcopy(allFilters)
M500MSun=float(t.split("_M")[-1].split("_")[0])
z=float(t.split("_z")[-1].replace("p", "."))
newDict['params']['M500MSun']=M500MSun
newDict['params']['z']=z
newDict['label']=t
filtersList.append(newDict)
# Filter and extract
# NOTE: We assume index 0 of the unfiltered maps list is the reference for which the filter is made
catalogList=[]
for tileName in config.tileNames:
print("... rank %d: tileName = %s ..." % (config.rank, tileName))
diagnosticsDir=cacheDir+os.path.sep+tileName
os.makedirs(diagnosticsDir, exist_ok = True)
for f in filtersList:
tempTileTab=None # catalogs are organised by tile and template
filterObj=None
for mapDict in config.unfilteredMapsDictList:
if tempTileTab is None:
shape=(config.tileCoordsDict[tileName]['header']['NAXIS2'],
config.tileCoordsDict[tileName]['header']['NAXIS1'])
wcs=astWCS.WCS(config.tileCoordsDict[tileName]['header'], mode = 'pyfits')
tempTileTab=catalogs.getCatalogWithinImage(tab, shape, wcs)
tempTileTab=tempTileTab[tempTileTab['template'] == f['label']]
if tempTileTab is None or len(tempTileTab) == 0:
continue
if mapDict['obsFreqGHz'] == config.unfilteredMapsDictList[0]['obsFreqGHz']:
filteredMapDict, filterObj=filters.filterMaps([mapDict], f, tileName,
filteredMapsDir = cacheDir,
diagnosticsDir = diagnosticsDir,
selFnDir = cacheDir,
verbose = True,
undoPixelWindow = True,
returnFilter = True)
else:
mapDict['smoothKernel']=kernelDict[tileName][mapDict['obsFreqGHz']]['smoothKernel']
mapDict['smoothAttenuationFactor']=kernelDict[tileName][mapDict['obsFreqGHz']]['smoothAttenuationFactor']
mapDictToFilter=maps.preprocessMapDict(mapDict.copy(), tileName = tileName)
filteredMapDict['data']=filterObj.applyFilter(mapDictToFilter['data'])
RMSMap=filterObj.makeNoiseMap(filteredMapDict['data'])
filteredMapDict['SNMap']=np.zeros(filterObj.shape)
mask=np.greater(filteredMapDict['surveyMask'], 0)
filteredMapDict['SNMap'][mask]=filteredMapDict['data'][mask]/RMSMap[mask]
filteredMapDict['data']=enmap.apply_window(filteredMapDict['data'], pow=-1.0)
if saveFilteredMaps == True:
outFileName=cacheDir+os.path.sep+'%d_' % (mapDict['obsFreqGHz'])+f['label']+'#'+tileName+'.fits'
# Add conversion to delta T in here?
maps.saveFITS(outFileName, filteredMapDict['data'], filteredMapDict['wcs'])
freqTileTab=photometry.makeForcedPhotometryCatalog(filteredMapDict,
tempTileTab,
useInterpolator = config.parDict['useInterpolator'])
photometry.measureFluxes(freqTileTab, filteredMapDict, cacheDir,
useInterpolator = config.parDict['useInterpolator'],
ycObsFreqGHz = mapDict['obsFreqGHz'])
# We don't take tileName from the catalog, some objects in overlap areas may only get cut here
if len(freqTileTab) == 0:
tempTileTab=None
continue
tempTileTab, freqTileTab, rDeg=catalogs.crossMatch(tempTileTab, freqTileTab, radiusArcmin = 2.5)
colNames=['deltaT_c', 'y_c', 'SNR']
suff='_%d' % (mapDict['obsFreqGHz'])
for colName in colNames:
tempTileTab[colName+suff]=freqTileTab[colName]
if 'err_'+colName in freqTileTab.keys():
tempTileTab['err_'+colName+suff]=freqTileTab['err_'+colName]
if tempTileTab is not None and len(tempTileTab) > 0:
catalogList.append(tempTileTab)
if len(catalogList) > 0:
catalog=atpy.vstack(catalogList)
else:
catalog=[]
return catalog
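# Editor's note (sketch, not part of the original module): the catalog returned
# above carries one set of measurement columns per map frequency, built as
# colName + '_%d' % obsFreqGHz. A hypothetical read-out, assuming a 150 GHz map
# was among config.unfilteredMapsDictList:
#
#   specTab = _extractSpecMatchedFilter(config, tab, kernelDict)
#   if len(specTab) > 0:
#       y150, snr150 = specTab['y_c_150'], specTab['SNR_150']
#       err150 = specTab['err_y_c_150']   # present when the photometry returns errors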
#------------------------------------------------------------------------------------------------------------
def _extractSpecCAP(config, tab, kernelDict, method = 'CAP', diskRadiusArcmin = 4.0, highPassFilter = False,
estimateErrors = True):
"""See extractSpec.
"""
# Define apertures like Schaan et al. style compensated aperture photometry filter
innerRadiusArcmin=diskRadiusArcmin
outerRadiusArcmin=diskRadiusArcmin*np.sqrt(2)
catalogList=[]
for tileName in config.tileNames:
# This loads the maps, applies any masks, and smooths to approx. same scale
mapDictList=[]
freqLabels=[]
for mapDict in config.unfilteredMapsDictList:
mapDict=maps.preprocessMapDict(mapDict.copy(), tileName = tileName)
if highPassFilter == True:
mapDict['data']=maps.subtractBackground(mapDict['data'], mapDict['wcs'],
smoothScaleDeg = (2*outerRadiusArcmin)/60)
freqLabels.append(int(round(mapDict['obsFreqGHz'])))
mapDictList.append(mapDict)
wcs=mapDict['wcs']
shape=mapDict['data'].shape
# Extract spectra
pixAreaMap=maps.getPixelAreaArcmin2Map(shape, wcs)
maxSizeDeg=(outerRadiusArcmin*1.2)/60
tileTab=catalogs.getCatalogWithinImage(tab, shape, wcs)
for label in freqLabels:
tileTab['diskT_uKArcmin2_%s' % (label)]=np.zeros(len(tileTab))
tileTab['err_diskT_uKArcmin2_%s' % (label)]=np.zeros(len(tileTab))
tileTab['diskSNR_%s' % (label)]=np.zeros(len(tileTab))
for row in tileTab:
degreesMap=np.ones(shape, dtype = float)*1e6 # NOTE: never move this
degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs,
row['RADeg'], row['decDeg'],
maxSizeDeg)
innerMask=degreesMap < innerRadiusArcmin/60
outerMask=np.logical_and(degreesMap >= innerRadiusArcmin/60, degreesMap < outerRadiusArcmin/60)
for mapDict, label in zip(mapDictList, freqLabels):
d=mapDict['data']
diskFlux=(d[innerMask]*pixAreaMap[innerMask]).sum()-(d[outerMask]*pixAreaMap[outerMask]).sum()
row['diskT_uKArcmin2_%s' % (label)]=diskFlux
        # Estimate the noise in each measurement (on average) by splatting down on random positions
# This will break if noise is inhomogeneous though. But at least it's done separately for each tile.
# We can later add something that scales / fits using the weight map?
if estimateErrors == True:
randTab=catalogs.generateRandomSourcesCatalog(mapDict['surveyMask'], wcs, 1000)
for label in freqLabels:
randTab['diskT_uKArcmin2_%s' % (label)]=np.zeros(len(randTab))
for row in randTab:
degreesMap=np.ones(shape, dtype = float)*1e6 # NOTE: never move this
degreesMap, xBounds, yBounds=nemoCython.makeDegreesDistanceMap(degreesMap, wcs,
row['RADeg'], row['decDeg'],
maxSizeDeg)
innerMask=degreesMap < innerRadiusArcmin/60
outerMask=np.logical_and(degreesMap >= innerRadiusArcmin/60, degreesMap < outerRadiusArcmin/60)
for mapDict, label in zip(mapDictList, freqLabels):
d=mapDict['data']
diskFlux=(d[innerMask]*pixAreaMap[innerMask]).sum()-(d[outerMask]*pixAreaMap[outerMask]).sum()
row['diskT_uKArcmin2_%s' % (label)]=diskFlux
noiseLevels={}
for label in freqLabels:
if signals.fSZ(float(label)) < 0:
SNRSign=-1
else:
SNRSign=1
noiseLevels[label]=np.percentile(abs(randTab['diskT_uKArcmin2_%s' % (label)]), 68.3)
tileTab['err_diskT_uKArcmin2_%s' % (label)]=noiseLevels[label]
tileTab['diskSNR_%s' % (label)]=SNRSign*(tileTab['diskT_uKArcmin2_%s' % (label)]/noiseLevels[label])
catalogList.append(tileTab)
catalog=atpy.vstack(catalogList)
return catalog
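# Editor's note (not part of the original module): the sqrt(2) factor above is
# the usual compensated-aperture choice - it makes the outer annulus cover the
# same area as the inner disk, so a uniform background cancels exactly:
#
#   pi*r_out**2 - pi*r_in**2 == pi*r_in**2   =>   r_out == sqrt(2)*r_in
#
# e.g. diskRadiusArcmin = 4.0 gives innerRadiusArcmin = 4.0 and
# outerRadiusArcmin = 4.0*np.sqrt(2) ~ 5.66 arcmin.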
| 54.941788 | 220 | 0.592235 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,550 | 0.388807 |
828f0e49b2ff5b08550d07840cd6144c5f6a6f99 | 5,026 | py | Python | pypkg-gen.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
]
| 1 | 2020-04-04T10:25:42.000Z | 2020-04-04T10:25:42.000Z | pypkg-gen.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
]
| null | null | null | pypkg-gen.py | GameMaker2k/Neo-Hockey-Test | 5737bfedf0d83f69964e85ac1dbf7e6a93c13f44 | [
"BSD-3-Clause"
]
| 3 | 2021-09-07T08:44:33.000Z | 2021-12-07T23:49:39.000Z | #!/usr/bin/env python
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the Revised BSD License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Revised BSD License for more details.
Copyright 2011-2016 Game Maker 2k - https://github.com/GameMaker2k
Copyright 2011-2016 Kazuki Przyborowski - https://github.com/KazukiPrzyborowski
$FileInfo: pypkg-gen.py - Last Update: 6/1/2016 Ver. 0.2.0 RC 1 - Author: cooldude2k $
'''
from __future__ import absolute_import, division, print_function, unicode_literals;
import re, os, sys, time, platform, datetime, argparse, subprocess;
__version_info__ = (0, 2, 0, "rc1");
if(__version_info__[3]!=None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2])+"+"+str(__version_info__[3]);
if(__version_info__[3]==None):
__version__ = str(__version_info__[0])+"."+str(__version_info__[1])+"."+str(__version_info__[2]);
proname = "pypkg-gen";
prover = __version__;
profullname = proname+" "+prover;
def which_exec(execfile):
for path in os.environ["PATH"].split(":"):
if os.path.exists(path + "/" + execfile):
return path + "/" + execfile;
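# Editor's note (sketch, not part of the original script): on Python 3.3+ the
# standard library offers the same executable lookup; a minimal alternative
# would be:
#
#   import shutil
#   bashlocatout = shutil.which("bash")   # returns None when not found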
linuxdist = [None];
try:
linuxdist = platform.linux_distribution();
except AttributeError:
linuxdist = [None];
getlinuxdist = linuxdist;
setdistroname = "debian";
setdistrocname = "jessie";
if(getlinuxdist[0] is not None and (getlinuxdist[0].lower()=="debian" or getlinuxdist[0].lower()=="ubuntu" or getlinuxdist[0].lower()=="linuxmint")):
setdistroname = getlinuxdist[0].lower();
setdistrocname = getlinuxdist[2].lower();
if(setdistrocname==""):
lsblocatout = which_exec("lsb_release");
pylsblistp = subprocess.Popen([lsblocatout, "-c"], stdout=subprocess.PIPE, stderr=subprocess.PIPE);
pylsbout, pylsberr = pylsblistp.communicate();
if(sys.version[0]=="3"):
pylsbout = pylsbout.decode("utf-8");
pylsb_esc = re.escape("Codename:")+'([a-zA-Z\t+\s+]+)';
pylsbname = re.findall(pylsb_esc, pylsbout)[0].lower();
setdistrocname = pylsbname.strip();
if(getlinuxdist[0] is not None and getlinuxdist[0].lower()=="archlinux"):
setdistroname = getlinuxdist[0].lower();
setdistrocname = None;
parser = argparse.ArgumentParser(conflict_handler = "resolve", add_help = True);
parser.add_argument("-v", "--version", action = "version", version = profullname);
parser.add_argument("-s", "--source", default = os.path.realpath(os.getcwd()), help = "source dir");
parser.add_argument("-d", "--distro", default = setdistroname, help = "enter linux distribution name");
parser.add_argument("-c", "--codename", default = setdistrocname, help = "enter release code name");
parser.add_argument("-p", "--pyver", default = sys.version[0], help = "enter version of python to use");
getargs = parser.parse_args();
bashlocatout = which_exec("bash");
getargs.source = os.path.realpath(getargs.source);
getargs.codename = getargs.codename.lower();
getargs.distro = getargs.distro.lower();
if(getargs.pyver=="2"):
getpyver = "python2";
if(getargs.pyver=="3"):
getpyver = "python3";
if(getargs.pyver!="2" and getargs.pyver!="3"):
if(sys.version[0]=="2"):
getpyver = "python2";
if(sys.version[0]=="3"):
getpyver = "python3";
get_pkgbuild_dir = os.path.realpath(getargs.source+os.path.sep+"pkgbuild");
get_pkgbuild_dist_pre_list = [d for d in os.listdir(get_pkgbuild_dir) if os.path.isdir(os.path.join(get_pkgbuild_dir, d))];
get_pkgbuild_dist_list = [];
for dists in get_pkgbuild_dist_pre_list:
tmp_pkgbuild_python = os.path.realpath(get_pkgbuild_dir+os.path.sep+dists+os.path.sep+getpyver);
if(os.path.exists(tmp_pkgbuild_python) and os.path.isdir(tmp_pkgbuild_python)):
get_pkgbuild_dist_list.append(dists);
if(not getargs.distro in get_pkgbuild_dist_list):
print("Could not build for "+getargs.distro+" distro.");
sys.exit();
if(getargs.distro=="debian" or getargs.distro=="ubuntu" or getargs.distro=="linuxmint"):
pypkgpath = os.path.realpath(getargs.source+os.path.sep+"pkgbuild"+os.path.sep+getargs.distro+os.path.sep+getpyver+os.path.sep+"pydeb-gen.sh");
pypkgenlistp = subprocess.Popen([bashlocatout, pypkgpath, getargs.source, getargs.codename], stdout=subprocess.PIPE, stderr=subprocess.PIPE);
pypkgenout, pypkgenerr = pypkgenlistp.communicate();
if(sys.version[0]=="3"):
pypkgenout = pypkgenout.decode("utf-8");
print(pypkgenout);
pypkgenlistp.wait();
if(getargs.distro=="archlinux"):
pypkgpath = os.path.realpath(getargs.source+os.path.sep+"pkgbuild"+os.path.sep+getargs.distro+os.path.sep+getpyver+os.path.sep+"pypac-gen.sh");
pypkgenlistp = subprocess.Popen([bashlocatout, pypkgpath, getargs.source, getargs.codename], stdout=subprocess.PIPE, stderr=subprocess.PIPE);
pypkgenout, pypkgenerr = pypkgenlistp.communicate();
if(sys.version[0]=="3"):
pypkgenout = pypkgenout.decode("utf-8");
print(pypkgenout);
pypkgenlistp.wait();
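# Editor's note (hypothetical invocation, values are placeholders): the script
# is driven by the argparse flags defined above, e.g.:
#
#   python pypkg-gen.py --source . --distro debian --codename bookworm --pyver 3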
| 45.279279 | 149 | 0.729208 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,204 | 0.239554 |
829007d1ff44f42bdcbdcc5f79b823572db44839 | 194 | py | Python | 10/testtime.py | M0nica/python-foundations | fe5065d3af71511bdd0fcf437d1d9f15f9faf1ee | [
"MIT"
]
| null | null | null | 10/testtime.py | M0nica/python-foundations | fe5065d3af71511bdd0fcf437d1d9f15f9faf1ee | [
"MIT"
]
| null | null | null | 10/testtime.py | M0nica/python-foundations | fe5065d3af71511bdd0fcf437d1d9f15f9faf1ee | [
"MIT"
]
| null | null | null | import time
print (time.strftime("%B %e, %Y"))
# Guides:
# how to format a date:
# http://strftime.net/
# how to use time:
# http://www.cyberciti.biz/faq/howto-get-current-date-time-in-python/
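# Example output (editor's note, platform-dependent: %e is supported by
# GNU/BSD strftime but not on Windows), e.g.:
#   January  5, 2024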
| 19.4 | 69 | 0.680412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 151 | 0.778351 |
8292fb356f36b5d5f890f807991392f40a46cdec | 514 | py | Python | 2020/02/Teil 2 - V01.py | HeWeMel/adventofcode | 90acb10f03f21ef388673bbcf132d04972175970 | [
"MIT"
]
| 1 | 2020-12-12T19:34:59.000Z | 2020-12-12T19:34:59.000Z | 2020/02/Teil 2 - V01.py | HeWeMel/adventofcode | 90acb10f03f21ef388673bbcf132d04972175970 | [
"MIT"
]
| null | null | null | 2020/02/Teil 2 - V01.py | HeWeMel/adventofcode | 90acb10f03f21ef388673bbcf132d04972175970 | [
"MIT"
]
| null | null | null | import re
with open('input.txt', 'r') as f:
pw_ok=0
for line in f:
(rule,s,space_and_pw) = line.partition(':')
(lowhigh,s,c) = rule.partition(' ')
(low,s,high) = lowhigh.partition('-')
pw=space_and_pw[1:-1]
c1=pw[int(low)-1]
c2=pw[int(high)-1]
if (c1==c and c2!=c) or (c1!=c and c2==c):
print(low, high, c, pw, c1, c2, 'ok')
pw_ok+=1
else:
print(low, high, c, pw, c1, c2, 'falsch')
print (pw_ok)
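# Editor's note (worked example, input line is hypothetical): for "1-3 a: abcde"
# the parse gives low=1, high=3, c='a', pw='abcde', so c1=pw[0]='a', c2=pw[2]='c';
# exactly one of the two positions holds 'a', so the password counts as ok.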
#737 | 27.052632 | 53 | 0.486381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.075875 |
82959d9cf1c7742a4ce7e67d8116a609f7ef7317 | 8,399 | py | Python | slides_manager/openslide_engine.py | crs4/ome_seadragon | e2a7a2178c4abdff1b0a98bc194c672b2476e9a2 | [
"MIT"
]
| 31 | 2016-02-16T15:11:25.000Z | 2021-06-21T15:58:58.000Z | slides_manager/openslide_engine.py | crs4/ome_seadragon | e2a7a2178c4abdff1b0a98bc194c672b2476e9a2 | [
"MIT"
]
| 11 | 2017-06-23T17:23:47.000Z | 2022-03-31T14:19:27.000Z | slides_manager/openslide_engine.py | crs4/ome_seadragon | e2a7a2178c4abdff1b0a98bc194c672b2476e9a2 | [
"MIT"
]
| 4 | 2016-12-15T22:08:04.000Z | 2019-10-24T23:12:53.000Z | # Copyright (c) 2019, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import openslide
from openslide import OpenSlide
from openslide.deepzoom import DeepZoomGenerator
from io import BytesIO
from PIL import Image
from .rendering_engine_interface import RenderingEngineInterface
from .. import settings
from ome_seadragon_cache import CacheDriverFactory
class OpenSlideEngine(RenderingEngineInterface):
def __init__(self, image_id, connection):
super(OpenSlideEngine, self).__init__(image_id, connection)
def _get_openslide_wrapper(self, original_file_source, file_mimetype):
img_path = self._get_image_path(original_file_source, file_mimetype)
if img_path:
return OpenSlide(img_path)
else:
return None
def _get_deepzoom_config(self, tile_size=None, limit_bounds=None):
cfg = {
'tile_size': tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE,
'overlap': settings.DEEPZOOM_OVERLAP,
'limit_bounds': limit_bounds if limit_bounds is not None else settings.DEEPZOOM_LIMIT_BOUNDS
}
self.logger.debug(settings.DEEPZOOM_LIMIT_BOUNDS)
self.logger.debug(cfg)
return cfg
def _get_deepzoom_wrapper(self, original_file_source, file_mimetype, tile_size=None, limit_bounds=None):
os_wrapper = self._get_openslide_wrapper(original_file_source, file_mimetype)
if os_wrapper:
return DeepZoomGenerator(os_wrapper, **self._get_deepzoom_config(tile_size, limit_bounds))
else:
return None
def _get_image_mpp(self, original_file_source=False, file_mimetype=None):
slide = self._get_openslide_wrapper(original_file_source, file_mimetype)
if slide:
try:
mpp_x = slide.properties[openslide.PROPERTY_NAME_MPP_X]
mpp_y = slide.properties[openslide.PROPERTY_NAME_MPP_Y]
return (float(mpp_x) + float(mpp_y)) / 2
except (KeyError, ValueError):
return 0
else:
return 0
def get_openseadragon_config(self, original_file_source=False, file_mimetype=None):
return {
'mpp': self._get_image_mpp(original_file_source, file_mimetype)
}
def _get_slide_bounds(self, original_file_source=False, file_mimetype=None):
slide = self._get_openslide_wrapper(original_file_source, file_mimetype)
if slide:
return (
int(slide.properties.get('openslide.bounds-x', 0)),
int(slide.properties.get('openslide.bounds-y', 0)),
int(slide.properties.get('openslide.bounds-height', 0)),
int(slide.properties.get('openslide.bounds-width', 0))
)
else:
return None
def get_slide_bounds(self, original_file_source=False, file_mimetype=None):
bounds = self._get_slide_bounds(original_file_source, file_mimetype)
if bounds:
return {
'bounds_x': bounds[0],
'bounds_y': bounds[1],
'bounds_height': bounds[2],
'bounds_width': bounds[3]
}
else:
return bounds
def _get_original_file_json_description(self, resource_path, file_mimetype=None, tile_size=None, limit_bounds=True):
slide = self._get_openslide_wrapper(original_file_source=True,
file_mimetype=file_mimetype)
if slide:
if limit_bounds:
_, _, height, width = self._get_slide_bounds(True, file_mimetype)
return self._get_json_description(resource_path, height, width, tile_size)
return self._get_json_description(resource_path, slide.dimensions[1], slide.dimensions[0], tile_size)
return None
def get_dzi_description(self, original_file_source=False, file_mimetype=None, tile_size=None, limit_bounds=None):
dzi_slide = self._get_deepzoom_wrapper(original_file_source, file_mimetype, tile_size, limit_bounds)
if dzi_slide:
return dzi_slide.get_dzi(settings.DEEPZOOM_FORMAT)
else:
return None
    def get_thumbnail(self, size, original_file_source=False, file_mimetype=None):
if settings.IMAGES_CACHE_ENABLED:
cache = CacheDriverFactory(settings.IMAGES_CACHE_DRIVER).\
get_cache(settings.CACHE_HOST, settings.CACHE_PORT, settings.CACHE_DB, settings.CACHE_EXPIRE_TIME)
# get thumbnail from cache
thumb = cache.thumbnail_from_cache(self.image_id, size, settings.DEEPZOOM_FORMAT, 'openslide')
else:
thumb = None
# if thumbnail is not in cache build it ....
if thumb is None:
self.logger.debug('No thumbnail loaded from cache, building it')
            slide = self._get_openslide_wrapper(original_file_source, file_mimetype)
if slide:
thumb = slide.get_thumbnail((size, size))
# ... and store it into the cache
if settings.IMAGES_CACHE_ENABLED:
cache.thumbnail_to_cache(self.image_id, thumb, size, settings.DEEPZOOM_FORMAT, 'openslide')
else:
self.logger.debug('Thumbnail loaded from cache')
return thumb, settings.DEEPZOOM_FORMAT
def get_tile(self, level, column, row, original_file_source=False, file_mimetype=None,
tile_size=None, limit_bounds=None):
if settings.IMAGES_CACHE_ENABLED:
cache = CacheDriverFactory(settings.IMAGES_CACHE_DRIVER).\
get_cache(settings.CACHE_HOST, settings.CACHE_PORT, settings.CACHE_DB, settings.CACHE_EXPIRE_TIME)
tile_size = tile_size if tile_size is not None else settings.DEEPZOOM_TILE_SIZE
self.logger.debug('TILE SIZE IS: %s', tile_size)
cache_params = {
'image_id': self.image_id,
'level': level,
'column': column,
'row': row,
'tile_size': tile_size,
'image_format': settings.DEEPZOOM_FORMAT,
'rendering_engine': 'openslide'
}
if cache_params['image_format'].lower() == 'jpeg':
cache_params['image_quality'] = settings.DEEPZOOM_JPEG_QUALITY
# get tile from cache
tile = cache.tile_from_cache(**cache_params)
else:
tile = None
# if tile is not in cache build it ...
if tile is None:
slide = self._get_deepzoom_wrapper(original_file_source, file_mimetype, tile_size, limit_bounds)
if slide:
dzi_tile = slide.get_tile(level, (column, row))
tile_buffer = BytesIO()
tile_conf = {
'format': settings.DEEPZOOM_FORMAT
}
if tile_conf['format'].lower() == 'jpeg':
tile_conf['quality'] = settings.DEEPZOOM_JPEG_QUALITY
dzi_tile.save(tile_buffer, **tile_conf)
tile = Image.open(tile_buffer)
# ... and store it into the cache
if settings.IMAGES_CACHE_ENABLED:
cache_params['image_obj'] = tile
cache.tile_to_cache(**cache_params)
return tile, settings.DEEPZOOM_FORMAT
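# Editor's note (usage sketch, not part of the original module): image_id, the
# OMERO connection object and the mimetype below are placeholders that depend
# on the surrounding ome_seadragon deployment:
#
#   engine = OpenSlideEngine(image_id, connection)
#   dzi_xml = engine.get_dzi_description(original_file_source=True,
#                                        file_mimetype='image/mirax')
#   tile, fmt = engine.get_tile(level=10, column=0, row=0,
#                               original_file_source=True,
#                               file_mimetype='image/mirax')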
| 46.921788 | 120 | 0.654245 | 7,014 | 0.835099 | 0 | 0 | 0 | 0 | 0 | 0 | 1,726 | 0.205501 |
8296d1c9102045de4d1df9fbc075b8f844636279 | 4,772 | py | Python | pyppy/config.py | maehster/pyppy | 10aadd7ace210cb32c51cdd64060a3337d89324b | [
"MIT"
]
| 5 | 2021-01-25T09:52:09.000Z | 2022-01-29T14:35:41.000Z | pyppy/config.py | maehster/pyppy | 10aadd7ace210cb32c51cdd64060a3337d89324b | [
"MIT"
]
| 7 | 2021-01-23T10:49:01.000Z | 2021-01-30T08:17:38.000Z | pyppy/config.py | maehster/pyppy | 10aadd7ace210cb32c51cdd64060a3337d89324b | [
"MIT"
]
| 1 | 2021-05-25T05:42:10.000Z | 2021-05-25T05:42:10.000Z | """Global config management
This module provides functions for initializing, accessing and destroying
a global config object. You can initialize a global config from any object.
However, in the context of pyppy, only the instance attributes of the
object are used and work with the decorators ``fill_args`` and ``condition``.
But you can use any object you like. The config management methods are
just a convenience reference to the original object.
Initialization
--------------
In this example, we initialize a global config from a ``Namespace`` returned
by a custom ``ArgumentParser``. For demonstration purposes, the parser
will not parse args from the command line but from a list::
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--message")
# parse_args returns an argparse.Namespace
args = parser.parse_args(["--message", "hello!"])
To initialize a global config object, import the function ``initialize_config``
and pass the args variable::
from pyppy.config import initialize_config
initialize_config(args)
You can also create an empty global config (which just holds a reference
to an empty ``SimpleNamespace``) and change it afterwards by
accessing the global config object (see the Access section below)::
    from pyppy.config import initialize_config
    initialize_config()
Access
------
Now that you have initialized the global config, you can use it
throughout your code::
from pyppy.config import config
print(config().message)
# "hello!"
Note
----
The original object that you used to initialize the global config
is returned any time you call ``config()``, so you can do everything
with the object that you could also do before.
Modification
------------
The global config object can be modified at runtime, e.g. to pass
information between objects in your code. We know that the term 'config'
is not ideal for such use cases and we're working on functionality that
handles them in a better way. Here's an example of config
modification::
config().message = "bye!"
print(config().message)
Reset
-----
There can be only one global config object, so once a config has been
initialized you cannot initialize another one; if you try to, a
``ConfigAlreadyInitializedException`` is raised. In the rare case that you
need a fresh global config, explicitly destroy the current one and
initialize a new one::
from pyppy.config import destroy_config
destroy_config()
initialize_config(args2)
"""
from types import SimpleNamespace
from pyppy.exc import ConfigAlreadyInitializedException
_CONFIG = "pyppy-config"
def initialize_config(obj: object = SimpleNamespace()) -> None:
"""
Initialize a global config with the specified object or
    with an empty ``SimpleNamespace`` if no object is given.
Parameters
----------
obj : object
        Object to initialize the global config with. Whenever
        you call ``pyppy.config.config()`` you will get a
        reference to this object.
Returns
-------
None
Examples
--------
>>> destroy_config()
>>> c = SimpleNamespace()
>>> c.option = "say_hello"
>>> initialize_config(c)
>>> config().option
'say_hello'
>>> destroy_config()
"""
if hasattr(config, _CONFIG):
raise ConfigAlreadyInitializedException(
(
"Config has already been initialized. "
"If you want to initialize a new config call "
f"{destroy_config.__name__}()."
)
)
config(obj)
def config(_obj: object = None) -> object:
"""
Accesses a previously initialized global config.
Returns
-------
object:
The object that was used to initialize the global
config.
Examples
--------
>>> destroy_config()
>>> c = SimpleNamespace()
>>> c.option = "say_hello"
>>> initialize_config(c)
>>> config().option
'say_hello'
>>> destroy_config()
"""
if not hasattr(config, _CONFIG) and _obj:
setattr(config, _CONFIG, _obj)
if not hasattr(config, _CONFIG):
raise Exception("Please initialize config first!")
return getattr(config, _CONFIG)
def destroy_config() -> None:
"""
Deletes the global reference to the object that the config
was initialized with.
Examples
--------
>>> destroy_config()
>>> c = SimpleNamespace()
>>> c.option = "say_hello"
>>> initialize_config(c)
>>> config().option
'say_hello'
>>> destroy_config()
>>> config().option
Traceback (most recent call last):
...
Exception: Please initialize config first!
"""
if hasattr(config, _CONFIG):
delattr(config, _CONFIG)
| 28.404762 | 79 | 0.676027 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,073 | 0.853521 |
82974de24f0cc3cfa731bcef6d90cc11159650a2 | 878 | py | Python | LeetCode/3_sum.py | milkrong/Basic-Python-DS-Algs | e3accd22d8cf25546f33883aac634a9bfe108b34 | [
"MIT"
]
| null | null | null | LeetCode/3_sum.py | milkrong/Basic-Python-DS-Algs | e3accd22d8cf25546f33883aac634a9bfe108b34 | [
"MIT"
]
| null | null | null | LeetCode/3_sum.py | milkrong/Basic-Python-DS-Algs | e3accd22d8cf25546f33883aac634a9bfe108b34 | [
"MIT"
]
| null | null | null | def three_sum(nums):
"""
Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0?
Find all unique triplets in the array which gives the sum of zero.
:param nums: list[int]
:return: list[list[int]]
"""
if len(nums) < 3:
return []
nums.sort()
res = []
for i in range(len(nums) - 2):
if i > 0 and nums[i - 1] == nums[i]: continue
l, r = i + 1, len(nums) - 1
while l < r:
s = nums[i] + nums[l] + nums[r]
if s == 0:
res.append([nums[i], nums[l], nums[r]])
                l += 1
r -= 1
while l < r and nums[l] == nums[l - 1]: l += 1
while l < r and nums[r] == nums[r + 1]: r -= 1
elif s < 0:
l += 1
else:
r -= 1
return res | 30.275862 | 98 | 0.425968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.269932 |
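# Editor's note (illustrative check, not part of the original solution):
#
#   three_sum([-1, 0, 1, 2, -1, -4])
#   # -> [[-1, -1, 2], [-1, 0, 1]]  (order follows the sorted input)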