max_stars_repo_path (string, length 3-269) | max_stars_repo_name (string, length 4-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
---|---|---|---|---|---|---|
plugins/WikiFileTarget.py | seanth/nicecast-trackupdate | 2 | 12793251 | # Copyright (c) 2020 <NAME> <www.sean-graham.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from Target import Target
import os
import sys
import configparser
import logging
import time
class WikiFileTarget(Target):
pluginName = "Wiki File Writer"
enableArchive = True
episodeNumber = "XX"
showArtist = ""
filePath = ""
archiveURL = ""
wikiFile = None
def __init__(self, config, episode, episodeDate):
logger = logging.getLogger("wiki updater")
self.episodeNumber = episode
if(episodeDate):
self.episodeDate = episodeDate
logger.debug(f"overriding date with {self.episodeDate}")
# read config entries
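# Illustrative config (key names taken from the reads below; the values are
# placeholders, not from the original project):
#   [ListCommon]
#   filePath = ~/Shows/playlists
#   archiveURL = https://example.com/archives/
#   showArtist = Example Artist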
try:
self.filePath = config.get('ListCommon', 'filePath')
self.archiveURL = config.get('ListCommon', 'archiveURL')
self.showArtist = config.get('ListCommon', 'showArtist')
except configparser.NoSectionError:
logger.error("ListCommon: No [ListCommon] section in config")
return
except configparser.NoOptionError:
logger.error("ListCommon: Missing values in config")
return
# if I gave a shit about non-unix platforms I might
# try to use the proper path sep here. exercise left
# for the reader.
if not self.filePath.endswith("/"):
self.filePath += "/"
self.filePath = os.path.expanduser(self.filePath)
fileDate = '{dt:%Y}{dt:%m}{dt:%d}'.format(dt=self.episodeDate)
self.archiveURL = f"{self.archiveURL}{fileDate}.mp3"
headerText = ""
headerText += "\n\n=== "
if(self.archiveURL != ""):
headerText += "[" + self.archiveURL + " "
headerText += "Show #" + self.episodeNumber + " - "
headerText += self.getLongDate()
if(self.archiveURL != ""):
headerText += "]"
headerText += " ===\n"
headerText += "{| border=1 cellspacing=0 cellpadding=5\n"
headerText += "|'''Song'''\n"
headerText += "|'''Artist'''\n"
headerText += "|'''Album'''\n"
self.wikiFile = open(self.filePath + fileDate + "-wiki.txt", 'w+')
self.logToFile(self.wikiFile, headerText)
return
def logTrack(self, track, startTime):
if( track.ignore is not True ):
trackText = f"|-\n|{track.title}\n|{track.artist}\n|{track.album}\n"
self.logToFile(self.wikiFile, trackText)
return
def close(self):
print("Closing Wiki File...")
self.logToFile(self.wikiFile, "|}" )
self.wikiFile.close()
return
| 2.1875 | 2 |
main.py | thedoctor095/TrustPilotReviews | 0 | 12793252 | <reponame>thedoctor095/TrustPilotReviews
import time
import requests
from bs4 import BeautifulSoup
query = str(input('Please enter the website for which you wish to know TrustPilot reviews: '))
tp_address = 'https://www.trustpilot.com/review/'
tp_query = tp_address + query.lower()
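# e.g. the query "google.com" yields https://www.trustpilot.com/review/google.com (illustrative)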
response = requests.get(tp_query)
soup = BeautifulSoup(response.content,
'html.parser')
# Function that checks whether the query returns a valid (non-404) page on TrustPilot
def queryValidator():
webpage_status = soup.find('div',
class_='errors_error404__tUqzU')
if webpage_status is not None:
print('Please input a full website domain. (eg. www.google.com or google.com)')
else:
tpSearch()
# Scraping function that prints the review information
def tpSearch():
website_rating = soup.find('p',
class_='typography_typography__QgicV typography_bodysmall__irytL typography_color-gray-7__9Ut3K typography_weight-regular__TWEnf typography_fontstyle-normal__kHyN3')
website_rating = website_rating.text
time.sleep(1)
website_name = soup.find('span',
class_='typography_typography__QgicV typography_h1__Xmcta typography_weight-heavy__E1LTj typography_fontstyle-normal__kHyN3 styles_displayName__GElWn')
website_name = website_name.text
time.sleep(1)
rating_overall_review = soup.find('span',
class_='typography_typography__QgicV typography_bodysmall__irytL typography_color-gray-7__9Ut3K typography_weight-regular__TWEnf typography_fontstyle-normal__kHyN3 styles_text__W4hWi')
overall_review = []
for item in rating_overall_review:
overall_review.append(item)
time.sleep(1)
review_type = soup.findAll('p',
class_='typography_typography__QgicV typography_bodysmall__irytL typography_color-gray-7__9Ut3K typography_weight-regular__TWEnf typography_fontstyle-normal__kHyN3 styles_cell__qnPHy styles_labelCell__vLP9S')
time.sleep(1)
review_percent = soup.findAll('p',
class_='typography_typography__QgicV typography_bodysmall__irytL typography_color-gray-7__9Ut3K typography_weight-regular__TWEnf typography_fontstyle-normal__kHyN3 styles_cell__qnPHy styles_percentageCell__cHAnb')
time.sleep(1)
for review, percent in zip(review_type, review_percent):
print(percent.text,'of people reviewed this site as',review.text,'.')
print('The overall reviews for',website_name,'is',overall_review[-1],
'({}/5) with a total number of'.format(website_rating), overall_review[0], 'reviews.')
if __name__=='__main__':
queryValidator()
| 3.203125 | 3 |
formulaic/materializers/arrow.py | CamDavidsonPilon/formulaic | 0 | 12793253 | from interface_meta import override
from .pandas import PandasMaterializer
class ArrowMaterializer(PandasMaterializer):
REGISTRY_NAME = 'arrow'
DEFAULT_FOR = ['pyarrow.lib.Table']
@override
def _init(self, sparse=False):
super()._init(sparse=sparse)
self.__data_context = LazyArrowTableProxy(self.data)
@override
@property
def data_context(self):
return self.__data_context
class LazyArrowTableProxy:
def __init__(self, table):
self.table = table
self.column_names = set(self.table.column_names)
self._cache = {}
def __contains__(self, value):
return value in self.column_names
def __getitem__(self, key):
if key not in self.column_names:
raise KeyError(key)
if key not in self._cache:
self._cache[key] = self.table.column(key).to_pandas()
return self._cache[key]
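# Usage sketch (illustrative, not part of the original module):
#   import pyarrow as pa
#   proxy = LazyArrowTableProxy(pa.table({"x": [1, 2, 3]}))
#   "x" in proxy  # True
#   proxy["x"]    # column converted to a pandas Series once, then served from the cache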
| 2.125 | 2 |
my_tsp/evaluation_metrics/loss.py | vmeta42/metaai | 0 | 12793254 | <filename>my_tsp/evaluation_metrics/loss.py
import numpy as np
import torch
from torch import nn
from ..utils.util import cal_cls_eva_thre
DIV_CONSTANT = 1e-5
# Regression loss: root mean squared error (RMSE)
class RMSELoss(nn.Module):
def __init__(self):
super().__init__()
self.mse = nn.MSELoss()
def forward(self, yhat, y):
return torch.sqrt(self.mse(yhat, y))
# Classification loss function
class CLSLoss(nn.Module):
def __init__(self):
super().__init__()
self.cls_criterion = nn.BCEWithLogitsLoss()
def forward(self, cls_ypred, train_Y_labels, min_thre=12.6, max_thre=15.0):
train_Y_cls_labels = torch.zeros(size=train_Y_labels.shape)
# Convert the (regression) target values into binary labels marking whether each point is anomalous
for i in range(train_Y_cls_labels.shape[0]):
for j in range(train_Y_cls_labels.shape[1]):
# Label is 1 if the value falls within the anomalous range
if (train_Y_labels[i, j] <= min_thre) or (train_Y_labels[i, j] >= max_thre):
train_Y_cls_labels[i, j] = 1
# Label is 0 if the value falls within the normal range
elif (min_thre < train_Y_labels[i, j] < max_thre):
train_Y_cls_labels[i, j] = 0
else:
print('error!!! The if/elif branches do not cover all cases, please check!!!!')
train_Y_cls_labels[i, j] = 0
# Compute the classification loss
cls_loss = self.cls_criterion(cls_ypred, train_Y_cls_labels)
return cls_loss
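# Illustrative behaviour (added for clarity): with the default thresholds min_thre=12.6 and
# max_thre=15.0, a target value of 12.0 or 15.5 is relabelled as class 1 (anomalous),
# while a value of 13.7 is relabelled as class 0 (normal).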
# Evaluation routine
def cal_cls_eval_loss(result_dict, min_thre=12.6, max_thre=15, penalty=1.2):
rmse_loss = RMSELoss()
total_reg_loss_penalty = 0
n_samples = 0
precision_list = []
recall_list = []
accuracy_list = []
f1_score_list = []
for key, values in result_dict.items():
train_seqs = values[0]
label_seqs = values[1]
pred_seqs = values[2]
for i in range(len(label_seqs)):
n_samples = n_samples + 1
# print('pred_seqs[i]:', pred_seqs[i])
loss = rmse_loss(torch.from_numpy(pred_seqs[i]), torch.from_numpy(label_seqs[i])).item()
# print('loss:', loss)
train_seq = train_seqs[i]
label_seq = label_seqs[i]
pred_seq = pred_seqs[i]
# Check whether label_seq falls within the anomalous threshold range
# The input training sequence contains no fault states, but label_seq does
if (label_seq.min() <= min_thre or label_seq.max() >= max_thre):
# if (label_seq.min() <= min_thre or label_seq.max() >= max_thre) and (train_seq.min() > min_thre and train_seq.max() < max_thre):
cur_cls_thre, abnormal_index = cal_cls_eva_thre(pred_seq)
# print('cur_cls_thre:', cur_cls_thre)
TP = 0
FP = 0
TN = 0
FN = 0
for j in range(len(label_seq)):
# (TP: actually faulty, predicted faulty)
# Actual value below the lower threshold and prediction also below the lower threshold
if (label_seq[j] <= min_thre) and (pred_seq[j] <= cur_cls_thre):
TP = TP + 1
# print('label_seq[j]:{}, pred_seq[j]:{} '.format(label_seq[j], pred_seq[j]))
# Actual value above the upper threshold and prediction also above the upper threshold
elif (label_seq[j] >= max_thre) and (pred_seq[j] >= max_thre):
TP = TP + 1
# print('label_seq[j]:{}, pred_seq[j]:{} '.format(label_seq[j], pred_seq[j]))
# (TN: actually normal, predicted normal)
# Actual value within the normal range and prediction also within the normal range
elif (min_thre < label_seq[j] < max_thre) and (cur_cls_thre < pred_seq[j] < max_thre):
TN = TN + 1
# (FP: actually normal, predicted faulty)
# Actual value within the normal range, prediction below the lower threshold or above the upper threshold
elif (min_thre < label_seq[j] < max_thre) and \
((pred_seq[j] <= cur_cls_thre) or (pred_seq[j] >= max_thre)):
FP = FP + 1
# (FN: actually faulty, predicted normal)
# Actual value above the upper threshold, prediction below the upper threshold
elif (label_seq[j] >= max_thre) and (cur_cls_thre < pred_seq[j] < max_thre):
FN = FN + 1
# Actual value below the lower threshold, prediction above the lower threshold
elif (label_seq[j] <= min_thre) and (cur_cls_thre < pred_seq[j] < max_thre):
FN = FN + 1
else:
print('Error: not all cases were covered, please check!!!!!!!!!!')
FN = FN + 1
# Build the confusion matrix and compute the evaluation metrics
precision = float(TP/(TP+FP+DIV_CONSTANT))
recall = float(TP/(TP+FN+DIV_CONSTANT))
accuracy = float((TP+TN)/(TP+TN+FP+FN+DIV_CONSTANT))
f1_score = float(2.*(precision*recall)/(precision+recall+DIV_CONSTANT))
precision_list.append(precision)
recall_list.append(recall)
accuracy_list.append(accuracy)
f1_score_list.append(f1_score)
# print('TP:{}, FP:{}, TN:{}, FN:{}, precision:{}, recall:{}, accuracy:{}, f1:{}'.
# format(TP, FP, TN, FN, precision, recall, accuracy, f1_score))
if precision < 0.8:
total_reg_loss_penalty = total_reg_loss_penalty + penalty*loss
else:
total_reg_loss_penalty = total_reg_loss_penalty + loss
else:
# TODO: handle the case where the ground-truth labels contain no fault points but the prediction wrongly does
total_reg_loss_penalty = total_reg_loss_penalty + loss
return total_reg_loss_penalty / n_samples, \
sum(precision_list)/len(precision_list), \
sum(recall_list)/len(recall_list), \
sum(accuracy_list)/len(accuracy_list), \
sum(f1_score_list)/len(f1_score_list),
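# Summary (added for clarity): the function returns the penalty-weighted mean RMSE over all
# evaluated windows, followed by the mean precision, recall, accuracy and F1 score across
# the windows whose labels contained anomalous values.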
class loss:
def __init__(self):
super(loss, self).__init__()
def MSELoss(self, ypred, ytrue):
mse_loss = nn.MSELoss(size_average=False)
mse_loss_value = mse_loss(ypred, ytrue).item()
return mse_loss_value
def L1Loss(self, ypred, ytrue):
l1_loss = nn.L1Loss(size_average=False)
l1_loss_value = l1_loss(ypred, ytrue).item()
return l1_loss_value
def RSE(ypred, ytrue):
rse = np.sqrt(np.square(ypred - ytrue).sum()) / np.sqrt(np.square(ytrue - ytrue.mean()).sum())
return rse
def quantile_loss(ytrue, ypred, qs):
'''
Quantile loss version 2
Args:
ytrue (batch_size, output_horizon)
ypred (batch_size, output_horizon, num_quantiles)
'''
L = np.zeros_like(ytrue)
for i, q in enumerate(qs):
yq = ypred[:, :, i]
diff = yq - ytrue
L += np.maximum(q * diff, (q - 1) * diff)
return L.mean()
def SMAPE(ytrue, ypred):
ytrue = np.array(ytrue).ravel()
ypred = np.array(ypred).ravel() + 1e-4
mean_y = (ytrue + ypred) / 2.
return np.mean(np.abs((ytrue - ypred) / mean_y))
def MAPE(ytrue, ypred):
ytrue = np.array(ytrue).ravel() + 1e-4
ypred = np.array(ypred).ravel()
return np.mean(np.abs((ytrue - ypred) / ytrue))
def gaussian_likelihood_loss(z, mu, sigma):
'''
Gaussian Liklihood Loss
Args:
z (tensor): true observations, shape (num_ts, num_periods)
mu (tensor): mean, shape (num_ts, num_periods)
sigma (tensor): standard deviation, shape (num_ts, num_periods)
likelihood:
(2 pi sigma^2)^(-1/2) exp(-(z - mu)^2 / (2 sigma^2))
log likelihood:
-1/2 * (log (2 pi) + 2 * log (sigma)) - (z - mu)^2 / (2 sigma^2)
'''
negative_likelihood = torch.log(sigma + 1) + (z - mu) ** 2 / (2 * sigma ** 2) + 6
return negative_likelihood.mean()
def negative_binomial_loss(ytrue, mu, alpha):
'''
Negative Binomial Sample
Args:
ytrue (array like)
mu (array like)
alpha (array like)
maximuze log l_{nb} = log Gamma(z + 1/alpha) - log Gamma(z + 1) - log Gamma(1 / alpha)
- 1 / alpha * log (1 + alpha * mu) + z * log (alpha * mu / (1 + alpha * mu))
minimize loss = - log l_{nb}
Note: torch.lgamma: log Gamma function
'''
batch_size, seq_len = ytrue.size()
likelihood = torch.lgamma(ytrue + 1. / alpha) - torch.lgamma(ytrue + 1) - torch.lgamma(1. / alpha) \
- 1. / alpha * torch.log(1 + alpha * mu) \
+ ytrue * torch.log(alpha * mu / (1 + alpha * mu))
return - likelihood.mean() | 2.4375 | 2 |
tests/test_server/test_grpc/test_init.py | Tomaz-Vieira/tiktorch | 8 | 12793255 | import json
import os
import threading
import grpc
from tiktorch.proto.inference_pb2 import Empty
from tiktorch.proto.inference_pb2_grpc import FlightControlStub
from tiktorch.server.grpc import serve
from tiktorch.utils import wait
def test_serving_on_random_port(tmpdir):
conn_file_path = str(tmpdir / "conn.json")
def _server():
serve("127.0.0.1", 0, connection_file_path=conn_file_path)
srv_thread = threading.Thread(target=_server)
srv_thread.start()
wait(lambda: os.path.exists(conn_file_path))
with open(conn_file_path, "r") as conn_file:
conn_data = json.load(conn_file)
assert conn_data["addr"] == "127.0.0.1"
assert conn_data["port"] > 0
addr, port = conn_data["addr"], conn_data["port"]
chan = grpc.insecure_channel(f"{addr}:{port}")
client = FlightControlStub(chan)
result = client.Ping(Empty())
assert isinstance(result, Empty)
client.Shutdown(Empty())
| 2.40625 | 2 |
CS303_Artifical-Intelligence/NCS/data/data_F12/Rename-Files.py | Eveneko/SUSTech-Courses | 4 | 12793256 | <reponame>Eveneko/SUSTech-Courses
import os
# get the folder name
folder_name = os.path.dirname(__file__)
# folder_name = input("Input the folder name:")
# get all files name
file_names = os.listdir(folder_name)
print(file_names)
for i, name in enumerate(file_names):
old_file_name = folder_name + "/" + name
# demo1: for http://www.pdfdo.com/pdf-to-image.aspx
name1 = (name.split('_')[-1]).split('.')[0]
new_file_name = folder_name + "/" + 'p' + name1 + '.png'
# for recover demo1
# name1 = name.lower().replace('-', '_')
# new_file_name = folder_name + "/" + name[1:-4]
os.rename(old_file_name, new_file_name)
| 3.71875 | 4 |
app/db.py | vladkhard/learning_fastapi | 0 | 12793257 | import os
import pymongo
DB_NAME = os.getenv("DB_NAME")
client = pymongo.MongoClient("mongodb://db:27017")
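# The connection string assumes a MongoDB host reachable as "db" on the default
# port 27017 (e.g. a docker-compose service); DB_NAME is taken from the environment.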
db = client[DB_NAME]
| 1.953125 | 2 |
taiga/projects/migrations/0046_triggers_to_update_tags_colors.py | threefoldtech/Threefold-Circles | 1 | 12793258 | <filename>taiga/projects/migrations/0046_triggers_to_update_tags_colors.py
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-07 06:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('projects', '0045_merge'),
('userstories', '0011_userstory_tribe_gig'),
('tasks', '0009_auto_20151104_1131'),
('issues', '0006_remove_issue_watchers'),
]
operations = [
# Function: Reduce a multidimensional array only on its first level
migrations.RunSQL(
"""
CREATE OR REPLACE FUNCTION public.reduce_dim(anyarray)
RETURNS SETOF anyarray
AS $function$
DECLARE
s $1%TYPE;
BEGIN
IF $1 = '{}' THEN
RETURN;
END IF;
FOREACH s SLICE 1 IN ARRAY $1 LOOP
RETURN NEXT s;
END LOOP;
RETURN;
END;
$function$
LANGUAGE plpgsql IMMUTABLE;
"""
),
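# Illustrative behaviour (added for clarity, not part of the migration):
# reduce_dim('{{tag1,#fff},{tag2,#000}}') yields the rows {tag1,#fff} and {tag2,#000},
# i.e. it splits a two-dimensional array into its first-level sub-arrays.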
# Function: aggregates multi dimensional arrays
migrations.RunSQL(
"""
DROP AGGREGATE IF EXISTS array_agg_mult (anyarray);
CREATE AGGREGATE array_agg_mult (anyarray) (
SFUNC = array_cat
,STYPE = anyarray
,INITCOND = '{}'
);
"""
),
# Function: array_distinct
migrations.RunSQL(
"""
CREATE OR REPLACE FUNCTION array_distinct(anyarray)
RETURNS anyarray AS $$
SELECT ARRAY(SELECT DISTINCT unnest($1))
$$ LANGUAGE sql;
"""
),
# Rebuild the color tags so it's consisten in any project
migrations.RunSQL(
"""
WITH
tags_colors AS (
SELECT id project_id, reduce_dim(tags_colors) tags_colors
FROM projects_project
WHERE tags_colors != '{}'
),
tags AS (
SELECT unnest(tags) tag, NULL color, project_id FROM userstories_userstory
UNION
SELECT unnest(tags) tag, NULL color, project_id FROM tasks_task
UNION
SELECT unnest(tags) tag, NULL color, project_id FROM issues_issue
UNION
SELECT unnest(tags) tag, NULL color, id project_id FROM projects_project
),
rebuilt_tags_colors AS (
SELECT tags.project_id project_id,
array_agg_mult(ARRAY[[tags.tag, tags_colors.tags_colors[2]]]) tags_colors
FROM tags
LEFT JOIN tags_colors ON
tags_colors.project_id = tags.project_id AND
tags_colors[1] = tags.tag
GROUP BY tags.project_id
)
UPDATE projects_project
SET tags_colors = rebuilt_tags_colors.tags_colors
FROM rebuilt_tags_colors
WHERE rebuilt_tags_colors.project_id = projects_project.id;
"""
),
# Trigger for auto updating projects_project.tags_colors
migrations.RunSQL(
"""
CREATE OR REPLACE FUNCTION update_project_tags_colors()
RETURNS trigger AS $update_project_tags_colors$
DECLARE
tags text[];
project_tags_colors text[];
tag_color text[];
project_tags text[];
tag text;
project_id integer;
BEGIN
tags := NEW.tags::text[];
project_id := NEW.project_id::integer;
project_tags := '{}';
-- Read project tags_colors into project_tags_colors
SELECT projects_project.tags_colors INTO project_tags_colors
FROM projects_project
WHERE id = project_id;
-- Extract just the project tags to project_tags_colors
IF project_tags_colors != ARRAY[]::text[] THEN
FOREACH tag_color SLICE 1 in ARRAY project_tags_colors
LOOP
project_tags := array_append(project_tags, tag_color[1]);
END LOOP;
END IF;
-- Add to project_tags_colors the new tags
IF tags IS NOT NULL THEN
FOREACH tag in ARRAY tags
LOOP
IF tag != ALL(project_tags) THEN
project_tags_colors := array_cat(project_tags_colors,
ARRAY[ARRAY[tag, NULL]]);
END IF;
END LOOP;
END IF;
-- Save the result in the tags_colors column
UPDATE projects_project
SET tags_colors = project_tags_colors
WHERE id = project_id;
RETURN NULL;
END; $update_project_tags_colors$
LANGUAGE plpgsql;
"""
),
# Execute trigger after user_story update
migrations.RunSQL(
"""
DROP TRIGGER IF EXISTS update_project_tags_colors_on_userstory_update ON userstories_userstory;
CREATE TRIGGER update_project_tags_colors_on_userstory_update
AFTER UPDATE ON userstories_userstory
FOR EACH ROW EXECUTE PROCEDURE update_project_tags_colors();
"""
),
# Execute trigger after user_story insert
migrations.RunSQL(
"""
DROP TRIGGER IF EXISTS update_project_tags_colors_on_userstory_insert ON userstories_userstory;
CREATE TRIGGER update_project_tags_colors_on_userstory_insert
AFTER INSERT ON userstories_userstory
FOR EACH ROW EXECUTE PROCEDURE update_project_tags_colors();
"""
),
# Execute trigger after task update
migrations.RunSQL(
"""
DROP TRIGGER IF EXISTS update_project_tags_colors_on_task_update ON tasks_task;
CREATE TRIGGER update_project_tags_colors_on_task_update
AFTER UPDATE ON tasks_task
FOR EACH ROW EXECUTE PROCEDURE update_project_tags_colors();
"""
),
# Execute trigger after task insert
migrations.RunSQL(
"""
DROP TRIGGER IF EXISTS update_project_tags_colors_on_task_insert ON tasks_task;
CREATE TRIGGER update_project_tags_colors_on_task_insert
AFTER INSERT ON tasks_task
FOR EACH ROW EXECUTE PROCEDURE update_project_tags_colors();
"""
),
# Execute trigger after issue update
migrations.RunSQL(
"""
DROP TRIGGER IF EXISTS update_project_tags_colors_on_issue_update ON issues_issue;
CREATE TRIGGER update_project_tags_colors_on_issue_update
AFTER UPDATE ON issues_issue
FOR EACH ROW EXECUTE PROCEDURE update_project_tags_colors();
"""
),
# Execute trigger after issue insert
migrations.RunSQL(
"""
DROP TRIGGER IF EXISTS update_project_tags_colors_on_issue_insert ON issues_issue;
CREATE TRIGGER update_project_tags_colors_on_issue_insert
AFTER INSERT ON issues_issue
FOR EACH ROW EXECUTE PROCEDURE update_project_tags_colors();
"""
),
]
| 2.03125 | 2 |
src/algorithms/number_theory/P003_trial_division/solution_01.py | lakshmikanth-tesla/ProgrammingProblems | 1 | 12793259 | import logging
import math
"""
1. Note
- Loop from 2 up to the square root of N, dividing N at every step.
2. Optimisation(s)
- Apart from 2, only odd numbers are tested for divisibility.
- Only numbers up to sqrt(N) are tested for divisibility.
3. Limitation(s)
- Do not try numbers whose prime factors have more than 15 digits.
"""
def prime_factors_using_trial_division(n):
"""Returns a list of all prime prime_factors of n"""
prime_factors = []
# Test for 2 separately so that only ODD numbers can be tested in the loop
while n % 2 == 0:
factor = 2
prime_factors.append(factor)
n = n // 2
# Test only for ODD numbers starting with 3
for i in xrange(3, int(math.sqrt(n)) + 1, 2):
# logging.debug("i = {0}".format(i))
while n % i == 0:
factor = i
prime_factors.append(factor)
n = n // i
logging.debug("Factor = {0}, N = {1}".format(i, n))
# All factors have been found once N has been reduced to 1.
if n == 1:
break
# If N is still greater than 1 after the loop, the remaining value is itself a prime factor.
if n > 1:
prime_factors.append(n)
return prime_factors | 4.3125 | 4 |
preprocesss_lastfm_top50.py | mimbres/train_lastfm | 1 | 12793260 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 14 14:11:07 2019
@author: mimbres
"""
import pandas as pd
import numpy as np
from tqdm import trange
LASTFM_FILEPATH = './data/final_mapping.json'
OUTPUT_FILEPATH1 = './data/lastfm_top50_tagmtx.npy'
OUTPUT_FILEPATH2 = './data/lastfm_top50_featmtx.npy'
OUTPUT_FILEPATH3 = './data/lastfm_top50_track_ids.npy'
OUTPUT_FILEPATH4 = './data/lastfm_top50_tag_avail_cnt.npy'
SAVED_SCALER_FILEPATH = './data/std_scaler.sav'
TOP50A = ['rock', 'pop', 'alternative', 'indie', 'favorites', 'female vocalists',
'Love', 'alternative rock', 'electronic', 'beautiful', 'jazz', '00s',
'singer-songwriter', 'metal', 'male vocalists', 'Awesome', 'american',
'Mellow', 'classic rock', '90s', 'soul', 'chillout', 'punk', '80s', 'chill',
'indie rock', 'folk', 'dance', 'instrumental', 'hard rock', 'oldies',
'seen live', 'Favorite', 'country', 'blues', 'guitar', 'cool', 'british',
'acoustic', 'electronica', '70s', 'Favourites', 'Hip-Hop', 'experimental',
'easy listening', 'female vocalist', 'ambient', 'punk rock', 'funk', 'hardcore']
_dict = {'major': 1, 'minor': 0}
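# _dict maps the track's musical mode string to a binary flag (major -> 1, minor -> 0)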
# Load .json file...
df=pd.read_json(LASTFM_FILEPATH)
num_items = len(df)
# Shuffle (we can split train/test later)
df = df.sample(frac=1).reset_index(drop=True)
# Create an empty result matrix
tag_mtx = np.zeros((num_items,50))
feat_mtx = np.zeros((num_items,29))
track_ids = np.ndarray((num_items,), dtype=object)
tag_avail_cnt = np.zeros((num_items,))
for i in trange(num_items):
item = np.asarray(df[0][i]) # Get one item
tag_cnt = 0
for tag in TOP50A:
# Check availability of each tag in this item
_idx = np.where(tag == item)[0]
if len(_idx) != 0: # If top50-tag available...
tag_cnt += 1
column_idx = _idx[0]
#print(i, item[column_idx,:])
tag_mtx[i,TOP50A.index(tag)] = item[column_idx,1].astype(np.float64)
tag_avail_cnt[i] = tag_cnt
track_ids[i] = df[1][i][0]
if tag_cnt != 0:
_feat = np.asarray(df[1][i])
_feat[20] = _dict.get(_feat[20]) # {'major', 'minor'} --> {0,1}
_feat[5] = _feat[5][:4] # '2005-01-01' --> '2005'
feat_mtx[i,:] = _feat[[4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33]]
print('max available tags =', np.max(tag_avail_cnt), '\n',
'avg available tags =', np.mean(tag_avail_cnt[np.where(tag_avail_cnt!=0)]), '\n',
'items with top50 unavailable =', len(np.where(tag_avail_cnt==0)[0]), '\n',
'items with top50 available =', len(np.where(tag_avail_cnt!=0)[0]) )
'''
max available tags = 31.0
avg available tags = 4.705301775916366
items with top50 unavailable = 38595
items with top50 available = 123204
'''
# Reduce top50 unavailable items
tag_mtx = tag_mtx[tag_avail_cnt!=0,:]
feat_mtx = feat_mtx[tag_avail_cnt!=0,:]
track_ids = track_ids[tag_avail_cnt!=0]
# Feature normalization
import pickle
#from sklearn.preprocessing import StandardScaler
scaler = pickle.load(open(SAVED_SCALER_FILEPATH, 'rb'))
feat_mtx_new = scaler.fit_transform(feat_mtx)
feat_mtx_new[:,15] = feat_mtx[:,15]
# Save results as .npy
np.save(OUTPUT_FILEPATH1, tag_mtx.astype(np.int8))
#np.save(OUTPUT_FILEPATH2, feat_mtx.astype(np.int8))
np.save(OUTPUT_FILEPATH2, feat_mtx_new.astype(np.float32))
np.save(OUTPUT_FILEPATH3, track_ids)
np.save(OUTPUT_FILEPATH4, tag_avail_cnt.astype(np.int8))
| 2.125 | 2 |
menu_system.py | frazermills/Conways-Game-of-life | 2 | 12793261 | import pygame
class StartMenu:
def __init__(self, screen, font, text_colour, button_colour):
self.__screen = screen
self.__font = font
self.__text_colour = text_colour
self.__button_colour = button_colour
self.__click = False
self.__button_width = 150
self.__button_height = 75
self.__option = None
self.__buttons_xy = None
self.__button_objects = None
self.__button_command = ["start game", "iterative mode", "quit game"]
self.__title = "Conway's Game of Life - by <NAME>"
@property
def Option(self):
return self.__option
def setup(self):
pygame.display.set_caption(f"{self.__title}")
self.__screen.fill((0,0,0))
def draw_text(self, text, x, y):
textobj = self.__font.render(text, 1, self.__text_colour)
textrect = textobj.get_rect()
textrect.center = (x, y)
self.__screen.blit(textobj, textrect)
def get_button_objects(self):
self.__buttons_xy = [
((self.__screen.get_width() // 2) - (self.__button_width // 2), (self.__screen.get_width() // 2) - i)
for i in reversed(range(-100, 200, 100))
]
self.__button_objects = {
f"button {i}": pygame.Rect(self.__buttons_xy[i][0], self.__buttons_xy[i][1], self.__button_width, self.__button_height)
for i, button in enumerate(self.__buttons_xy)
}
def check_collisions(self):
mousex, mousey = pygame.mouse.get_pos()
if self.__button_objects[f"button 0"].collidepoint((mousex, mousey)):
if self.__click:
self.__option = self.__button_command[0]
elif self.__button_objects[f"button 1"].collidepoint((mousex, mousey)):
if self.__click:
self.__option = self.__button_command[1]
elif self.__button_objects[f"button 2"].collidepoint((mousex, mousey)):
if self.__click:
self.__option = self.__button_command[2]
def display_buttons(self):
for i, button_object in enumerate(self.__button_objects):
pygame.draw.rect(self.__screen, self.__button_colour, self.__button_objects[button_object])
self.draw_text(f"{self.__title}", self.__screen.get_width() // 2, self.__screen.get_height() // 4)
self.draw_text(f"{self.__button_command[0]}", self.__buttons_xy[0][0] + 75, self.__buttons_xy[0][1] + 35)
self.draw_text(f"{self.__button_command[1]}", self.__buttons_xy[1][0] + 75, self.__buttons_xy[1][1] + 35)
self.draw_text(f"{self.__button_command[2]}", self.__buttons_xy[2][0] + 75, self.__buttons_xy[2][1] + 35)
pygame.display.update()
def is_clicked(self):
self.__click = False
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
elif event.type == pygame.MOUSEBUTTONDOWN:
if event.button == 1: # left mouse button
self.__click = True
| 3.453125 | 3 |
misc/question2.py | edwintcloud/algorithmPractice | 1 | 12793262 | def iterative_quicksort(arr, begin=0, end=None, counter=0):
'''Recursively quicksorts arr in place and returns the number of comparisons made'''
# initialize values on first iteration
if end is None:
end = len(arr)-1
# sort until begin is greater than or equal to end
if begin < end:
# starting index, counts size of group lesser than pivot
i = begin
# iterate from begin to end, swaping elements to correct side of pivot(end)
for j in range(begin, end):
counter += 1
if arr[j] <= arr[end]:
arr[i], arr[j] = arr[j], arr[i]
i += 1
# smaller elements are in range [begin..i] (inclusive)
# larger elements are in range [i+1..end-1]
# swap first element in group greater than pivot with the pivot
arr[i], arr[end] = arr[end], arr[i]
# now pivot is at index i+1
# smaller elements are in range [begin..i]
# larger elements are in range [i+2..end]
# sort items before partition and after partition
counter += iterative_quicksort(arr, begin, i-1)
counter += iterative_quicksort(arr, i+1, end)
# return total number of iterations it took to sort
return counter
## TEST ##
arr = [7, 10, 4, 3, 20, 15, 14, 13, 12, 10, 9,
8, 7, 6, 4, 3, 1, 4, 6, 82, 81, 1, 19, 24]
arr = arr + arr
print("Numbers:", arr)
counter = iterative_quicksort(arr)
print("Iterations:", counter)
print("Sorted:", arr)
| 4.28125 | 4 |
test/__init__.py | theophane-droid/stix2arango | 1 | 12793263 | # ensure that we use the current version of the package
import sys
import os
sys.path.insert(0, '/app')
from stix2 import IPv4Address, AutonomousSystem, Identity
from stix2 import Relationship, Incident, IPv6Address
from pyArango.connection import *
from pyArango.theExceptions import CreationError
from stix2arango.feed import Feed, vaccum
from stix2arango.request import Request
from stix2arango.storage import GROUPED, GROUPED_BY_MONTH, TIME_BASED, STATIC
from stix2arango import stix_modifiers
from datetime import datetime, timedelta
from test import postgresql
from test import request
from test import storage
from test import utils
def get_database():
password = <PASSWORD>['<PASSWORD>']
url = os.environ['ARANGO_URL']
db_conn = Connection(username='root', password=password, arangoURL=url)
try:
database = db_conn.createDatabase('stix2arango')
except CreationError:
database = db_conn['stix2arango']
return database
if __name__ == "__main__":
db_conn = get_database()
print('\n\n> Inserting data')
# test with time-base paradigm
autonomous_system = AutonomousSystem(number=1234, name='Google')
ipv4 = IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system.id])
identity = Identity(name='<NAME>', identity_class='individual')
relation = Relationship(source_ref=identity.id, target_ref=ipv4.id, relationship_type='attributed-to')
ipv4_net = IPv4Address(value='172.16.17.32/24', belongs_to_refs=[autonomous_system.id])
ipv6 = IPv6Address(value='2001:0db8:85a3:0000:0000:8a2e:0370:7334', belongs_to_refs=[autonomous_system.id])
feed = Feed(db_conn, 'timefeed', tags=['paynoattention', 'time_based'], storage_paradigm=TIME_BASED)
feed.insert_stix_object_in_arango([ipv4, autonomous_system, identity, relation, ipv4_net, ipv6])
# test with grouped paradigm
autonomous_system = AutonomousSystem(number=1234, name='Google')
ipv4 = IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system.id])
identity = Identity(name='<NAME>', identity_class='individual')
relation = Relationship(source_ref=identity.id, target_ref=ipv4.id, relationship_type='attributed-to')
feed = Feed(db_conn, 'groupedfeed', tags=['paynoattention', 'grouped'], storage_paradigm=GROUPED)
feed.insert_stix_object_in_arango([ipv4, autonomous_system, identity, relation])
# test with grouped-by-month paradigm
feed = Feed(db_conn, 'grouped_by_month_feed', tags=['paynoattention', 'dogstory'], storage_paradigm=GROUPED_BY_MONTH)
identity = Identity(name='<NAME>', identity_class='individual')
course_of_action = Incident(name='INC 1078', description='My dog barked on neighbors')
relation = Relationship(source_ref=course_of_action.id, target_ref=identity.id, relationship_type='attributed-to')
feed.insert_stix_object_in_arango([identity, course_of_action, relation])
feeds = Feed.get_last_feeds(db_conn, datetime(2022, 12, 12))
print('OK')
print('\n\n> Getting data')
request = Request(db_conn, datetime.now())
results = request.request(" [ipv4-addr:x_ip = '192.168.127.12' ] ",
tags=['time_based'], max_depth=1)
assert(len(results) == 5)
request = Request(db_conn, datetime.now())
results = request.request("""[ identity:name = 'My grand mother']""",
tags=['time_based'])
assert(len(results) == 3)
feed = Feed(db_conn, 'patterntestfeed', tags=['patterntestfeed'], storage_paradigm=TIME_BASED, )
ipv4 = IPv4Address(value='172.16.58.3/24')
feed.insert_stix_object_in_arango([ipv4])
request = Request(db_conn, datetime.now())
results = request.request("[ipv4-addr:x_ip='172.16.31.10']",
tags=['patterntestfeed'])
assert(len(results) == 1)
results = request.request("[ malware:name = 'Adware' ]",
tags=['pattern'])
assert(len(results) == 0)
print('OK')
print('\n\n> Vaccum test')
feed = Feed(db_conn, 'vaccumentest', tags=['vaccum'], storage_paradigm=TIME_BASED, vaccum_date=datetime.fromtimestamp(10))
ipv4 = IPv4Address(value='172.16.58.3/24')
feed.insert_stix_object_in_arango([ipv4])
vaccum(db_conn)
feeds = Feed.get_last_feeds(db_conn, datetime(2022, 12, 12))
for feed in feeds:
if feed.feed_name == 'vaccumentest':
raise Exception('Vaccum failed')
print('OK')
print('\n\n> Test index optimisation patch')
r = '[ipv4-addr:value = "mushroom" OR ipv4-addr:net != "red hot"]'
request = Request(db_conn, datetime.now())
results = request.request(r)
assert(len(results))
print('OK')
print('\n\n> Test patch #20')
feed = Feed(db_conn, 'patch20', tags=['patch20'], storage_paradigm=TIME_BASED)
ipv4 = IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system.id])
identity = Identity(name='<NAME>', identity_class='individual')
feed.insert_stix_object_in_arango([ipv4, identity])
autonomous_system = AutonomousSystem(number=1234, name='Google')
ipv4 = IPv4Address(value='192.168.127.12', belongs_to_refs=[autonomous_system.id])
feed.insert_stix_object_in_arango([ipv4, autonomous_system])
feeds = Feed.get_last_feeds(db_conn, datetime.now())
for f in feeds:
if f.feed_name == 'patch20':
assert(f.inserted_stix_types == ['ipv4-addr', 'identity', 'autonomous-system'])
print('OK')
print('\n\n> Test feature static paradigm #21')
feed = Feed(db_conn, 'staticfeed', storage_paradigm=STATIC)
feed.insert_stix_object_in_arango([ipv4])
col_name = feed.storage_paradigm.get_collection_name(feed)
assert(db_conn[col_name].count() == 1)
feed = Feed(db_conn, 'staticfeed', storage_paradigm=STATIC)
feed.insert_stix_object_in_arango([identity, autonomous_system])
assert(db_conn[col_name].count() == 2)
print('OK')
print('\n\n> Test grouped search before')
request = Request(db_conn, datetime.now() - timedelta(days=1000))
r = request.request("[identity:name = '<NAME>']", tags=['grouped'])
assert(len(r))
print('OK') | 1.953125 | 2 |
pysweep/equations/checker.py | anthony-walker/pysweep | 1 | 12793264 | #Programmer: <NAME>
#This file contains a test step function for debugging the swept rule
import numpy, h5py, mpi4py.MPI as MPI
try:
import pycuda.driver as cuda
from pycuda.compiler import SourceModule
except Exception as e:
pass
def step(state,iidx,arrayTimeIndex,globalTimeStep):
"""This is the method that will be called by the swept solver.
state - 4D numpy array(t,v,x,y (v is variables length))
iidx - an iterable of indexs
arrayTimeIndex - the current time step
globalTimeStep - a step counter that allows implementation of the scheme
"""
if scheme:
checkerOneStep(state,iidx,arrayTimeIndex,globalTimeStep)
else:
checkerTwoStep(state,iidx,arrayTimeIndex,globalTimeStep)
def checkerOneStep(state,iidx,arrayTimeIndex,globalTimeStep):
"""Use this function as the one step checker pattern"""
vs = slice(0,state.shape[1],1)
for idx,idy in iidx:
ntidx = (arrayTimeIndex+1,vs,idx,idy) #next step index
state[ntidx] = state[arrayTimeIndex,vs,idx+1,idy]
state[ntidx] += state[arrayTimeIndex,vs,idx-1,idy]
state[ntidx] += state[arrayTimeIndex,vs,idx,idy+1]
state[ntidx] += state[arrayTimeIndex,vs,idx,idy-1]
state[ntidx] /= 4
def checkerTwoStep(state,iidx,arrayTimeIndex,globalTimeStep):
"""Use this function as the two step checker pattern"""
vs = slice(0,state.shape[1],1)
for idx,idy in iidx:
ntidx = (arrayTimeIndex+1,vs,idx,idy) #next step index
state[ntidx] = state[arrayTimeIndex,vs,idx+1,idy]
state[ntidx] += state[arrayTimeIndex,vs,idx-1,idy]
state[ntidx] += state[arrayTimeIndex,vs,idx,idy+1]
state[ntidx] += state[arrayTimeIndex,vs,idx,idy-1]
state[ntidx] /= 4
def createInitialConditions(nv,nx,ny,filename="checkerConditions.hdf5"):
"""Use this function to create a set of initial conditions in an hdf5 file."""
comm = MPI.COMM_WORLD
data = numpy.zeros((nv,nx,ny))
for i in range(0,nx,2):
for j in range(0,ny,2):
data[:,i,j]=1
for i in range(1,nx,2):
for j in range(1,ny,2):
data[:,i,j]=1
with h5py.File(filename,"w",driver="mpio",comm=comm) as hf:
hf.create_dataset("data",data.shape,data=data)
return filename
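# Illustrative usage (not part of the original module): createInitialConditions(4, 10, 10)
# writes a (4, 10, 10) checkerboard of ones and zeros to "checkerConditions.hdf5"
# (via the parallel HDF5 driver) and returns that filename.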
def set_globals(*args,source_mod=None):
"""Use this function to set cpu global variables"""
global dt,dx,dy,scheme #true for one step
t0,tf,dt,dx,dy,scheme = args
if source_mod is not None:
keys = "<KEY>"
nargs = args[2:]
fc = lambda x:numpy.float64(x)
for i,key in enumerate(keys):
ckey,_ = source_mod.get_global(key)
cuda.memcpy_htod(ckey,fc(nargs[i]))
ckey,_ = source_mod.get_global("SCHEME")
cuda.memcpy_htod(ckey,bytes(scheme))
| 2.421875 | 2 |
tests/test_reference.py | uwcirg/true_nth_usa_portal | 3 | 12793265 | <gh_stars>1-10
"""Unit test module for Reference class"""
from portal.models.intervention import Intervention
from portal.models.questionnaire_bank import QuestionnaireBank
from portal.models.reference import Reference
from portal.system_uri import US_NPI
from tests import TEST_USER_ID, TestCase
class TestReference(TestCase):
def test_clinician(self):
patient = Reference.clinician(TEST_USER_ID)
assert patient.as_fhir()['display'] == self.test_user.display_name
def test_clinician_parse(self):
ref = {'reference': f'api/clinician/{TEST_USER_ID}'}
parsed = Reference.parse(ref)
assert self.test_user == parsed
def test_patient(self):
patient = Reference.patient(TEST_USER_ID)
assert patient.as_fhir()['display'] == self.test_user.display_name
def test_organization(self):
org = Reference.organization(0)
assert org.as_fhir()['display'] == 'none of the above'
def test_org_w_identifier(self):
o = self.prep_org_w_identifier()
o_ref = Reference.organization(o.id)
assert o_ref.as_fhir()['display'] == 'test org'
assert (o_ref.as_fhir()['reference']
== 'api/organization/{}'.format(o.id))
def test_org_w_identifier_parse(self):
o = self.prep_org_w_identifier()
ref = {'reference': 'api/organization/123-45?system={}'.format(US_NPI)}
parsed = Reference.parse(ref)
assert o == parsed
def test_questionnaire(self):
q = self.add_questionnaire('epic1000')
q_ref = Reference.questionnaire(q.name)
assert q_ref.as_fhir()['display'] == 'epic1000'
def test_questionnaire_parse(self):
q = self.add_questionnaire('epiclife')
ref = {
'reference':
'api/questionnaire/{0.value}?system={0.system}'.format(
q.identifiers[0])}
parsed = Reference.parse(ref)
assert q == parsed
def test_questionnaire_bank(self):
q = QuestionnaireBank(name='testy')
q_ref = Reference.questionnaire_bank(q.name)
assert q_ref.as_fhir()['display'] == 'testy'
def test_questionnaire_response(self):
qnr_id = {
"system": "https://ae-eproms-test.cirg.washington.edu",
"value": "588.0"}
qnr_ref = Reference.questionnaire_response(qnr_id)
assert qnr_ref.as_fhir() == {
'reference':
f"{qnr_id['system']}/QuestionnaireResponse/{qnr_id['value']}"
}
def test_qnr_parse(self):
from tests.test_assessment_status import (
mock_eproms_questionnairebanks,
mock_qr
)
doc_id = '2084.0'
# boilerplate necessary to persist a QNR
self.bless_with_basics(make_patient=True)
mock_eproms_questionnairebanks()
qb = QuestionnaireBank.query.filter(
QuestionnaireBank.name == 'localized').one()
mock_qr('epic26', doc_id=doc_id, qb=qb)
# confirm [system]/QuestionnaireResponse/[value] pulls
# the referenced object
qnr_reference = (
"https://stg-ae.us.truenth.org/eproms-demo"
f"/QuestionnaireResponse/{doc_id}")
ref = {'Reference': qnr_reference}
parsed = Reference.parse(ref)
assert parsed.document['identifier']['value'] == doc_id
def test_intervention(self):
i = Intervention.query.filter_by(name='self_management').one()
i_ref = Reference.intervention(i.id)
assert i_ref.as_fhir()['display'] == 'self_management'
assert (i_ref.as_fhir()['reference']
== 'api/intervention/self_management')
def test_intervention_parse(self):
ref = {'reference': 'api/intervention/self_management'}
i = Reference.parse(ref)
assert i.name == 'self_management'
def test_practitioner(self):
p = self.add_practitioner()
p_ref = Reference.practitioner(p.id)
assert p_ref.as_fhir()['display'] == 'first last'
assert (p_ref.as_fhir()['reference']
== 'api/practitioner/12345?system={}'.format(US_NPI))
def test_practitioner_parse(self):
p = self.add_practitioner()
ref = {'reference': 'api/practitioner/12345?system={}'.format(US_NPI)}
parsed = Reference.parse(ref)
assert p == parsed
| 2.359375 | 2 |
src/ytdl2rss.py | kevinoid/ytdl2rss | 1 | 12793266 | <reponame>kevinoid/ytdl2rss<gh_stars>1-10
#!/usr/bin/env python3
"""Create podcast RSS from youtube-dl info JSON."""
import argparse
import codecs
import io
import json
import os
import sys
import time
import traceback
from datetime import datetime
from email.utils import formatdate
from xml.sax.saxutils import escape, quoteattr # nosec
try:
from urllib.parse import urljoin, urlparse
from urllib.request import pathname2url, url2pathname
except ImportError:
from urllib import pathname2url, url2pathname
from urlparse import urljoin, urlparse
__version__ = '0.1.0'
_JSON_PATH_KEY = object()
_VERSION_MESSAGE = (
'%(prog)s '
+ __version__
+ '''
Copyright 2020 <NAME> <<EMAIL>>
%(prog) is free and unencumbered software released into the public domain.
%(prog) is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the Unlicense for details.'''
)
def _resolve_path(path, src_path, dst_path, dst_base):
"""Resolve a path in src_path to a URL in dst_path served at dst_base."""
if not path:
return path
src_dir = os.path.dirname(src_path)
cur_path = os.path.join(src_dir, path)
dst_dir = os.path.dirname(dst_path)
rel_path = os.path.relpath(cur_path, dst_dir)
rel_url = pathname2url(rel_path)
return urljoin(dst_base, rel_url)
def _resolve_url(url, src_path, dst_path, dst_base):
"""Resolve a URL in src_path to a URL in dst_path served at dst_base."""
url_parts = urlparse(url)
if url_parts.scheme:
# url is absolute
return url
if url_parts.netloc:
# url is scheme-relative
return urljoin(dst_base, url)
# Resolve url from containing file
url_path = url2pathname(url)
return _resolve_path(url_path, src_path, dst_path, dst_base)
def _ymd_to_rfc2822(datestr):
"""Convert a date in YYYYMMDD format to RFC 2822 for RSS."""
tt = time.strptime(datestr, '%Y%m%d')
ts = time.mktime(tt)
# Convert to UTC so formatted date is midnight with -0000 (unknown) TZ.
# https://stackoverflow.com/a/19238551
offset = datetime.fromtimestamp(ts) - datetime.utcfromtimestamp(ts)
return formatdate(ts + offset.total_seconds())
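# e.g. _ymd_to_rfc2822('20200131') -> 'Fri, 31 Jan 2020 00:00:00 -0000' (illustrative)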
def get_entry_media_type(entry):
"""Get media type (i.e. MIME type) from youtube-dl JSON entry info."""
ext = entry['ext']
acodec = entry.get('acodec')
if acodec == 'none':
acodec = None
vcodec = entry.get('vcodec')
if vcodec == 'none':
vcodec = None
media_type = 'audio/' if acodec and not vcodec else 'video/'
if ext == '3g2':
media_type += '3gpp2'
elif ext == '3gp':
media_type += '3gpp'
elif ext == 'avi':
media_type = 'video/vnd.avi'
elif ext in (
'f4a',
'f4b',
'f4p',
'm4a',
'm4b',
'm4p',
'm4r',
):
# These extensions are intended for audio.
# If codecs are not known, assume it is audio.
if not acodec and not vcodec:
media_type = 'audio/mp4'
else:
media_type += 'mp4'
elif ext in ('f4v', 'm4v'):
media_type += 'mp4'
elif ext == 'flv':
media_type = 'video/x-flv'
elif ext == 'gif':
media_type = 'image/gif'
elif ext in ('mk3d', 'mks', 'mkv'):
media_type += 'x-matroska'
elif ext == 'mka':
# This extension is intended for audio.
# If codecs are not known, assume it is audio.
if not acodec and not vcodec:
media_type = 'audio/'
media_type += 'x-matroska'
elif ext == 'mp3':
media_type = 'audio/mpeg'
elif ext == 'ogg':
# Xiph recommends this extension for (vorbis) audio and ogv for video.
# If video codec not known, assume it is audio.
if not vcodec:
media_type = 'audio/'
media_type += 'ogg'
elif ext == 'opus':
# Note: ext: opus could be used to refer to "raw" audio/opus.
# However, this has not been observed on ytdl-supported sites.
# Since Xiph recommends .opus for Opus-in-Ogg
# https://wiki.xiph.org/index.php/MIMETypesCodecs
# and the ytdl extractor for media.ccc.de uses it this way,
# unconditionally convert to ogg.
# If uses of audio/opus are found, consider how to differentiate.
ext = 'ogg'
if acodec is None:
acodec = 'opus'
media_type = 'audio/ogg'
elif ext == 'ogv':
media_type += 'ogg'
elif ext == 'wav':
media_type = 'audio/vnd.wave'
else:
media_type += ext
# Add codecs parameter from https://tools.ietf.org/html/rfc6381
if (acodec or vcodec) and ext not in ('flv', 'gif', 'mp3'):
# Some extractors (e.g. media.ccc.de) use vcodec: h264
# Section 3.3 of RFC 6381 specifies codecs must be a FOURCC
if vcodec == 'h264':
vcodec = 'avc1'
# Note: Add space after ; as in RFC 6381 section 3.6 Examples
media_type += '; codecs='
if acodec and vcodec:
# Note: Add space after , as in RFC 6381 section 3.6 Examples
# TODO: Apply encoding from RFC 2231 if required, see examples
# in RFC 6381 section 3.1
media_type += '"' + vcodec + ', ' + acodec + '"'
else:
media_type += acodec or vcodec
return media_type
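# Illustrative mappings (added for clarity, not exhaustive):
#   {'ext': 'm4a', 'acodec': 'mp4a.40.2', 'vcodec': 'none'} -> 'audio/mp4; codecs=mp4a.40.2'
#   {'ext': 'webm', 'acodec': 'opus', 'vcodec': 'vp9'} -> 'video/webm; codecs="vp9, opus"'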
def entry_to_rss(entry, rss, base=None, indent=None):
"""Convert youtube-dl entry info object to podcast RSS."""
if indent is None:
indent2 = ''
indent3 = ''
eol = ''
else:
indent2 = indent * 2
indent3 = indent * 3
eol = '\n'
json_path = entry[_JSON_PATH_KEY]
rss.write(indent2)
rss.write('<item>')
rss.write(eol)
webpage_url = entry.get('webpage_url')
if webpage_url:
rss.write(indent3)
rss.write('<guid isPermaLink="true">')
rss.write(escape(webpage_url))
rss.write('</guid>')
rss.write(eol)
else:
rss.write(indent3)
rss.write('<guid>')
rss.write(escape(entry['id']))
rss.write('</guid>')
rss.write(eol)
title = entry.get('title')
if title is not None:
rss.write(indent3)
rss.write('<title>')
rss.write(escape(title))
rss.write('</title>')
rss.write(eol)
upload_date = entry.get('upload_date')
if upload_date is not None:
rss.write(indent3)
rss.write('<pubDate>')
rss.write(_ymd_to_rfc2822(upload_date))
rss.write('</pubDate>')
rss.write(eol)
filename = entry['_filename']
fileurl = _resolve_path(filename, json_path, rss.name, base)
filesize = entry.get('filesize')
media_type = get_entry_media_type(entry)
rss.write(indent3)
rss.write('<enclosure')
if media_type is not None:
rss.write(' type=')
rss.write(quoteattr(media_type))
if filesize is not None:
rss.write(' length=')
rss.write(quoteattr(str(filesize)))
rss.write(' url=')
rss.write(quoteattr(fileurl))
rss.write('/>')
rss.write(eol)
thumbnail = entry.get('thumbnail')
if thumbnail is not None:
thumbnail = _resolve_url(thumbnail, json_path, rss.name, base)
rss.write(indent3)
rss.write('<itunes:image href=')
rss.write(quoteattr(thumbnail))
rss.write('/>')
rss.write(eol)
duration = entry['duration']
if duration is not None:
rss.write(indent3)
rss.write('<itunes:duration>')
rss.write(str(duration))
rss.write('</itunes:duration>')
rss.write(eol)
age_limit = entry.get('age_limit')
if age_limit is not None:
rss.write(indent3)
rss.write('<itunes:explicit>')
# Note: Spotify wants yes/no/clean for item, yes/clean for channel,
# Google wants yes or absent, Apple wants true/false,
# W3C Feed Validator wants yes/no/clean
rss.write('yes' if age_limit > 0 else 'clean')
rss.write('</itunes:explicit>')
rss.write(eol)
# TODO: <itunes:order> from autonumber (not in .info.json)
# or playlist_index (may not be relevant/sequential for single file)
# or sorted order?
description = entry.get('description')
if description is not None:
rss.write(indent3)
rss.write('<description>')
rss.write(escape(description))
rss.write('</description>')
rss.write(eol)
rss.write(indent2)
rss.write('</item>')
rss.write(eol)
def playlist_to_rss(playlist, rss, base=None, indent=None):
"""
Convert youtube-dl playlist info object to podcast RSS.
Playlist is expected to follow the schema defined in
https://github.com/ytdl-org/youtube-dl/pull/21822
Values which are null or missing will be omitted from RSS output where
possible.
Attempts to comply with guidelines from:
https://help.apple.com/itc/podcasts_connect/#/itcb54353390
https://support.google.com/podcast-publishers/answer/9476656
https://podcasters.spotify.com/terms/Spotify_Podcast_Delivery_Specification_v1.6.pdf
https://validator.w3.org/feed/
"""
if indent is None:
indent1 = ''
indent2 = ''
indent3 = ''
eol = ''
else:
indent1 = indent
indent2 = indent * 2
indent3 = indent * 3
eol = '\n'
json_path = playlist.get(_JSON_PATH_KEY)
rss.write(
'<rss version="2.0"'
+ ' xmlns:atom="http://www.w3.org/2005/Atom"'
+ ' xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd"'
+ '>'
)
rss.write(eol)
rss.write(indent1)
rss.write('<channel>')
rss.write(eol)
title = playlist.get('title')
if title is not None:
rss.write(indent2)
rss.write('<title>')
rss.write(escape(title))
rss.write('</title>')
rss.write(eol)
# Not produced by youtube-dl:
description = playlist.get('description')
if description is not None:
rss.write(indent2)
rss.write('<description>')
rss.write(escape(description))
rss.write('</description>')
rss.write(eol)
uploader = playlist.get('uploader')
if uploader is not None:
rss.write(indent2)
rss.write('<itunes:author>')
rss.write(escape(uploader))
rss.write('</itunes:author>')
rss.write(eol)
webpage_url = playlist.get('webpage_url')
if webpage_url is not None:
rss.write(indent2)
rss.write('<link>')
rss.write(escape(webpage_url))
rss.write('</link>')
rss.write(eol)
upload_date = playlist.get('upload_date')
if upload_date is None:
upload_date = max(
entry.get('upload_date') for entry in playlist['entries'] if entry
)
if upload_date is not None:
rss.write(indent2)
rss.write('<pubDate>')
rss.write(_ymd_to_rfc2822(upload_date))
rss.write('</pubDate>')
rss.write(eol)
# Not produced by youtube-dl:
# https://github.com/ytdl-org/youtube-dl/issues/16130
thumbnail = playlist.get('thumbnail')
if thumbnail is not None:
thumbnail = _resolve_url(thumbnail, json_path, rss.name, base)
rss.write(indent2)
rss.write('<image>')
rss.write(eol)
rss.write(indent3)
rss.write('<url>')
rss.write(escape(thumbnail))
rss.write('</url>')
rss.write(eol)
# "Note, in practice the image <title> and <link> should have the
# same value as the channel's <title> and <link>."
# https://www.rssboard.org/rss-specification#ltimagegtSubelementOfLtchannelgt
if title is not None:
rss.write(indent3)
rss.write('<title>')
rss.write(escape(title))
rss.write('</title>')
rss.write(eol)
if webpage_url is not None:
rss.write(indent3)
rss.write('<link>')
rss.write(escape(webpage_url))
rss.write('</link>')
rss.write(eol)
rss.write(indent2)
rss.write('</image>')
rss.write(eol)
# Apple instructs podcasters to use <itunes:image>, doesn't document
# standardized <image>. Include both.
rss.write(indent2)
rss.write('<itunes:image href=')
rss.write(quoteattr(thumbnail))
rss.write('/>')
rss.write(eol)
age_limits = [entry.get('age_limit') for entry in playlist['entries']]
if age_limits and None not in age_limits:
rss.write(indent2)
rss.write('<itunes:explicit>')
# Note: Spotify wants yes/no/clean for item, yes/clean for channel,
# Google wants yes or absent, Apple wants true/false,
# W3C Feed Validator wants yes/no/clean
rss.write('yes' if max(age_limits) > 0 else 'clean')
rss.write('</itunes:explicit>')
rss.write(eol)
# Provide self link, as recommended
# https://validator.w3.org/feed/docs/warning/MissingAtomSelfLink.html
if base:
rss.write(indent2)
rss.write('<atom:link rel="self" type="application/rss+xml" href=')
rss.write(quoteattr(base))
rss.write('/>')
rss.write(eol)
rss.write(indent2)
rss.write('<generator>')
rss.write(escape(os.path.basename(__file__) + ' ' + __version__))
rss.write('</generator>')
rss.write(eol)
for entry in playlist['entries']:
entry_to_rss(entry, rss, base=base, indent=indent)
rss.write(indent1)
rss.write('</channel>')
rss.write(eol)
rss.write('</rss>\n')
def _load_json(json_path):
"""Load JSON from a file with a given path."""
# Note: Binary so load can detect encoding (as in Section 3 of RFC 4627)
with open(json_path, 'rb') as json_file:
try:
return json.load(json_file)
except Exception as ex:
if sys.version_info[0] >= 3:
ex2 = Exception('Error loading ' + json_path)
exec('raise ex2 from ex') # nosec
else:
ex2 = Exception('Error loading ' + json_path + ': ' + str(ex))
ex2.__cause__ = ex
raise ex2
def entries_to_playlist(entries):
"""Combine youtube-dl entries into a playlist with common metadata."""
# entry playlist metadata keys
keys = {
'playlist_id',
'playlist_title',
'playlist_uploader',
'playlist_uploader_id',
}
# get playlist metadata, if same for all entries
entries_playlist = None
for entry in entries:
entry_playlist = {k: v for k, v in entry.items() if v and k in keys}
if entry_playlist:
if entries_playlist is None:
entries_playlist = entry_playlist
elif entry_playlist != entries_playlist:
# playlist metadata differs between entries
entries_playlist = None
break
if entries_playlist:
# Chop "playlist_" from entry playlist keys for use as playlist keys
playlist = {k[9:]: v for k, v in entries_playlist.items()}
else:
playlist = {}
playlist['_type'] = 'playlist'
playlist['entries'] = entries
return playlist
def _load_info(info_paths):
"""Load youtube-dl JSON info files into a single playlist object."""
entries = []
info_count = 0
last_playlist = None
for info_path in info_paths:
info_count += 1
if info_path == '-':
info = json.load(sys.stdin)
else:
info = _load_json(info_path)
info_entries = info.get('entries')
has_entries = isinstance(info_entries, list)
has_formats = isinstance(info.get('formats'), list)
if has_entries == has_formats:
raise ValueError('Unrecognized JSON in ' + info_path)
if has_formats:
# info for a single video
info[_JSON_PATH_KEY] = info_path
entries.append(info)
else:
# info for a playlist
last_playlist = info
info[_JSON_PATH_KEY] = info_path
for entry in info_entries:
entry[_JSON_PATH_KEY] = info_path
entries.extend(info_entries)
# If the user provided a single playlist, use it as-is
# This lets users easily specify whatever metadata they'd like
if info_count == 1 and last_playlist:
return last_playlist
return entries_to_playlist(entries)
def _parse_indent(indent):
"""Parse indent argument to indent string."""
try:
return ' ' * int(indent)
except ValueError:
return indent
def _parse_args(args, namespace=None):
"""
Parse command-line arguments.
:param args: command-line arguments (usually :py:data:`sys.argv`)
:param namespace: object to take the parsed attributes.
:return: parsed arguments
:rtype: argparse.Namespace
"""
parser = argparse.ArgumentParser(
usage='%(prog)s [options] <JSON file...>',
description=__doc__,
# Use raw formatter to avoid mangling version text
formatter_class=argparse.RawDescriptionHelpFormatter,
)
# Note: Match name of wget -B/--base option with similar purpose
parser.add_argument(
'-B',
'--base',
help='URL from which files will be served, to resolve relative URLs',
)
parser.add_argument(
'-i',
'--indent',
help='XML indent string, or number of spaces to indent',
nargs='?',
type=_parse_indent,
)
parser.add_argument(
'-o',
'--output',
help='Output RSS file.',
)
parser.add_argument(
'-V',
'--version',
action='version',
help='Output version and license information',
version=_VERSION_MESSAGE,
)
parser.add_argument(
'json_files',
nargs='+',
metavar='JSON file...',
help='youtube-dl .info.json files',
)
return parser.parse_args(args, namespace)
def main(*argv):
"""
Entry point for command-line use.
:param argv: command-line arguments (usually :py:data:`sys.argv`)
:return: exit code
:rtype: int
"""
args = _parse_args(argv[1:])
if not args.base or not urlparse(args.base).scheme:
# Note: Not just a spec compliance issue. Affects real aggregators:
# https://github.com/AntennaPod/AntennaPod/issues/2880
sys.stderr.write(
'Warning: URLs in RSS 2.0 must start with a URI scheme per:\n'
'- https://www.rssboard.org/rss-specification#comments\n'
'- https://cyber.harvard.edu/rss/rss.html#comments\n'
'Use -B,--base to specify an absolute URL at which the RSS will '
'be served.\n'
)
# Note: Could use default locale.getpreferredencoding(). Many users would
# "prefer" ISO-8859-1. UTF-8 is a safer default to support more characters
# and for wider podcast distributor/aggregator support.
# (e.g. Apple instructs podcasters to use UTF-8.)
encoding = 'UTF-8'
if args.output:
writer = io.open(args.output, 'w', encoding=encoding)
elif sys.stdout.isatty():
# TTY unlikely to interpret XML declaration. Use Python's encoding.
if sys.stdout.encoding is not None:
encoding = sys.stdout.encoding
writer = sys.stdout
else:
import locale
encoding = locale.getpreferredencoding()
writer = codecs.getwriter(encoding)(sys.stdout)
elif sys.stdout.encoding and sys.stdout.encoding.upper() == encoding:
writer = sys.stdout
elif hasattr(sys.stdout, 'buffer'):
writer = codecs.getwriter(encoding)(sys.stdout.buffer)
else:
writer = codecs.getwriter(encoding)(sys.stdout)
try:
writer.write('<?xml version="1.0" encoding=')
writer.write(quoteattr(encoding))
writer.write('?>')
if args.indent is not None:
writer.write('\n')
playlist_to_rss(
_load_info(args.json_files),
writer,
base=args.base,
indent=args.indent,
)
except UnicodeEncodeError:
# FIXME: Should use a proper XML writer which would represent
# characters outside the file encoding using XML entities.
traceback.print_exc()
sys.stderr.write(
'Consider specifying a different encoding in PYTHONIOENCODING.\n'
)
return 1
finally:
if args.output:
writer.close()
return 0
if __name__ == '__main__':
sys.exit(main(*sys.argv))
| 2.515625 | 3 |
u24_lymphocyte/third_party/treeano/nodes/tests/costs_test.py | ALSM-PhD/quip_classification | 45 | 12793267 | import nose.tools as nt
import numpy as np
import theano
import theano.tensor as T
import treeano
import treeano.nodes as tn
fX = theano.config.floatX
def test_aggregator_node_serialization():
tn.check_serialization(tn.AggregatorNode("a"))
def test_elementwise_cost_node_serialization():
tn.check_serialization(tn.ElementwiseCostNode(
"foo",
{"pred": tn.IdentityNode("foo"),
"target": tn.IdentityNode("bar")}))
def test_total_cost_node_serialization():
tn.check_serialization(tn.TotalCostNode(
"foo",
{"pred": tn.IdentityNode("foo"),
"target": tn.IdentityNode("bar")}))
def test_auxiliary_cost_node_serialization():
tn.check_serialization(tn.AuxiliaryCostNode(
"foo",
{"target": tn.IdentityNode("bar")}))
def test_total_cost_node():
network = tn.TotalCostNode(
"cost",
{"pred": tn.InputNode("x", shape=(3, 4, 5)),
"target": tn.InputNode("y", shape=(3, 4, 5))},
cost_function=treeano.utils.squared_error).network()
fn = network.function(["x", "y"], ["cost"])
x = np.random.rand(3, 4, 5).astype(fX)
y = np.random.rand(3, 4, 5).astype(fX)
np.testing.assert_allclose(fn(x, y)[0],
((x - y) ** 2).mean(),
rtol=1e-5)
np.testing.assert_allclose(fn(x, x)[0],
0)
np.testing.assert_allclose(fn(y, y)[0],
0)
def test_total_cost_node_with_weight():
network = tn.TotalCostNode(
"cost",
{"pred": tn.InputNode("x", shape=(3, 4, 5)),
"weight": tn.InputNode("w", shape=(3, 4, 5)),
"target": tn.InputNode("y", shape=(3, 4, 5))},
cost_function=treeano.utils.squared_error).network()
fn = network.function(["x", "y", "w"], ["cost"])
x = np.random.rand(3, 4, 5).astype(fX)
w = np.random.rand(3, 4, 5).astype(fX)
y = np.random.rand(3, 4, 5).astype(fX)
np.testing.assert_allclose(fn(x, y, w)[0],
(((x - y) ** 2) * w).mean(),
rtol=1e-5)
np.testing.assert_allclose(fn(x, x, w)[0],
0)
np.testing.assert_allclose(fn(y, y, w)[0],
0)
def test_auxiliary_cost_node():
network = tn.HyperparameterNode(
"hp",
tn.SequentialNode(
"seq",
[tn.InputNode("x", shape=(3, 4, 5)),
tn.AuxiliaryCostNode(
"cost1",
{"target": tn.InputNode("y1", shape=(3, 4, 5))}),
tn.AddConstantNode("a1", value=2),
tn.AuxiliaryCostNode(
"cost2",
{"target": tn.InputNode("y2", shape=(3, 4, 5))}),
tn.MultiplyConstantNode("m1", value=2),
tn.AuxiliaryCostNode(
"cost3",
{"target": tn.InputNode("y3", shape=(3, 4, 5))}),
tn.ConstantNode("const", value=0),
tn.InputElementwiseSumNode("cost")]
),
cost_reference="cost",
cost_function=treeano.utils.squared_error,
).network()
fn = network.function(["x", "y1", "y2", "y3"], ["cost"])
x = np.random.rand(3, 4, 5).astype(fX)
ys = [np.random.rand(3, 4, 5).astype(fX) for _ in range(3)]
def mse(x, y):
return ((x - y) ** 2).mean()
expected_output = (mse(x, ys[0])
+ mse(x + 2, ys[1])
+ mse(2 * (x + 2), ys[2]))
np.testing.assert_allclose(fn(x, *ys)[0],
expected_output,
rtol=1e-5)
| 2.125 | 2 |
stubs.min/System/Windows/Forms/__init___parts/ListBindingHelper.py | ricardyn/ironpython-stubs | 1 | 12793268 | class ListBindingHelper(object):
""" Provides functionality to discover a bindable list and the properties of the items contained in the list when they differ from the public properties of the object to which they bind. """
@staticmethod
def GetList(*__args):
"""
GetList(dataSource: object,dataMember: str) -> object
Returns an object,typically a list,from the evaluation of a specified data
source and optional data member.
dataSource: The data source from which to find the list.
dataMember: The name of the data source property that contains the list. This can be null.
Returns: An System.Object representing the underlying list if it was found; otherwise,
dataSource.
GetList(list: object) -> object
Returns a list associated with the specified data source.
list: The data source to examine for its underlying list.
Returns: An System.Object representing the underlying list if it exists; otherwise,the
original data source specified by list.
"""
pass
@staticmethod
def GetListItemProperties(*__args):
"""
GetListItemProperties(dataSource: object,dataMember: str,listAccessors: Array[PropertyDescriptor]) -> PropertyDescriptorCollection
Returns the System.ComponentModel.PropertyDescriptorCollection that describes
the properties of an item type contained in the specified data member of a data
source. Uses the specified System.ComponentModel.PropertyDescriptor array to
indicate which properties to examine.
dataSource: The data source to be examined for property information.
dataMember: The optional data member to be examined for property information. This can be
null.
listAccessors: The System.ComponentModel.PropertyDescriptor array describing which properties
of the data member to examine. This can be null.
Returns: The System.ComponentModel.PropertyDescriptorCollection describing the
properties of an item type contained in a collection property of the specified
data source.
GetListItemProperties(list: object,listAccessors: Array[PropertyDescriptor]) -> PropertyDescriptorCollection
Returns the System.ComponentModel.PropertyDescriptorCollection that describes
the properties of an item type contained in a collection property of a data
source. Uses the specified System.ComponentModel.PropertyDescriptor array to
indicate which properties to examine.
list: The data source to be examined for property information.
listAccessors: The System.ComponentModel.PropertyDescriptor array describing which properties
of the data source to examine. This can be null.
Returns: The System.ComponentModel.PropertyDescriptorCollection describing the
properties of the item type contained in a collection property of the data
source.
GetListItemProperties(list: object) -> PropertyDescriptorCollection
Returns the System.ComponentModel.PropertyDescriptorCollection that describes
the properties of an item type contained in a specified data source,or
properties of the specified data source.
list: The data source to examine for property information.
Returns: The System.ComponentModel.PropertyDescriptorCollection containing the
properties of the items contained in list,or properties of list.
"""
pass
@staticmethod
def GetListItemType(*__args):
"""
GetListItemType(dataSource: object,dataMember: str) -> Type
Returns the data type of the items in the specified data source.
dataSource: The data source to examine for items.
dataMember: The optional name of the property on the data source that is to be used as the
data member. This can be null.
Returns: For complex data binding,the System.Type of the items represented by the
dataMember in the data source; otherwise,the System.Type of the item in the
list itself.
GetListItemType(list: object) -> Type
Returns the data type of the items in the specified list.
list: The list to be examined for type information.
Returns: The System.Type of the items contained in the list.
"""
pass
@staticmethod
def GetListName(list,listAccessors):
"""
GetListName(list: object,listAccessors: Array[PropertyDescriptor]) -> str
Returns the name of an underlying list,given a data source and optional
System.ComponentModel.PropertyDescriptor array.
list: The data source to examine for the list name.
listAccessors: An array of System.ComponentModel.PropertyDescriptor objects to find in the
data source. This can be null.
        Returns: The name of the list in the data source,as described by listAccessors,or the
name of the data source type.
"""
pass
__all__=[
'GetList',
'GetListItemProperties',
'GetListItemType',
'GetListName',
]
| 2.921875 | 3 |
test/test_pro.py | coinplus-sa/coinplus-solo | 1 | 12793269 | <reponame>coinplus-sa/coinplus-solo<filename>test/test_pro.py
import unittest
from coinplus_solo_redeem.pro import secret2_reconstruct_base58, secret1_reconstruct_base58
class TestPro(unittest.TestCase):
"""test of the bitcoin conversion from private key to wif"""
def setUp(self):
self.test_vector = [((1, "977TZTQjLNUP1zUn9A5CoPtZ4mAU", "RJTu5AYkaycyxF"),
(2, "GbmQxU1SMqnzpYRHC2XBgUfQs8cA", "okSoTKKXdQRDnd"),
(3, "Q6RNMUc9PK7cd6MnEtyAZZSGfW3r", "CCRhqU6JfqDTbQ"),
("<KEY>", "2rUzh1myYYpk7s")),
((1, "<KEY>", "<KEY>"),
(2, "<KEY>", "<KEY>"),
(3 , "<KEY>", "<KEY>"),
("<KEY>", "<KEY>")),
((1, "<KEY>", "<KEY>"),
(2, "<KEY>", "<KEY>"),
(3, "<KEY>", "ADH2mU73485TeB"),
("ChBXYszLyDjbSzynCKr1puEUB4mh", "gHqichpTAwWjXs")),
]
def test_pro_vector_valid(self):
        for card1, card2, card3, result_expected in self.test_vector:
            s_1_12 = secret1_reconstruct_base58([(card1[0], card1[1]), (card2[0], card2[1])])
            s_1_23 = secret1_reconstruct_base58([(card2[0], card2[1]), (card3[0], card3[1])])
            s_1_13 = secret1_reconstruct_base58([(card1[0], card1[1]), (card3[0], card3[1])])
            s_2_12 = secret2_reconstruct_base58([(card1[0], card1[2]), (card2[0], card2[2])])
            s_2_23 = secret2_reconstruct_base58([(card2[0], card2[2]), (card3[0], card3[2])])
            s_2_13 = secret2_reconstruct_base58([(card1[0], card1[2]), (card3[0], card3[2])])
            self.assertEqual(s_1_12, result_expected[0])
            self.assertEqual(s_1_23, result_expected[0])
            self.assertEqual(s_1_13, result_expected[0])
            self.assertEqual(s_2_12, result_expected[1])
            self.assertEqual(s_2_23, result_expected[1])
            self.assertEqual(s_2_13, result_expected[1])
| 2.40625 | 2 |
pyblazing/pyblazing/apiv2/context.py | Christian8491/blazingsql | 0 | 12793270 | # NOTE WARNING NEVER CHANGE THIS FIRST LINE!!!! NEVER EVER
import cudf
from collections import OrderedDict
from enum import Enum
from urllib.parse import urlparse
from threading import Lock
from weakref import ref
from pyblazing.apiv2.filesystem import FileSystem
from pyblazing.apiv2 import DataType
from .hive import *
import time
import datetime
import socket
import errno
import subprocess
import os
import re
import pandas
import numpy as np
import pyarrow
from urllib.parse import ParseResult
from pathlib import PurePath
import cio
import pyblazing
import dask_cudf
import dask
import jpype
import dask.distributed
import netifaces as ni
import random
jpype.addClassPath(
os.path.join(
os.getenv("CONDA_PREFIX"),
'lib/blazingsql-algebra.jar'))
jpype.addClassPath(
os.path.join(
os.getenv("CONDA_PREFIX"),
'lib/blazingsql-algebra-core.jar'))
jpype.startJVM(jpype.getDefaultJVMPath(), '-ea', convertStrings=False)
ArrayClass = jpype.JClass('java.util.ArrayList')
ColumnTypeClass = jpype.JClass(
'com.blazingdb.calcite.catalog.domain.CatalogColumnDataType')
dataType = ColumnTypeClass.fromString("GDF_INT8")
ColumnClass = jpype.JClass(
'com.blazingdb.calcite.catalog.domain.CatalogColumnImpl')
TableClass = jpype.JClass(
'com.blazingdb.calcite.catalog.domain.CatalogTableImpl')
DatabaseClass = jpype.JClass(
'com.blazingdb.calcite.catalog.domain.CatalogDatabaseImpl')
BlazingSchemaClass = jpype.JClass('com.blazingdb.calcite.schema.BlazingSchema')
RelationalAlgebraGeneratorClass = jpype.JClass(
'com.blazingdb.calcite.application.RelationalAlgebraGenerator')
def get_np_dtype_to_gdf_dtype_str(dtype):
dtypes = {
np.dtype('float64'): 'GDF_FLOAT64',
np.dtype('float32'): 'GDF_FLOAT32',
np.dtype('int64'): 'GDF_INT64',
np.dtype('int32'): 'GDF_INT32',
np.dtype('int16'): 'GDF_INT16',
np.dtype('int8'): 'GDF_INT8',
np.dtype('bool_'): 'GDF_BOOL8',
np.dtype('datetime64[s]'): 'GDF_DATE64',
np.dtype('datetime64[ms]'): 'GDF_DATE64',
np.dtype('datetime64[ns]'): 'GDF_TIMESTAMP',
np.dtype('datetime64[us]'): 'GDF_TIMESTAMP',
np.dtype('datetime64'): 'GDF_DATE64',
np.dtype('object_'): 'GDF_STRING',
np.dtype('str_'): 'GDF_STRING',
np.dtype('<M8[s]'): 'GDF_DATE64',
np.dtype('<M8[ms]'): 'GDF_DATE64',
np.dtype('<M8[ns]'): 'GDF_TIMESTAMP',
np.dtype('<M8[us]'): 'GDF_TIMESTAMP'
}
ret = dtypes[np.dtype(dtype)]
return ret
def checkSocket(socketNum):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
socket_free = False
try:
s.bind(("127.0.0.1", socketNum))
socket_free = True
except socket.error as e:
if e.errno == errno.EADDRINUSE:
socket_free = False
else:
# something else raised the socket.error exception
print("ERROR: Something happened when checking socket " + str(socketNum))
#print(e)
s.close()
return socket_free
def initializeBlazing(ralId=0, networkInterface='lo', singleNode=False,
allocator="managed", pool=True,initial_pool_size=None, enable_logging=False):
#print(networkInterface)
workerIp = ni.ifaddresses(networkInterface)[ni.AF_INET][0]['addr']
ralCommunicationPort = random.randint(10000, 32000) + ralId
while checkSocket(ralCommunicationPort) == False:
ralCommunicationPort = random.randint(10000, 32000) + ralId
cudf.set_allocator(allocator=allocator,
pool=pool,
initial_pool_size=initial_pool_size,# Default is 1/2 total GPU memory
enable_logging=enable_logging)
cio.initializeCaller(
ralId,
0,
networkInterface.encode(),
workerIp.encode(),
ralCommunicationPort,
singleNode)
cwd = os.getcwd()
return ralCommunicationPort, workerIp, cwd
def getNodePartitions(df, client):
df = df.persist()
workers = client.scheduler_info()['workers']
connectionToId = {}
for worker in workers:
connectionToId[worker] = workers[worker]['name']
dask.distributed.wait(df)
#print(client.who_has(df))
worker_part = client.who_has(df)
worker_partitions = {}
for key in worker_part:
worker = worker_part[key][0]
partition = int(key[key.find(",") + 2:(len(key) - 1)])
if connectionToId[worker] not in worker_partitions:
worker_partitions[connectionToId[worker]] = []
worker_partitions[connectionToId[worker]].append(partition)
#print("worker partitions")
#print(worker_partitions)
return worker_partitions
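# getNodePartitions returns a mapping of the form {worker_name: [partition_index, ...]},
# i.e. which dask_cudf partitions are held by each worker (shape inferred from the code above).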
def collectPartitionsRunQuery(
masterIndex,
nodes,
tables,
fileTypes,
ctxToken,
algebra,
accessToken):
import dask.distributed
worker_id = dask.distributed.get_worker().name
for table_name in tables:
if(isinstance(tables[table_name].input, dask_cudf.core.DataFrame)):
partitions = tables[table_name].get_partitions(worker_id)
if (len(partitions) == 0):
tables[table_name].input = tables[table_name].input.get_partition(
0).head(0)
elif (len(partitions) == 1):
tables[table_name].input = tables[table_name].input.get_partition(
partitions[0]).compute(scheduler='threads')
else:
table_partitions = []
for partition in partitions:
table_partitions.append(
tables[table_name].input.get_partition(partition).compute())
tables[table_name].input = cudf.concat(table_partitions)
return cio.runQueryCaller(
masterIndex,
nodes,
tables,
fileTypes,
ctxToken,
algebra,
accessToken)
# returns a map of table names to the indices of the columns needed. If there are more than one table scan for one table, it merged the needed columns
# if the column list is empty, it means we want all columns
def mergeTableScans(tableScanInfo):
table_names = tableScanInfo.keys()
table_columns = {}
for table_name in table_names:
table_columns[table_name] = []
for table_name in table_names:
for index in range(0, len(tableScanInfo[table_name]['table_columns'])):
if len(tableScanInfo[table_name]['table_columns'][index]) > 0:
table_columns[table_name] = list(set(table_columns[table_name] + tableScanInfo[table_name]['table_columns'][index]))
table_columns[table_name].sort()
else: # if the column list is empty, it means we want all columns
table_columns[table_name] = []
break
return table_columns
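# Illustrative example (not from the original source): if two scans of table 't' project
# columns [0, 2] and [1, 2], the merged result is {'t': [0, 1, 2]}; if any scan projects
# the empty list (meaning "all columns"), the merged list for that table collapses to [].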
def modifyAlegebraAndTablesForArrowBasedOnColumnUsage(algebra, tableScanInfo, originalTables, table_columns_in_use):
newTables={}
for table_name in tableScanInfo:
if originalTables[table_name].fileType == DataType.ARROW:
newTables[table_name] = originalTables[table_name].filterAndRemapColumns(table_columns_in_use[table_name])
for index in range(0,len(tableScanInfo[table_name]['table_scans'])):
orig_scan = tableScanInfo[table_name]['table_scans'][index]
orig_col_indexes = tableScanInfo[table_name]['table_columns'][index]
table_columns_we_want = table_columns_in_use[table_name]
new_col_indexes = []
if len(table_columns_we_want) > 0:
if orig_col_indexes == table_columns_we_want:
new_col_indexes = list(range(0, len(orig_col_indexes)))
else:
for new_index, merged_col_index in enumerate(table_columns_we_want):
if merged_col_index in orig_col_indexes:
new_col_indexes.append(new_index)
orig_project = 'projects=[' + str(orig_col_indexes) + ']'
new_project = 'projects=[' + str(new_col_indexes) + ']'
new_scan = orig_scan.replace(orig_project, new_project)
algebra = algebra.replace(orig_scan, new_scan)
else:
newTables[table_name] = originalTables[table_name]
return newTables, algebra
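# Illustrative example: an ARROW table scan containing 'projects=[[2, 5]]' whose merged
# in-use columns are [2, 4, 5] is rewritten to 'projects=[[0, 2]]', remapping the indices
# into the filtered column space produced by filterAndRemapColumns.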
class BlazingTable(object):
def __init__(
self,
input,
fileType,
files=None,
datasource=[],
calcite_to_file_indices=None,
num_row_groups=None,
args={},
convert_gdf_to_dask=False,
convert_gdf_to_dask_partitions=1,
client=None,
uri_values=[],
in_file=[],
force_conversion=False,
metadata=None):
self.fileType = fileType
if fileType == DataType.ARROW:
if force_conversion:
#converts to cudf for querying
self.input = cudf.DataFrame.from_arrow(input)
self.fileType = DataType.CUDF
else:
self.input = cudf.DataFrame.from_arrow(input.schema.empty_table())
self.arrow_table = input
else:
self.input = input
self.calcite_to_file_indices = calcite_to_file_indices
self.files = files
self.datasource = datasource
# TODO, cc @percy, @cristian!
# num_row_groups: this property is computed in create_table.parse_schema, but not used in run_query.
self.num_row_groups = num_row_groups
self.args = args
        if fileType == DataType.CUDF or fileType == DataType.DASK_CUDF:
if(convert_gdf_to_dask and isinstance(self.input, cudf.DataFrame)):
self.input = dask_cudf.from_cudf(
self.input, npartitions=convert_gdf_to_dask_partitions)
if(isinstance(self.input, dask_cudf.core.DataFrame)):
self.dask_mapping = getNodePartitions(self.input, client)
self.uri_values = uri_values
self.in_file = in_file
# slices, this is computed in create table, and then reused in sql method
self.slices = None
# metadata, this is computed in create table, after call get_metadata
self.metadata = metadata
# row_groups_ids, vector<vector<int>> one vector of row_groups per file
        self.row_groups_ids = []
# a pair of values with the startIndex and batchSize info for each slice
self.offset = (0,0)
def has_metadata(self) :
if isinstance(self.metadata, dask_cudf.core.DataFrame):
return not self.metadata.compute().empty
if self.metadata is not None :
return not self.metadata.empty
return False
def filterAndRemapColumns(self,tableColumns):
#only used for arrow
if len(tableColumns) == 0: # len = 0 means all columns
return BlazingTable(self.arrow_table,DataType.ARROW,force_conversion=True)
new_table = self.arrow_table
columns = []
names = []
i = 0
for column in new_table.itercolumns():
for index in tableColumns:
if i == index:
names.append(self.arrow_table.field(i).name)
columns.append(column)
i = i + 1
new_table = pyarrow.Table.from_arrays(columns,names=names)
new_table = BlazingTable(new_table,DataType.ARROW,force_conversion=True)
return new_table
def convertForQuery(self):
return BlazingTable(self.arrow_table,DataType.ARROW,force_conversion=True)
# until this is implemented we cant do self join with arrow tables
# def unionColumns(self,otherTable):
def getSlices(self, numSlices):
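        # Split self.files (and the matching uri_values / metadata / row-group counts) into
        # numSlices roughly equal batches, one BlazingTable per node; each slice records its
        # (startIndex, batchSize) window in .offset.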
nodeFilesList = []
if self.files is None:
for i in range(0, numSlices):
nodeFilesList.append(BlazingTable(self.input, self.fileType))
return nodeFilesList
remaining = len(self.files)
startIndex = 0
for i in range(0, numSlices):
batchSize = int(remaining / (numSlices - i))
# #print(batchSize)
# #print(startIndex)
tempFiles = self.files[startIndex: startIndex + batchSize]
uri_values = self.uri_values[startIndex: startIndex + batchSize]
if isinstance(self.metadata, cudf.DataFrame) or self.metadata is None:
slice_metadata = self.metadata
else:
slice_metadata = self.metadata.get_partition(i).compute()
if self.num_row_groups is not None:
bt = BlazingTable(self.input,
self.fileType,
files=tempFiles,
calcite_to_file_indices=self.calcite_to_file_indices,
num_row_groups=self.num_row_groups[startIndex: startIndex + batchSize],
uri_values=uri_values,
args=self.args,
metadata=slice_metadata)
bt.offset = (startIndex, batchSize)
nodeFilesList.append(bt)
else:
bt = BlazingTable(
self.input,
self.fileType,
files=tempFiles,
calcite_to_file_indices=self.calcite_to_file_indices,
uri_values=uri_values,
args=self.args,
metadata=slice_metadata)
bt.offset = (startIndex, batchSize)
nodeFilesList.append(bt)
startIndex = startIndex + batchSize
remaining = remaining - batchSize
return nodeFilesList
def get_partitions(self, worker):
return self.dask_mapping[worker]
class BlazingContext(object):
def __init__(self,
dask_client=None, # if None, it will run in single node
network_interface=None,
allocator="managed", # options are "default" or "managed". Where "managed" uses Unified Virtual Memory (UVM) and may use system memory if GPU memory runs out
pool=True, # if True, it will allocate a memory pool in the beginning. This can greatly improve performance
initial_pool_size=None, # Initial size of memory pool in bytes (if pool=True). If None, it will default to using half of the GPU memory
enable_logging=False): # If set to True the memory allocator logging will be enabled, but can negatively impact perforamance
"""
:param connection: BlazingSQL cluster URL to connect to
(e.g. 172.16.17.32:8889, blazingsql-gateway:7887).
"""
self.lock = Lock()
self.finalizeCaller = ref(cio.finalizeCaller)
self.dask_client = dask_client
self.nodes = []
self.node_cwds = []
self.finalizeCaller = lambda: NotImplemented
if(dask_client is not None):
if network_interface is None:
network_interface = 'eth0'
worker_list = []
dask_futures = []
masterIndex = 0
i = 0
##print(network_interface)
for worker in list(self.dask_client.scheduler_info()["workers"]):
dask_futures.append(
self.dask_client.submit(
initializeBlazing,
ralId=i,
networkInterface=network_interface,
singleNode=False,
allocator=allocator,
pool=pool,
initial_pool_size=initial_pool_size,
enable_logging=enable_logging,
workers=[worker]))
worker_list.append(worker)
i = i + 1
i = 0
for connection in dask_futures:
ralPort, ralIp, cwd = connection.result()
node = {}
node['worker'] = worker_list[i]
node['ip'] = ralIp
node['communication_port'] = ralPort
#print("ralport is")
#print(ralPort)
self.nodes.append(node)
self.node_cwds.append(cwd)
i = i + 1
else:
ralPort, ralIp, cwd = initializeBlazing(
ralId=0, networkInterface='lo', singleNode=True,
allocator=allocator, pool=pool, initial_pool_size=initial_pool_size, enable_logging=enable_logging)
node = {}
node['ip'] = ralIp
node['communication_port'] = ralPort
self.nodes.append(node)
self.node_cwds.append(cwd)
# NOTE ("//"+) is a neat trick to handle ip:port cases
#internal_api.SetupOrchestratorConnection(orchestrator_host_ip, orchestrator_port)
self.fs = FileSystem()
self.db = DatabaseClass("main")
self.schema = BlazingSchemaClass(self.db)
self.generator = RelationalAlgebraGeneratorClass(self.schema)
self.tables = {}
self.logs_initialized = False
# waitForPingSuccess(self.client)
print("BlazingContext ready")
def ready(self, wait=False):
if wait:
waitForPingSuccess(self.client)
return True
else:
return self.client.ping()
def __del__(self):
self.finalizeCaller()
def __repr__(self):
return "BlazingContext('%s')" % (self.connection)
def __str__(self):
return self.connection
# BEGIN FileSystem interface
def localfs(self, prefix, **kwargs):
return self.fs.localfs(self.dask_client, prefix, **kwargs)
# Use result, error_msg = hdfs(args) where result can be True|False
def hdfs(self, prefix, **kwargs):
return self.fs.hdfs(self.dask_client, prefix, **kwargs)
def s3(self, prefix, **kwargs):
return self.fs.s3(self.dask_client, prefix, **kwargs)
def gs(self, prefix, **kwargs):
return self.fs.gs(self.dask_client, prefix, **kwargs)
def show_filesystems(self):
print(self.fs)
# END FileSystem interface
def _to_url(self, str_input):
url = urlparse(str_input)
return url
def _to_path(self, url):
path = PurePath(url.path)
return path
# BEGIN SQL interface
def explain(self, sql):
return str(self.generator.getRelationalAlgebraString(sql))
def add_remove_table(self, tableName, addTable, table=None):
self.lock.acquire()
try:
if(addTable):
self.db.removeTable(tableName)
self.tables[tableName] = table
arr = ArrayClass()
order = 0
for column in table.input.columns:
if(isinstance(table.input, dask_cudf.core.DataFrame)):
dataframe_column = table.input.head(0)._data[column]
else:
dataframe_column = table.input._data[column]
data_sz = len(dataframe_column)
dtype = get_np_dtype_to_gdf_dtype_str(
dataframe_column.dtype)
dataType = ColumnTypeClass.fromString(dtype)
column = ColumnClass(column, dataType, order)
arr.add(column)
order = order + 1
tableJava = TableClass(tableName, self.db, arr)
self.db.addTable(tableJava)
self.schema = BlazingSchemaClass(self.db)
self.generator = RelationalAlgebraGeneratorClass(self.schema)
else:
self.db.removeTable(tableName)
self.schema = BlazingSchemaClass(self.db)
self.generator = RelationalAlgebraGeneratorClass(self.schema)
del self.tables[tableName]
finally:
self.lock.release()
def create_table(self, table_name, input, **kwargs):
table = None
extra_columns = []
uri_values = []
file_format_hint = kwargs.get(
'file_format', 'undefined') # See datasource.file_format
extra_kwargs = {}
in_file = []
if(isinstance(input, hive.Cursor)):
hive_table_name = kwargs.get('hive_table_name', table_name)
folder_list, uri_values, file_format_hint, extra_kwargs, extra_columns, in_file = get_hive_table(
input, hive_table_name)
kwargs.update(extra_kwargs)
input = folder_list
if isinstance(input, str):
input = [input, ]
if isinstance(input, pandas.DataFrame):
input = cudf.DataFrame.from_pandas(input)
if isinstance(input, pyarrow.Table):
if (self.dask_client is not None):
input = cudf.DataFrame.from_arrow(input)
else:
table = BlazingTable(
input,
DataType.ARROW)
if isinstance(input, cudf.DataFrame):
if (self.dask_client is not None):
table = BlazingTable(
input,
DataType.DASK_CUDF,
convert_gdf_to_dask=True,
convert_gdf_to_dask_partitions=len(
self.nodes),
client=self.dask_client)
else:
table = BlazingTable(input, DataType.CUDF)
elif isinstance(input, list):
parsedSchema = self._parseSchema(
input, file_format_hint, kwargs, extra_columns)
file_type = parsedSchema['file_type']
table = BlazingTable(
parsedSchema['columns'],
file_type,
files=parsedSchema['files'],
datasource=parsedSchema['datasource'],
calcite_to_file_indices=parsedSchema['calcite_to_file_indices'],
num_row_groups=parsedSchema['num_row_groups'],
args=parsedSchema['args'],
uri_values=uri_values,
in_file=in_file)
table.slices = table.getSlices(len(self.nodes))
if parsedSchema['file_type'] == DataType.PARQUET :
parsedMetadata = self._parseMetadata(input, file_format_hint, table.slices, parsedSchema, kwargs, extra_columns)
if isinstance(parsedMetadata, cudf.DataFrame):
table.metadata = parsedMetadata
else:
table.metadata = parsedMetadata
elif isinstance(input, dask_cudf.core.DataFrame):
table = BlazingTable(
input,
DataType.DASK_CUDF,
client=self.dask_client)
if table is not None:
self.add_remove_table(table_name, True, table)
return table
def drop_table(self, table_name):
self.add_remove_table(table_name, False)
def _parseSchema(self, input, file_format_hint, kwargs, extra_columns):
if self.dask_client:
worker = tuple(self.dask_client.scheduler_info()['workers'])[0]
connection = self.dask_client.submit(
cio.parseSchemaCaller,
input,
file_format_hint,
kwargs,
extra_columns,
workers=[worker])
return connection.result()
else:
return cio.parseSchemaCaller(
input, file_format_hint, kwargs, extra_columns)
def _parseMetadata(self, input, file_format_hint, currentTableNodes, schema, kwargs, extra_columns):
if self.dask_client:
dask_futures = []
workers = tuple(self.dask_client.scheduler_info()['workers'])
worker_id = 0
for worker in workers:
file_subset = [ file.decode() for file in currentTableNodes[worker_id].files]
connection = self.dask_client.submit(
cio.parseMetadataCaller,
file_subset,
currentTableNodes[worker_id].offset,
schema,
file_format_hint,
kwargs,
extra_columns,
workers=[worker])
dask_futures.append(connection)
worker_id += 1
return dask.dataframe.from_delayed(dask_futures)
else:
return cio.parseMetadataCaller(
input, currentTableNodes[0].offset, schema, file_format_hint, kwargs, extra_columns)
def _optimize_with_skip_data(self, masterIndex, table_name, table_files, nodeTableList, scan_table_query, fileTypes):
if self.dask_client is None:
current_table = nodeTableList[0][table_name]
table_tuple = (table_name, current_table)
file_indices_and_rowgroup_indices = cio.runSkipDataCaller(masterIndex, self.nodes, table_tuple, fileTypes, 0, scan_table_query, 0)
if not file_indices_and_rowgroup_indices.empty:
file_and_rowgroup_indices = file_indices_and_rowgroup_indices.to_pandas()
files = file_and_rowgroup_indices['file_handle_index'].values.tolist()
grouped = file_and_rowgroup_indices.groupby('file_handle_index')
actual_files = []
current_table.row_groups_ids = []
for group_id in grouped.groups:
row_indices = grouped.groups[group_id].values.tolist()
actual_files.append(table_files[group_id])
row_groups_col = file_and_rowgroup_indices['row_group_index'].values.tolist()
row_group_ids = [row_groups_col[i] for i in row_indices]
current_table.row_groups_ids.append(row_group_ids)
current_table.files = actual_files
else:
dask_futures = []
i = 0
for node in self.nodes:
worker = node['worker']
current_table = nodeTableList[i][table_name]
table_tuple = (table_name, current_table)
dask_futures.append(
self.dask_client.submit(
cio.runSkipDataCaller,
masterIndex, self.nodes, table_tuple, fileTypes, 0, scan_table_query, 0,
workers=[worker]))
i = i + 1
result = dask.dataframe.from_delayed(dask_futures)
for index in range(len(self.nodes)):
file_indices_and_rowgroup_indices = result.get_partition(index).compute()
if file_indices_and_rowgroup_indices.empty :
continue
file_and_rowgroup_indices = file_indices_and_rowgroup_indices.to_pandas()
files = file_and_rowgroup_indices['file_handle_index'].values.tolist()
grouped = file_and_rowgroup_indices.groupby('file_handle_index')
actual_files = []
current_table.row_groups_ids = []
for group_id in grouped.groups:
row_indices = grouped.groups[group_id].values.tolist()
actual_files.append(table_files[group_id])
row_groups_col = file_and_rowgroup_indices['row_group_index'].values.tolist()
row_group_ids = [row_groups_col[i] for i in row_indices]
current_table.row_groups_ids.append(row_group_ids)
current_table.files = actual_files
def sql(self, sql, table_list=[], algebra=None):
# TODO: remove hardcoding
masterIndex = 0
nodeTableList = [{} for _ in range(len(self.nodes))]
fileTypes = []
if (algebra is None):
algebra = self.explain(sql)
if self.dask_client is None:
relational_algebra_steps = cio.getTableScanInfoCaller(algebra)
else:
worker = tuple(self.dask_client.scheduler_info()['workers'])[0]
connection = self.dask_client.submit(
cio.getTableScanInfoCaller,
algebra,
workers=[worker])
relational_algebra_steps = connection.result()
table_columns = mergeTableScans(relational_algebra_steps)
new_tables, algebra = modifyAlegebraAndTablesForArrowBasedOnColumnUsage(algebra, relational_algebra_steps,self.tables, table_columns)
for table in new_tables:
fileTypes.append(new_tables[table].fileType)
ftype = new_tables[table].fileType
if(ftype == DataType.PARQUET or ftype == DataType.ORC or ftype == DataType.JSON or ftype == DataType.CSV):
currentTableNodes = new_tables[table].getSlices(len(self.nodes))
elif(new_tables[table].fileType == DataType.DASK_CUDF):
currentTableNodes = []
for node in self.nodes:
currentTableNodes.append(new_tables[table])
elif(new_tables[table].fileType == DataType.CUDF or new_tables[table].fileType == DataType.ARROW):
currentTableNodes = []
for node in self.nodes:
currentTableNodes.append(new_tables[table])
j = 0
for nodeList in nodeTableList:
nodeList[table] = currentTableNodes[j]
j = j + 1
if new_tables[table].has_metadata():
scan_table_query = relational_algebra_steps[table]['table_scans'][0]
self._optimize_with_skip_data(masterIndex, table, new_tables[table].files, nodeTableList, scan_table_query, fileTypes)
ctxToken = random.randint(0, 64000)
accessToken = 0
if (len(table_list) > 0):
print("NOTE: You no longer need to send a table list to the .sql() funtion")
if self.dask_client is None:
result = cio.runQueryCaller(
masterIndex,
self.nodes,
nodeTableList[0],
fileTypes,
ctxToken,
algebra,
accessToken)
else:
dask_futures = []
i = 0
for node in self.nodes:
worker = node['worker']
dask_futures.append(
self.dask_client.submit(
collectPartitionsRunQuery,
masterIndex,
self.nodes,
nodeTableList[i],
fileTypes,
ctxToken,
algebra,
accessToken,
workers=[worker]))
i = i + 1
result = dask.dataframe.from_delayed(dask_futures)
return result
# END SQL interface
# BEGIN LOG interface
def log(self, query, logs_table_name='bsql_logs'):
if not self.logs_initialized:
self.logs_table_name = logs_table_name
log_files = [self.node_cwds[i] + '/RAL.' + \
str(i) + '.log' for i in range(0, len(self.node_cwds))]
#print(log_files)
dtypes = [
'date64',
'int32',
'str',
'int32',
'int16',
'int16',
'str',
'float32',
'str',
'int32',
'str',
'int32']
names = [
'log_time',
'node_id',
'type',
'query_id',
'step',
'substep',
'info',
'duration',
'extra1',
'data1',
'extra2',
'data2']
t = self.create_table(
self.logs_table_name,
log_files,
delimiter='|',
dtype=dtypes,
names=names,
file_format='csv')
#print("table created")
#print(t)
self.logs_initialized = True
return self.sql(query)
| 1.820313 | 2 |
pytato/cmath.py | alexfikl/pytato | 0 | 12793271 | from __future__ import annotations
__copyright__ = """
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
Copyright (C) 2020 <NAME>
Copyright (C) 2021 <NAME>
"""
__license__ = """
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
# {{{ docs
__doc__ = """
.. currentmodule:: pytato
.. autofunction:: abs
.. autofunction:: sqrt
.. autofunction:: sin
.. autofunction:: cos
.. autofunction:: tan
.. autofunction:: arcsin
.. autofunction:: arccos
.. autofunction:: arctan
.. autofunction:: conj
.. autofunction:: arctan2
.. autofunction:: sinh
.. autofunction:: cosh
.. autofunction:: tanh
.. autofunction:: exp
.. autofunction:: log
.. autofunction:: log10
.. autofunction:: isnan
.. autofunction:: real
.. autofunction:: imag
"""
# }}}
import numpy as np
import pymbolic.primitives as prim
from typing import Tuple, Optional
from pytato.array import Array, ArrayOrScalar, IndexLambda, _dtype_any
from pytato.scalar_expr import SCALAR_CLASSES
from pymbolic import var
def _apply_elem_wise_func(inputs: Tuple[ArrayOrScalar],
func_name: str,
ret_dtype: Optional[_dtype_any] = None
) -> ArrayOrScalar:
if all(isinstance(x, SCALAR_CLASSES) for x in inputs):
np_func = getattr(np, func_name)
return np_func(*inputs) # type: ignore
if not inputs:
raise ValueError("at least one argument must be present")
shape = None
sym_args = []
bindings = {}
for index, inp in enumerate(inputs):
if isinstance(inp, Array):
if inp.dtype.kind not in ["f", "c"]:
raise ValueError("only floating-point or complex "
"arguments supported")
if shape is None:
shape = inp.shape
elif inp.shape != shape:
# FIXME: merge this logic with arithmetic, so that broadcasting
# is implemented properly
raise NotImplementedError("broadcasting in function application")
if ret_dtype is None:
ret_dtype = inp.dtype
bindings[f"in_{index}"] = inp
sym_args.append(
prim.Subscript(var(f"in_{index}"),
tuple(var(f"_{i}") for i in range(len(shape)))))
else:
sym_args.append(inp)
assert shape is not None
assert ret_dtype is not None
return IndexLambda(
prim.Call(var(f"pytato.c99.{func_name}"), tuple(sym_args)),
shape, ret_dtype, bindings)
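# For instance (illustrative), sin(x) on a two-dimensional Array x yields an IndexLambda
# whose expression is pytato.c99.sin(in_0[_0, _1]), with x bound under the name "in_0".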
def abs(x: Array) -> ArrayOrScalar:
if x.dtype.kind == "c":
result_dtype = np.empty(0, dtype=x.dtype).real.dtype
else:
result_dtype = x.dtype
return _apply_elem_wise_func((x,), "abs", ret_dtype=result_dtype)
def sqrt(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "sqrt")
def sin(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "sin")
def cos(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "cos")
def tan(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "tan")
def arcsin(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "asin")
def arccos(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "acos")
def arctan(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "atan")
def conj(x: Array) -> ArrayOrScalar:
if x.dtype.kind != "c":
return x
return _apply_elem_wise_func((x,), "conj")
def arctan2(y: Array, x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((y, x), "atan2") # type:ignore
def sinh(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "sinh")
def cosh(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "cosh")
def tanh(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "tanh")
def exp(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "exp")
def log(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "log")
def log10(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "log10")
def isnan(x: Array) -> ArrayOrScalar:
return _apply_elem_wise_func((x,), "isnan", np.dtype(np.int32))
def real(x: Array) -> ArrayOrScalar:
if x.dtype.kind == "c":
result_dtype = np.empty(0, dtype=x.dtype).real.dtype
else:
return x
return _apply_elem_wise_func((x,), "real", ret_dtype=result_dtype)
def imag(x: Array) -> ArrayOrScalar:
if x.dtype.kind == "c":
result_dtype = np.empty(0, dtype=x.dtype).real.dtype
else:
import pytato as pt
return pt.zeros(x.shape, dtype=x.dtype)
return _apply_elem_wise_func((x,), "imag", ret_dtype=result_dtype)
# vim: fdm=marker
| 1.609375 | 2 |
plagiarism/core.py | u2rafi/python-plagiarism | 2 | 12793272 | import numpy as np
from io import TextIOWrapper
from typing import Iterable, Any, Union, TextIO, List, Optional
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from plagiarism.sources import Source
class Output(object):
"""
    Class that formats ndarray data into a plain format
    :parameter
        data: ndarray
        mapping: mapping of a target array with data (array)
        sorted: whether to sort the data array
        nim_percentage: minimum matching similarity percentage
>>> out = Output(data=...)
>>> out.getlist()
>>> out.get()
"""
def __init__(
self,
data: np.ndarray,
*,
mapping: Optional[list],
sorted: Optional[bool] = True,
nim_percentage: Optional[float] = 1.0
) -> None:
self.data = data
self.map = mapping
self.sorted = sorted
self.nim_percentage = nim_percentage
@staticmethod
def _sorting(d: Iterable[dict], *, reverse=False) -> Iterable:
"""
        Sort an array of dictionaries by their 'score' value
        :parameter:
            d: array of dictionaries
            reverse: whether to sort in descending order
:return:
a sorted array
"""
return sorted(d, key=lambda x: x['score'], reverse=reverse)
def getlist(self) -> List:
"""
        Get the results as a list of dictionaries
:return:
An array
"""
return list(self._sorting(self._generate_result()) if self.sorted else list(self._generate_result()))
def get(self) -> float:
"""
        Get the highest similarity score as a percentage
        :return:
            A float
"""
result = [item[0] * 100 for item in self.data]
result.sort(reverse=True)
return result[0]
def _generate_result(self) -> Iterable:
""" Generator that convert ndarray for an array of dictionary """
if self.map:
for index, score in enumerate(self.data):
_score: float = score[0] * 100
if _score >= self.nim_percentage:
yield dict(doc=self.map[index], score="{:.2f}".format(_score))
else:
for item in self.data:
yield dict(score="{:.2f}".format(item[0] * 100))
def __call__(self, *args, **kwargs):
return self.getlist()
def __iter__(self):
        return iter(self.getlist())
class Plagiarism(object):
"""
    Find plagiarism between the given input and a dataset using scikit-learn TF-IDF vectors and cosine similarity
:parameter
source: `Source` instance having file or file content
>>> plg = Plagiarism(source=...)
>>> plg.compare(...).get() # get percentage in number (float)
>>> plg.compare(...).getlist()
"""
def __init__(
self,
source: Source,
*,
nim_percentage: Optional[float] = 1.0
) -> None:
self._tfidf_vectorizer = TfidfVectorizer()
self.source = source
self.nim_percentage = nim_percentage
def _cosine_similarity(self, x, y) -> Any:
""" Compute cosine similarity between samples in x and y. K(x, y) = <Xx, y> / (||x||*||y||) """
return cosine_similarity(x, y)
def _get_source(self) -> Union[Iterable, list]:
return self.source.get_content()
def _compare_transform(self, raw_document) -> Any:
tfidf = self._tfidf_vectorizer.fit_transform(list(self._get_source()) + [raw_document])
return (tfidf * tfidf.T).A[0, 1]
@staticmethod
def _get_input_content(f: Union[bytes, TextIO]) -> str:
if type(f) is bytes:
return f.decode()
return f.read()
def compare(
self,
raw_document: Union[TextIOWrapper, TextIO, bytes, str]
) -> Output:
"""
Compare cosine similarity between documents
:param raw_document: Text file or text contents
:return:
Instance of Output
"""
raw_document = raw_document if type(raw_document) == str else self._get_input_content(raw_document)
vect_x = self._tfidf_vectorizer.fit_transform(self.source.get_content())
vect_y = self._tfidf_vectorizer.transform([raw_document])
similarity = self._cosine_similarity(vect_x, vect_y)
return Output(data=similarity, mapping=self.source.get_mapping(), nim_percentage=self.nim_percentage)
| 2.765625 | 3 |
data_scripts/sony250/read_all_imgs.py | laomao0/AEnet | 1 | 12793273 | import cv2
import numpy as np
import random
import os
def imread(path):
# print(path)
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    # convert BGR to RGB
    # img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = img[:,:,[2, 1, 0]]
return img
def imsave(path, img):
# convert RGB to BGR
img = img[:,:,[2, 1, 0]]
# save
cv2.imwrite(path, img)
# dataset_path = '/DATA/wangshen_data/ShortLongDataset/Sony240/full_sharp'
dataset_path = '/DATA/wangshen_data/ShortLongDataset/Sony240/test'
all_dirs = sorted(os.listdir(dataset_path))
# counter = 0
for dir in all_dirs:
list_path = os.path.join(dataset_path, dir)
items = sorted(os.listdir(list_path)) # imgs
num = len(items)
print(list_path, num)
for it in items:
img_path = os.path.join(list_path, it)
# counter = counter + 1
try:
# print(img_path)
a = imread(img_path)
# print(counter)
except:
print(img_path)
| 2.65625 | 3 |
src/sentry_top/plugin.py | robopsi/sentry-top | 10 | 12793274 | import sentry_top
from collections import defaultdict
from nydus.db import create_cluster
from time import time
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from sentry.models import Project
from sentry.plugins.base import Plugin
if not getattr(settings, 'SENTRY_TOP', None):
raise ImproperlyConfigured('You need to configure SENTRY_TOP')
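# Expected settings shape (inferred from the usage below), e.g.:
# SENTRY_TOP = {'redis': {'hosts': {0: {}}}, 'total_minutes': 15}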
def get_cluster(hosts=None, router='nydus.db.routers.keyvalue.PartitionRouter'):
if hosts is None:
hosts = {
0: {} # localhost / default
}
return create_cluster({
'engine': 'nydus.db.backends.redis.Redis',
'router': router,
'hosts': hosts,
})
redis = get_cluster(**settings.SENTRY_TOP['redis'])
MINUTES = settings.SENTRY_TOP.get('total_minutes', 15)
class TopPlugin(Plugin):
author = 'Sentry Team'
author_url = 'https://github.com/getsentry/sentry-top'
version = sentry_top.VERSION
description = 'Tracks active projects ala `top`'
resource_links = [
('Bug Tracker', 'https://github.com/getsentry/sentry-top/issues'),
('Source', 'https://github.com/getsentry/sentry-top'),
]
slug = 'top'
title = 'Top'
conf_title = title
conf_key = 'top'
def can_enable_for_projects(self):
return False
def add_event(self, project, client=redis):
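        # Each event increments the project's score in a per-minute Redis sorted set
        # (key 'stop:p:<minute>'), which is set to expire after (MINUTES + 1) * 60 seconds.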
minute = int(time() / 60)
keys = [
# 'stop:e:{0}:{1}'.format(event.group_id),
'stop:p:{0}'.format(minute),
]
with client.map() as conn:
for key in keys:
conn.zincrby(key, project.id)
conn.expire(key, (MINUTES + 1) * 60)
def top_projects(self, minutes=15, num=100, client=redis):
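        # Sum each project's per-minute counters over the last `minutes` buckets and
        # resolve the top `num` project ids back to Project objects (with their teams).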
now = int(time() / 60)
keys = []
for minute in xrange(minutes):
keys.append('stop:p:{0}'.format(now - minute))
counts = []
with client.map() as conn:
for key in keys:
counts.append(conn.zrevrange(key, 0, num, withscores=True))
results = defaultdict(int)
for countset in counts:
for project_id, count in countset:
results[int(project_id)] += int(count)
sorted_results = sorted(
results.items(), key=lambda x: x[1], reverse=True)[:num]
project_map = dict(
(p.id, p) for p in Project.objects.filter(id__in=[
p_id for p_id, _ in sorted_results
]).select_related('team')
)
return [
(project_map[p_id], c)
for (p_id, c) in sorted_results
if p_id in project_map
]
def is_rate_limited(self, project):
# TODO(dcramer): we need a way to hook into Sentry at event input
# that guarantees this stat
self.add_event(project)
| 1.960938 | 2 |
aoc/twenty/day1/day.py | jaehoonhwang/advent-of-code | 0 | 12793275 | import os
from aoc.utils.file_reader import read_file_line
from aoc.utils.file_reader import path_join
directory_path = os.path.dirname(os.path.realpath(__file__))
input_filename = "input.txt"
target_number = 2020
"""
"""
def problem_part1(lines):
seen = set()
answer = None
for number in lines:
if number in seen:
answer = number * (target_number - number)
break
else:
seen.add(target_number - number)
return answer
"""
"""
def problem_part2(lines):
seen = set()
mapping = {}
answer = None
for index, number in enumerate(lines):
for inner_index in range(len(lines)):
summation = number + lines[inner_index]
seen.add(target_number - summation)
mapping[summation] = (number, lines[inner_index])
for number in lines:
if number in seen:
number1 = mapping[target_number-number][0]
number2 = mapping[target_number-number][1]
answer = number * number1 * number2
break
return answer
def day1_main():
print("2020 AOC Challenge Day 1: Report Repair")
input_path = path_join(directory_path, input_filename)
raw_texts = read_file_line(input_path)
lines = [int(number) for number in raw_texts]
part1_answer = problem_part1(lines)
print("Part 1, Answer: {}".format(part1_answer))
part2_answer = problem_part2(lines)
print("Part 2, Answer: {}".format(part2_answer))
if __name__ == "__main__":
day1_main() | 3.265625 | 3 |
2019/12 December/dp12012019.py | vishrutkmr7/DailyPracticeProblemsDIP | 5 | 12793276 | <reponame>vishrutkmr7/DailyPracticeProblemsDIP
# This problem was recently asked by Google:
# Given a nested dictionary, flatten the dictionary, where nested dictionary keys can be represented through dot notation.
import collections.abc as collections
def flatten_dictionary(d, parent_key="", sep="."):
# Fill this in.
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten_dictionary(v, new_key, sep=sep).items())
else:
items.append((new_key, v))
return dict(items)
d = {"a": 1, "b": {"c": 2, "d": {"e": 3}}}
print(flatten_dictionary(d))
# {'a': 1, 'b.c': 2, 'b.d.e': 3}
| 4.03125 | 4 |
hbos_server/validationbase.py | robscetury/hbos | 0 | 12793277 | from abc import abstractmethod
class Validation(object):
def __init__(self, is_valid:bool, message:str):
self._is_valid = is_valid
self._message = message
@property
def is_valid(self) -> bool:
return self._is_valid
@is_valid.setter
def is_valid(self,value:bool):
self._is_valid = value
@property
def message(self)->str:
return self._message
@message.setter
def message(self,value:str):
self._message = value
class ValidationBase(object):
@abstractmethod
def validate(self, object) -> Validation:
        raise NotImplementedError
| 3.515625 | 4 |
tests/commands/conftest.py | danpoland/slacktools-interactivity | 0 | 12793278 | <reponame>danpoland/slacktools-interactivity
from typing import Dict
import pytest
from interactivity.generics import Payload
def make_command_request_data():
return {
"token": "token",
"command": "/command",
"text": "do_work op1 op2",
"response_url": "https://testing.commands",
"trigger_id": "trigger_id",
"user_id": "user_id",
"user_name": "user_name",
"team_id": "team_id",
"team_name": "team_name",
"enterprise_id": "enterprise_id",
"enterprise_name": "enterprise_name",
"channel_id": "channel_id",
"channel_name": "channel_name",
"team_domain": "CrispyDev",
}
@pytest.fixture
def command_request_data():
return make_command_request_data()
@pytest.fixture
def make_command_payload():
def _make_command_payload(request_data: Dict = None):
command_request_data = make_command_request_data()
if request_data:
command_request_data = {**command_request_data, **request_data}
return Payload(**command_request_data)
return _make_command_payload
@pytest.fixture
def payload(make_command_payload):
return make_command_payload()
| 2.328125 | 2 |
machine-learning/ml-algos/logistic_regression.py | teetangh/Kaustav-AI-workspace | 0 | 12793279 | #!/usr/bin/env python3
import numpy as np
import math
import random
def compute_z(theta, x):
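    # linear score: z = sum_j theta[j] * x[j] + theta[len(x)], where the last theta entry is the bias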
z = 0
for j in range(len(x)):
z += theta[j] * x[j]
z += theta[len(x)]
return z
def compute_g(z):
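    # logistic sigmoid: g(z) = 1 / (1 + exp(-z))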
return (1)/(1 + math.exp(-z))
def compute_h(z):
return compute_g(z)
def binary_cross_entropy_loss(Y_train, Y_predict):
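    # binary cross-entropy: BCE = -(1/N) * sum_i [ y_i*log(p_i) + (1 - y_i)*log(1 - p_i) ]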
total = 0
for i in range(len(Y_train)):
total -= (Y_train[i] * math.log(Y_predict[i])) + \
((1 - Y_train[i]) * math.log(1-Y_predict[i]))
average = total / len(Y_train)
return average
def compute_loss_gradients(theta, X_train, Y_train, Y_predict):
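    # gradient of the BCE loss: dJ/dtheta_j = (1/N) * sum_i (h(x_i) - y_i) * x_i[j]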
delta_theta = []
for j in range(len(X_train[0])):
grad = 0
for i in range(len(Y_train)):
grad += ((Y_predict[i] - Y_train[i]) * X_train[i][j])/len(Y_train)
delta_theta.append(grad)
return delta_theta
def main():
# f = int(input("no of features: "))
n = int(input("no of rows: "))
X_train = []
Y_train = []
for i in range(n):
row = [int(r) for r in input().split()]
X_train.append(row[0:-1])
Y_train.append(row[-1])
    theta = [np.random.randn() for i in range(len(X_train[0]) + 1)]  # one weight per feature plus a bias term
print("theta", theta)
for i in range(n):
print(X_train[i], Y_train[i])
epochs = 5
epsilon = 0.00000000000000001
alpha = 0.001
for e in range(epochs):
Y_predict = []
for i in range(n):
print(X_train[i])
Y_predict.append(compute_h(compute_z(theta, X_train[i])))
current_loss = binary_cross_entropy_loss(Y_train, Y_predict)
print("=========> Epoch number:", e, "Current Loss: ", current_loss)
print("Y_predict", Y_predict)
if current_loss <= epsilon:
break
delta_theta = compute_loss_gradients(
theta, X_train, Y_train, Y_predict)
print("delta_theta", delta_theta)
for j in range(len(theta) - 1):
theta[j] = theta[j] - alpha * delta_theta[j]
if __name__ == "__main__":
main()
| 2.953125 | 3 |
1_nas/predictor.py | nuaa-QK/1_NAS | 0 | 12793280 | <filename>1_nas/predictor.py
import os
from info_str import _cur_ver_dir
import numpy as np
from enumerater import Enumerater
from predict_op.label_encoding import decoder, encoder, getClassNum
from keras.utils.np_utils import to_categorical
from keras.models import model_from_json
import time
MAX_NETWORK_LENGTH = 71
#model_json_path = './predict_op/model.json'
#model_weights_path = './predict_op/model.json.h5'
net_data_path = os.path.join(_cur_ver_dir, 'predict_op/data', 'net.npy')
label_data_path = os.path.join(_cur_ver_dir, 'predict_op/data', 'label.npy')
model_json_path = os.path.join(_cur_ver_dir, 'predict_op', 'model.json')
model_weights_path = os.path.join(_cur_ver_dir, 'predict_op', 'model.json.h5')
# TODO Predictor.train -> Predictor.train_model (defined in interface.md)
# TODO DO NOT overuse @staticmethod. It can be your private function in predictor.py.
# TODO Wrtie Predictor.predict & Predictor.train_model discriptions.
# TODO Let each functions be less than 30 line and 80 characters per line.
class Feature:
def __init__(self, graph):
self.graph = graph
def _feature_links(self):
        # Extract every branch chain from the adjacency matrix; each chain is
        # [chain id, start endpoint, end endpoint, length, list of intermediate node ids]
g = self.graph
link_set = []
endpoint_link_num_set = []
node_link_num_set = []
endpoint = self._find_endpoint()
link_id = 0
for i in range(len(endpoint)):
if endpoint[i] == 1:
for j in range(len(g)):
if g[i][j] == 1:
link = [link_id, i, 0, 0, []]
link = self._find_link(endpoint, j, link, g, node_link_num_set)
link_id += 1
link_set.append(link)
for i in range(len(endpoint)):
if endpoint[i] == 1:
i_links_num_set = []
i_links_num_set.append(i)
for j in range(len(link_set)):
if link_set[j][1] == i or link_set[j][2] == i:
i_links_num_set.append(j)
endpoint_link_num_set.append(i_links_num_set)
return link_set, endpoint_link_num_set, node_link_num_set
def _feature_nodes(self):
        # Extract a feature vector for every node
link_set, endpointLinkNumSet, nodeLinkNumSet = self._feature_links()
node_num = len(self.graph)
feature_num = 25
node_feature = np.zeros((node_num, feature_num), dtype=float)
node_feature[:, 0] = node_num
node_feature[:, 1] = len(link_set)
        # _find_max_link / _find_min_link return (index, length)
        max_link_index, max_length = self._find_max_link(link_set)
        min_link_index, min_length = self._find_min_link(link_set)
        node_feature[:, 2] = max_link_index
        node_feature[:, 3] = max_length
        node_feature[:, 4] = min_link_index
        node_feature[:, 5] = min_length
mean = len(link_set) / node_num
link_len = []
for i in range(len(link_set)):
link_len.append(link_set[i][3])
var = np.var(link_len)
node_feature[:, 6] = mean
node_feature[:, 7] = var
        # Global feature: number of endpoints
endpoint_num = len(endpointLinkNumSet)
node_feature[:, 8] = endpoint_num
global_num = 9
        # Local (per-node) features
for i in range(node_num):
node_feature[i][global_num] = self._is_endpoint(i, endpointLinkNumSet)
node_feature[i][global_num + 1] = i
if node_feature[i][global_num] == 1:
node_feature[i][global_num + 2] = self._link_num(i, endpointLinkNumSet)
links = self._find_endpoint_link_set(i, endpointLinkNumSet, link_set)
node_feature[i][global_num + 3] = self._mean_link(links)
node_feature[i][global_num + 4] = self._var_link(links)
_, max_length = self._find_max_link(links)
_, min_length = self._find_min_link(links)
node_feature[i][global_num + 5] = max_length
node_feature[i][global_num + 6] = min_length
else:
link = self._find_node_link(i, nodeLinkNumSet, link_set)
node_feature[i][global_num + 7] = self._relative_Loc(i, link)
node_feature[i][global_num + 8] = link[3]
node_feature[i][global_num + 9] = link[1]
node_feature[i][global_num + 10] = link[2]
links = self._find_node_links(i, nodeLinkNumSet, link_set)
node_feature[i][global_num + 11] = self._mean_link(links)
node_feature[i][global_num + 12] = self._var_link(links)
return node_feature
def _find_endpoint(self):
g = self.graph
endpoint = np.zeros((len(g), 1), dtype=int)
endpoint[0] = 1
for i in range(1, len(g)):
out_link_num = 0
in_link_num = 0
for j in range(len(g)):
if g[i][j] == 1:
out_link_num += 1
if g[j][i] == 1:
in_link_num += 1
if out_link_num > 1 and in_link_num > 1:
break
if out_link_num != 1:
endpoint[i] = 1
if in_link_num > 1:
endpoint[i] = 1
return endpoint
def _find_endpoint_link_set(self, id, endpoint_link_num_set, link_set):
        # Collect the chains attached to a given endpoint
links = []
for i in range(len(endpoint_link_num_set)):
if endpoint_link_num_set[i][0] == id:
for e in range(1, len(endpoint_link_num_set[i])):
links.append(link_set[endpoint_link_num_set[i][e]])
break
return links
def _find_node_links(self, id, node_link_num_set, link_set):
        # Collect the chains that share both endpoints with the chain containing this node
links = []
link_num = 0
for i in range(len(node_link_num_set)):
if node_link_num_set[i][0] == id:
link_num = node_link_num_set[i][1]
break
for i in range(len(link_set)):
if link_set[i][1] == link_set[link_num][1] and link_set[i][2] == link_set[link_num][2]:
links.append(link_set[i])
return links
def _find_node_link(self, id, node_link_num_set, link_set):
        # Find the chain that contains this node
for i in range(len(node_link_num_set)):
if node_link_num_set[i][0] == id:
link_num = link_set[node_link_num_set[i][1]]
return link_num
def _is_endpoint(self, node_num, endpoint_link_num_set):
        # Check whether the node is an endpoint
for i in range(len(endpoint_link_num_set)):
if endpoint_link_num_set[i][0] == node_num:
return 1
return 0
def _link_num(self, node_id, endpoint_link_num_set):
        # Number of links attached to the endpoint
for i in range(len(endpoint_link_num_set)):
if endpoint_link_num_set[i][0] == node_id:
e = endpoint_link_num_set[i]
return len(e) - 1
return 0
def _mean_link(self, link_set):
        # Mean link length
links_len = []
for e in link_set:
links_len.append(e[3])
return np.mean(links_len)
def _var_link(self, link_set):
        # Variance of the link lengths
links_len = []
for e in link_set:
links_len.append(e[3])
return np.var(links_len)
def _relative_Loc(self, id, link):
        # Relative position of the node within its link
for i in range(len(link[4])):
if link[4][i] == id:
return i + 1
def _find_max_link(self, link_set):
        # Find the longest link
max_length = 0
index = 0
for i in range(len(link_set)):
if link_set[i][3] > max_length:
index = i
max_length = link_set[i][3]
return index, max_length
def _find_min_link(self, link_set):
        # Find the shortest link
        min_length = float("inf")
        index = 0
        for i in range(len(link_set)):
            if link_set[i][3] < min_length:
                index = i
                min_length = link_set[i][3]
        if min_length == float("inf"):
            min_length = 0
        return index, min_length
def _find_link(self, endpoint, id, link, G, node_link_num_set):
        # Recursively collect all nodes along the link
if endpoint[id] == 1:
link[2] = id
return link
else:
link[3] += 1
link[4].append(id)
node_link_num_set.append([id, link[0]])
for i in range(len(G)):
if G[id][i] == 1:
link = self._find_link(endpoint, i, link, G, node_link_num_set)
break
return link
class Predictor:
def __init__(self):
with open(model_json_path, 'r') as file:
model_json = file.read()
self.model = model_from_json(model_json)
self.model.load_weights(model_weights_path)
def _list2mat(self, G):
        # Convert the adjacency list into an adjacency matrix
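        # For example, the adjacency list [[1], [2], []] maps to the matrix
        #   [[0, 1, 0],
        #    [0, 0, 1],
        #    [0, 0, 0]]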
graph = np.zeros((len(G), len(G)), dtype=int)
for i in range(len(G)):
e = G[i]
if e:
for k in e:
graph[i][k] = 1
return graph
def _graph_concat(self, graphs):
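        # Stack the graphs block-diagonally into a single adjacency matrix and add
        # one edge from the last node of each graph to the first node of the next.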
if len(graphs) == 1:
return graphs[0]
elif len(graphs) > 1:
new_graph_length = 0
for g in graphs:
new_graph_length += len(g)
new_graph = np.zeros((new_graph_length, new_graph_length), dtype=int)
            x_index = 0  # the starting connection position of the next graph
y_index = 0
for g in graphs:
new_graph[x_index:x_index + len(g), y_index:y_index + len(g)] = g
if y_index + len(g) < new_graph_length:
new_graph[x_index + len(g) - 1][y_index + len(g)] = 1
x_index = x_index + len(g)
y_index = y_index + len(g)
return new_graph
def _get_new_order(self, links, graph_size):
        # Get the order of the nodes under the new encoding
new_order = np.zeros((2, graph_size), dtype=int)
for i in range(graph_size):
new_order[0][i] = new_order[1][i] = i
for l in links:
nodes = l[4]
if nodes:
if nodes[0] > l[2]:
for i in range(len(nodes)):
new_order[1][nodes[i]] = l[1] + i + 1
new_order = np.argsort(new_order[1, :])
return new_order
def _get_new_mat(self, new_order, mat):
        # Get the adjacency matrix of the network structure under the new encoding
size = len(mat)
graph = np.zeros((size, size), dtype=int)
for i in range(size):
e = mat[i]
if e:
for k in e:
pre = int(np.argwhere(new_order == i))
after = int(np.argwhere(new_order == k))
graph[pre][after] = 1
return graph
def _padding(self, node_feature, length):
        # Pad the input with rows of -1 so every sample has the same number of rows
if len(node_feature) < length:
add = np.ones((length - len(node_feature), len(node_feature[0])))
add = -add
node_feature = np.append(node_feature, add, axis=0)
return node_feature
def _trans(self, graphs):
        # Re-encode the input adjacency lists and convert them into matrices for feature extraction
graphs_mat = []
graphs_orders = []
for g in graphs:
g_mat = self._list2mat(g)
links, _, _ = Feature(g_mat)._feature_links()
order = self._get_new_order(links, len(g_mat))
graph_mat = self._get_new_mat(order, g)
graphs_mat.append(graph_mat)
graphs_orders.append(order)
return graphs_mat, graphs_orders
def _class_id_2_parameter(self, order, class_list):
        # Convert the final output classes into the detailed operation parameters to be predicted
parameters = decoder(class_list)
parameters_cp = parameters.copy()
for i in range(len(order)):
parameters[order[i]] = parameters_cp[i]
return parameters[:len(order)]
def _save_model(self, model, json_path, weights_path):
model_json = model.to_json()
with open(json_path, 'w') as file:
file.write(model_json)
model.save_weights(weights_path)
def _my_param_style(self, cell_list):
filter_size = [16, 32, 48, 64, 96, 128, 192, 256, 512, 1024]
pool_size = [2, 3, 4, 5, 7]
labels = []
for cell in cell_list:
label = []
if cell[0] == 'conv':
for f_size in filter_size:
if cell[1] <= f_size:
label = [1, [str(cell[2]), str(f_size), 'relu', '0', '0']]
else:
if cell[1] == 'max' or cell[1] == 'avg':
for p_size in pool_size:
if cell[2] <= p_size:
label = [0, ['pool ' + cell[1], str(p_size)]]
elif cell[1] == 'global':
label = [0, ['pool avg', 'global']]
labels.append(label)
return labels
def _read_data(self, net_data_path, label_data_path):
network_feature = np.load(net_data_path)
label = np.load(label_data_path)
return network_feature, label
def _save_data(self, net, label):
np.save(net_data_path, net)
np.save(label_data_path, label)
def _predict(self, inputs):
        # Predict the operations from the input features
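        # The feature matrix is wrapped into a batch of one sample; the class with
        # the highest score is then taken for every node position.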
inputs = np.array(inputs)
inputs = np.reshape(inputs, (1, inputs.shape[0], inputs.shape[1]))
# model = load_model(model_json_path, model_weights_path)
predict_y = self.model.predict(inputs)
predict_y = np.reshape(predict_y, (predict_y.shape[1], predict_y.shape[2]))
output = []
for i in range(len(inputs[0])):
output.append(np.argmax(predict_y[i]))
return output
    # Module interface
def predictor(self, pre_block, graph_full):
'''
Method for predicting block's operation
:param pre_block: Previous block Networkitem_list
:param graph_full: Current block Networkitem
:return: Operation of each node in the current block,including size and filters
'''
graph_list = []
if pre_block:
for block in pre_block:
graph_list.append(block)
graph_list.append(graph_full)
graphs_mat, graphs_orders = self._trans(graph_list)
new_graph = self._graph_concat(graphs_mat)
inputs = Feature(new_graph)._feature_nodes()
inputs = self._padding(inputs, MAX_NETWORK_LENGTH)
class_list = self._predict(inputs)
ops = self._class_id_2_parameter(graphs_orders[-1],
class_list[len(new_graph) - len(graph_full):len(new_graph)])
return ops
def train_model(self, graph_full, cell_list):
'''
Retrain the predictor model with networks that
get high accuracy on the validation set
:param graph_full: a Network Topology
:param cell_list: Cell list
:returns: None.
'''
x_train = []
y_train = []
net, label = self._read_data(net_data_path, label_data_path)
for k in net:
x_train.append(k)
for k in label:
y_train.append(k)
graphs_mat, _ = self._trans(graph_full)
for graph in graphs_mat:
x = Feature(graph)._feature_nodes()
x = self._padding(x, MAX_NETWORK_LENGTH)
x_train.append(x)
x_train = np.array(x_train)
for cell in cell_list:
cell = self._my_param_style(cell)
y = encoder(cell)
y = to_categorical(y, getClassNum())
y = self._padding(y, MAX_NETWORK_LENGTH)
y_train.append(y)
y_train = np.array(y_train)
self._save_model(model=self.model,
json_path='./predict_op/outdated_model.json',
weights_path='./predict_op/outdated_model.json.h5')
self.model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
self.model.fit(x_train, y_train, batch_size=32, epochs=500)
self._save_model(model=self.model,
json_path=model_json_path,
weights_path=model_weights_path)
self._save_data(x_train, y_train)
if __name__ == '__main__':
# graph = [[[1], [2], [3], [4], [5], []]]
# cell_list = [[('conv', 256, 3, 'relu'), ('conv', 192, 3, 'relu'), ('conv', 512, 1, 'relu'), ('pooling', 'max', 4)
# , ('conv', 128, 1, 'relu'), ('conv', 512, 5, 'relu')]]
# pred = Predictor()
# Blocks = []
# pred.train([], [])
enu = Enumerater(depth=6, width=3)
network_pool = enu.enumerate()
print(len(network_pool))
start = time.time()
i = 0
pred = Predictor()
for ind in range(2, len(network_pool)):
gra = network_pool[ind].graph_part
#Blocks = [network_pool[ind - 2].graph_part, network_pool[ind - 1].graph_part]
Blocks = []
cell_list = pred.predictor(Blocks, gra)
if i%100 == 0:
print("iterator:", i)
i += 1
print(gra)
print(cell_list)
end = time.time()
print(end-start)
| 2.359375 | 2 |
hisa/capsule/capsule.py | rittikaadhikari/stock-recommendation | 0 | 12793281 | <filename>hisa/capsule/capsule.py
from six import with_metaclass
from abc import ABCMeta
class Capsule(with_metaclass(ABCMeta)):
pass
| 1.523438 | 2 |
AA.py | BeahIF/ev3 | 0 | 12793282 | <reponame>BeahIF/ev3
#!/usr/bin/env python3
#coding: utf-8
from ev3dev.ev3 import *
from threading import *
import time, socket
import math
m1 = LargeMotor('outD')
m2 = LargeMotor('outC')
m3 = MediumMotor('outB')
m4 = MediumMotor('outA')
#Sensor_Cor = [ColorSensor('in1'), ColorSensor('in2')]
#Sensor_Cor[0] = ColorSensor('in1') #2
#Sensor_Cor[1] = ColorSensor('in2') #4
#us = UltrasonicSensor('in3')
#us2 = UltrasonicSensor('in4')
#ir = InfraredSensor('in4')
#ir = UltrasonicSensor('in4')
# ir2 = InfraredSensor('in1')
# tou = TouchSensor('in4')
#Sensor_Cor[0].mode = 'COL-COLOR'
#Sensor_Cor[1].mode = 'COL-COLOR'
#us.mode = 'US-DIST-CM'
#us2.mode = 'US-DIST-CM'
#ir.mode = 'US-DIST-CM'
#ir.mode = 'IR-PROX'
# ir2.mode = 'IR-PROX'
m1.run_to_rel_pos(position_sp=5000,speed_sp=150,stop_action="brake")
m2.run_to_rel_pos(position_sp=5000,speed_sp=150,stop_action="brake") | 1.820313 | 2 |
tests/diffcalc/scenarios.py | DiamondLightSource/diffcalc-core | 1 | 12793283 | ###
# Copyright 2008-2011 Diamond Light Source Ltd.
# This file is part of Diffcalc.
#
# Diffcalc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Diffcalc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Diffcalc. If not, see <http://www.gnu.org/licenses/>.
###
from math import asin, atan2, cos, degrees, radians, sin
# from diffcalc.hkl.vlieg.geometry import VliegPosition
from diffcalc.hkl.calc import sign
from diffcalc.hkl.geometry import Position
from diffcalc.ub.reference import Reflection
def PosFromI16sEuler(phi, chi, eta, mu, delta, gamma):
return Position(
mu=mu,
delta=delta,
nu=gamma,
eta=eta,
chi=chi,
phi=phi,
)
def VliegPos(alpha=None, delta=None, gamma=None, omega=None, chi=None, phi=None):
"""Convert six-circle Vlieg diffractometer angles into 4S+2D You geometry"""
sin_alpha = sin(radians(alpha))
cos_alpha = cos(radians(alpha))
sin_delta = sin(radians(delta))
cos_delta = cos(radians(delta))
sin_gamma = sin(radians(gamma))
cos_gamma = cos(radians(gamma))
asin_delta = degrees(asin(sin_delta * cos_gamma)) # Eq.(83)
vals_delta = [asin_delta, 180.0 - asin_delta]
idx, _ = min(
[(i, abs(delta - d)) for i, d in enumerate(vals_delta)], key=lambda x: x[1]
)
pos_delta = vals_delta[idx]
sgn = sign(cos(radians(pos_delta)))
pos_nu = degrees(
atan2(
sgn * (cos_delta * cos_gamma * sin_alpha + cos_alpha * sin_gamma),
sgn * (cos_delta * cos_gamma * cos_alpha - sin_alpha * sin_gamma),
)
) # Eq.(84)
return Position(mu=alpha, delta=pos_delta, nu=pos_nu, eta=omega, chi=chi, phi=phi)
class SessionScenario:
"""
    A test scenario. The test case must have name, lattice and bmatrix set,
    and if umatrix is set then so must ref1 and ref2. Matrices should be 3*3
    python arrays of lists, and ref1 and ref2 should be in the format
    (h, k, l, position, energy, tag)."""
def __init__(self):
self.name = None
self.lattice = None
self.bmatrix = None
self.ref1 = None
self.ref2 = None
self.umatrix = None
self.calculations = [] # CalculationScenarios
def __str__(self):
toReturn = "\nTestScenario:"
toReturn += "\n name: " + self.name
toReturn += "\n lattice:" + str(self.lattice)
toReturn += "\n bmatrix:" + str(self.bmatrix)
toReturn += "\n ref1:" + str(self.ref1)
toReturn += "\n ref2:" + str(self.ref2)
toReturn += "\n umatrix:" + str(self.umatrix)
return toReturn
class CalculationScenario:
"""
Used as part of a test scenario. A UB matrix appropriate for this
    calculation will have been calculated or loaded
"""
def __init__(self, tag, package, mode, energy, modeToTest, modeNumber):
self.tag = tag
self.package = package
self.mode = mode
self.energy = energy
self.wavelength = 12.39842 / energy
self.modeToTest = modeToTest
self.modeNumber = modeNumber
self.hklList = None # hkl triples
self.posList = []
self.paramList = []
def sessions(P=VliegPos):
############################ SESSION0 ############################
    # From the dif_init.mat next to dif_dos.exe on Vlieg's session2 CD
# session2 = SessionScenario()
# session2.name = 'latt1'
# session2.lattice = ([4.0004, 4.0004, 2.270000, 90, 90, 90])
# session2.bmatrix = (((1.570639, 0, 0) ,(0.0, 1.570639, 0) ,
# (0.0, 0.0, 2.767923)))
# self.scenarios.append(session2)
############################ SESSION1 ############################
# From b16 on 27June2008 (From <NAME>)
session1 = SessionScenario()
session1.name = "b16_270608"
session1.lattice = (3.8401, 3.8401, 5.43072, 90, 90, 90)
session1.bmatrix = ((1.636204, 0, 0), (0, 1.636204, 0), (0, 0, 1.156971))
session1.ref1 = Reflection(
1,
0,
1.0628,
P(5.000, 22.790, 0.000, 1.552, 22.400, 14.255),
10,
"ref1",
)
session1.ref2 = Reflection(
0,
1,
1.0628,
P(5.000, 22.790, 0.000, 4.575, 24.275, 101.320),
10,
"ref2",
)
session1.umatrix = (
(0.997161, -0.062217, 0.042420),
(0.062542, 0.998022, -0.006371),
(-0.041940, 0.009006, 0.999080),
)
session1.ref1calchkl = (1, 0, 1.0628) # Must match the guessed value!
session1.ref2calchkl = (-0.0329, 1.0114, 1.04)
############################ SESSION2 ############################
# cubic crystal from bliss tutorial
session2 = SessionScenario()
session2.name = "cubic_from_bliss_tutorial"
session2.lattice = (1.54, 1.54, 1.54, 90, 90, 90)
session2.ref1 = Reflection(1, 0, 0, P(0, 60, 0, 30, 0, 0), 12.39842 / 1.54, "ref1")
session2.ref2 = Reflection(
0, 1, 0, P(0, 60, 0, 30, 0, -90), 12.39842 / 1.54, "ref2"
)
session2.bmatrix = ((4.07999, 0, 0), (0, 4.07999, 0), (0, 0, 4.07999))
session2.umatrix = ((1, 0, 0), (0, -1, 0), (0, 0, -1))
session2.ref1calchkl = (1, 0, 0) # Must match the guessed value!
session2.ref2calchkl = (0, 1, 0)
# sixc-0a : fixed omega = 0
c = CalculationScenario("sixc-0a", "sixc", "0", 12.39842 / 1.54, "4cBeq", 1)
c.alpha = 0
c.gamma = 0
c.w = 0
# c.hklList=((0.7, 0.9, 1.3), (1,0,0), (0,1,0), (1, 1, 0))
c.hklList = ((0.7, 0.9, 1.3),)
c.posList.append(
P(0.000000, 119.669750, 0.000000, 59.834875, -48.747500, 307.874983651098)
)
# c.posList.append(P(0.000000, 60.000000, 0.000000, 30.000, 0.000000, 0.000000))
# c.posList.append(P(0.000000, 60.000000, 0.000000, 30.000, 0.000000, -90.0000))
# c.posList.append(P(0.000000, 90.000000, 0.000000, 45.000, 0.000000, -45.0000))
session2.calculations.append(c)
############################ SESSION3 ############################
# AngleCalc scenarios from SPEC sixc. using crystal and alignment
session3 = SessionScenario()
session3.name = "spec_sixc_b16_270608"
session3.lattice = (3.8401, 3.8401, 5.43072, 90, 90, 90)
session3.bmatrix = ((1.636204, 0, 0), (0, 1.636204, 0), (0, 0, 1.156971))
session3.umatrix = (
(0.997161, -0.062217, 0.042420),
(0.062542, 0.998022, -0.006371),
(-0.041940, 0.009006, 0.999080),
)
session3.ref1 = Reflection(
1,
0,
1.0628,
P(5.000, 22.790, 0.000, 1.552, 22.400, 14.255),
12.39842 / 1.24,
"ref1",
)
session3.ref2 = Reflection(
0,
1,
1.0628,
P(5.000, 22.790, 0.000, 4.575, 24.275, 101.320),
12.39842 / 1.24,
"ref2",
)
session3.ref1calchkl = (1, 0, 1.0628)
session3.ref2calchkl = (-0.0329, 1.0114, 1.04)
# sixc-0a : fixed omega = 0
ac = CalculationScenario("sixc-0a", "sixc", "0", 12.39842 / 1.24, "4cBeq", 1)
ac.alpha = 0
ac.gamma = 0
ac.w = 0
### with 'omega_low':-90, 'omega_high':270, 'phi_low':-180, 'phi_high':180
ac.hklList = []
ac.hklList.append((0.7, 0.9, 1.3))
ac.posList.append(P(0.0, 27.352179, 0.000000, 13.676090, 37.774500, 53.965500))
ac.paramList.append(
{
"Bin": 8.3284,
"Bout": 8.3284,
"rho": 36.5258,
"eta": 0.1117,
"twotheta": 27.3557,
}
)
ac.hklList.append((1, 0, 0))
ac.posList.append(P(0.0, 18.580230, 0.000000, 9.290115, -2.403500, 3.589000))
ac.paramList.append(
{
"Bin": -0.3880,
"Bout": -0.3880,
"rho": -2.3721,
"eta": -0.0089,
"twotheta": 18.5826,
}
)
ac.hklList.append((0, 1, 0))
ac.posList.append(P(0.0, 18.580230, 0.000000, 9.290115, 0.516000, 93.567000))
ac.paramList.append(
{
"Bin": 0.0833,
"Bout": 0.0833,
"rho": 0.5092,
"eta": -0.0414,
"twotheta": 18.5826,
}
)
ac.hklList.append((1, 1, 0))
ac.posList.append(P(0.0, 26.394192, 0.000000, 13.197096, -1.334500, 48.602000))
ac.paramList.append(
{
"Bin": -0.3047,
"Bout": -0.3047,
"rho": -1.2992,
"eta": -0.0351,
"twotheta": 26.3976,
}
)
session3.calculations.append(ac)
############################ SESSION4 ############################
# test crystal
session4 = SessionScenario()
session4.name = "test_orth"
session4.lattice = (1.41421, 1.41421, 1.00000, 90, 90, 90)
session4.system = "Orthorhombic"
session4.bmatrix = ((4.44288, 0, 0), (0, 4.44288, 0), (0, 0, 6.28319))
session4.ref1 = Reflection(
0,
1,
2,
P(0.0000, 122.4938, 0.0000, 80.7181, 90.0000, -45.0000),
15.0,
"ref1",
)
session4.ref2 = Reflection(
1,
0,
2,
P(0.0000, 122.4938, 0.000, 61.2469, 70.5288, -45.0000),
15,
"ref2",
)
session4.ref3 = Reflection(
1,
0,
1,
P(0.0000, 60.8172, 0.000, 30.4086, 54.7356, -45.0000),
15,
"ref3",
)
session4.ref4 = Reflection(
1,
1,
2,
P(0.0000, 135.0736, 0.000, 67.5368, 63.4349, 0.0000),
15,
"ref4",
)
session4.reflist = (session4.ref1, session4.ref2, session4.ref3, session4.ref4)
session4.umatrix = (
(0.70711, 0.70711, 0.00),
(-0.70711, 0.70711, 0.00),
(0.00, 0.00, 1.00),
)
session4.ref1calchkl = (0, 1, 2) # Must match the guessed value!
session4.ref2calchkl = (1, 0, 2)
############################ SESSION5 ############################
# test crystal
session5 = SessionScenario()
session5.name = "Dalyite"
session5.lattice = (7.51, 7.73, 7.00, 106.0, 113.5, 99.5)
session5.system = "Triclinic"
session5.bmatrix = (
(0.96021, 0.27759, 0.49527),
(0, 0.84559, 0.25738),
(0, 0, 0.89760),
)
session5.ref1 = Reflection(
0,
1,
2,
P(0.0000, 23.7405, 0.0000, 11.8703, 46.3100, 43.1304),
12.3984,
"ref1",
)
session5.ref2 = Reflection(
1,
0,
3,
P(0.0000, 34.4282, 0.000, 17.2141, 46.4799, 12.7852),
12.3984,
"ref2",
)
session5.ref3 = Reflection(
2,
2,
6,
P(0.0000, 82.8618, 0.000, 41.4309, 41.5154, 26.9317),
12.3984,
"ref3",
)
session5.ref4 = Reflection(
4,
1,
4,
P(0.0000, 71.2763, 0.000, 35.6382, 29.5042, 14.5490),
12.3984,
"ref4",
)
session5.ref5 = Reflection(
8,
3,
1,
P(0.0000, 97.8850, 0.000, 48.9425, 5.6693, 16.7929),
12.3984,
"ref5",
)
session5.ref6 = Reflection(
6,
4,
5,
P(0.0000, 129.6412, 0.000, 64.8206, 24.1442, 24.6058),
12.3984,
"ref6",
)
session5.ref7 = Reflection(
3,
5,
7,
P(0.0000, 135.9159, 0.000, 67.9579, 34.3696, 35.1816),
12.3984,
"ref7",
)
session5.reflist = (
session5.ref1,
session5.ref2,
session5.ref3,
session5.ref4,
session5.ref5,
session5.ref6,
session5.ref7,
)
session5.umatrix = (
(0.99982, 0.00073, 0.01903),
(0.00073, 0.99710, -0.07612),
(-0.01903, 0.07612, 0.99692),
)
session5.ref1calchkl = (0, 1, 2) # Must match the guessed value!
session5.ref2calchkl = (1, 0, 3)
############################ SESSION6 ############################
# test crystal
session6 = SessionScenario()
session6.name = "Acanthite"
session6.lattice = (4.229, 6.931, 7.862, 90, 99.61, 90)
session6.system = "Monoclinic"
session6.bmatrix = (
(1.50688, 0.00000, 0.13532),
(0.00000, 0.90653, 0.00000),
(0.00000, 0.00000, 0.79918),
)
session6.ref1 = Reflection(
0,
1,
2,
P(0.0000, 21.1188, 0.0000, 10.5594, 59.6447, 61.8432),
10.0,
"ref1",
)
session6.ref2 = Reflection(
1,
0,
3,
P(0.0000, 35.2291, 0.000, 62.4207, 87.1516, -90.0452),
10.0,
"ref2",
)
session6.ref3 = Reflection(
1,
1,
6,
P(0.0000, 64.4264, 0.000, 63.9009, 97.7940, -88.8808),
10.0,
"ref3",
)
session6.ref4 = Reflection(
1,
2,
2,
P(0.0000, 34.4369, 0.000, 72.4159, 60.1129, -29.0329),
10.0,
"ref4",
)
session6.ref5 = Reflection(
2,
2,
1,
P(0.0000, 43.0718, 0.000, 21.5359, 8.3873, 29.0230),
10.0,
"ref5",
)
session6.reflist = (
session6.ref1,
session6.ref2,
session6.ref3,
session6.ref4,
session6.ref5,
)
session6.umatrix = (
(0.99411, 0.00079, 0.10835),
(0.00460, 0.99876, -0.04949),
(-0.10825, 0.04969, 0.99288),
)
session6.ref1calchkl = (0, 1, 2) # Must match the guessed value!
session6.ref2calchkl = (1, 0, 3)
########################################################################
return (session1, session2, session3, session4, session5, session6)
| 2.203125 | 2 |
datasets/__init__.py | yubin1219/Semantic-Seg | 0 | 12793284 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .cityscapes import Cityscapes as cityscapes
| 1.179688 | 1 |
testscripts/RDKB/component/WIFIAgent/TS_WIFIAGENT_ForceDisable_CheckRadioEnable_InBridgeMode.py | rdkcmf/rdkb-tools-tdkb | 0 | 12793285 | ##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2020 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version='1.0' encoding='utf-8'?>
<xml>
<id></id>
<!-- Do not edit id. This will be auto filled while exporting. If you are adding a new script keep the id empty -->
<version>3</version>
<!-- Do not edit version. This will be auto incremented while updating. If you are adding a new script you can keep the vresion as 1 -->
<name>TS_WIFIAGENT_ForceDisable_CheckRadioEnable_InBridgeMode</name>
<!-- If you are adding a new script you can specify the script name. Script Name should be unique same as this file name with out .py extension -->
<primitive_test_id></primitive_test_id>
<!-- Do not change primitive_test_id if you are editing an existing script. -->
<primitive_test_name>WIFIAgent_Get</primitive_test_name>
<!-- -->
<primitive_test_version>1</primitive_test_version>
<!-- -->
<status>FREE</status>
<!-- -->
<synopsis>To check if 2.4G and 5G radio gets disabled when WiFi Force Disable is enabled in bridge mode</synopsis>
<!-- -->
<groups_id />
<!-- -->
<execution_time>15</execution_time>
<!-- -->
<long_duration>false</long_duration>
<!-- -->
<advanced_script>false</advanced_script>
<!-- execution_time is the time out time for test execution -->
<remarks></remarks>
<!-- Reason for skipping the tests if marked to skip -->
<skip>false</skip>
<!-- -->
<box_types>
<box_type>Broadband</box_type>
<!-- -->
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
<!-- -->
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIAGENT_128</test_case_id>
<test_objective>This test case is to check if 2.4G and 5G radio gets disabled when WiFi Force Disable is enabled in bridge mode</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components in DUT should be in a running state that includes the component under test (WiFi Agent)
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>WIFIAgent_Get
WIFIAgent_Set</api_or_interface_used>
<input_parameters>Device.WiFi.Radio.1.Enable
Device.WiFi.Radio.2.Enable
Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable
Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode</input_parameters>
<automation_approch>1.Load the module
2.Get the current lan mode and set the mode to bridge-static
3.Get the current status of Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable,Device.WiFi.Radio.1.Enable and Device.WiFi.Radio.2.Enable
4.Enable Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable
5.Device.WiFi.Radio.1.Enable and Device.WiFi.Radio.2.Enable should be disabled
6.Revert the Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable to previous
7.Verify that Device.WiFi.Radio.1.Enable and Device.WiFi.Radio.2.Enable also go to previous after revert operation
8.Revert the LAN mode to previous state
9.Unload the module</automation_approch>
<expected_output>On Enabling Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable 2.4G and 5G radio should be disabled in bridge-mode</expected_output>
<priority>High</priority>
<test_stub_interface>WIFIAGENT</test_stub_interface>
<test_script>TS_WIFIAGENT_ForceDisable_CheckRadioEnable_InBridgeMode</test_script>
<skipped>No</skipped>
<release_version>M84</release_version>
<remarks>None</remarks>
</test_cases>
<script_tags />
</xml>
'''
# use tdklib library,which provides a wrapper for tdk testcase script
import tdklib;
from time import sleep;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifiagent","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding DUT Ip and port while executing script
ip = <ipaddress>
port = <port>
obj.configureTestCase(ip,port,'TS_WIFIAGENT_ForceDisable_CheckRadioEnable_InBridgeMode');
#result of connection with test component and DUT
result =obj.getLoadModuleResult();
loadmodulestatus=obj.getLoadModuleResult();
if "SUCCESS" in loadmodulestatus.upper():
#Set the result status of execution
obj.setLoadModuleStatus("SUCCESS");
expectedresult ="SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
defaultLANmode = details.split("VALUE:")[1].split(' ')[0];
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 1: Get the current LAN mode";
print "EXPECTED RESULT 1: Should get the current LAN mode";
print "ACTUAL RESULT 1: default LAN mode is %s" %defaultLANmode;
print "[TEST EXECUTION RESULT] : SUCCESS";
mode = "bridge-static";
tdkTestObj = obj.createTestStep('WIFIAgent_Set');
tdkTestObj.addParameter("paramName","Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode")
tdkTestObj.addParameter("paramValue", mode)
tdkTestObj.addParameter("paramType","string")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 2: Change lanmode to %s " %mode
print "EXPECTED RESULT 2: Should change lanmode to %s" %mode
print "ACTUAL RESULT 2: Details: %s " %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#sleep to reflect the mode change
sleep(90);
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.1.Enable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
defaultRadio1 = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
defaultRadio1 = defaultRadio1.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 3: Get the Radio Enable status for 2.4GHz";
print "EXPECTED RESULT 3: Should get the Radio Enable status for 2.4GHz";
print "ACTUAL RESULT 3: Radio Enable status for 2.4GHz state is %s" %defaultRadio1;
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.2.Enable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
defaultRadio2 = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
defaultRadio2 = defaultRadio2.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 4: Get the Radio Enable status for 5GHz";
print "EXPECTED RESULT 4: Should get the Radio Enable status for 5GHz";
print "ACTUAL RESULT 4: Radio Enable status for 5GHz state is %s" %defaultRadio2;
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
default = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
default = default.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 5: Get the current WiFi Force Disable state";
print "EXPECTED RESULT 5: Should get current WiFi Force Disable state";
print "ACTUAL RESULT 5: current WiFi Force Disable state is %s" %default;
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Set');
tdkTestObj.addParameter("paramName","Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable")
tdkTestObj.addParameter("paramValue", "true");
tdkTestObj.addParameter("paramType","boolean")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 6: Enable the WiFi Force Disable";
print "EXPECTED RESULT 6: Should enable Force Disable state";
print "ACTUAL RESULT 6: %s" %details;
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.1.Enable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
Radio1 = tdkTestObj.getResultDetails();
if expectedresult in actualresult and "false" in Radio1:
Radio1 = Radio1.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 7: Get the Radio Enable status for 2.4GHz as false";
print "EXPECTED RESULT 7: Should get the Radio Enable status for 2.4GHz as false";
print "ACTUAL RESULT 7: Radio Enable status for 2.4GHz state is %s" %Radio1;
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.2.Enable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
Radio2 = tdkTestObj.getResultDetails();
if expectedresult in actualresult and "false" in Radio2:
Radio2 = Radio2.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 8: Get the Radio Enable status for 5GHz as false";
print "EXPECTED RESULT 8: Should get the Radio Enable status for 5GHz as false";
print "ACTUAL RESULT 8: Radio Enable status for 5GHz state is %s" %Radio2;
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 8: Get the Radio Enable status for 5GHz as false";
print "EXPECTED RESULT 8: Should get the Radio Enable status for 5GHz as false";
print "ACTUAL RESULT 8: Radio Enable status for 5GHz state is %s" %Radio2;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 7: Get the Radio Enable status for 2.4GHz as false";
print "EXPECTED RESULT 7: Should get the Radio Enable status for 2.4GHz as false";
print "ACTUAL RESULT 7: Radio Enable status for 2.4GHz state is %s" %Radio1;
print "[TEST EXECUTION RESULT] : FAILURE";
#Revert the value
tdkTestObj = obj.createTestStep('WIFIAgent_Set');
tdkTestObj.addParameter("paramName","Device.WiFi.X_RDK-CENTRAL_COM_ForceDisable")
tdkTestObj.addParameter("paramValue", default);
tdkTestObj.addParameter("paramType","boolean")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 9: Revert the WiFi Force Disable status to previous";
print "EXPECTED RESULT 9: Should disable WiFi Force Disable status to %s" %default;
print "ACTUAL RESULT 9: %s" %details;
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.1.Enable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult and defaultRadio1 in details:
details = details.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 10: Check if Radio enable status for 2.4GHz is in previous state after reverting WiFi Force Disable";
print "EXPECTED RESULT 10: Radio enable status for 2.4GHz should be in previous state after reverting WiFi Force Disable";
print "ACTUAL RESULT 10: default value was :%s and after revertion %s" %(defaultRadio1,details)
print "[TEST EXECUTION RESULT] : SUCCESS";
tdkTestObj = obj.createTestStep('WIFIAgent_Get');
tdkTestObj.addParameter("paramName","Device.WiFi.Radio.2.Enable")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult and defaultRadio2 in details:
details = details.split("VALUE:")[1].split(" ")[0].strip();
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 11: Check if Radio enable status for 5GHz is in previous state after reverting WiFi Force Disable";
print "EXPECTED RESULT 11: Radio enable status for 5GHz should be in previous state after reverting WiFi Force Disable";
print "ACTUAL RESULT 11: default value was :%s and after revertion %s" %(defaultRadio2,details)
print "[TEST EXECUTION RESULT] : SUCCESS";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 11: Check if Radio enable status for 5GHz is in previous state after reverting WiFi Force Disable";
print "EXPECTED RESULT 11: Radio enable status for 5GHz should be in previous state after reverting WiFi Force Disable";
print "ACTUAL RESULT 11: default value was :%s and after revertion %s" %(defaultRadio2,details)
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 10: Check if Radio enable status for 2.4GHz is in previous state after reverting WiFi Force Disable";
print "EXPECTED RESULT 10: Radio enable status for 2.4GHz should be in previous state after reverting WiFi Force Disable";
print "ACTUAL RESULT 10: default value was :%s and after revertion %s" %(defaultRadio1,details)
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 9: Revert the WiFi Force Disable status to previous";
print "EXPECTED RESULT 9: Should disable WiFi Force Disable status to %s" %default;
print "ACTUAL RESULT 9: %s" %details;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 6: Enable the WiFi Force Disable";
print "EXPECTED RESULT 6: Should enable Force Disable state";
print "ACTUAL RESULT 6: %s" %details;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 5: Get the current WiFi Force Disable state";
print "EXPECTED RESULT 5: Should get current WiFi Force Disable state";
print "ACTUAL RESULT 5: current WiFi Force Disable state is %s" %default;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 4: Get the Radio Enable status for 5GHz";
print "EXPECTED RESULT 4: Should get the Radio Enable status for 5GHz";
print "ACTUAL RESULT 4: Radio Enable status for 5GHz state is %s" %defaultRadio2;
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 3: Get the Radio Enable status for 2.4GHz";
print "EXPECTED RESULT 3: Should get the Radio Enable status for 2.4GHz";
print "ACTUAL RESULT 3: Radio Enable status for 2.4GHz state is %s" %defaultRadio1;
print "[TEST EXECUTION RESULT] : FAILURE";
#Revert to previous lan mode
tdkTestObj = obj.createTestStep('WIFIAgent_Set');
tdkTestObj.addParameter("paramName","Device.X_CISCO_COM_DeviceControl.LanManagementEntry.1.LanMode")
tdkTestObj.addParameter("paramValue", defaultLANmode)
tdkTestObj.addParameter("paramType","string")
tdkTestObj.executeTestCase("expectedresult");
actualresult = tdkTestObj.getResult();
details = tdkTestObj.getResultDetails();
if expectedresult in actualresult:
#Set the result status of execution
tdkTestObj.setResultStatus("SUCCESS");
print "TEST STEP 12: Change lanmode to the previous"
print "EXPECTED RESULT 12: Should change lanmode to %s" %defaultLANmode
print "ACTUAL RESULT 12: Details: %s " %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : SUCCESS";
#sleep for change in mode reflection
sleep(90);
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 12: Change lanmode to the previous"
print "EXPECTED RESULT 12: Should change lanmode to %s" %defaultLANmode
print "ACTUAL RESULT 12: Details: %s " %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
#Set the result status of execution
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 2: Change lanmode to %s" %mode
print "EXPECTED RESULT 2: Should change lanmode to %s" %mode
print "ACTUAL RESULT 2: Details: %s " %details;
#Get the result of execution
print "[TEST EXECUTION RESULT] : FAILURE";
else:
tdkTestObj.setResultStatus("FAILURE");
print "TEST STEP 1: Get the current LAN mode";
print "EXPECTED RESULT 1: Should get the current LAN mode";
print "ACTUAL RESULT 1: default LAN mode is %s" %defaultLANmode;
print "[TEST EXECUTION RESULT] : FAILURE";
obj.unloadModule("wifiagent")
else:
print "Failed to load wifiagent module";
obj.setLoadModuleStatus("FAILURE");
| 1.617188 | 2 |
src/darcyai/file_stream.py | edgeworx/darcyai | 0 | 12793286 | <reponame>edgeworx/darcyai<filename>src/darcyai/file_stream.py
import threading
from darcyai.utils import validate_not_none, validate_type, validate
class FileStream:
"""
A class that represents a stream of data from a file.
# Arguments
path: The path to the file to write to.
    append: Whether to append to the file or not. Default is `False`.
    encoding: The encoding to use for the file. Default is `"utf-8"`.
    buffer_size: The size of the write buffer in bytes. Default is `1024 * 1024`.
    flush_interval: The interval in seconds between automatic flushes. Default is `0` (disabled).
# Examples
```python
>>> from darcyai.file_stream import FileStream
    >>> file_stream = FileStream(path="output.txt",
... append=True,
... encoding="utf-8",
... buffer_size=1024*1024,
... flush_interval=5)
```
"""
def __init__(self,
path: str,
append: bool = False,
encoding: str = "utf-8",
buffer_size: int = 1024 * 1024,
flush_interval: int = 0) -> None:
validate_not_none(path, "path is required")
validate_type(path, str, "path must be a string")
validate_not_none(append, "append is required")
validate_type(append, bool, "append must be a boolean")
validate_not_none(encoding, "encoding is required")
validate_type(encoding, str, "encoding must be a string")
try:
_ = "test".encode(encoding)
except Exception as e:
raise ValueError(f"encoding '{encoding}' is not supported") from e
validate_not_none(buffer_size, "buffer_size is required")
validate_type(buffer_size, int, "buffer_size must be an integer")
validate(buffer_size >= 0, "buffer_size must be greater than or equal to 0")
validate_not_none(flush_interval, "flush_interval is required")
validate_type(flush_interval, int, "flush_interval must be an integer")
validate(flush_interval >= 0, "flush_interval must be greater than or equal to 0")
#pylint: disable=consider-using-with
self.__file = open(file=path,
mode="ab" if append else "wb",
buffering=buffer_size)
self.__encoding = encoding
self.__flush_interval = flush_interval
self.__flush()
def close(self):
"""
Closes the file.
# Examples
```python
>>> from darcyai.file_stream import FileStream
        >>> file_stream = FileStream(path="output.txt",
... append=True,
... encoding="utf-8",
... buffer_size=1024*1024,
... flush_interval=5)
>>> file_stream.close()
```
"""
self.__file.close()
def write_string(self, data: str) -> None:
"""
Writes the data to the file.
# Arguments
data: The data to write.
# Examples
```python
>>> from darcyai.file_stream import FileStream
        >>> file_stream = FileStream(path="output.txt",
... append=True,
... encoding="utf-8",
... buffer_size=1024*1024,
... flush_interval=5)
>>> file_stream.write_string("Hello World!")
```
"""
self.write_bytes(data.encode(self.__encoding))
def write_bytes(self, data: bytes) -> None:
"""
Writes the data to the file.
# Arguments
data: The data to write.
# Examples
```python
>>> from darcyai.file_stream import FileStream
        >>> file_stream = FileStream(path="output.txt",
... append=True,
... encoding="utf-8",
... buffer_size=1024*1024,
... flush_interval=5)
>>> file_stream.write_bytes(b"Hello World!")
```
"""
self.__file.write(data)
def __flush(self):
"""
Flushes the file.
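        When flush_interval is greater than 0, a threading.Timer is re-armed after
        each flush so that flushing repeats periodically in a background thread.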
"""
try:
self.__file.flush()
finally:
if self.__flush_interval > 0:
threading.Timer(interval=self.__flush_interval, function=self.__flush).start()
| 2.984375 | 3 |
graphene_django_cud/mutations.py | martasd/graphene-django-cud | 0 | 12793287 | <gh_stars>0
from collections import OrderedDict
import graphene
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import models, transaction
from graphene import Mutation, InputObjectType
from graphene.types.mutation import MutationOptions
from graphene.types.utils import yank_fields_from_attrs
from graphene.utils.str_converters import to_snake_case
from graphene_django.registry import get_global_registry
from graphql import GraphQLError
from graphql_relay import to_global_id
from graphene_django_cud.registry import get_type_meta_registry
from .util import disambiguate_id, disambiguate_ids, get_input_fields_for_model, \
get_all_optional_input_fields_for_model, is_many_to_many, get_m2m_all_extras_field_names, \
get_likely_operation_from_name, get_fk_all_extras_field_names, get_filter_fields_input_args
meta_registry = get_type_meta_registry()
class DjangoCudBase(Mutation):
class Meta:
abstract = True
@classmethod
def get_or_create_foreign_obj(
cls,
field,
value,
data,
info
):
field_type = data.get('type', 'ID')
if field_type == "ID":
return value
else:
input_type_meta = meta_registry.get_meta_for_type(field_type)
# Create new obj
related_obj = cls.create_obj(
value,
info,
input_type_meta.get('auto_context_fields', {}),
input_type_meta.get('many_to_many_extras', {}),
input_type_meta.get('foreign_key_extras', {}),
input_type_meta.get('many_to_one_extras', {}),
field.related_model
)
return related_obj.id
@classmethod
def get_or_create_m2m_objs(
cls,
field,
values,
data,
operation,
info
):
results = []
if not values:
return results
if isinstance(data, bool):
data = {}
field_type = data.get('type', 'ID')
for value in values:
if field_type == "ID":
related_obj = field.related_model.objects.get(pk=disambiguate_id(value))
else:
# This is something that we are going to create
input_type_meta = meta_registry.get_meta_for_type(field_type)
# Create new obj
related_obj = cls.create_obj(
value,
info,
input_type_meta.get('auto_context_fields', {}),
input_type_meta.get('many_to_many_extras', {}),
input_type_meta.get('foreign_key_extras', {}),
input_type_meta.get('many_to_one_extras', {}),
field.related_model
)
results.append(related_obj)
return results
@classmethod
def get_or_create_m2o_objs(
cls,
obj,
field,
values,
data,
operation,
info,
Model
):
results = []
if not values:
return results
field_type = data.get('type', 'auto')
for value in values:
if field_type == "ID":
related_obj = field.related_model.objects.get(pk=disambiguate_id(value))
elif field_type == "auto":
                # In this case, a new type has been created for us. Let's first find its name,
                # then get its meta, and then create it. We also need to attach the obj as the
# foreign key.
_type_name = data.get('type_name', f"Create{Model.__name__}{field.name.capitalize()}")
input_type_meta = meta_registry.get_meta_for_type(field_type)
# .id has to be called here, as the regular input for a foreignkey is ID!
value[field.field.name] = obj.id
related_obj = cls.create_obj(
value,
info,
input_type_meta.get('auto_context_fields', {}),
input_type_meta.get('many_to_many_extras', {}),
input_type_meta.get('foreign_key_extras', {}),
input_type_meta.get('many_to_one_extras', {}),
field.related_model
)
else:
# This is something that we are going to create
input_type_meta = meta_registry.get_meta_for_type(field_type)
# Create new obj
related_obj = cls.create_obj(
value,
info,
input_type_meta.get('auto_context_fields', {}),
input_type_meta.get('many_to_many_extras', {}),
input_type_meta.get('foreign_key_extras', {}),
input_type_meta.get('many_to_one_extras', {}),
field.related_model
)
return []
@classmethod
def create_obj(
cls,
input,
info,
auto_context_fields,
many_to_many_extras,
foreign_key_extras,
many_to_one_extras,
Model
):
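        # Build a new Model instance from the (possibly nested) input: auto context
        # fields are copied from the request context, plain fields pass through any
        # handle_<field> hooks, foreign-key extras are created first so their ids can
        # be assigned, and many-to-many / many-to-one extras are created or resolved
        # and attached after the object itself has been created.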
meta_registry = get_type_meta_registry()
model_field_values = {}
many_to_many_values = {}
many_to_many_extras_field_names = get_m2m_all_extras_field_names(many_to_many_extras)
many_to_one_extras_field_names = get_m2m_all_extras_field_names(many_to_one_extras) # The layout is the same as for m2m
foreign_key_extras_field_names = get_fk_all_extras_field_names(foreign_key_extras)
for field_name, context_name in auto_context_fields.items():
if hasattr(info.context, context_name):
model_field_values[field_name] = getattr(info.context, context_name)
for name, value in super(type(input), input).items():
# Handle these separately
if name in many_to_many_extras_field_names or name in foreign_key_extras_field_names or name in many_to_one_extras_field_names:
continue
field = Model._meta.get_field(name)
new_value = value
# We have to handle this case specifically, by using the fields
# .set()-method, instead of direct assignment
field_is_many_to_many = is_many_to_many(field)
value_handle_name = "handle_" + name
if hasattr(cls, value_handle_name):
handle_func = getattr(cls, value_handle_name)
assert callable(
handle_func
), f"Property {value_handle_name} on {cls.__name__} is not a function."
new_value = handle_func(value, name, info)
# On some fields we perform some default conversion, if the value was not transformed above.
if new_value == value and value is not None:
if type(field) in (models.ForeignKey, models.OneToOneField):
# Delete auto context field here, if it exists. We have to do this explicitly
# as we change the name below
if name in auto_context_fields:
del model_field_values[name]
name = getattr(field, "db_column", None) or name + "_id"
new_value = disambiguate_id(value)
elif field_is_many_to_many:
new_value = disambiguate_ids(value)
if field_is_many_to_many:
many_to_many_values[name] = new_value
else:
model_field_values[name] = new_value
# We don't have an object yet, and we potentially need to create
# parents before proceeding.
for name, extras in foreign_key_extras.items():
value = input.get(name, None)
field = Model._meta.get_field(name)
obj_id = cls.get_or_create_foreign_obj(
field,
value,
extras,
info
)
model_field_values[name + "_id"] = obj_id
# Foreign keys are added, we are ready to create our object
obj = Model.objects.create(**model_field_values)
for name, values in many_to_many_values.items():
getattr(obj, name).set(values)
# Handle extras fields
many_to_many_to_add = {}
many_to_many_to_remove = {}
for name, extras in many_to_many_extras.items():
field = Model._meta.get_field(name)
            if name not in many_to_many_to_add:
many_to_many_to_add[name] = []
many_to_many_to_remove[name] = []
for extra_name, data in extras.items():
field_name = name
if extra_name != "exact":
field_name = name + "_" + extra_name
values = input.get(field_name, None)
if isinstance(data, bool):
data = {}
operation = data.get('operation') or get_likely_operation_from_name(extra_name)
objs = cls.get_or_create_m2m_objs(
field,
values,
data,
operation,
info
)
if len(objs) > 0:
if operation == "add":
many_to_many_to_add[name] += objs
else:
many_to_many_to_remove[name] += objs
many_to_one_to_add = {}
many_to_one_to_remove = {}
for name, extras in many_to_one_extras.items():
field = Model._meta.get_field(name)
            if name not in many_to_one_to_add:
many_to_one_to_add[name] = []
many_to_one_to_remove[name] = []
for extra_name, data in extras.items():
field_name = name
if extra_name != "exact":
field_name = name + "_" + extra_name
values = input.get(field_name, None)
if isinstance(data, bool):
data = {}
operation = data.get('operation') or get_likely_operation_from_name(extra_name)
if operation == "add":
objs = cls.get_or_create_m2o_objs(
obj,
field,
values,
data,
operation,
info,
Model
)
many_to_one_to_add[name] += objs
else:
many_to_one_to_remove[name] += disambiguate_ids(values)
for name, objs in many_to_one_to_add.items():
getattr(obj, name).add(*objs)
for name, objs in many_to_one_to_remove.items():
# Only nullable foreign key reverse rels have the remove method,
# so we use this method instead
getattr(obj, name).filter(id__in=objs).delete()
for name, objs in many_to_many_to_add.items():
getattr(obj, name).add(*objs)
for name, objs in many_to_many_to_remove.items():
getattr(obj, name).remove(*objs)
return obj
@classmethod
def update_obj(
cls,
obj,
input,
info,
auto_context_fields,
many_to_many_extras,
foreign_key_extras,
many_to_one_extras,
Model
):
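        # Update an existing Model instance in place using the same conventions as
        # create_obj (context fields, handle_<field> hooks, nested foreign-key,
        # many-to-many and many-to-one extras). The caller is expected to call
        # obj.save() afterwards.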
many_to_many_values = {}
many_to_many_add_values = {}
many_to_many_remove_values = {}
many_to_many_extras_field_names = get_m2m_all_extras_field_names(many_to_many_extras)
many_to_one_extras_field_names = get_m2m_all_extras_field_names(many_to_one_extras) # The layout is the same as for m2m
foreign_key_extras_field_names = get_fk_all_extras_field_names(foreign_key_extras)
for field_name, context_name in auto_context_fields.items():
if hasattr(info.context, context_name):
setattr(obj, field_name, getattr(info.context, context_name))
for name, value in super(type(input), input).items():
# Handle these separately
if name in many_to_many_extras_field_names or name in foreign_key_extras_field_names or name in many_to_one_extras_field_names:
continue
field = Model._meta.get_field(name)
new_value = value
# We have to handle this case specifically, by using the fields
# .set()-method, instead of direct assignment
field_is_many_to_many = is_many_to_many(field)
value_handle_name = "handle_" + name
if hasattr(cls, value_handle_name):
handle_func = getattr(cls, value_handle_name)
assert callable(
handle_func
), f"Property {value_handle_name} on {cls.__name__} is not a function."
new_value = handle_func(value, name, info)
# On some fields we perform some default conversion, if the value was not transformed above.
if new_value == value and value is not None:
if type(field) in (models.ForeignKey, models.OneToOneField):
# Delete auto context field here, if it exists. We have to do this explicitly
# as we change the name below
if name in auto_context_fields:
setattr(obj, name, None)
name = getattr(field, "db_column", None) or name + "_id"
new_value = disambiguate_id(value)
elif field_is_many_to_many:
new_value = disambiguate_ids(value)
if field_is_many_to_many:
many_to_many_values[name] = new_value
else:
setattr(obj, name, new_value)
# Handle extras fields
for name, extras in foreign_key_extras.items():
value = input.get(name, None)
field = Model._meta.get_field(name)
obj_id = cls.get_or_create_foreign_obj(
field,
value,
extras,
info
)
setattr(obj, name + "_id", obj_id)
many_to_many_to_add = {}
many_to_many_to_remove = {}
for name, extras in many_to_many_extras.items():
field = Model._meta.get_field(name)
            if name not in many_to_many_to_add:
many_to_many_to_add[name] = []
many_to_many_to_remove[name] = []
for extra_name, data in extras.items():
field_name = name
if extra_name != "exact":
field_name = name + "_" + extra_name
values = input.get(field_name, None)
if isinstance(data, bool):
data = {}
operation = data.get('operation') or get_likely_operation_from_name(extra_name)
objs = cls.get_or_create_m2m_objs(
field,
values,
data,
operation,
info
)
if operation == "add":
many_to_many_to_add[name] += objs
else:
many_to_many_to_remove[name] += objs
many_to_one_to_add = {}
many_to_one_to_remove = {}
for name, extras in many_to_one_extras.items():
field = Model._meta.get_field(name)
            if name not in many_to_one_to_add:
many_to_one_to_add[name] = []
many_to_one_to_remove[name] = []
for extra_name, data in extras.items():
field_name = name
if extra_name != "exact":
field_name = name + "_" + extra_name
values = input.get(field_name, None)
if isinstance(data, bool):
data = {}
operation = data.get('operation') or get_likely_operation_from_name(extra_name)
if operation == "add":
objs = cls.get_or_create_m2o_objs(
obj,
field,
values,
data,
operation,
info,
Model
)
many_to_one_to_add[name] += objs
else:
many_to_one_to_remove[name] += disambiguate_ids(values)
for name, objs in many_to_one_to_add.items():
getattr(obj, name).add(*objs)
for name, objs in many_to_one_to_remove.items():
# Only nullable foreign key reverse rels have the remove method,
# so we use this method instead
getattr(obj, name).filter(id__in=objs).delete()
for name, objs in many_to_many_to_add.items():
getattr(obj, name).add(*objs)
for name, objs in many_to_many_to_remove.items():
getattr(obj, name).remove(*objs)
return obj
class DjangoUpdateMutationOptions(MutationOptions):
model = None
only_fields = None
exclude_fields = None
return_field_name = None
permissions = None
login_required = None
auto_context_fields = None
optional_fields = ()
required_fields = None
nested_fields = None
type_name = None
many_to_many_extras = None
many_to_one_extras=None
foreign_key_extras = None
class DjangoUpdateMutation(DjangoCudBase):
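    """
    Builds an update mutation for ``model``.

    Illustrative usage sketch (``MyModel`` and the surrounding schema are
    hypothetical; a DjangoObjectType must already be registered for the model):

        class UpdateMyModelMutation(DjangoUpdateMutation):
            class Meta:
                model = MyModel

        class Mutation(graphene.ObjectType):
            update_my_model = UpdateMyModelMutation.Field()
    """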
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
permissions=None,
login_required=None,
only_fields=(),
exclude_fields=(),
auto_context_fields={},
optional_fields=(),
required_fields=(),
return_field_name=None,
many_to_many_extras=None,
many_to_one_extras=None,
foreign_key_extras=None,
type_name="",
**kwargs,
):
registry = get_global_registry()
meta_registry = get_type_meta_registry()
model_type = registry.get_type_for_model(model)
assert model_type, f"Model type must be registered for model {model}"
if not return_field_name:
return_field_name = to_snake_case(model.__name__)
if many_to_one_extras is None:
many_to_one_extras = {}
if foreign_key_extras is None:
foreign_key_extras = {}
if many_to_many_extras is None:
many_to_many_extras = {}
input_type_name = type_name or f"Update{model.__name__}Input"
model_fields = get_input_fields_for_model(
model,
only_fields,
exclude_fields,
optional_fields=tuple(auto_context_fields.keys()) + optional_fields,
required_fields=required_fields,
many_to_many_extras=many_to_many_extras,
foreign_key_extras=foreign_key_extras,
many_to_one_extras=many_to_one_extras,
parent_type_name=input_type_name
)
InputType = type(
input_type_name, (InputObjectType,), model_fields
)
# Register meta-data
meta_registry.register(
input_type_name,
{
'auto_context_fields': auto_context_fields or {},
'optional_fields': optional_fields,
'required_fields': required_fields,
'many_to_many_extras': many_to_many_extras or {},
'many_to_one_extras': many_to_one_extras or {},
'foreign_key_extras': foreign_key_extras or {}
}
)
registry.register_converted_field(
input_type_name,
InputType
)
arguments = OrderedDict(
id=graphene.ID(required=True), input=InputType(required=True)
)
output_fields = OrderedDict()
output_fields[return_field_name] = graphene.Field(model_type)
_meta = DjangoUpdateMutationOptions(cls)
_meta.model = model
_meta.fields = yank_fields_from_attrs(output_fields, _as=graphene.Field)
_meta.return_field_name = return_field_name
_meta.permissions = permissions
_meta.auto_context_fields = auto_context_fields or {}
_meta.optional_fields = optional_fields
_meta.required_fields = required_fields
_meta.InputType = InputType
_meta.input_type_name = input_type_name
_meta.many_to_many_extras = many_to_many_extras
_meta.many_to_one_extras = many_to_one_extras
_meta.foreign_key_extras = foreign_key_extras
_meta.login_required = _meta.login_required or (
_meta.permissions and len(_meta.permissions) > 0
)
super().__init_subclass_with_meta__(arguments=arguments, _meta=_meta, **kwargs)
def get_queryset(self):
Model = self._meta.model
return Model.objects
@classmethod
def mutate(cls, root, info, id, input):
if cls._meta.login_required and not info.context.user.is_authenticated:
raise GraphQLError("Must be logged in to access this mutation.")
if cls._meta.permissions and len(cls._meta.permissions) > 0:
if not info.context.user.has_perms(cls._meta.permissions):
raise GraphQLError("Not permitted to access this mutation.")
id = disambiguate_id(id)
Model = cls._meta.model
queryset = cls.get_queryset(Model)
obj = queryset.get(pk=id)
auto_context_fields = cls._meta.auto_context_fields or {}
obj = cls.update_obj(
obj,
input,
info,
auto_context_fields,
cls._meta.many_to_many_extras,
cls._meta.foreign_key_extras,
cls._meta.many_to_one_extras,
Model
)
obj.save()
kwargs = {cls._meta.return_field_name: obj}
return cls(**kwargs)
class DjangoPatchMutationOptions(MutationOptions):
model = None
only_fields = None
exclude_fields = None
return_field_name = None
permissions = None
login_required = None
auto_context_fields = None
many_to_many_extras = None
many_to_one_extras = None
foreign_key_extras = None
type_name = None
class DjangoPatchMutation(DjangoCudBase):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
permissions=None,
login_required=None,
only_fields=(),
exclude_fields=(),
return_field_name=None,
auto_context_fields={},
many_to_one_extras = None,
many_to_many_extras = None,
foreign_key_extras = None,
type_name=None,
**kwargs,
):
registry = get_global_registry()
meta_registry = get_type_meta_registry()
model_type = registry.get_type_for_model(model)
assert model_type, f"Model type must be registered for model {model}"
if not return_field_name:
return_field_name = to_snake_case(model.__name__)
if many_to_one_extras is None:
many_to_one_extras = {}
if foreign_key_extras is None:
foreign_key_extras = {}
if many_to_many_extras is None:
many_to_many_extras = {}
input_type_name = type_name or f"Patch{model.__name__}Input"
model_fields = get_all_optional_input_fields_for_model(
model,
only_fields,
exclude_fields,
many_to_many_extras=many_to_many_extras,
foreign_key_extras=foreign_key_extras,
many_to_one_extras=many_to_one_extras,
parent_type_name=type_name,
)
InputType = type(
input_type_name, (InputObjectType,), model_fields
)
# Register meta-data
meta_registry.register(
input_type_name,
{
'auto_context_fields': auto_context_fields or {},
'many_to_many_extras': many_to_many_extras or {},
'many_to_one_extras': many_to_one_extras or {},
'foreign_key_extras': foreign_key_extras or {}
}
)
registry.register_converted_field(
input_type_name,
InputType
)
arguments = OrderedDict(
id=graphene.ID(required=True), input=InputType(required=True)
)
output_fields = OrderedDict()
output_fields[return_field_name] = graphene.Field(model_type)
_meta = DjangoPatchMutationOptions(cls)
_meta.model = model
_meta.fields = yank_fields_from_attrs(output_fields, _as=graphene.Field)
_meta.return_field_name = return_field_name
_meta.permissions = permissions
_meta.auto_context_fields = auto_context_fields or {}
_meta.InputType = InputType
_meta.input_type_name = input_type_name
_meta.many_to_many_extras = many_to_many_extras
_meta.many_to_one_extras = many_to_one_extras
_meta.foreign_key_extras = foreign_key_extras
_meta.login_required = _meta.login_required or (
_meta.permissions and len(_meta.permissions) > 0
)
super().__init_subclass_with_meta__(arguments=arguments, _meta=_meta, **kwargs)
    @classmethod
    def get_queryset(cls, Model):
        # Hook point: override to restrict which objects this mutation may modify.
        return Model.objects
@classmethod
def mutate(cls, root, info, id, input):
if cls._meta.login_required and not info.context.user.is_authenticated:
raise GraphQLError("Must be logged in to access this mutation.")
if cls._meta.permissions and len(cls._meta.permissions) > 0:
if not info.context.user.has_perms(cls._meta.permissions):
raise GraphQLError("Not permitted to access this mutation.")
id = disambiguate_id(id)
Model = cls._meta.model
queryset = cls.get_queryset(Model)
obj = queryset.get(pk=id)
auto_context_fields = cls._meta.auto_context_fields or {}
obj = cls.update_obj(
obj,
input,
info,
auto_context_fields,
cls._meta.many_to_many_extras,
cls._meta.foreign_key_extras,
cls._meta.many_to_one_extras,
Model
)
obj.save()
kwargs = {cls._meta.return_field_name: obj}
return cls(**kwargs)
class DjangoCreateMutationOptions(MutationOptions):
model = None
only_fields = None
exclude_fields = None
return_field_name = None
permissions = None
login_required = None
auto_context_fields = None
optional_fields = ()
required_fields = ()
many_to_many_extras = None
many_to_one_extras = None
foreign_key_extras = None
type_name = None
class DjangoCreateMutation(DjangoCudBase):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
permissions=None,
login_required=None,
only_fields=(),
exclude_fields=(),
optional_fields=(),
required_fields=(),
auto_context_fields={},
return_field_name=None,
many_to_many_extras=None,
foreign_key_extras = None,
many_to_one_extras = None,
type_name=None,
**kwargs,
):
registry = get_global_registry()
meta_registry = get_type_meta_registry()
model_type = registry.get_type_for_model(model)
if many_to_one_extras is None:
many_to_one_extras = {}
if foreign_key_extras is None:
foreign_key_extras = {}
if many_to_many_extras is None:
many_to_many_extras = {}
assert model_type, f"Model type must be registered for model {model}"
if not return_field_name:
return_field_name = to_snake_case(model.__name__)
input_type_name = type_name or f"Create{model.__name__}Input"
model_fields = get_input_fields_for_model(
model,
only_fields,
exclude_fields,
tuple(auto_context_fields.keys()) + optional_fields,
required_fields,
many_to_many_extras,
foreign_key_extras,
many_to_one_extras,
parent_type_name=input_type_name,
)
InputType = type(
input_type_name, (InputObjectType,), model_fields
)
# Register meta-data
meta_registry.register(
input_type_name,
{
'auto_context_fields': auto_context_fields or {},
'optional_fields': optional_fields,
'required_fields': required_fields,
'many_to_many_extras': many_to_many_extras or {},
'foreign_key_extras': foreign_key_extras or {}
}
)
registry.register_converted_field(
input_type_name,
InputType
)
arguments = OrderedDict(input=InputType(required=True))
output_fields = OrderedDict()
output_fields[return_field_name] = graphene.Field(model_type)
_meta = DjangoCreateMutationOptions(cls)
_meta.model = model
_meta.fields = yank_fields_from_attrs(output_fields, _as=graphene.Field)
_meta.return_field_name = return_field_name
_meta.optional_fields = optional_fields
_meta.required_fields = required_fields
_meta.permissions = permissions
_meta.auto_context_fields = auto_context_fields or {}
_meta.many_to_many_extras = many_to_many_extras or {}
_meta.foreign_key_extras = foreign_key_extras
_meta.many_to_one_extras = many_to_one_extras or {}
_meta.InputType = InputType
_meta.input_type_name = input_type_name
_meta.login_required = _meta.login_required or (
_meta.permissions and len(_meta.permissions) > 0
)
super().__init_subclass_with_meta__(arguments=arguments, _meta=_meta, **kwargs)
@classmethod
def mutate(cls, root, info, input):
if cls._meta.login_required and not info.context.user.is_authenticated:
raise GraphQLError("Must be logged in to access this mutation.")
if cls._meta.permissions and len(cls._meta.permissions) > 0:
if not info.context.user.has_perms(cls._meta.permissions):
raise GraphQLError("Not permitted to access this mutation.")
Model = cls._meta.model
model_field_values = {}
auto_context_fields = cls._meta.auto_context_fields or {}
obj = cls.create_obj(
input,
info,
auto_context_fields,
cls._meta.many_to_many_extras,
cls._meta.foreign_key_extras,
cls._meta.many_to_one_extras,
Model
)
kwargs = {cls._meta.return_field_name: obj}
return cls(**kwargs)
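# Example usage (hypothetical model name; not part of this module) showing how the
# mutation classes above are meant to be subclassed via their Meta options:
#
#     class CreateDogMutation(DjangoCreateMutation):
#         class Meta:
#             model = Dog
#             login_required = True
#
# The generated mutation takes a single `input` argument built from the model's fields
# and returns the created instance under the snake_cased model name (here `dog`).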
class DjangoBatchCreateMutationOptions(MutationOptions):
model = None
only_fields = None
exclude_fields = None
return_field_name = None
permissions = None
login_required = None
auto_context_fields = None
optional_fields = ()
required_fields = ()
many_to_many_extras = None
many_to_one_extras = None
foreign_key_extras = None
type_name = None
use_type_name = None
class DjangoBatchCreateMutation(DjangoCudBase):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
permissions=None,
login_required=None,
only_fields=(),
exclude_fields=(),
optional_fields=(),
required_fields=(),
auto_context_fields={},
return_field_name=None,
many_to_many_extras=None,
foreign_key_extras = None,
many_to_one_extras = None,
type_name=None,
use_type_name=None,
**kwargs,
):
registry = get_global_registry()
meta_registry = get_type_meta_registry()
model_type = registry.get_type_for_model(model)
if many_to_one_extras is None:
many_to_one_extras = {}
if foreign_key_extras is None:
foreign_key_extras = {}
if many_to_many_extras is None:
many_to_many_extras = {}
assert model_type, f"Model type must be registered for model {model}"
if not return_field_name:
# Pluralize
return_field_name = to_snake_case(model.__name__) + "s"
if use_type_name:
input_type_name = use_type_name
InputType = registry.get_converted_field(
input_type_name
)
if not InputType:
raise GraphQLError(f"Could not find input type with name {input_type_name}")
else:
input_type_name = type_name or f"BatchCreate{model.__name__}Input"
model_fields = get_input_fields_for_model(
model,
only_fields,
exclude_fields,
tuple(auto_context_fields.keys()) + optional_fields,
required_fields,
many_to_many_extras,
foreign_key_extras,
many_to_one_extras,
parent_type_name=input_type_name,
)
InputType = type(
input_type_name, (InputObjectType,), model_fields
)
# Register meta-data
meta_registry.register(
input_type_name,
{
'auto_context_fields': auto_context_fields or {},
'optional_fields': optional_fields,
'required_fields': required_fields,
'many_to_many_extras': many_to_many_extras or {},
'foreign_key_extras': foreign_key_extras or {}
}
)
registry.register_converted_field(
input_type_name,
InputType
)
arguments = OrderedDict(input=graphene.List(InputType, required=True))
output_fields = OrderedDict()
output_fields[return_field_name] = graphene.List(model_type)
_meta = DjangoBatchCreateMutationOptions(cls)
_meta.model = model
_meta.fields = yank_fields_from_attrs(output_fields, _as=graphene.Field)
_meta.return_field_name = return_field_name
_meta.optional_fields = optional_fields
_meta.required_fields = required_fields
_meta.permissions = permissions
_meta.auto_context_fields = auto_context_fields or {}
_meta.many_to_many_extras = many_to_many_extras or {}
_meta.foreign_key_extras = foreign_key_extras
_meta.many_to_one_extras = many_to_one_extras or {}
_meta.InputType = InputType
_meta.input_type_name = input_type_name
_meta.login_required = _meta.login_required or (
_meta.permissions and len(_meta.permissions) > 0
)
super().__init_subclass_with_meta__(arguments=arguments, _meta=_meta, **kwargs)
@classmethod
def mutate(cls, root, info, input):
if cls._meta.login_required and not info.context.user.is_authenticated:
raise GraphQLError("Must be logged in to access this mutation.")
if cls._meta.permissions and len(cls._meta.permissions) > 0:
if not info.context.user.has_perms(cls._meta.permissions):
raise GraphQLError("Not permitted to access this mutation.")
Model = cls._meta.model
model_field_values = {}
auto_context_fields = cls._meta.auto_context_fields or {}
created_objs = []
with transaction.atomic():
for data in input:
obj = cls.create_obj(
data,
info,
auto_context_fields,
cls._meta.many_to_many_extras,
cls._meta.foreign_key_extras,
cls._meta.many_to_one_extras,
Model
)
created_objs.append(obj)
kwargs = {cls._meta.return_field_name: created_objs}
return cls(**kwargs)
class DjangoDeleteMutationOptions(MutationOptions):
model = None
permissions = None
login_required = None
class DjangoDeleteMutation(Mutation):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
permissions=None,
login_required=None,
only_fields=(),
exclude_fields=(),
return_field_name=None,
**kwargs,
):
registry = get_global_registry()
if not return_field_name:
return_field_name = to_snake_case(model.__name__)
arguments = OrderedDict(id=graphene.ID(required=True))
output_fields = OrderedDict()
output_fields["found"] = graphene.Boolean()
output_fields["deleted_id"] = graphene.ID()
_meta = DjangoDeleteMutationOptions(cls)
_meta.model = model
_meta.fields = yank_fields_from_attrs(output_fields, _as=graphene.Field)
_meta.return_field_name = return_field_name
_meta.permissions = permissions
_meta.login_required = _meta.login_required or (
_meta.permissions and len(_meta.permissions) > 0
)
super().__init_subclass_with_meta__(arguments=arguments, _meta=_meta, **kwargs)
@classmethod
def mutate(cls, root, info, id):
if cls._meta.login_required and not info.context.user.is_authenticated:
raise GraphQLError("Must be logged in to access this mutation.")
if cls._meta.permissions and len(cls._meta.permissions) > 0:
if not info.context.user.has_perms(cls._meta.permissions):
raise GraphQLError("Not permitted to access this mutation.")
Model = cls._meta.model
id = disambiguate_id(id)
try:
obj = Model.objects.get(pk=id)
obj.delete()
return cls(found=True, deleted_id=id)
except ObjectDoesNotExist:
return cls(found=False)
class DjangoBatchDeleteMutationOptions(MutationOptions):
model = None
filter_fields = None
filter_class = None
permissions = None
login_required = None
class DjangoBatchDeleteMutation(Mutation):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
model=None,
permissions=None,
login_required=None,
filter_fields=(),
filter_class=None,
**kwargs,
):
registry = get_global_registry()
model_type = registry.get_type_for_model(model)
assert model_type, f"Model type must be registered for model {model}"
assert (
len(filter_fields) > 0
), f"You must specify at least one field to filter on for deletion."
input_arguments = get_filter_fields_input_args(
filter_fields,
model
)
InputType = type(
f"BatchDelete{model.__name__}Input", (InputObjectType,), input_arguments
)
arguments = OrderedDict(input=InputType(required=True))
output_fields = OrderedDict()
output_fields["deletion_count"] = graphene.Int()
output_fields["deleted_ids"] = graphene.List(graphene.ID)
_meta = DjangoBatchDeleteMutationOptions(cls)
_meta.model = model
_meta.fields = yank_fields_from_attrs(output_fields, _as=graphene.Field)
_meta.filter_fields = filter_fields
_meta.permissions = permissions
_meta.login_required = _meta.login_required or (
_meta.permissions and len(_meta.permissions) > 0
)
super().__init_subclass_with_meta__(arguments=arguments, _meta=_meta, **kwargs)
@classmethod
def mutate(cls, root, info, input):
if cls._meta.login_required and not info.context.user.is_authenticated:
raise GraphQLError("Must be logged in to access this mutation.")
if cls._meta.permissions and len(cls._meta.permissions) > 0:
if not info.context.user.has_perms(cls._meta.permissions):
raise GraphQLError("Not permitted to access this mutation.")
Model = cls._meta.model
model_field_values = {}
for name, value in super(type(input), input).items():
filter_field_split = name.split("__", 1)
field_name = filter_field_split[0]
try:
field = Model._meta.get_field(field_name)
except FieldDoesNotExist:
# This can happen with nested selectors. In this case we set the field to none.
field = None
filter_field_is_list = False
if len(filter_field_split) > 1:
# If we have an "__in" final part of the filter, we are now dealing with
# a list of things. Note that all other variants can be coerced directly
# on the filter-call, so we don't really have to deal with other cases.
filter_field_is_list = filter_field_split[-1] == "in"
new_value = value
value_handle_name = "handle_" + name
if hasattr(cls, value_handle_name):
handle_func = getattr(cls, value_handle_name)
assert callable(
handle_func
), f"Property {value_handle_name} on {cls.__name__} is not a function."
new_value = handle_func(value, name, info)
# On some fields we perform some default conversion, if the value was not transformed above.
if new_value == value and value is not None:
if type(field) in (models.ForeignKey, models.OneToOneField):
name = getattr(field, "db_column", None) or name + "_id"
new_value = disambiguate_id(value)
elif type(field) in (
models.ManyToManyField,
models.ManyToManyRel,
models.ManyToOneRel,
) or filter_field_is_list:
new_value = disambiguate_ids(value)
model_field_values[name] = new_value
filter_qs = Model.objects.filter(**model_field_values)
ids = [
to_global_id(get_global_registry().get_type_for_model(Model).__name__, id)
for id in filter_qs.values_list("id", flat=True)
]
deletion_count, _ = filter_qs.delete()
return cls(deletion_count=deletion_count, deleted_ids=ids)
| 1.789063 | 2 |
src/poliastro/frames/enums.py | sundeshgupta/poliastro | 634 | 12793288 | """Coordinate frames definitions.
"""
from enum import Enum
class Planes(Enum):
EARTH_EQUATOR = "Earth mean Equator and Equinox of epoch (J2000.0)"
EARTH_ECLIPTIC = "Earth mean Ecliptic and Equinox of epoch (J2000.0)"
BODY_FIXED = "Rotating body mean Equator and node of date"
| 2.4375 | 2 |
run.py | DongDong-123/zgg_test | 0 | 12793289 | <filename>run.py<gh_stars>0
import os
import time
from readConfig import ReadConfig
from db import DbOperate
import random
class Operate:
# def __init__(self):
# self.dboperate = DbOperate()
def create(self):
# from New_place_order import Execute
# from test_one import Execute
# from test_point import Execute
# from trademark import Execute
from copyright import Execute
# from patent import Execute
response = Execute()
for callback_label in range(response.__FuncCount__):
callback = response.__Func__[callback_label]
print("开始执行:", callback)
response.execute_function(callback)
self.execute_log(callback, "execute")
time.sleep(1)
print("{}执行完毕".format(callback))
def execute_log(self, param, name):
report_path = ReadConfig().save_report()
error_log_path = os.path.join(report_path,
"{}_log{}.log".format(name, time.strftime("%Y-%m-%d", time.localtime())))
with open(error_log_path, "a", encoding="utf-8") as f:
f.write("{}: ".format(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + param + "\n")
def read_exe_log(self, path):
with open(path, 'r', encoding="utf-8") as f:
f.read()
def send_clue(self):
from send_clue import Execute
# from test_clue import Execute
response = Execute()
for callback_label in range(response.__FuncCount__):
callback = response.__Func__[callback_label]
print("开始执行:", callback)
response.execute_function(callback)
self.execute_log(callback, "send_clue")
time.sleep(1)
print("{}发送完毕".format(callback))
# 删除
def delete():
from delete_unpay_case import Execute
test = Execute()
num = test.get_code_num()
for i in range(num):
test.delete_order()
print("删除完毕,共删除{}个".format(num))
# 随机获取类型
def random_list(num, lis):
res = []
for num in range(num):
index = random.randint(1, 34)
res.append(lis[index])
return res
def run():
qq = Operate()
qq.create()
print("下单完毕")
def send_clue():
# ====================================
# all_type = ReadConfig().get_clue_type()
# # 随机数量
# num = 5
# all_type = random_list(num, all_type)
# DbOperate().add("clue", all_type)
# =================================
qq = Operate()
qq.send_clue()
print("线索发送完毕")
if __name__ == '__main__':
# run()
send_clue()
# delete()
| 2.40625 | 2 |
dcor/_fast_dcov_mergesort.py | lemiceterieux/dcor | 98 | 12793290 | '''
Functions to compute fast distance covariance using mergesort.
'''
import warnings
from numba import float64, int64, boolean
import numba
import numpy as np
from ._utils import CompileMode, _transform_to_2d
def _compute_weight_sums(y, weights):
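    # Merge-sort style pass (as used here, y has already been reordered by ascending x):
    # while sorted runs of y are repeatedly merged, weight_sums[i] accumulates (roughly)
    # the summed weights of the samples that precede i in the x-order and have a strictly
    # smaller y value. This O(n log n) pair counting is the core of the fast
    # distance-covariance computation.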
n_samples = len(y)
weight_sums = np.zeros((n_samples,) + weights.shape[1:], dtype=y.dtype)
# Buffer that contains the indexes of the current and
# last iterations
indexes = np.arange(2 * n_samples).reshape((2, n_samples))
indexes[1] = 0 # Remove this
previous_indexes = indexes[0]
current_indexes = indexes[1]
weights_cumsum = np.zeros(
(n_samples + 1,) + weights.shape[1:], dtype=weights.dtype)
merged_subarray_len = 1
# For all lengths that are a power of two
while merged_subarray_len < n_samples:
gap = 2 * merged_subarray_len
indexes_idx = 0
# Numba does not support axis, nor out parameter.
for var in range(weights.shape[1]):
weights_cumsum[1:, var] = np.cumsum(
weights[previous_indexes, var])
# Select the subarrays in pairs
for subarray_pair_idx in range(0, n_samples, gap):
subarray_1_idx = subarray_pair_idx
subarray_2_idx = subarray_pair_idx + merged_subarray_len
subarray_1_idx_last = min(
subarray_1_idx + merged_subarray_len - 1, n_samples - 1)
subarray_2_idx_last = min(
subarray_2_idx + merged_subarray_len - 1, n_samples - 1)
# Merge the subarrays
while (subarray_1_idx <= subarray_1_idx_last and
subarray_2_idx <= subarray_2_idx_last):
previous_index_1 = previous_indexes[subarray_1_idx]
previous_index_2 = previous_indexes[subarray_2_idx]
if y[previous_index_1].item() >= y[previous_index_2].item():
current_indexes[indexes_idx] = previous_index_1
subarray_1_idx += 1
else:
current_indexes[indexes_idx] = previous_index_2
subarray_2_idx += 1
weight_sums[previous_index_2] += (
weights_cumsum[subarray_1_idx_last + 1] -
weights_cumsum[subarray_1_idx])
indexes_idx += 1
# Join the remaining elements of one of the arrays (already sorted)
if subarray_1_idx <= subarray_1_idx_last:
n_remaining = subarray_1_idx_last - subarray_1_idx + 1
indexes_idx_next = indexes_idx + n_remaining
current_indexes[indexes_idx:indexes_idx_next] = (
previous_indexes[subarray_1_idx:subarray_1_idx_last + 1])
indexes_idx = indexes_idx_next
elif subarray_2_idx <= subarray_2_idx_last:
n_remaining = subarray_2_idx_last - subarray_2_idx + 1
indexes_idx_next = indexes_idx + n_remaining
current_indexes[indexes_idx:indexes_idx_next] = (
previous_indexes[subarray_2_idx:subarray_2_idx_last + 1])
indexes_idx = indexes_idx_next
merged_subarray_len = gap
# Swap buffer
previous_indexes, current_indexes = (current_indexes, previous_indexes)
return weight_sums
_compute_weight_sums_compiled = numba.njit(
float64[:, :](float64[:, :], float64[:, :]),
cache=True)(_compute_weight_sums)
def _generate_compute_aijbij_term(compiled):
def _compute_aijbij_term(x, y):
compute_weight_sums = (_compute_weight_sums_compiled
if compiled else _compute_weight_sums)
# x must be sorted
n = len(x)
weights = np.hstack((np.ones_like(y), y, x, x * y))
weight_sums = compute_weight_sums(y, weights)
x = x.ravel()
y = y.ravel()
term_1 = (x * y).T @ weight_sums[:, 0].ravel()
term_2 = x.T @ weight_sums[:, 1].ravel()
term_3 = y.T @ weight_sums[:, 2].ravel()
term_4 = np.sum(weight_sums[:, 3])
# First term in the equation
sums_term = term_1 - term_2 - term_3 + term_4
# Second term in the equation
sum_x = np.sum(x)
sum_y = np.sum(y)
cov_term = n * x.T @ y - np.sum(sum_x * y + sum_y * x) + sum_x * sum_y
d = 4 * sums_term - 2 * cov_term
return d.item()
return _compute_aijbij_term
_compute_aijbij_term = _generate_compute_aijbij_term(compiled=False)
_compute_aijbij_term_compiled = numba.njit(
float64(float64[:, :], float64[:, :]),
cache=True)(
_generate_compute_aijbij_term(compiled=True))
def _compute_row_sums(x):
# x must be sorted
x = x.ravel()
n_samples = len(x)
term_1 = (2 * np.arange(1, n_samples + 1) - n_samples) * x
sums = np.cumsum(x)
term_2 = sums[-1] - 2 * sums
return term_1 + term_2
_compute_row_sums_compiled = numba.njit(
float64[:](float64[:]),
cache=True)(_compute_row_sums)
def _generate_distance_covariance_sqr_mergesort_generic_impl(
compiled):
def _distance_covariance_sqr_mergesort_generic_impl(x, y, unbiased):
compute_aijbij_term = (_compute_aijbij_term_compiled
if compiled else _compute_aijbij_term)
compute_row_sums = (_compute_row_sums_compiled if compiled
else _compute_row_sums)
n = len(x)
# Sort x in ascending order
ordered_indexes = np.argsort(x.ravel())
x = x[ordered_indexes]
y = y[ordered_indexes]
aijbij = compute_aijbij_term(x, y)
a_i = compute_row_sums(x.ravel())
ordered_indexes_y = np.argsort(y.ravel())
b_i_perm = compute_row_sums(y.ravel()[ordered_indexes_y])
b_i = np.empty_like(b_i_perm)
b_i[ordered_indexes_y] = b_i_perm
a_dot_dot = np.sum(a_i)
b_dot_dot = np.sum(b_i)
sum_ab = a_i.ravel().T @ b_i.ravel()
if unbiased:
d3 = (n - 3)
d2 = (n - 2)
d1 = (n - 1)
else:
d3 = d2 = d1 = n
d_cov = (aijbij / n / d3 - 2 * sum_ab / n / d2 / d3 +
a_dot_dot / n * b_dot_dot / d1 / d2 / d3)
return d_cov
return _distance_covariance_sqr_mergesort_generic_impl
_distance_covariance_sqr_mergesort_generic_impl = (
_generate_distance_covariance_sqr_mergesort_generic_impl(
compiled=False))
_distance_covariance_sqr_mergesort_generic_impl_compiled = numba.njit(
float64(float64[:, :], float64[:, :], boolean),
cache=True)(
_generate_distance_covariance_sqr_mergesort_generic_impl(
compiled=True))
impls_dict = {
CompileMode.AUTO: (
_distance_covariance_sqr_mergesort_generic_impl_compiled,
_distance_covariance_sqr_mergesort_generic_impl),
CompileMode.NO_COMPILE: (_distance_covariance_sqr_mergesort_generic_impl,),
CompileMode.COMPILE_CPU: (
_distance_covariance_sqr_mergesort_generic_impl_compiled,)
}
def _distance_covariance_sqr_mergesort_generic(x, y,
*, exponent=1, unbiased=False,
compile_mode=CompileMode.AUTO):
if exponent != 1:
raise ValueError(f"Exponent should be 1 but is {exponent} instead.")
x = _transform_to_2d(x)
y = _transform_to_2d(y)
if compile_mode not in (CompileMode.AUTO, CompileMode.COMPILE_CPU,
CompileMode.NO_COMPILE):
        raise NotImplementedError(
            f"Compile mode {compile_mode} not implemented.")
for impl in impls_dict[compile_mode]:
try:
return impl(x, y,
unbiased)
except TypeError as e:
if compile_mode is not CompileMode.AUTO:
raise e
warnings.warn(f"Falling back to uncompiled MERGESORT fast "
f"distance covariance because of TypeError "
f"exception raised: {e}. Rembember: only floating "
f"point values can be used in the compiled "
f"implementations.")
| 2.5 | 2 |
rotation_averaging/so3.py | nishant34/RotationAveraging | 0 | 12793291 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Date: 2014-10-28 04:41:23
# @Last Modified by: marinheiro
# @Last Modified time: 2014-12-08 23:30:01
"""
Auxiliary functions to convert between different rotation representations.
"""
import numpy
import numpy.linalg
import scipy
import math
# Axis-Angle <-> Log Conversion
def axis_angle_to_log(n, theta):
"""Converts from the axis-angle representation to the log representation
"""
return n*theta
def log_to_axis_angle(w):
    """Converts from the log (rotation vector) representation to the axis-angle representation.
    """
    theta = numpy.linalg.norm(w)
    n = numpy.zeros((3,))
    if theta != 0.0:
        n = w/theta
    return (n, theta)
# Quaternion <-> Axis-Angle conversion
def quaternion_to_axis_angle(quat):
    """Converts a unit quaternion (4x1 array, scalar part first) to the axis-angle representation.
    """
    theta = 2.0*math.atan2(numpy.linalg.norm(quat[1:]), quat[0])
    n = numpy.zeros((3,1))
    if theta != 0.0:
        n = quat[1:]/math.sin(theta/2)
    return (n, theta)
def axis_angle_to_quaternion(n, theta):
    """Converts an axis-angle rotation to a unit quaternion (scalar part first).
    """
    c = math.cos(theta/2)
    s = math.sin(theta/2)
    quat = numpy.zeros((4,1))
    quat[0] = c
    quat[1:] = n*s
    return quat
# Matrix <-> Quaternion conversion
def matrix_to_quaternion(rot):
    """Converts a 3x3 rotation matrix to a unit quaternion (scalar part first).
    """
    s = math.sqrt(numpy.trace(rot) + 1.0)/2
    quat = numpy.array([[s],
                        [(rot[2, 1]-rot[1, 2])/(4*s)],
                        [(rot[0, 2]-rot[2, 0])/(4*s)],
                        [(rot[1, 0]-rot[0, 1])/(4*s)],
                        ])
    return quat
def quaternion_to_matrix(quat):
    """Converts a unit quaternion (scalar part first) to a 3x3 rotation matrix.
    """
    qw = quat[0][0]
    qx = quat[1][0]
    qy = quat[2][0]
    qz = quat[3][0]
    rot = numpy.array([[1 - 2*qy*qy - 2*qz*qz, 2*qx*qy - 2*qz*qw, 2*qx*qz + 2*qy*qw],
                       [2*qx*qy + 2*qz*qw, 1 - 2*qx*qx - 2*qz*qz, 2*qy*qz - 2*qx*qw],
                       [2*qx*qz - 2*qy*qw, 2*qy*qz + 2*qx*qw, 1 - 2*qx*qx - 2*qy*qy]])
    return rot
# Matrix <-> Axis-Angle conversion
def matrix_to_axis_angle(rot):
    """Converts a 3x3 rotation matrix to the axis-angle representation.
    """
    return quaternion_to_axis_angle(matrix_to_quaternion(rot))
def axis_angle_to_matrix(n, theta):
    """Converts an axis-angle rotation to a 3x3 rotation matrix.
    """
    # print n.shape, theta
    return quaternion_to_matrix(axis_angle_to_quaternion(n, theta))
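# Example round-trip (hypothetical usage, not part of the original module):
#     R = axis_angle_to_matrix(numpy.array([[0.0], [0.0], [1.0]]), math.pi / 2)
#     n, theta = matrix_to_axis_angle(R)   # recovers the z axis (3x1) and an angle of pi/2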
| 3.578125 | 4 |
biblebot/api/intranet.py | yongki150/biblebot-scraper | 0 | 12793292 | from abc import ABCMeta, abstractmethod
from typing import Optional, Dict, List, Tuple
import re
from .base import (
HTTPClient,
IParser,
APIResponseType,
ILoginFetcher,
ISemesterFetcher,
ResourceData,
ErrorData,
ParserPrecondition,
SemesterData,
)
from ..reqeust import Response
from ..exceptions import ParsingError
from .common import (
httpdate_to_unixtime,
extract_alerts,
extract_hidden_tags,
urlencode,
parse_table,
)
__all__ = (
"IParserPrecondition",
"Login",
"StudentPhoto",
"Chapel",
"Timetable",
"Course",
)
DOMAIN_NAME: str = "https://kbuis.bible.ac.kr" # with protocol
_SEMESTER_KEY: str = "ctl00$ContentPlaceHolder1$cbo_YearHg"
class IParserPrecondition(metaclass=ABCMeta):
@staticmethod
@abstractmethod
def is_blocking(response: Response) -> Optional[ErrorData]:
""" 진행할 수 없는 사전조건인 경우 ErrorData, 그렇지 않은 경우 None """
pass
_ParserPrecondition = ParserPrecondition(IParserPrecondition)
class _SessionExpiredChecker(IParserPrecondition):
@staticmethod
def is_blocking(response: Response) -> Optional[ErrorData]:
alerts = extract_alerts(response.soup)
for alert in alerts:
if "세션" in alert or "수업평가" in alert:
return ErrorData(
error={"title": alert, "alert_messages": alerts}, link=response.url
)
return None
def _extract_semester(response: Response) -> SemesterData:
select_tag = response.soup.find("select", attrs={"name": _SEMESTER_KEY})
if not select_tag:
raise ParsingError("학기 셀렉트 태그를 찾을 수 없습니다.", response)
options = select_tag.find_all("option", selected=True)
if not options:
raise ParsingError("학기 옵션 태그를 찾을 수 없습니다.", response)
try:
selectables: List[str] = [
opt.attrs["value"] for opt in select_tag.find_all("option")
]
selected: str = select_tag.find("option", selected=True).attrs["value"]
except (KeyError, AttributeError):
raise ParsingError("학기 옵션 태그를 정상적으로 선택할 수 없습니다.", response)
return SemesterData(selected=selected, selectable=selectables)
async def _post_with_semester(
url,
cookies: Dict[str, str],
semester: Optional[str] = None,
*,
headers: Optional[Dict[str, str]] = None,
timeout: Optional[float] = None,
**kwargs,
) -> Response:
""" 인트라넷에서 특정 학기의 정보 조회를 위한 메서드
특정 학기 조회를 위해서는 POST 메서드로 정보를 전송해야하는데, 그 전에 hidden 태그를 함께 보내야함.
1. GET 요청, 해당 페이지를 불러와서 form hidden-tag 의 (name,key) 쌍을 얻는다.
- 여기서 얻는 정보는 학교에서 미리 지정해놓은터 학기, 일반적으로 최신 학기
2. POST 요청, hidden-tag와 학기를 body에 담아 전송한다.
"""
response = await HTTPClient.connector.get(
url, cookies=cookies, headers=headers, timeout=timeout, **kwargs
)
if _SessionExpiredChecker.is_blocking(response):
return response
semester_info: SemesterData = _extract_semester(response)
if (
semester
and semester != semester_info.selected
and semester in semester_info.selectable
):
body = extract_hidden_tags(response.soup)
body[_SEMESTER_KEY] = semester
body["ctl00$ContentPlaceHolder1$hidActionMode"] = "S"
response = await HTTPClient.connector.post(
url, body=body, cookies=cookies, headers=headers, timeout=timeout, **kwargs
)
semester_info: SemesterData = _extract_semester(response)
response.etc["semester"] = semester_info
return response
class Login(ILoginFetcher, IParser):
# TODO: URL 변경 유의
URL: str = DOMAIN_NAME + "/ble_login2.aspx"
@classmethod
async def fetch(
cls,
user_id: str,
user_pw: str,
*,
headers: Optional[Dict[str, str]] = None,
timeout: Optional[float] = None,
**kwargs,
) -> Response:
form = {"Txt_1": user_id, "Txt_2": user_pw, "use_type": "2"}
return await HTTPClient.connector.post(
cls.URL, headers=headers, body=form, timeout=timeout, **kwargs
)
@classmethod
def parse(cls, response: Response) -> APIResponseType:
"""
        Login success: status 302, Location header present, redirect message in the body
        Login failure: status 200, no Location header, alert message in the body
"""
        # Login succeeded
if response.status == 302:
iat = httpdate_to_unixtime(response.headers["date"])
return ResourceData(
data={"cookies": response.cookies, "iat": iat}, link=response.url
)
        # TODO: add more detailed conditions once the current intranet server overload is resolved
        # Login failed: intranet server overloaded
elif response.status == 503:
return ErrorData(
error={
"title": response.soup.find("h2").get_text(),
"error_message": response.soup.find("p").get_text()
},
link=response.url
)
        # Login failed: generic error
else:
alerts: List[str] = extract_alerts(response.soup)
alert = alerts[0] if alerts else ""
return ErrorData(
error={"title": alert, "alert_messages": alerts}, link=response.url
)
class StudentPhoto(IParser):
URL: str = DOMAIN_NAME + "/SchoolRegMng/SR015.aspx"
@classmethod
async def fetch(
cls,
cookies: Dict[str, str],
sid: str,
*,
headers: Optional[Dict[str, str]] = None,
timeout: Optional[float] = None,
**kwargs,
) -> Response:
query: Dict[str, str] = {"schNo": sid}
query_string = urlencode(query)
url = f"{cls.URL}?{query_string}"
return await HTTPClient.connector.get(
url, cookies=cookies, headers=headers, timeout=timeout, **kwargs
)
@classmethod
@_ParserPrecondition
def parse(cls, response: Response) -> APIResponseType:
"""
사진을 불러온 경우:
headers= {'transfer-encoding': 'chunked', 'content-type': 'image/jpeg', 'content-disposition': 'attachment;filename=image.jpeg'}
사진을 불러오지 못한 경우:
headers= {'transfer-encoding': 없음, 'content-type': 'text/html; charset=ks_c_5601-1987', 'content-disposition': 없음}
"""
if response.headers["content-type"][:5] == "image":
return ResourceData(data={"raw_image": response.raw}, link=response.url)
else:
return ErrorData(error={"title": "이미지를 불러올 수 없습니다."}, link=response.url)
class Chapel(ISemesterFetcher, IParser):
URL: str = DOMAIN_NAME + "/StudentMng/SM050.aspx"
@classmethod
async def fetch(
cls,
cookies: Dict[str, str],
semester: Optional[str] = None,
*,
headers: Optional[Dict[str, str]] = None,
timeout: Optional[float] = None,
**kwargs,
) -> Response:
return await _post_with_semester(
cls.URL, cookies, semester, headers=headers, timeout=timeout, **kwargs
)
@classmethod
def _parse_summary(cls, response: Response) -> Dict[str, str]:
soup = response.soup
tbody = soup.find("tbody", attrs={"class": "viewbody"})
if not tbody:
raise ParsingError("채플 요약 테이블을 찾을 수 없습니다.", response)
summary: Dict[str, str] = {}
for th, td in zip(tbody.find_all("th"), tbody.find_all("td")):
key = th.get_text(strip=True)
value = td.get_text(strip=True)
day_count = re.search(r"\d+", value)
summary[key] = str(day_count.group()) if day_count else ""
return summary
@classmethod
def _parse_main_table(cls, response: Response) -> Tuple[List, List]:
soup = response.soup
thead = soup.find("thead", attrs={"class": "mhead"})
tbody = soup.find("tbody", attrs={"class": "mbody"})
return parse_table(response, thead, tbody)
@classmethod
@_ParserPrecondition
def parse(cls, response: Response) -> APIResponseType:
summary = cls._parse_summary(response)
head, body = cls._parse_main_table(response)
return ResourceData(
data={"summary": summary, "head": head, "body": body,},
link=response.url,
meta={
"selected": response.etc["semester"].selected,
"selectable": response.etc["semester"].selectable,
},
)
class Timetable(ISemesterFetcher, IParser):
URL: str = DOMAIN_NAME + "/GradeMng/GD160.aspx"
@classmethod
async def fetch(
cls,
cookies: Dict[str, str],
semester: Optional[str] = None,
*,
headers: Optional[Dict[str, str]] = None,
timeout: Optional[float] = None,
**kwargs,
) -> Response:
return await _post_with_semester(
cls.URL, cookies, semester, headers=headers, timeout=timeout, **kwargs
)
@staticmethod
def _parse_contents(td: str, response: Response) -> Tuple:
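        # A timetable cell looks like "<title>(<detail>)HH:MM ~ HH:MM"; the two patterns
        # capture (title, text inside the parentheses, start time, end time), with the
        # second pattern handling cells that have no parenthesised part.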
matching = re.match(
r"(.+)?\(([^(]*)?\)(\d{2}:\d{2})\s*~\s*([0-9:]{,5})", td
) or re.match(r"(.+)?()(\d{2}:\d{2})\s*~\s*([0-9:]{,5})", td)
if not matching:
ParsingError("시간표 상세정보를 해석할 수 없습니다.", response)
return matching.groups()
@classmethod
def _parse_main_table(cls, response: Response) -> Tuple[List, List]:
soup = response.soup
thead = soup.find("thead", attrs={"class": "mhead"})
tbody = soup.find("tbody", attrs={"class": "mbody"})
result = [[], [], [], [], []]
head, body = parse_table(response, thead, tbody)
for row in body:
for i, each in enumerate(row):
if each:
result[i].append(cls._parse_contents(each, response))
return head, result
@classmethod
@_ParserPrecondition
def parse(cls, response: Response) -> APIResponseType:
head, body = cls._parse_main_table(response)
return ResourceData(
data={"head": head, "body": body},
link=response.url,
meta={
"selected": response.etc["semester"].selected,
"selectable": response.etc["semester"].selectable,
},
)
class Course(ISemesterFetcher, IParser):
URL: str = DOMAIN_NAME + "/GradeMng/GD095.aspx"
@classmethod
async def fetch(
cls,
cookies: Dict[str, str],
semester: Optional[str] = None,
*,
headers: Optional[Dict[str, str]] = None,
timeout: Optional[float] = None,
**kwargs,
) -> Response:
return await _post_with_semester(
cls.URL, cookies, semester, headers=headers, timeout=timeout, **kwargs
)
@classmethod
def _parse_main_table(cls, response: Response) -> Tuple[List, List]:
soup = response.soup
thead = soup.find("thead", attrs={"class": "mhead"})
tbody = soup.find("tbody", attrs={"class": "mbody"})
return parse_table(response, thead, tbody)
@classmethod
@_ParserPrecondition
def parse(cls, response: Response) -> APIResponseType:
head, body = cls._parse_main_table(response)
return ResourceData(
data={"head": head, "body": body},
link=response.url,
meta={
"selected": response.etc["semester"].selected,
"selectable": response.etc["semester"].selectable,
},
)
| 2.578125 | 3 |
calorie/signals.py | clarametto/calorieTracker | 3 | 12793293 | from django.db.models.signals import post_save
from django.contrib.auth.models import User
from .models import Profile
from django.dispatch import receiver
# Signal handlers that keep a Profile row in sync with each User.
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    # Connected once via the @receiver decorator; the profile is created only when the
    # User row is first inserted, so each user ends up with exactly one Profile.
    if created:
        Profile.objects.create(person_of=instance)
        print("profile created")
@receiver(post_save, sender=User)
def save_user_profile(sender, instance, **kwargs):
    # Persist profile changes whenever the related User instance is saved.
    instance.profile.save()
| 2.3125 | 2 |
terrascript/tls/d.py | amlodzianowski/python-terrascript | 0 | 12793294 | <reponame>amlodzianowski/python-terrascript
# terrascript/tls/d.py
import terrascript
class tls_public_key(terrascript.Data):
pass
| 1.289063 | 1 |
paper/fig_max_speed_ac/fig_max_histo.py | npmurphy/CRNSynthesisFigures | 0 | 12793295 | <filename>paper/fig_max_speed_ac/fig_max_histo.py
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import os
import numpy as np
plt.style.use('paper/figstyle.mpl')
dpi = 300
import sys
sys.path += ["python"]
from figure_util import cm2inch
def get_dataset(workdir, filename="summary_archetype.tsv"):
score_df = pd.read_csv(os.path.join(workdir, filename), sep="\t")
print("All CRNs", len(score_df))
unique = score_df[score_df["unique"]].copy()
print("role isomorphic", len(unique))
return unique
max_df = get_dataset("paper/maximum_out_S4_R3/")
plt.rc('xtick', labelsize="x-small")
fig, ax = plt.subplots(1, 1)
colorcyc = plt.rcParams['axes.prop_cycle'].by_key()['color']
# max_df.plot.bar(y=["score"], x=["CRN"], ax=ax, width=0.6, color="lightblue", label="Optimized")
# max_df.plot.bar(y=["one"], x=["CRN"], ax=ax, width=0.6, color="orange", label="Rate 1")
max_df.plot.bar(y=["score"], x=["CRN"], ax=ax, width=0.7, color=colorcyc[0], label="Optimized")
max_df.plot.bar(y=["one"], x=["CRN"], ax=ax, width=0.7, color=colorcyc[1], label="Rate 1")
ax.legend(["Optimized", "Rate 1.0"])
ax.set_ylabel("Accuracy")
ax.set_xlabel("CRN Number: Max$_{4,3}$ #")
#labels = ax.get_xticklabels()
labels = ax.get_xticklabels()
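# Stagger the x tick labels vertically (alternating +/- 0.03) so neighbouring CRN numbers do not overlap.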
for i, l in enumerate(labels):
x, y = l.get_position() #print(type(l))
if i % 2 == 0:
y += 0.03
else :
y -= 0.03
l.set_rotation(0)
l.set_position((x,y))
#print(x,y)
#print(l.position)
#print(labels)
#ax[0].set_ylim(bottom=0, top=1.0)
#ax[0].tick_params(axis='x', which='both', length=0)
#plt.setp(labels, rotation=90)
fig.set_size_inches(cm2inch(7.9, 3.5))
#fig.tight_layout()
fig.subplots_adjust(left= 0.12, # the left side of the subplots of the figure
right = 0.99, # the right side of the subplots of the figure
bottom = 0.26, # the bottom of the subplots of the figure
top = 0.99, # the top of the subplots of the figure
wspace = 0.2, # the amount of width reserved for blank space between subplots,
# expressed as a fraction of the average axis width
hspace = 0.2) # the amount of height reserved for white space between subplots,
fig.savefig("max_overview.pdf", dpi=dpi)#bbox_inches="tight")
fig.savefig("max_overview.png", dpi=dpi)#bbox_inches="tight") | 2.25 | 2 |
dataset/change_image.py | zza584231732/face-master | 0 | 12793296 | from scipy.misc import imread,imresize,imsave
import os
path = '/home/zhang/tm/insightface_for_face_recognition-master/dataset/8631_align_train/'
out_path = '/home/zhang/tm/insightface_for_face_recognition-master/dataset/8631_112_align_train/'
img_lists = os.listdir(path)
for img_list in img_lists:
imgpaths = os.path.join(path,img_list)
out_imgpaths = os.path.join(out_path,img_list)
if not os.path.exists(out_imgpaths):
os.mkdir(out_imgpaths)
img_names = os.listdir(imgpaths)
for i in img_names:
img_name = os.path.join(imgpaths,i)
out_img_name = os.path.join(out_imgpaths,i)
img = imread(img_name)
img = imresize(img,(112,96))
imsave(out_img_name,img)
| 2.546875 | 3 |
train.py | youngstudent2/flappy-bird-for-learn | 0 | 12793297 | import flappybird as fb
import random
import time
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import SGD
import numpy as np
import copy
SCALE_FACTOR = 200
class GeneticBrain(fb.Brain):
def __init__(self,n_input,n_hidden):
'''
self.model = Sequential()
self.model.add(Dense(n_hidden,activation='sigmoid',input_shape=(n_input,)))
self.model.add(Dense(1,activation='sigmoid'))
#print(self.getModel())
'''
self.model = NeuralNetwork([n_input,n_hidden],'logistic')
def decideFlap(self,params):
#print(params)
distance = params['distance'] + params['pipeWidth']
deltaHeight = (params['bottomPipeHeight'] + params['topPipeHeight'])/2 - params['height']
velY = params['velY']
data = [distance * SCALE_FACTOR, deltaHeight * SCALE_FACTOR]
pred = self.model.predict(data)
#print(pred)
return pred[0] > 0.5
def getModel(self):
return self.model.getWeights()
def setModel(self,weights):
self.model.setWeights(weights)
return True
class GeneticAlgorithm():
def __init__(self,max_units,top_units):
self.max_units = max_units
self.top_units = top_units
if max_units < top_units:
self.top_units = max_units
self.population = []
self.best_brain = None
def reset(self):
self.iteration = 1
self.mutateRate = 1
self.best_population = 0
self.best_fitness = 0
self.best_score = 0
def createPopulation(self):
self.population = []
for i in range(self.max_units):
newUnit = GeneticBrain(2,6)
newUnit.index = i
newUnit.fitness = 0
newUnit.score = 0
newUnit.isWinner = False
self.population.append(newUnit)
return self.population
def evolvePopulation(self,results):
winners = self.selection(results)
for w in winners:
print("%d: fitness = %f score = %d" %(w.index,w.fitness,w.score))
if self.mutateRate == 1 and winners[0].fitness < 0:
# all is bad
# create another population
print("recreate popultation")
return self.createPopulation()
else:
self.mutateRate = 0.2
if winners[0].fitness > self.best_fitness:
self.best_fitness = winners[0].fitness
self.best_score = winners[0].score
winners[0].model.save('best.h5')
for i in range(self.top_units,self.max_units):
if i == self.top_units:
parantA = winners[0].getModel()
parantB = winners[1].getModel()
offspring = self.crossOver(parantA,parantB)
elif i < self.max_units - 2:
parantA = self.getRandomUnit(winners).getModel()
parantB = self.getRandomUnit(winners).getModel()
offspring = self.crossOver(parantA,parantB)
else:
offspring = winners[0].getModel()
offspring = self.mutation(offspring)
newUnit = self.population[i]
newUnit.setModel(offspring)
newUnit.score = 0
newUnit.isWinner = False
return self.population
def selection(self,results):
for i in range(self.top_units):
self.population[results[i].index].isWinner = True
return results[:self.top_units]
def crossOver(self,parantA,parantB):
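        # Single-point crossover: swap the tail (from a random cut point) of one parameter
        # vector between the two parents (the layout of NeuralNetwork.getWeights() is
        # assumed here), then return one of the two parents at random as the offspring.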
length = np.size(parantA[1],0)
cutPoint = random.randint(0,length-1)
for i in range(cutPoint,length):
tmp = parantA[1][0][i]
parantA[1][0][i] = parantB[1][0][i]
parantB[1][0][i] = tmp
if random.randint(0,1):
return parantA
else:
return parantB
def mutation(self,offspring):
for i in offspring[1]:
for bias in i:
bias = self.mutate(bias)
for i in offspring[0]:
for weight in i:
weight = self.mutate(weight)
return offspring
def mutate(self,gene):
if random.random() < self.mutateRate:
mutateFactor = 1 + (random.random() - 0.5) * 3 + (random.random() - 0.5)
gene *= mutateFactor
return gene
def getRandomUnit(self,array):
return array[random.randint(0,len(array)-1)]
def normalize(self,value,maxValue):
if value < -maxValue: value = -maxValue
elif value > maxValue: value = maxValue
return value/maxValue
def saveBestBird(self):
pass
import pygame
class PlayerBrain(fb.Brain): # player-controlled brain
def decideFlap(self,params):
#print(params)
return params['playerClick']
class HappyBrain(fb.Brain):
def __init__(self):
random.seed(2000)
def decideFlap(self,params):
#print(params)
pygame.event.get()
if params['height'] < 40:
return False
r = random.randint(0,1000)
return r > 940
def train():
bird_num = 10
GA = GeneticAlgorithm(bird_num,4)
GA.reset()
brains = GA.createPopulation()
#brains = [HappyBrain()] * bird_num
g = fb.FlappyBirdGame(30,bird_num,brains)
train_time = 200
for i in range(train_time):
g.run()
results = g.result()
print("Generation %d:" %(i))
sorted_brains = []
for r in results[::-1]:
b = r[0].brain
b.fitness = (r[1]['score']) * r[1]['interval'] - r[1]['distance']
b.score = r[1]['score']
sorted_brains.append(b)
brains = GA.evolvePopulation(sorted_brains)
print("best score = %d best fitness = %d" % (GA.best_score,GA.best_fitness))
g.reset(bird_num,brains)
GA.saveBestBird()
print("GA end!")
from simpleNeuralNetwork import NeuralNetwork
class simpleNNBrain(fb.Brain):
def __init__(self):
self.model = NeuralNetwork([2,6,1],'logistic')
print(self.model.getWeights())
def decideFlap(self,params):
distance = params['distance'] + params['pipeWidth']
deltaHeight = (params['bottomPipeHeight'] + params['topPipeHeight'])/2 - params['height']
velY = params['velY']
data = [distance * SCALE_FACTOR, deltaHeight * SCALE_FACTOR]
pred = self.model.predict(data)
#print(pred)
print(pred)
return pred[0] > 0.5
def train_test():
bird_num = 10
brains = []
for i in range(bird_num):
brains.append(simpleNNBrain())
g = fb.FlappyBirdGame(30,bird_num,brains)
for i in range(10):
g.run()
result = g.result()
brains = []
for i in range(bird_num):
brains.append(simpleNNBrain())
g.reset(10,brains)
if __name__ == '__main__':
train()
| 2.5 | 2 |
angular_flask/utils.py | rsom777/Blog | 7 | 12793298 | <filename>angular_flask/utils.py
"""
Helper functions for controllers.py
"""
import os, boto3, uuid, io
from PIL import Image
from flask.ext.httpauth import HTTPBasicAuth
from flask import request, abort
from angular_flask.models import *
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username_or_token, password):
"""
    Check password validity against an auth token or a username.
    :param username_or_token: auth token, or username to authenticate
    :param password: plain-text password (ignored when a valid token is supplied)
    :return: True on success; aborts with HTTP 400 on an unknown user or a bad password
"""
# first try to authenticate by token
user = User.verify_auth_token(username_or_token)
if not user:
# try to authenticate with username/password
user = User.query.filter_by(username=username_or_token).first()
if not user:
return abort(400, 'username')
elif not user.verify_password(password):
return abort(400, 'password')
return True
def allowed_file(filename):
"""
Check if file extension is in allowed extensions
:param filename: Name of the file to be checked
:return: True or False
"""
return '.' in filename and \
filename.rsplit('.', 1)[1] in app.config['ALLOWED_EXTENSIONS']
def save_image(img_type, elem):
"""
Save post cover or user avatar to local filesystem in dev or to S3 in prod
:param img_type: 'avatars' or 'covers'
:param elem: post or user obj on which to save the image
:return: name of the file to be saved
"""
image = request.files['file']
if elem:
filename = elem.photo.rsplit('/', 1)[-1]
# Do not overwrite default image but generate unique file name instead
if filename == 'default.jpg':
filename = str(uuid.uuid4()) + '.' + image.filename.rsplit('.', 1)[1]
elem.photo = app.config['IMG_FOLDER'] + img_type + '/' + filename
else:
filename = str(uuid.uuid4()) + '.' + image.filename.rsplit('.', 1)[1]
img = Image.open(image)
if img_type == 'avatars':
size = 512
else:
size = 1024
maxsize = (size, size)
img.thumbnail(maxsize, Image.ANTIALIAS)
if 'DYNO' in os.environ: # check if the app is running on Heroku server
s3 = boto3.resource('s3')
output = io.BytesIO()
img.save(output, format='JPEG')
s3.Object('theeblog', img_type + '/' + filename).put(Body=output.getvalue())
else: # Otherwise save to local filesystem
img.save(os.path.join(app.config['UPLOAD_FOLDER'] + img_type, filename))
return filename
| 3.046875 | 3 |
python/count_primes.py | anishLearnsToCode/leetcode-algorithms | 17 | 12793299 | from typing import List
class Solution:
def _get_prime_sieve(self, size: int) -> List[int]:
sieve = [1] * (max(size, 2))
sieve[0], sieve[1] = 0, 0
for number in range(2, len(sieve)):
if sieve[number]:
for dividend in range(number ** 2, len(sieve), number):
sieve[dividend] = 0
return sieve
def countPrimes(self, n: int) -> int:
return sum(self._get_prime_sieve(n))
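# Example (not part of the original file): Solution().countPrimes(10) == 4,
# counting the primes strictly below n: 2, 3, 5, 7.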
| 3.53125 | 4 |
tests/test_eotile.py | CS-SI/eotile | 7 | 12793300 | <reponame>CS-SI/eotile
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 CS GROUP - France.
#
# This file is part of EOTile.
# See https://github.com/CS-SI/eotile for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
:author: mgerma
:organization: CS GROUP - France
:copyright: 2021 CS GROUP - France. All rights reserved.
:license: see LICENSE file.
"""
import logging
import unittest
from pathlib import Path
from eotile.eotile_module import main as eomain
from eotile.eotiles.eotiles import create_tiles_list_eo, get_tile, write_tiles_bb
from eotile.eotiles.get_bb_from_tile_id import get_tiles_from_tile_id, tile_id_matcher
from eotile.eotiles.utils import build_nominatim_request, input_matcher
class TestEOTile(unittest.TestCase):
def test_tile_list_utils_s2(self):
aux_data_dirpath = Path("eotile/data/aux_data")
filename_tiles_s2 = aux_data_dirpath / "s2_no_overlap.gpkg"
ls2 = create_tiles_list_eo(
filename_tiles_s2,
Path("tests/test_data/illinois.shp"),
)
self.assertEqual(len(ls2), 33)
self.assertTrue(get_tile(ls2, "15TXH") is not None)
self.assertTrue(get_tile(ls2, "15TXF") is not None)
def test_tile_list_utils_l8(self):
aux_data_dirpath = Path("eotile/data/aux_data")
filename_tiles_l8 = aux_data_dirpath / "l8_tiles.gpkg"
l8 = create_tiles_list_eo(
filename_tiles_l8,
Path("tests/test_data/illinois.shp"),
)
self.assertEqual(len(l8), 18)
self.assertTrue(get_tile(l8, "25030") is not None)
def test_read_write_tiles_bb(self):
aux_data_dirpath = Path("eotile/data/aux_data")
filename_tiles_l8 = aux_data_dirpath / "l8_tiles.gpkg"
ll8 = create_tiles_list_eo(
filename_tiles_l8,
Path("tests/test_data/illinois.shp"),
)
write_tiles_bb(ll8, Path("/tmp/test_read_write.shp"))
self.assertTrue(get_tile(ll8, "25030") is not None)
def test_input_matcher(self):
polygon = "POLYGON((1 1,5 1,5 5,1 5,1 1))"
mpoly = "MULTIPOLYGON(((1 1,5 1,5 5,1 5,1 1),(2 2,2 3,3 3,3 2,2 2)),((6 3,9 2,9 4,6 3)))"
bbox1 = "['36.9701313', '42.5082935', '-91.5130518', '-87.0199244']"
bbox2 = "'36.9701313', '42.5082935', '-91.5130518', '-87.0199244'"
bbox3 = "'36.9701313','42.5082935','-91.5130518','-87.0199244'"
location1 = "Toulouse"
location2 = "Nowhere"
location3 = "France"
tile_id1 = "31TCJ"
tile_id2 = "199030"
file1 = "/tmp"
file2 = "/dev/null"
test_list = [
polygon,
mpoly,
bbox1,
bbox2,
bbox3,
location1,
location3,
tile_id1,
tile_id2,
file1,
file2,
]
with self.assertRaises(ValueError):
input_matcher(location2)
out_list = []
for elt in test_list:
out_list.append(input_matcher(elt))
self.assertListEqual(
out_list,
[
"wkt",
"wkt",
"bbox",
"bbox",
"bbox",
"location",
"location",
"tile_id",
"tile_id",
"file",
"file",
],
)
def test_tile_id_list_test(self):
tile_id_list_2 = "31TCJ, 31TCF"
tile_id_list_3 = "199030, 199029, 197031"
out_list = []
for elt in [tile_id_list_2, tile_id_list_3]:
out_list.append(input_matcher(elt))
self.assertListEqual(out_list, ["tile_id", "tile_id"])
def test_id_matcher(self):
test_id_srtm = "N02W102"
test_id_cop = "S02W102"
test_id_s2 = "18SWJ"
test_id_l8 = "12033"
test_id_srtm5x5 = "srtm_37_04"
self.assertEqual(tile_id_matcher(test_id_l8), [False, True, False, False])
self.assertEqual(tile_id_matcher(test_id_s2), [True, False, False, False])
self.assertEqual(tile_id_matcher(test_id_cop), [False, False, True, False])
self.assertEqual(tile_id_matcher(test_id_srtm), [False, False, True, False])
self.assertEqual(tile_id_matcher(test_id_srtm5x5), [False, False, False, True])
def test_get_tiles_from_tile_id(self):
aux_data_dirpath = Path("eotile/data/aux_data")
output_s2, output_l8, output_dem, output_srtm5x5 = get_tiles_from_tile_id(
["31TCJ"], aux_data_dirpath, False, False, dem=True, srtm5x5=True
)
self.assertEqual(len(output_s2), 1)
self.assertEqual(len(output_l8), 4)
self.assertEqual(len(output_dem), 4)
self.assertEqual(len(output_srtm5x5), 1)
output_s2, output_l8, output_dem, output_srtm5x5 = get_tiles_from_tile_id(
["200035"], aux_data_dirpath, False, False, dem=True, srtm5x5=True
)
self.assertEqual(len(output_s2), 8)
self.assertEqual(len(output_l8), 1)
def test_main_module(self):
output_s2, output_l8, output_dem, output_srtm5x5 = eomain(
"-74.657, 39.4284, -72.0429, 41.2409",
no_l8=False,
no_s2=False,
dem=True,
srtm5x5=True,
)
self.assertEqual(len(output_s2), 12)
self.assertEqual(len(output_l8), 9)
self.assertEqual(len(output_dem), 7)
self.assertEqual(len(output_srtm5x5), 2)
def test_main_module_2(self):
output_s2, output_l8, output_dem, output_srtm5x5 = eomain(
"tests/test_data/illinois.shp",
no_l8=False,
no_s2=False,
dem=True,
srtm5x5=True,
)
self.assertEqual(len(output_s2), 33)
self.assertEqual(len(output_l8), 18)
self.assertEqual(len(output_dem), 27)
self.assertEqual(len(output_srtm5x5), 4)
def test_main_module_3(self):
output_s2, output_l8, output_dem, output_srtm5x5 = eomain(
"Toulouse",
no_l8=False,
no_s2=False,
dem=True,
srtm5x5=True,
threshold=0.1,
)
self.assertEqual(len(output_s2), 1)
self.assertEqual(len(output_l8), 2)
self.assertEqual(len(output_dem), 1)
self.assertEqual(len(output_srtm5x5), 1)
def test_main_module_4(self):
output_s2, output_l8, output_dem, output_srtm5x5 = eomain(
"31TCJ",
no_l8=False,
no_s2=False,
dem=True,
srtm5x5=True,
min_overlap=0.1,
)
self.assertEqual(len(output_s2), 1)
self.assertEqual(len(output_l8), 3)
self.assertEqual(len(output_dem), 4)
self.assertEqual(len(output_srtm5x5), 0)
def test_build_nominatim_request(self):
self.assertTrue(
abs(
build_nominatim_request(None, "Toulouse", "0.1").area
- 0.013155945340939995
)
< 0.005
)
if __name__ == "__main__":
logging.basicConfig(filename="test_eotile.log", level=logging.INFO)
unittest.main()
| 1.953125 | 2 |
core/models.py | aashish01/FYP-Project | 0 | 12793301 | <filename>core/models.py
from django.db import models
from django.contrib.auth.models import User
from django_countries.fields import CountryField
from django.core.validators import MinValueValidator, MaxValueValidator
# Create your models here.
from django.shortcuts import reverse
from django.db.models.signals import pre_save
from Ecommerce.utils import unique_slug_generator
CATEGORY_CHOICES = (
('S', 'Shirt'),
('T', 'T-Shirt'),
('H', 'Hoodies'),
('P', 'Pants'),
('SW', 'Sport wear')
)
LABEL_CHOICES = (
('P', 'primary'),
('S', 'secondary'),
('D', 'danger')
)
ADDRESS_CHOICES = (
('B', 'Billing'),
('S', 'Shipping'),
)
AVAILABILITY_PRODUCT = (
('S', 'In Stock'),
('0', 'Out Of Range')
)
class Post(models.Model):
title1 = models.CharField(max_length=120)
title2 = models.CharField(max_length=100, blank=True, null=True)
description = models.TextField()
image = models.ImageField()
def __str__(self):
return self.title1
class Item(models.Model):
title = models.CharField(max_length=100)
price = models.FloatField()
discount_price = models.FloatField(blank=True, null=True)
category = models.CharField(choices=CATEGORY_CHOICES, max_length=2)
label = models.CharField(choices=LABEL_CHOICES, max_length=1)
availabily = models.CharField(
choices=AVAILABILITY_PRODUCT, max_length=1, blank=True, null=True)
slug = models.SlugField(blank=True, null=True)
description = models.TextField()
image = models.ImageField()
def __str__(self):
return self.title
def slug_generator(sender, instance, *args, **kwargs):
if not instance.slug:
instance.slug = unique_slug_generator(instance)
pre_save.connect(slug_generator, sender=Item)
class OrderItem(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
item = models.ForeignKey(Item, on_delete=models.CASCADE)
quantity = models.IntegerField(default=1)
ordered = models.BooleanField(default=False)
def __str__(self):
return f"{self.quantity} of {self.item.title}"
def get_total_item_price(self):
return self.quantity * self.item.price
def get_total_discount_item_price(self):
return self.quantity * self.item.discount_price
def get_amount_saved(self):
return self.get_total_item_price() - self.get_total_discount_item_price()
def get_final_price(self):
if self.item.discount_price:
return self.get_total_discount_item_price()
return self.get_total_item_price()
class Coupon(models.Model):
code = models.CharField(max_length=15)
amount = models.FloatField()
def __str__(self):
return self.code
class Order(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
ref_code = models.CharField(max_length=20, blank=True, null=True)
ordered = models.BooleanField(default=False)
items = models.ManyToManyField(OrderItem)
start_date = models.DateTimeField(auto_now_add=True)
ordered_date = models.DateTimeField()
billing_address = models.ForeignKey(
'Address', related_name='billing_address', on_delete=models.SET_NULL, blank=True, null=True)
shipping_address = models.ForeignKey(
'Address', related_name='shipping_address', on_delete=models.SET_NULL, blank=True, null=True)
payment = models.ForeignKey(
'Payment', on_delete=models.SET_NULL, blank=True, null=True)
coupon = models.ForeignKey(
'Coupon', on_delete=models.SET_NULL, blank=True, null=True)
being_received = models.BooleanField(default=False)
received = models.BooleanField(default=False)
refund_requested = models.BooleanField(default=False)
refund_granted = models.BooleanField(default=False)
    # Status flags above track delivery progress and refund handling for the order.
def __str__(self):
return self.user.username
def get_total(self):
total = 0
for order_item in self.items.all():
total += order_item.get_final_price()
if self.coupon:
total -= self.coupon.amount
return total
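    # Worked example (illustrative, not in the original code): with two line
    # items -- quantity 2 at a discount_price of 10.0 and quantity 1 at a
    # plain price of 5.0 -- get_total() returns 2 * 10.0 + 1 * 5.0 = 25.0,
    # reduced by coupon.amount when a coupon is attached to the order.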
class Address(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
street_address = models.CharField(max_length=100)
apartment_address = models.CharField(max_length=100)
country = CountryField(multiple=False)
zip = models.CharField(max_length=100)
address_type = models.CharField(max_length=1, choices=ADDRESS_CHOICES)
default = models.BooleanField(default=False)
def __str__(self):
return self.user.username
class Payment(models.Model):
stripe_charge_id = models.CharField(max_length=50)
user = models.ForeignKey(
User, on_delete=models.SET_NULL, blank=True, null=True)
amount = models.FloatField()
timestamp = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.username
class UserProfile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
one_click_purchasing = models.BooleanField(default=False)
def __str__(self):
return self.user.username
class Refund(models.Model):
order = models.ForeignKey(Order, on_delete=models.CASCADE)
reason = models.TextField()
accepted = models.BooleanField(default=False)
email = models.EmailField()
def __str__(self):
return f"{self.pk}"
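# Illustrative ORM usage sketch (not part of the app); the slug below is
# hypothetical and assumes migrations have been applied and a user exists:
#
#     user = User.objects.first()
#     item = Item.objects.get(slug='some-shirt')
#     OrderItem.objects.create(user=user, item=item, quantity=2)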
| 2.109375 | 2 |
solutions/python3/116.py | sm2774us/amazon_interview_prep_2021 | 42 | 12793302 | <reponame>sm2774us/amazon_interview_prep_2021
class Solution:
    def connect(self, root: "Node") -> "Node":
        # BFS over the tree, tracking each node's level so that nodes on the
        # same level can be chained together through their next pointers.
        if root is None:
            return root
        q, prev = [(root, 1)], None
        while q:
            curr, pos = q.pop(0)
            # link the previously visited node to the current one when both
            # sit on the same level
            if prev is not None and prev[1] == pos:
                prev[0].next = curr
            prev = [curr, pos]
            if curr.left:
                q.append((curr.left, pos + 1))
            if curr.right:
                q.append((curr.right, pos + 1))
        return root
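# Alternative sketch (not part of the original solution): for a perfect binary
# tree the same wiring can be done without a queue, walking each level through
# the next pointers already built on the level above (O(1) extra space).
class SolutionConstantSpace:
    def connect(self, root: "Node") -> "Node":
        leftmost = root
        while leftmost and leftmost.left:
            head = leftmost
            while head:
                # connect the two children of the current node
                head.left.next = head.right
                # bridge the gap to the neighbouring subtree on the same level
                if head.next:
                    head.right.next = head.next.left
                head = head.next
            # drop down to the next level
            leftmost = leftmost.left
        return root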
| 3.5625 | 4 |
dataAnalysis.py | RyanRasi/Stock-Market-Predictor | 0 | 12793303 | import matplotlib.pyplot as plt
import pandas as pd
#Data from source
stockData = './stock_market_data-AAPL'
df = pd.read_csv(stockData + ".csv")
# Sort DataFrame by date
df = df.sort_values('Date')
# Gets all of the rows
df.head()
#Plots figure
plt.figure(figsize = (18,9))
plt.plot(range(df.shape[0]),(df['Low']+df['High'])/2.0)
plt.xticks(range(0,df.shape[0],500),df['Date'].loc[::500],rotation=45)
plt.title(stockData.replace("./stock_market_data-", ""),fontsize=18)
plt.xlabel('Date',fontsize=18)
plt.ylabel('Mid Price',fontsize=18)
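# Optional overlay sketch (not in the original script): a 50-day rolling mean
# of the mid price could be drawn on the same axes with pandas before showing:
#
#     mid = (df['Low'] + df['High']) / 2.0
#     plt.plot(range(df.shape[0]), mid.rolling(50).mean(), label='50-day mean')
#     plt.legend()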
plt.show() | 3.34375 | 3 |
App.py | RRFreitas/Projeto_APS | 0 | 12793304 | <reponame>RRFreitas/Projeto_APS
from Sistema import Sistema
def main():
sistema = Sistema()
sistema.menuPrincipal()
if __name__ == '__main__':
main() | 1.460938 | 1 |
src/visualization/__init__.py | vvrahul11/sentiment_analysis | 0 | 12793305 | from .visualize import plot_confusion_matrix | 1.023438 | 1 |
azplugins/test-py/test_mpcd_sinusoidal_channel.py | astatt/azplugins | 10 | 12793306 | <filename>azplugins/test-py/test_mpcd_sinusoidal_channel.py
# Copyright (c) 2018-2020, <NAME>
# Copyright (c) 2021-2022, Auburn University
# This file is part of the azplugins project, released under the Modified BSD License.
# Maintainer: astatt
import unittest
import numpy as np
import hoomd
from hoomd import md
from hoomd import mpcd
try:
from hoomd import azplugins
import hoomd.azplugins.mpcd
except ImportError:
import azplugins
import azplugins.mpcd
import unittest
# compute MPI ranks for skipping some tests
hoomd.context.initialize()
num_ranks = hoomd.comm.get_num_ranks()
# unit tests for sinusoidal_channel geometry
class mpcd_sinusoidal_channel_test(unittest.TestCase):
def setUp(self):
hoomd.context.initialize()
# set the decomposition in z for mpi builds
if hoomd.comm.get_num_ranks() > 1:
hoomd.comm.decomposition(nz=2)
# default testing configuration
hoomd.init.read_snapshot(hoomd.data.make_snapshot(N=0, box=hoomd.data.boxdim(L=20.)))
# initialize the system from the starting snapshot
# test vertical, diagonal, and horizontal collisions to wall
snap = mpcd.data.make_snapshot(N=3)
snap.particles.position[:] = [[0.,-3.0,5.85],[1.55,0.,5.5],[0.0,0.0,2.2]]
snap.particles.velocity[:] = [[0,0.,1.],[1.,0.,0.],[-1.,-1.,-1.]]
self.s = mpcd.init.read_snapshot(snap)
mpcd.integrator(dt=0.1)
# test creation can happen (with all parameters set)
def test_create(self):
azplugins.mpcd.sinusoidal_channel(A=4., h=2., p=1,boundary="no_slip")
# test for setting parameters
def test_set_params(self):
channel = azplugins.mpcd.sinusoidal_channel(A=4.,h=2., p=1)
self.assertAlmostEqual(channel.A, 4.)
self.assertEqual(channel.boundary, "no_slip")
self.assertAlmostEqual(channel._cpp.geometry.getAmplitude(), 4.)
self.assertEqual(channel._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)
# change H and also ensure other parameters stay the same
channel.set_params(A=2.)
self.assertAlmostEqual(channel.A, 2.)
self.assertEqual(channel.boundary, "no_slip")
self.assertAlmostEqual(channel._cpp.geometry.getAmplitude(), 2.)
self.assertEqual(channel._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.no_slip)
# change BCs
channel.set_params(boundary="slip")
self.assertEqual(channel.boundary, "slip")
self.assertEqual(channel._cpp.geometry.getBoundaryCondition(), mpcd._mpcd.boundary.slip)
# test for invalid boundary conditions being set
def test_bad_boundary(self):
channel = azplugins.mpcd.sinusoidal_channel(A=4., h=2., p=1)
channel.set_params(boundary="no_slip")
channel.set_params(boundary="slip")
with self.assertRaises(ValueError):
channel.set_params(boundary="invalid")
# test that setting the cosine size too large raises an error
def test_validate_box(self):
# initial configuration is invalid
channel = azplugins.mpcd.sinusoidal_channel(A=10.,h=2., p=1)
with self.assertRaises(RuntimeError):
hoomd.run(1)
# now it should be valid
channel.set_params(A=4.,h=2. ,p=1)
hoomd.run(2)
# make sure we can invalidate it again
channel.set_params(A=10.,h=2. ,p=1)
with self.assertRaises(RuntimeError):
hoomd.run(1)
# test that particles out of bounds can be caught
def test_out_of_bounds(self):
channel = azplugins.mpcd.sinusoidal_channel(A=2., h=1., p=1)
with self.assertRaises(RuntimeError):
hoomd.run(1)
channel.set_params(A=5.,h=2. ,p=1)
hoomd.run(1)
# test basic stepping behavior with no slip boundary conditions
def test_step_noslip(self):
azplugins.mpcd.sinusoidal_channel(A=4.,h=2., p=1, boundary='no_slip')
# take one step, particle 1 hits the wall
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [0,-3.0,5.95])
np.testing.assert_array_almost_equal(snap.particles.position[1], [1.567225,0.0,5.5])
np.testing.assert_array_almost_equal(snap.particles.position[2], [-0.1,-0.1,2.1])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [0,0,1.])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1,0,0])
np.testing.assert_array_almost_equal(snap.particles.velocity[2], [-1,-1,-1])
# particle 0 hits the highest spot and is reflected back
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [0,-3.0,5.95])
np.testing.assert_array_almost_equal(snap.particles.position[1], [1.467225,0.0,5.5])
np.testing.assert_array_almost_equal(snap.particles.position[2], [-0.2,-0.2,2.0])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [0,0,-1.])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1,0,0])
np.testing.assert_array_almost_equal(snap.particles.velocity[2], [-1,-1,-1])
# particle 2 collides diagonally
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [0,-3.0,5.85])
np.testing.assert_array_almost_equal(snap.particles.position[1], [1.367225,0.0,5.5])
np.testing.assert_array_almost_equal(snap.particles.position[2], [-0.11717,-0.11717,2.08283])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [0,0,-1.])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [-1,0,0])
np.testing.assert_array_almost_equal(snap.particles.velocity[2], [1,1,1])
    # same as the test above, except for slip boundaries -> velocities differ
def test_step_slip(self):
azplugins.mpcd.sinusoidal_channel(A=4.,h=2. ,p=1, boundary="slip")
# take one step, particle 1 hits the wall
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [0,-3.0,5.95])
np.testing.assert_array_almost_equal(snap.particles.position[1], [1.62764,0,5.463246])
np.testing.assert_array_almost_equal(snap.particles.position[2], [-0.1,-0.1,2.1])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [0,0,1.])
np.testing.assert_array_almost_equal(snap.particles.velocity[1], [0.459737,0,-0.888055])
np.testing.assert_array_almost_equal(snap.particles.velocity[2], [-1,-1,-1])
# take one step, particle 0 hits the wall (same as for no_slip, because it's vertical)
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[0], [0,-3.0,5.95])
np.testing.assert_array_almost_equal(snap.particles.position[2], [-0.2,-0.2,2.0])
np.testing.assert_array_almost_equal(snap.particles.velocity[0], [0,0,-1.])
np.testing.assert_array_almost_equal(snap.particles.velocity[2], [-1,-1,-1])
# take another step, particle 2 hits the wall
hoomd.run(1)
snap = self.s.take_snapshot()
if hoomd.comm.get_rank() == 0:
np.testing.assert_array_almost_equal(snap.particles.position[2], [-0.313714,-0.3,2.066657])
np.testing.assert_array_almost_equal(snap.particles.velocity[2], [-1.150016, -1.,0.823081])
# test that virtual particle filler can be attached, removed, and updated
@unittest.skipIf(num_ranks > 1,"MPI not supported")
def test_filler(self):
# initialization of a filler
channel = azplugins.mpcd.sinusoidal_channel(A=4.,h=2. ,p=1)
channel.set_filler(density=5., kT=1.0, seed=42, type='A')
self.assertTrue(channel._filler is not None)
# run should be able to setup the filler, although this all happens silently
hoomd.run(1)
# changing filler should be allowed
channel.set_filler(density=10., kT=1.5, seed=7)
self.assertTrue(channel._filler is not None)
hoomd.run(1)
# assert an error is raised if we set a bad particle type
with self.assertRaises(RuntimeError):
channel.set_filler(density=5., kT=1.0, seed=42, type='B')
# assert an error is raised if we set a bad density
with self.assertRaises(RuntimeError):
channel.set_filler(density=-1.0, kT=1.0, seed=42)
# removing the filler should still allow a run
channel.remove_filler()
self.assertTrue(channel._filler is None)
hoomd.run(1)
def tearDown(self):
del self.s
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
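# Note (not from the original file): the MPI-dependent branches above only run
# when the script is launched under MPI with an MPI-enabled HOOMD build, e.g.
#
#     mpirun -n 2 python test_mpcd_sinusoidal_channel.py
#
# a plain serial invocation keeps hoomd.comm.get_num_ranks() == 1.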
| 2.09375 | 2 |
ftp.py | wuzhanghui/postrate | 0 | 12793307 | from ftplib import FTP
import time
import tarfile
import shutil
import os
def ftpconnect(host, username, password):
ftp = FTP()
ftp.set_pasv(0)
ftp.set_debuglevel(2)
ftp.connect(host, 21)
ftp.login(username, password)
ftp.encoding = "utf-8"
return ftp
def downloadfile(ftp, remotepath, localpath):
bufsize = 1024
fp = open(localpath, 'wb')
ftp.retrbinary('RETR ' + remotepath, fp.write, bufsize)
    # receive the file from the server and write it into the local file
    ftp.set_debuglevel(0)  # turn off debugging output
    fp.close()  # close the local file
def uploadfile(ftp, remotepath, localpath):
bufsize = 1024
fp = open(localpath, 'rb')
    ftp.storbinary('STOR ' + remotepath, fp, bufsize)  # upload the local file
ftp.set_debuglevel(0)
# fp.seek(0)
fp.close()
if __name__ == "__main__":
path = './rate/'
f0, f1, f2, f3, f4, f5 = 0, 0, 0, 0, 0, 0
print(f1)
try:
ftp0 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp0, "./下赢用上模型局前预估/" + "下赢用上模型局前预估" + str(time.time()) + ".csv", path + "下赢用上模型局前预估.csv")
ftp0.quit()
except:
f0 = 1
try:
ftp2 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp2, "./地上赢时局前预估/" + "地上赢时局前预估" + str(time.time()) + ".csv", path + "地上赢时局前预估.csv")
ftp2.quit()
except:
f2 = 1
try:
ftp1 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp1, "./地主赢时叫牌胜率/" + "地主赢时叫牌胜率" + str(time.time()) + ".csv", path + "地主赢时叫牌胜率.csv")
ftp1.quit()
except:
f1 = 1
try:
ftp3 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp3, "./地主赢时局前预估/" + "地主赢时局前预估" + str(time.time()) + ".csv", path + "地主赢时局前预估.csv")
ftp3.quit()
except:
f3 = 1
try:
ftp4 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp4, "./地主输时叫牌胜率/" + "地主输时叫牌胜率" + str(time.time()) + ".csv", path + "地主输时叫牌胜率.csv")
ftp4.quit()
except:
f4 = 1
try:
ftp5 = ftpconnect("[240b:250:280:cb00:8171:63df:dae6:187b]", "rate", "")
uploadfile(ftp5, "./地主输时局前预估/" + "地主输时局前预估" + str(time.time()) + ".csv", path + "地主输时局前预估.csv")
ftp5.quit()
except:
f5 = 1
if f0 != 1 and f1 != 1 and f2 != 1 and f3 != 1 and f4 != 1 and f5 != 1:
shutil.rmtree("./rate/")
print(f0,f1,f2,f3,f4,f5)
#os.system("pause")
shutil.copytree("./sample", "./rate/")
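# Condensed sketch (not part of the original script): the six per-category
# upload blocks above could be collapsed into one loop; the folder names are
# assumed to be the same category names used in the blocks above.
def upload_all(host, user, password, categories, path='./rate/'):
    failed = []
    for name in categories:
        try:
            ftp = ftpconnect(host, user, password)
            remote = "./%s/%s%s.csv" % (name, name, time.time())
            uploadfile(ftp, remote, path + name + ".csv")
            ftp.quit()
        except Exception:
            failed.append(name)
    return failed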
| 2.921875 | 3 |
hermes_fix/message_lib/FIX_4_2/fix_messages.py | yabov/hermes_fix | 2 | 12793308 |
from ... import fix_message
from . import fields
from . import field_types
BEGINSTRING = 'FIX.4.2'
MESSAGE_TYPES = {}
class Header(fix_message.MessageBase):
def __init__(self):
super().__init__()
register_StandardHeader_component(self)
class Trailer(fix_message.MessageBase):
def __init__(self):
super().__init__()
register_StandardTrailer_component(self)
##############Begin Repeating Groups###############
class NoIOIQualifiersGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.IOIQualifier, False)
class NoRoutingIDsGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.RoutingType, False)
self.register_field(fields.RoutingID, False)
class NoContraBrokersGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.ContraBroker, False)
self.register_field(fields.ContraTrader, False)
self.register_field(fields.ContraTradeQty, False)
self.register_field(fields.ContraTradeTime, False)
class NoMsgTypesGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.RefMsgType, False)
self.register_field(fields.MsgDirection, False)
class NoRelatedSymGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.RelatdSym, False)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
class LinesOfTextGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.Text, True)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
class NoAllocsGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.AllocAccount, False)
self.register_field(fields.AllocShares, False)
class NoTradingSessionsGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.TradingSessionID, False)
class NoOrdersGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.ClOrdID, True)
self.register_field(fields.ListSeqNo, True)
self.register_field(fields.SettlInstMode, False)
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.Account, False)
self.register_group(fields.NoAllocs, NoAllocsGroup, False)
self.register_field(fields.SettlmntTyp, False)
self.register_field(fields.FutSettDate, False)
self.register_field(fields.HandlInst, False)
self.register_field(fields.ExecInst, False)
self.register_field(fields.MinQty, False)
self.register_field(fields.MaxFloor, False)
self.register_field(fields.ExDestination, False)
self.register_group(fields.NoTradingSessions, NoTradingSessionsGroup, False)
self.register_field(fields.ProcessCode, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.PrevClosePx, False)
self.register_field(fields.Side, True)
self.register_field(fields.SideValueInd, False)
self.register_field(fields.LocateReqd, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.OrderQty, False)
self.register_field(fields.CashOrderQty, False)
self.register_field(fields.OrdType, False)
self.register_field(fields.Price, False)
self.register_field(fields.StopPx, False)
self.register_field(fields.Currency, False)
self.register_field(fields.ComplianceID, False)
self.register_field(fields.SolicitedFlag, False)
self.register_field(fields.IOIid, False)
self.register_field(fields.QuoteID, False)
self.register_field(fields.TimeInForce, False)
self.register_field(fields.EffectiveTime, False)
self.register_field(fields.ExpireDate, False)
self.register_field(fields.ExpireTime, False)
self.register_field(fields.GTBookingInst, False)
self.register_field(fields.Commission, False)
self.register_field(fields.CommType, False)
self.register_field(fields.Rule80A, False)
self.register_field(fields.ForexReq, False)
self.register_field(fields.SettlCurrency, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.FutSettDate2, False)
self.register_field(fields.OrderQty2, False)
self.register_field(fields.OpenClose, False)
self.register_field(fields.CoveredOrUncovered, False)
self.register_field(fields.CustomerOrFirm, False)
self.register_field(fields.MaxShow, False)
self.register_field(fields.PegDifference, False)
self.register_field(fields.DiscretionInst, False)
self.register_field(fields.DiscretionOffset, False)
self.register_field(fields.ClearingFirm, False)
self.register_field(fields.ClearingAccount, False)
class NoExecsGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.LastShares, False)
self.register_field(fields.ExecID, False)
self.register_field(fields.LastPx, False)
self.register_field(fields.LastCapacity, False)
class NoMiscFeesGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.MiscFeeAmt, False)
self.register_field(fields.MiscFeeCurr, False)
self.register_field(fields.MiscFeeType, False)
class NoMDEntryTypesGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.MDEntryType, True)
class NoMDEntriesGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.MDEntryType, True)
self.register_field(fields.MDEntryPx, True)
self.register_field(fields.Currency, False)
self.register_field(fields.MDEntrySize, False)
self.register_field(fields.MDEntryDate, False)
self.register_field(fields.MDEntryTime, False)
self.register_field(fields.TickDirection, False)
self.register_field(fields.MDMkt, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.QuoteCondition, False)
self.register_field(fields.TradeCondition, False)
self.register_field(fields.MDEntryOriginator, False)
self.register_field(fields.LocationID, False)
self.register_field(fields.DeskID, False)
self.register_field(fields.OpenCloseSettleFlag, False)
self.register_field(fields.TimeInForce, False)
self.register_field(fields.ExpireDate, False)
self.register_field(fields.ExpireTime, False)
self.register_field(fields.MinQty, False)
self.register_field(fields.ExecInst, False)
self.register_field(fields.SellerDays, False)
self.register_field(fields.OrderID, False)
self.register_field(fields.QuoteEntryID, False)
self.register_field(fields.MDEntryBuyer, False)
self.register_field(fields.MDEntrySeller, False)
self.register_field(fields.NumberOfOrders, False)
self.register_field(fields.MDEntryPositionNo, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
class NoQuoteEntriesGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.UnderlyingSymbol, False)
class NoQuoteSetsGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.QuoteSetID, False)
self.register_field(fields.UnderlyingSymbol, False)
self.register_field(fields.UnderlyingSymbolSfx, False)
self.register_field(fields.UnderlyingSecurityID, False)
self.register_field(fields.UnderlyingIDSource, False)
self.register_field(fields.UnderlyingSecurityType, False)
self.register_field(fields.UnderlyingMaturityMonthYear, False)
self.register_field(fields.UnderlyingMaturityDay, False)
self.register_field(fields.UnderlyingPutOrCall, False)
self.register_field(fields.UnderlyingStrikePrice, False)
self.register_field(fields.UnderlyingOptAttribute, False)
self.register_field(fields.UnderlyingContractMultiplier, False)
self.register_field(fields.UnderlyingCouponRate, False)
self.register_field(fields.UnderlyingSecurityExchange, False)
self.register_field(fields.UnderlyingIssuer, False)
self.register_field(fields.EncodedUnderlyingIssuerLen, False)
self.register_field(fields.EncodedUnderlyingIssuer, False)
self.register_field(fields.UnderlyingSecurityDesc, False)
self.register_field(fields.EncodedUnderlyingSecurityDescLen, False)
self.register_field(fields.EncodedUnderlyingSecurityDesc, False)
self.register_field(fields.TotQuoteEntries, False)
self.register_group(fields.NoQuoteEntries, NoQuoteEntriesGroup, False)
class NoBidDescriptorsGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.BidDescriptorType, False)
self.register_field(fields.BidDescriptor, False)
self.register_field(fields.SideValueInd, False)
self.register_field(fields.LiquidityValue, False)
self.register_field(fields.LiquidityNumSecurities, False)
self.register_field(fields.LiquidityPctLow, False)
self.register_field(fields.LiquidityPctHigh, False)
self.register_field(fields.EFPTrackingError, False)
self.register_field(fields.FairValue, False)
self.register_field(fields.OutsideIndexPct, False)
self.register_field(fields.ValueOfFutures, False)
class NoBidComponentsGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.ListID, False)
self.register_field(fields.Side, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.NetGrossInd, False)
self.register_field(fields.SettlmntTyp, False)
self.register_field(fields.FutSettDate, False)
self.register_field(fields.Account, False)
class NoStrikesGroup(fix_message.FIXGroup):
def __init__(self, value = None):
super().__init__(value)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.PrevClosePx, False)
self.register_field(fields.ClOrdID, False)
self.register_field(fields.Side, False)
self.register_field(fields.Price, True)
self.register_field(fields.Currency, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
##############End Repeating Groups###############
##############Begin Components###############
def register_StandardHeader_component(self):
self.register_field(fields.BeginString, True)
self.register_field(fields.BodyLength, True)
self.register_field(fields.MsgType, True)
self.register_field(fields.SenderCompID, True)
self.register_field(fields.TargetCompID, True)
self.register_field(fields.OnBehalfOfCompID, False)
self.register_field(fields.DeliverToCompID, False)
self.register_field(fields.SecureDataLen, False)
self.register_field(fields.SecureData, False)
self.register_field(fields.MsgSeqNum, True)
self.register_field(fields.SenderSubID, False)
self.register_field(fields.SenderLocationID, False)
self.register_field(fields.TargetSubID, False)
self.register_field(fields.TargetLocationID, False)
self.register_field(fields.OnBehalfOfSubID, False)
self.register_field(fields.OnBehalfOfLocationID, False)
self.register_field(fields.DeliverToSubID, False)
self.register_field(fields.DeliverToLocationID, False)
self.register_field(fields.PossDupFlag, False)
self.register_field(fields.PossResend, False)
self.register_field(fields.SendingTime, True)
self.register_field(fields.OrigSendingTime, False)
self.register_field(fields.XmlDataLen, False)
self.register_field(fields.XmlData, False)
self.register_field(fields.MessageEncoding, False)
self.register_field(fields.LastMsgSeqNumProcessed, False)
self.register_field(fields.OnBehalfOfSendingTime, False)
def register_StandardTrailer_component(self):
self.register_field(fields.SignatureLength, False)
self.register_field(fields.Signature, False)
self.register_field(fields.CheckSum, True)
##############End Components###############
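# Illustrative helper (not part of the generated message classes): the
# CheckSum(10) field registered in the trailer above is, per the FIX standard,
# the byte sum of the message up to and including the SOH that precedes the
# checksum field, modulo 256, rendered as three digits.
def fix_checksum(raw: bytes) -> str:
    return '%03d' % (sum(raw) % 256)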
class Heartbeat(fix_message.MessageBase):
_msgtype = '0'
_msgcat = 'admin'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.TestReqID, False)
MESSAGE_TYPES['0'] = Heartbeat
class TestRequest(fix_message.MessageBase):
_msgtype = '1'
_msgcat = 'admin'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.TestReqID, True)
MESSAGE_TYPES['1'] = TestRequest
class ResendRequest(fix_message.MessageBase):
_msgtype = '2'
_msgcat = 'admin'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.BeginSeqNo, True)
self.register_field(fields.EndSeqNo, True)
MESSAGE_TYPES['2'] = ResendRequest
class Reject(fix_message.MessageBase):
_msgtype = '3'
_msgcat = 'admin'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.RefSeqNum, True)
self.register_field(fields.RefTagID, False)
self.register_field(fields.RefMsgType, False)
self.register_field(fields.SessionRejectReason, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['3'] = Reject
class SequenceReset(fix_message.MessageBase):
_msgtype = '4'
_msgcat = 'admin'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.GapFillFlag, False)
self.register_field(fields.NewSeqNo, True)
MESSAGE_TYPES['4'] = SequenceReset
class Logout(fix_message.MessageBase):
_msgtype = '5'
_msgcat = 'admin'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['5'] = Logout
class IOI(fix_message.MessageBase):
_msgtype = '6'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.IOIid, True)
self.register_field(fields.IOITransType, True)
self.register_field(fields.IOIRefID, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Side, True)
self.register_field(fields.IOIShares, True)
self.register_field(fields.Price, False)
self.register_field(fields.Currency, False)
self.register_field(fields.ValidUntilTime, False)
self.register_field(fields.IOIQltyInd, False)
self.register_field(fields.IOINaturalFlag, False)
self.register_group(fields.NoIOIQualifiers, NoIOIQualifiersGroup, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.URLLink, False)
self.register_group(fields.NoRoutingIDs, NoRoutingIDsGroup, False)
self.register_field(fields.SpreadToBenchmark, False)
self.register_field(fields.Benchmark, False)
MESSAGE_TYPES['6'] = IOI
class Advertisement(fix_message.MessageBase):
_msgtype = '7'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.AdvId, True)
self.register_field(fields.AdvTransType, True)
self.register_field(fields.AdvRefID, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.AdvSide, True)
self.register_field(fields.Shares, True)
self.register_field(fields.Price, False)
self.register_field(fields.Currency, False)
self.register_field(fields.TradeDate, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.URLLink, False)
self.register_field(fields.LastMkt, False)
self.register_field(fields.TradingSessionID, False)
MESSAGE_TYPES['7'] = Advertisement
class ExecutionReport(fix_message.MessageBase):
_msgtype = '8'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.OrderID, True)
self.register_field(fields.SecondaryOrderID, False)
self.register_field(fields.ClOrdID, False)
self.register_field(fields.OrigClOrdID, False)
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_group(fields.NoContraBrokers, NoContraBrokersGroup, False)
self.register_field(fields.ListID, False)
self.register_field(fields.ExecID, True)
self.register_field(fields.ExecTransType, True)
self.register_field(fields.ExecRefID, False)
self.register_field(fields.ExecType, True)
self.register_field(fields.OrdStatus, True)
self.register_field(fields.OrdRejReason, False)
self.register_field(fields.ExecRestatementReason, False)
self.register_field(fields.Account, False)
self.register_field(fields.SettlmntTyp, False)
self.register_field(fields.FutSettDate, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Side, True)
self.register_field(fields.OrderQty, False)
self.register_field(fields.CashOrderQty, False)
self.register_field(fields.OrdType, False)
self.register_field(fields.Price, False)
self.register_field(fields.StopPx, False)
self.register_field(fields.PegDifference, False)
self.register_field(fields.DiscretionInst, False)
self.register_field(fields.DiscretionOffset, False)
self.register_field(fields.Currency, False)
self.register_field(fields.ComplianceID, False)
self.register_field(fields.SolicitedFlag, False)
self.register_field(fields.TimeInForce, False)
self.register_field(fields.EffectiveTime, False)
self.register_field(fields.ExpireDate, False)
self.register_field(fields.ExpireTime, False)
self.register_field(fields.ExecInst, False)
self.register_field(fields.Rule80A, False)
self.register_field(fields.LastShares, False)
self.register_field(fields.LastPx, False)
self.register_field(fields.LastSpotRate, False)
self.register_field(fields.LastForwardPoints, False)
self.register_field(fields.LastMkt, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.LastCapacity, False)
self.register_field(fields.LeavesQty, True)
self.register_field(fields.CumQty, True)
self.register_field(fields.AvgPx, True)
self.register_field(fields.DayOrderQty, False)
self.register_field(fields.DayCumQty, False)
self.register_field(fields.DayAvgPx, False)
self.register_field(fields.GTBookingInst, False)
self.register_field(fields.TradeDate, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.ReportToExch, False)
self.register_field(fields.Commission, False)
self.register_field(fields.CommType, False)
self.register_field(fields.GrossTradeAmt, False)
self.register_field(fields.SettlCurrAmt, False)
self.register_field(fields.SettlCurrency, False)
self.register_field(fields.SettlCurrFxRate, False)
self.register_field(fields.SettlCurrFxRateCalc, False)
self.register_field(fields.HandlInst, False)
self.register_field(fields.MinQty, False)
self.register_field(fields.MaxFloor, False)
self.register_field(fields.OpenClose, False)
self.register_field(fields.MaxShow, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.FutSettDate2, False)
self.register_field(fields.OrderQty2, False)
self.register_field(fields.ClearingFirm, False)
self.register_field(fields.ClearingAccount, False)
self.register_field(fields.MultiLegReportingType, False)
MESSAGE_TYPES['8'] = ExecutionReport
class OrderCancelReject(fix_message.MessageBase):
_msgtype = '9'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.OrderID, True)
self.register_field(fields.SecondaryOrderID, False)
self.register_field(fields.ClOrdID, True)
self.register_field(fields.OrigClOrdID, True)
self.register_field(fields.OrdStatus, True)
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.ListID, False)
self.register_field(fields.Account, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.CxlRejResponseTo, True)
self.register_field(fields.CxlRejReason, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['9'] = OrderCancelReject
class Logon(fix_message.MessageBase):
_msgtype = 'A'
_msgcat = 'admin'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.EncryptMethod, True)
self.register_field(fields.HeartBtInt, True)
self.register_field(fields.RawDataLength, False)
self.register_field(fields.RawData, False)
self.register_field(fields.ResetSeqNumFlag, False)
self.register_field(fields.MaxMessageSize, False)
self.register_group(fields.NoMsgTypes, NoMsgTypesGroup, False)
MESSAGE_TYPES['A'] = Logon
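# Illustrative lookup (not part of the generated module): the MESSAGE_TYPES
# registry maps a MsgType(35) value to its message class, e.g.
#
#     msg_cls = MESSAGE_TYPES['A']   # -> Logon
#     logon = msg_cls()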
class News(fix_message.MessageBase):
_msgtype = 'B'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.OrigTime, False)
self.register_field(fields.Urgency, False)
self.register_field(fields.Headline, True)
self.register_field(fields.EncodedHeadlineLen, False)
self.register_field(fields.EncodedHeadline, False)
self.register_group(fields.NoRoutingIDs, NoRoutingIDsGroup, False)
self.register_group(fields.NoRelatedSym, NoRelatedSymGroup, False)
self.register_group(fields.LinesOfText, LinesOfTextGroup, True)
self.register_field(fields.URLLink, False)
self.register_field(fields.RawDataLength, False)
self.register_field(fields.RawData, False)
MESSAGE_TYPES['B'] = News
class Email(fix_message.MessageBase):
_msgtype = 'C'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.EmailThreadID, True)
self.register_field(fields.EmailType, True)
self.register_field(fields.OrigTime, False)
self.register_field(fields.Subject, True)
self.register_field(fields.EncodedSubjectLen, False)
self.register_field(fields.EncodedSubject, False)
self.register_group(fields.NoRoutingIDs, NoRoutingIDsGroup, False)
self.register_group(fields.NoRelatedSym, NoRelatedSymGroup, False)
self.register_field(fields.OrderID, False)
self.register_field(fields.ClOrdID, False)
self.register_group(fields.LinesOfText, LinesOfTextGroup, True)
self.register_field(fields.RawDataLength, False)
self.register_field(fields.RawData, False)
MESSAGE_TYPES['C'] = Email
class OrderSingle(fix_message.MessageBase):
_msgtype = 'D'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ClOrdID, True)
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.Account, False)
self.register_group(fields.NoAllocs, NoAllocsGroup, False)
self.register_field(fields.SettlmntTyp, False)
self.register_field(fields.FutSettDate, False)
self.register_field(fields.HandlInst, True)
self.register_field(fields.ExecInst, False)
self.register_field(fields.MinQty, False)
self.register_field(fields.MaxFloor, False)
self.register_field(fields.ExDestination, False)
self.register_group(fields.NoTradingSessions, NoTradingSessionsGroup, False)
self.register_field(fields.ProcessCode, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.PrevClosePx, False)
self.register_field(fields.Side, True)
self.register_field(fields.LocateReqd, False)
self.register_field(fields.TransactTime, True)
self.register_field(fields.OrderQty, False)
self.register_field(fields.CashOrderQty, False)
self.register_field(fields.OrdType, True)
self.register_field(fields.Price, False)
self.register_field(fields.StopPx, False)
self.register_field(fields.Currency, False)
self.register_field(fields.ComplianceID, False)
self.register_field(fields.SolicitedFlag, False)
self.register_field(fields.IOIid, False)
self.register_field(fields.QuoteID, False)
self.register_field(fields.TimeInForce, False)
self.register_field(fields.EffectiveTime, False)
self.register_field(fields.ExpireDate, False)
self.register_field(fields.ExpireTime, False)
self.register_field(fields.GTBookingInst, False)
self.register_field(fields.Commission, False)
self.register_field(fields.CommType, False)
self.register_field(fields.Rule80A, False)
self.register_field(fields.ForexReq, False)
self.register_field(fields.SettlCurrency, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.FutSettDate2, False)
self.register_field(fields.OrderQty2, False)
self.register_field(fields.OpenClose, False)
self.register_field(fields.CoveredOrUncovered, False)
self.register_field(fields.CustomerOrFirm, False)
self.register_field(fields.MaxShow, False)
self.register_field(fields.PegDifference, False)
self.register_field(fields.DiscretionInst, False)
self.register_field(fields.DiscretionOffset, False)
self.register_field(fields.ClearingFirm, False)
self.register_field(fields.ClearingAccount, False)
MESSAGE_TYPES['D'] = OrderSingle
class OrderList(fix_message.MessageBase):
_msgtype = 'E'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ListID, True)
self.register_field(fields.BidID, False)
self.register_field(fields.ClientBidID, False)
self.register_field(fields.ProgRptReqs, False)
self.register_field(fields.BidType, True)
self.register_field(fields.ProgPeriodInterval, False)
self.register_field(fields.ListExecInstType, False)
self.register_field(fields.ListExecInst, False)
self.register_field(fields.EncodedListExecInstLen, False)
self.register_field(fields.EncodedListExecInst, False)
self.register_field(fields.TotNoOrders, True)
self.register_group(fields.NoOrders, NoOrdersGroup, True)
MESSAGE_TYPES['E'] = OrderList
class OrderCancelRequest(fix_message.MessageBase):
_msgtype = 'F'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.OrigClOrdID, True)
self.register_field(fields.OrderID, False)
self.register_field(fields.ClOrdID, True)
self.register_field(fields.ListID, False)
self.register_field(fields.Account, False)
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Side, True)
self.register_field(fields.TransactTime, True)
self.register_field(fields.OrderQty, False)
self.register_field(fields.CashOrderQty, False)
self.register_field(fields.ComplianceID, False)
self.register_field(fields.SolicitedFlag, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['F'] = OrderCancelRequest
class OrderCancelReplaceRequest(fix_message.MessageBase):
_msgtype = 'G'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.OrderID, False)
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.OrigClOrdID, True)
self.register_field(fields.ClOrdID, True)
self.register_field(fields.ListID, False)
self.register_field(fields.Account, False)
self.register_group(fields.NoAllocs, NoAllocsGroup, False)
self.register_field(fields.SettlmntTyp, False)
self.register_field(fields.FutSettDate, False)
self.register_field(fields.HandlInst, True)
self.register_field(fields.ExecInst, False)
self.register_field(fields.MinQty, False)
self.register_field(fields.MaxFloor, False)
self.register_field(fields.ExDestination, False)
self.register_group(fields.NoTradingSessions, NoTradingSessionsGroup, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Side, True)
self.register_field(fields.TransactTime, True)
self.register_field(fields.OrderQty, False)
self.register_field(fields.CashOrderQty, False)
self.register_field(fields.OrdType, True)
self.register_field(fields.Price, False)
self.register_field(fields.StopPx, False)
self.register_field(fields.PegDifference, False)
self.register_field(fields.DiscretionInst, False)
self.register_field(fields.DiscretionOffset, False)
self.register_field(fields.ComplianceID, False)
self.register_field(fields.SolicitedFlag, False)
self.register_field(fields.Currency, False)
self.register_field(fields.TimeInForce, False)
self.register_field(fields.EffectiveTime, False)
self.register_field(fields.ExpireDate, False)
self.register_field(fields.ExpireTime, False)
self.register_field(fields.GTBookingInst, False)
self.register_field(fields.Commission, False)
self.register_field(fields.CommType, False)
self.register_field(fields.Rule80A, False)
self.register_field(fields.ForexReq, False)
self.register_field(fields.SettlCurrency, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.FutSettDate2, False)
self.register_field(fields.OrderQty2, False)
self.register_field(fields.OpenClose, False)
self.register_field(fields.CoveredOrUncovered, False)
self.register_field(fields.CustomerOrFirm, False)
self.register_field(fields.MaxShow, False)
self.register_field(fields.LocateReqd, False)
self.register_field(fields.ClearingFirm, False)
self.register_field(fields.ClearingAccount, False)
MESSAGE_TYPES['G'] = OrderCancelReplaceRequest
class OrderStatusRequest(fix_message.MessageBase):
_msgtype = 'H'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.OrderID, False)
self.register_field(fields.ClOrdID, True)
self.register_field(fields.ClientID, False)
self.register_field(fields.Account, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Side, True)
MESSAGE_TYPES['H'] = OrderStatusRequest
class Allocation(fix_message.MessageBase):
_msgtype = 'J'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.AllocID, True)
self.register_field(fields.AllocTransType, True)
self.register_field(fields.RefAllocID, False)
self.register_field(fields.AllocLinkID, False)
self.register_field(fields.AllocLinkType, False)
self.register_group(fields.NoOrders, NoOrdersGroup, False)
self.register_group(fields.NoExecs, NoExecsGroup, False)
self.register_field(fields.Side, True)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Shares, True)
self.register_field(fields.LastMkt, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.AvgPx, True)
self.register_field(fields.Currency, False)
self.register_field(fields.AvgPrxPrecision, False)
self.register_field(fields.TradeDate, True)
self.register_field(fields.TransactTime, False)
self.register_field(fields.SettlmntTyp, False)
self.register_field(fields.FutSettDate, False)
self.register_field(fields.GrossTradeAmt, False)
self.register_field(fields.NetMoney, False)
self.register_field(fields.OpenClose, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.NumDaysInterest, False)
self.register_field(fields.AccruedInterestRate, False)
self.register_group(fields.NoAllocs, NoAllocsGroup, False)
MESSAGE_TYPES['J'] = Allocation
class ListCancelRequest(fix_message.MessageBase):
_msgtype = 'K'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ListID, True)
self.register_field(fields.TransactTime, True)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['K'] = ListCancelRequest
class ListExecute(fix_message.MessageBase):
_msgtype = 'L'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ListID, True)
self.register_field(fields.ClientBidID, False)
self.register_field(fields.BidID, False)
self.register_field(fields.TransactTime, True)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['L'] = ListExecute
class ListStatusRequest(fix_message.MessageBase):
_msgtype = 'M'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ListID, True)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['M'] = ListStatusRequest
class ListStatus(fix_message.MessageBase):
_msgtype = 'N'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ListID, True)
self.register_field(fields.ListStatusType, True)
self.register_field(fields.NoRpts, True)
self.register_field(fields.ListOrderStatus, True)
self.register_field(fields.RptSeq, True)
self.register_field(fields.ListStatusText, False)
self.register_field(fields.EncodedListStatusTextLen, False)
self.register_field(fields.EncodedListStatusText, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.TotNoOrders, True)
self.register_group(fields.NoOrders, NoOrdersGroup, True)
MESSAGE_TYPES['N'] = ListStatus
class AllocationInstructionAck(fix_message.MessageBase):
_msgtype = 'P'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.AllocID, True)
self.register_field(fields.TradeDate, True)
self.register_field(fields.TransactTime, False)
self.register_field(fields.AllocStatus, True)
self.register_field(fields.AllocRejCode, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['P'] = AllocationInstructionAck
class DontKnowTrade(fix_message.MessageBase):
_msgtype = 'Q'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.OrderID, True)
self.register_field(fields.ExecID, True)
self.register_field(fields.DKReason, True)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Side, True)
self.register_field(fields.OrderQty, False)
self.register_field(fields.CashOrderQty, False)
self.register_field(fields.LastShares, False)
self.register_field(fields.LastPx, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['Q'] = DontKnowTrade
class QuoteRequest(fix_message.MessageBase):
_msgtype = 'R'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.QuoteReqID, True)
self.register_group(fields.NoRelatedSym, NoRelatedSymGroup, True)
MESSAGE_TYPES['R'] = QuoteRequest
class Quote(fix_message.MessageBase):
_msgtype = 'S'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.QuoteReqID, False)
self.register_field(fields.QuoteID, True)
self.register_field(fields.QuoteResponseLevel, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.BidPx, False)
self.register_field(fields.OfferPx, False)
self.register_field(fields.BidSize, False)
self.register_field(fields.OfferSize, False)
self.register_field(fields.ValidUntilTime, False)
self.register_field(fields.BidSpotRate, False)
self.register_field(fields.OfferSpotRate, False)
self.register_field(fields.BidForwardPoints, False)
self.register_field(fields.OfferForwardPoints, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.FutSettDate, False)
self.register_field(fields.OrdType, False)
self.register_field(fields.FutSettDate2, False)
self.register_field(fields.OrderQty2, False)
self.register_field(fields.Currency, False)
MESSAGE_TYPES['S'] = Quote
class SettlementInstructions(fix_message.MessageBase):
_msgtype = 'T'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.SettlInstID, True)
self.register_field(fields.SettlInstTransType, True)
self.register_field(fields.SettlInstRefID, True)
self.register_field(fields.SettlInstMode, True)
self.register_field(fields.SettlInstSource, True)
self.register_field(fields.AllocAccount, True)
self.register_field(fields.SettlLocation, False)
self.register_field(fields.TradeDate, False)
self.register_field(fields.AllocID, False)
self.register_field(fields.LastMkt, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.Side, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.EffectiveTime, False)
self.register_field(fields.TransactTime, True)
self.register_field(fields.ClientID, False)
self.register_field(fields.ExecBroker, False)
self.register_field(fields.StandInstDbType, False)
self.register_field(fields.StandInstDbName, False)
self.register_field(fields.StandInstDbID, False)
self.register_field(fields.SettlDeliveryType, False)
self.register_field(fields.SettlDepositoryCode, False)
self.register_field(fields.SettlBrkrCode, False)
self.register_field(fields.SettlInstCode, False)
self.register_field(fields.SecuritySettlAgentName, False)
self.register_field(fields.SecuritySettlAgentCode, False)
self.register_field(fields.SecuritySettlAgentAcctNum, False)
self.register_field(fields.SecuritySettlAgentAcctName, False)
self.register_field(fields.SecuritySettlAgentContactName, False)
self.register_field(fields.SecuritySettlAgentContactPhone, False)
self.register_field(fields.CashSettlAgentName, False)
self.register_field(fields.CashSettlAgentCode, False)
self.register_field(fields.CashSettlAgentAcctNum, False)
self.register_field(fields.CashSettlAgentAcctName, False)
self.register_field(fields.CashSettlAgentContactName, False)
self.register_field(fields.CashSettlAgentContactPhone, False)
MESSAGE_TYPES['T'] = SettlementInstructions
class MarketDataRequest(fix_message.MessageBase):
_msgtype = 'V'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.MDReqID, True)
self.register_field(fields.SubscriptionRequestType, True)
self.register_field(fields.MarketDepth, True)
self.register_field(fields.MDUpdateType, False)
self.register_field(fields.AggregatedBook, False)
self.register_group(fields.NoMDEntryTypes, NoMDEntryTypesGroup, True)
self.register_group(fields.NoRelatedSym, NoRelatedSymGroup, True)
MESSAGE_TYPES['V'] = MarketDataRequest
class MarketDataSnapshotFullRefresh(fix_message.MessageBase):
_msgtype = 'W'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.MDReqID, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.FinancialStatus, False)
self.register_field(fields.CorporateAction, False)
self.register_field(fields.TotalVolumeTraded, False)
self.register_group(fields.NoMDEntries, NoMDEntriesGroup, True)
MESSAGE_TYPES['W'] = MarketDataSnapshotFullRefresh
class MarketDataIncrementalRefresh(fix_message.MessageBase):
_msgtype = 'X'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.MDReqID, False)
self.register_group(fields.NoMDEntries, NoMDEntriesGroup, True)
MESSAGE_TYPES['X'] = MarketDataIncrementalRefresh
class MarketDataRequestReject(fix_message.MessageBase):
_msgtype = 'Y'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.MDReqID, True)
self.register_field(fields.MDReqRejReason, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['Y'] = MarketDataRequestReject
class QuoteCancel(fix_message.MessageBase):
_msgtype = 'Z'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.QuoteReqID, False)
self.register_field(fields.QuoteID, True)
self.register_field(fields.QuoteCancelType, True)
self.register_field(fields.QuoteResponseLevel, False)
self.register_field(fields.TradingSessionID, False)
self.register_group(fields.NoQuoteEntries, NoQuoteEntriesGroup, True)
MESSAGE_TYPES['Z'] = QuoteCancel
class QuoteStatusRequest(fix_message.MessageBase):
_msgtype = 'a'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.QuoteID, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Side, False)
self.register_field(fields.TradingSessionID, False)
MESSAGE_TYPES['a'] = QuoteStatusRequest
class QuoteAcknowledgement(fix_message.MessageBase):
_msgtype = 'b'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.QuoteReqID, False)
self.register_field(fields.QuoteID, False)
self.register_field(fields.QuoteAckStatus, True)
self.register_field(fields.QuoteRejectReason, False)
self.register_field(fields.QuoteResponseLevel, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.Text, False)
self.register_group(fields.NoQuoteSets, NoQuoteSetsGroup, False)
MESSAGE_TYPES['b'] = QuoteAcknowledgement
class SecurityDefinitionRequest(fix_message.MessageBase):
_msgtype = 'c'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.SecurityReqID, True)
self.register_field(fields.SecurityRequestType, True)
self.register_field(fields.Symbol, False)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Currency, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_field(fields.TradingSessionID, False)
self.register_group(fields.NoRelatedSym, NoRelatedSymGroup, False)
MESSAGE_TYPES['c'] = SecurityDefinitionRequest
class SecurityDefinition(fix_message.MessageBase):
_msgtype = 'd'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.SecurityReqID, True)
self.register_field(fields.SecurityResponseID, True)
self.register_field(fields.SecurityResponseType, False)
self.register_field(fields.TotalNumSecurities, True)
self.register_field(fields.Symbol, False)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Currency, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
self.register_group(fields.NoRelatedSym, NoRelatedSymGroup, False)
MESSAGE_TYPES['d'] = SecurityDefinition
class SecurityStatusRequest(fix_message.MessageBase):
_msgtype = 'e'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.SecurityStatusReqID, True)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Currency, False)
self.register_field(fields.SubscriptionRequestType, True)
self.register_field(fields.TradingSessionID, False)
MESSAGE_TYPES['e'] = SecurityStatusRequest
class SecurityStatus(fix_message.MessageBase):
_msgtype = 'f'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.SecurityStatusReqID, False)
self.register_field(fields.Symbol, True)
self.register_field(fields.SymbolSfx, False)
self.register_field(fields.SecurityID, False)
self.register_field(fields.IDSource, False)
self.register_field(fields.SecurityType, False)
self.register_field(fields.MaturityMonthYear, False)
self.register_field(fields.MaturityDay, False)
self.register_field(fields.PutOrCall, False)
self.register_field(fields.StrikePrice, False)
self.register_field(fields.OptAttribute, False)
self.register_field(fields.ContractMultiplier, False)
self.register_field(fields.CouponRate, False)
self.register_field(fields.SecurityExchange, False)
self.register_field(fields.Issuer, False)
self.register_field(fields.EncodedIssuerLen, False)
self.register_field(fields.EncodedIssuer, False)
self.register_field(fields.SecurityDesc, False)
self.register_field(fields.EncodedSecurityDescLen, False)
self.register_field(fields.EncodedSecurityDesc, False)
self.register_field(fields.Currency, False)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.UnsolicitedIndicator, False)
self.register_field(fields.SecurityTradingStatus, False)
self.register_field(fields.FinancialStatus, False)
self.register_field(fields.CorporateAction, False)
self.register_field(fields.HaltReason, False)
self.register_field(fields.InViewOfCommon, False)
self.register_field(fields.DueToRelated, False)
self.register_field(fields.BuyVolume, False)
self.register_field(fields.SellVolume, False)
self.register_field(fields.HighPx, False)
self.register_field(fields.LowPx, False)
self.register_field(fields.LastPx, False)
self.register_field(fields.TransactTime, False)
self.register_field(fields.Adjustment, False)
MESSAGE_TYPES['f'] = SecurityStatus
class TradingSessionStatusRequest(fix_message.MessageBase):
_msgtype = 'g'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.TradSesReqID, True)
self.register_field(fields.TradingSessionID, False)
self.register_field(fields.TradSesMethod, False)
self.register_field(fields.TradSesMode, False)
self.register_field(fields.SubscriptionRequestType, True)
MESSAGE_TYPES['g'] = TradingSessionStatusRequest
class TradingSessionStatus(fix_message.MessageBase):
_msgtype = 'h'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.TradSesReqID, False)
self.register_field(fields.TradingSessionID, True)
self.register_field(fields.TradSesMethod, False)
self.register_field(fields.TradSesMode, False)
self.register_field(fields.UnsolicitedIndicator, False)
self.register_field(fields.TradSesStatus, True)
self.register_field(fields.TradSesStartTime, False)
self.register_field(fields.TradSesOpenTime, False)
self.register_field(fields.TradSesPreCloseTime, False)
self.register_field(fields.TradSesCloseTime, False)
self.register_field(fields.TradSesEndTime, False)
self.register_field(fields.TotalVolumeTraded, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['h'] = TradingSessionStatus
class MassQuote(fix_message.MessageBase):
_msgtype = 'i'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.QuoteReqID, False)
self.register_field(fields.QuoteID, True)
self.register_field(fields.QuoteResponseLevel, False)
self.register_field(fields.DefBidSize, False)
self.register_field(fields.DefOfferSize, False)
self.register_group(fields.NoQuoteSets, NoQuoteSetsGroup, True)
MESSAGE_TYPES['i'] = MassQuote
class BusinessMessageReject(fix_message.MessageBase):
_msgtype = 'j'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.RefSeqNum, False)
self.register_field(fields.RefMsgType, True)
self.register_field(fields.BusinessRejectRefID, False)
self.register_field(fields.BusinessRejectReason, True)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['j'] = BusinessMessageReject
class BidRequest(fix_message.MessageBase):
_msgtype = 'k'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.BidID, False)
self.register_field(fields.ClientBidID, True)
self.register_field(fields.BidRequestTransType, True)
self.register_field(fields.ListName, False)
self.register_field(fields.TotalNumSecurities, True)
self.register_field(fields.BidType, True)
self.register_field(fields.NumTickets, False)
self.register_field(fields.Currency, False)
self.register_field(fields.SideValue1, False)
self.register_field(fields.SideValue2, False)
self.register_group(fields.NoBidDescriptors, NoBidDescriptorsGroup, False)
self.register_group(fields.NoBidComponents, NoBidComponentsGroup, False)
self.register_field(fields.LiquidityIndType, False)
self.register_field(fields.WtAverageLiquidity, False)
self.register_field(fields.ExchangeForPhysical, False)
self.register_field(fields.OutMainCntryUIndex, False)
self.register_field(fields.CrossPercent, False)
self.register_field(fields.ProgRptReqs, False)
self.register_field(fields.ProgPeriodInterval, False)
self.register_field(fields.IncTaxInd, False)
self.register_field(fields.ForexReq, False)
self.register_field(fields.NumBidders, False)
self.register_field(fields.TradeDate, False)
self.register_field(fields.TradeType, True)
self.register_field(fields.BasisPxType, True)
self.register_field(fields.StrikeTime, False)
self.register_field(fields.Text, False)
self.register_field(fields.EncodedTextLen, False)
self.register_field(fields.EncodedText, False)
MESSAGE_TYPES['k'] = BidRequest
class BidResponse(fix_message.MessageBase):
_msgtype = 'l'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.BidID, False)
self.register_field(fields.ClientBidID, False)
self.register_group(fields.NoBidComponents, NoBidComponentsGroup, True)
MESSAGE_TYPES['l'] = BidResponse
class ListStrikePrice(fix_message.MessageBase):
_msgtype = 'm'
_msgcat = 'app'
def __init__(self):
self.Header = Header()
self.Trailer = Trailer()
super().__init__()
self.register_field(fields.ListID, True)
self.register_field(fields.TotNoStrikes, True)
self.register_group(fields.NoStrikes, NoStrikesGroup, True)
MESSAGE_TYPES['m'] = ListStrikePrice
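# Illustrative sketch (not part of the generated definitions): MESSAGE_TYPES maps a MsgType
# (tag 35) code to its message class, so a parser could dispatch on it roughly like this:
#   msg_cls = MESSAGE_TYPES.get('R')   # 'R' -> QuoteRequest
#   if msg_cls is not None:
#       msg = msg_cls()                # empty message with its Header and Trailer attached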
| 2.125 | 2 |
src/envs/__init__.py | ewanlee/mackrl | 26 | 12793309 | <reponame>ewanlee/mackrl<gh_stars>10-100
from functools import partial
def env_fn(env, **kwargs):
return env(**kwargs)
REGISTRY = {}
from .starcraft2 import StarCraft2Env
REGISTRY["sc2"] = partial(env_fn,
env=StarCraft2Env)
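# Illustrative usage sketch (assumed kwargs, not from the original file): REGISTRY entries are
# partials around env_fn, so an environment can be built straight from config kwargs, e.g.
#   env = REGISTRY["sc2"](map_name="3m", seed=0)   # kwargs are forwarded to StarCraft2Env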
| 2.078125 | 2 |
experiments/experiments_utils.py | anirbanl/Reason-SCAN | 14 | 12793310 | <reponame>anirbanl/Reason-SCAN<filename>experiments/experiments_utils.py
import argparse
import logging
import os
import torch
import sys
sys.path.append(os.path.join(os.path.dirname("__file__"), '../multimodal_seq2seq_gSCAN/'))
import random
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
import logging
from typing import Iterator
import time
import json
from seq2seq.gSCAN_dataset import GroundedScanDataset
from seq2seq.model import Model
from seq2seq.train import train
from seq2seq.predict import predict_and_save
from tqdm import tqdm, trange
from GroundedScan.dataset import GroundedScan
from typing import List
from typing import Tuple
from collections import defaultdict
from collections import Counter
import json
import numpy as np
from seq2seq.gSCAN_dataset import Vocabulary
from seq2seq.helpers import sequence_accuracy
FORMAT = "%(asctime)-15s %(message)s"
logging.basicConfig(format=FORMAT, level=logging.DEBUG,
datefmt="%Y-%m-%d %H:%M")
logger = logging.getLogger(__name__)
def isnotebook():
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
use_cuda = True if torch.cuda.is_available() and not isnotebook() else False
device = "cuda" if use_cuda else "cpu"
if use_cuda:
logger.info("Using CUDA.")
logger.info("Cuda version: {}".format(torch.version.cuda))
def get_gSCAN_parser():
parser = argparse.ArgumentParser(description="Sequence to sequence models for Grounded SCAN")
# General arguments
parser.add_argument("--mode", type=str, default="run_tests", help="train, test or predict", required=True)
parser.add_argument("--output_directory", type=str, default="output", help="In this directory the models will be "
"saved. Will be created if doesn't exist.")
parser.add_argument("--resume_from_file", type=str, default="", help="Full path to previously saved model to load.")
# Data arguments
parser.add_argument("--split", type=str, default="test", help="Which split to get from Grounded Scan.")
parser.add_argument("--data_directory", type=str, default="data/uniform_dataset", help="Path to folder with data.")
parser.add_argument("--input_vocab_path", type=str, default="training_input_vocab.txt",
help="Path to file with input vocabulary as saved by Vocabulary class in gSCAN_dataset.py")
parser.add_argument("--target_vocab_path", type=str, default="training_target_vocab.txt",
help="Path to file with target vocabulary as saved by Vocabulary class in gSCAN_dataset.py")
parser.add_argument("--generate_vocabularies", dest="generate_vocabularies", default=False, action="store_true",
help="Whether to generate vocabularies based on the data.")
parser.add_argument("--load_vocabularies", dest="generate_vocabularies", default=True, action="store_false",
help="Whether to use previously saved vocabularies.")
# Training and learning arguments
parser.add_argument("--training_batch_size", type=int, default=50)
parser.add_argument("--k", type=int, default=0, help="How many examples from the adverb_1 split to move to train.")
parser.add_argument("--test_batch_size", type=int, default=1, help="Currently only 1 supported due to decoder.")
parser.add_argument("--max_training_examples", type=int, default=None, help="If None all are used.")
parser.add_argument("--learning_rate", type=float, default=0.001)
parser.add_argument('--lr_decay', type=float, default=0.9)
parser.add_argument('--lr_decay_steps', type=float, default=20000)
parser.add_argument("--adam_beta_1", type=float, default=0.9)
parser.add_argument("--adam_beta_2", type=float, default=0.999)
parser.add_argument("--print_every", type=int, default=100)
parser.add_argument("--evaluate_every", type=int, default=1000, help="How often to evaluate the model by decoding the "
"test set (without teacher forcing).")
parser.add_argument("--max_training_iterations", type=int, default=100000)
parser.add_argument("--weight_target_loss", type=float, default=0.3, help="Only used if --auxiliary_task set.")
# Testing and predicting arguments
parser.add_argument("--max_testing_examples", type=int, default=None)
parser.add_argument("--splits", type=str, default="test", help="comma-separated list of splits to predict for.")
parser.add_argument("--max_decoding_steps", type=int, default=30, help="After 30 decoding steps, the decoding process "
"is stopped regardless of whether an EOS token "
"was generated.")
parser.add_argument("--output_file_name", type=str, default="predict.json")
# Situation Encoder arguments
parser.add_argument("--simple_situation_representation", dest="simple_situation_representation", default=True,
action="store_true", help="Represent the situation with 1 vector per grid cell. "
"For more information, read grounded SCAN documentation.")
parser.add_argument("--image_situation_representation", dest="simple_situation_representation", default=False,
action="store_false", help="Represent the situation with the full gridworld RGB image. "
"For more information, read grounded SCAN documentation.")
parser.add_argument("--cnn_hidden_num_channels", type=int, default=50)
parser.add_argument("--cnn_kernel_size", type=int, default=7, help="Size of the largest filter in the world state "
"model.")
parser.add_argument("--cnn_dropout_p", type=float, default=0.1, help="Dropout applied to the output features of the "
"world state model.")
parser.add_argument("--auxiliary_task", dest="auxiliary_task", default=False, action="store_true",
help="If set to true, the model predicts the target location from the joint attention over the "
"input instruction and world state.")
parser.add_argument("--no_auxiliary_task", dest="auxiliary_task", default=True, action="store_false")
# Command Encoder arguments
parser.add_argument("--embedding_dimension", type=int, default=25)
parser.add_argument("--num_encoder_layers", type=int, default=1)
parser.add_argument("--encoder_hidden_size", type=int, default=100)
parser.add_argument("--encoder_dropout_p", type=float, default=0.3, help="Dropout on instruction embeddings and LSTM.")
parser.add_argument("--encoder_bidirectional", dest="encoder_bidirectional", default=True, action="store_true")
parser.add_argument("--encoder_unidirectional", dest="encoder_bidirectional", default=False, action="store_false")
# Decoder arguments
parser.add_argument("--num_decoder_layers", type=int, default=1)
parser.add_argument("--attention_type", type=str, default='bahdanau', choices=['bahdanau', 'luong'],
help="Luong not properly implemented.")
parser.add_argument("--decoder_dropout_p", type=float, default=0.3, help="Dropout on decoder embedding and LSTM.")
parser.add_argument("--decoder_hidden_size", type=int, default=100)
parser.add_argument("--conditional_attention", dest="conditional_attention", default=True, action="store_true",
help="If set to true joint attention over the world state conditioned on the input instruction is"
" used.")
parser.add_argument("--no_conditional_attention", dest="conditional_attention", default=False, action="store_false")
# Other arguments
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--corrupt_methods", type=str, default="random")
parser.add_argument("--save_eval_result_dict", default=False, action="store_true")
return parser
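# Hedged usage sketch (not in the original script): the parser is typically consumed as
#   parser = get_gSCAN_parser()
#   flags = vars(parser.parse_args(["--mode=train", "--data_directory=data/uniform_dataset"]))
# where the flag values shown here are placeholders only.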
def predict_single(example: dict, model: nn.Module, max_decoding_steps: int, pad_idx: int, sos_idx: int,
eos_idx: int, device: str) -> torch.Tensor:
"""
Loop over all data in data_iterator and predict until <EOS> token is reached.
:param example: single example to play with
:param model: a trained model from model.py
:param max_decoding_steps: after how many steps to abort decoding
:param pad_idx: the padding idx of the target vocabulary
:param sos_idx: the start-of-sequence idx of the target vocabulary
:param eos_idx: the end-of-sequence idx of the target vocabulary
"""
# Disable dropout and other regularization.
model.eval()
input_sequence = example["input_tensor"]
target_sequence = example["target_tensor"]
input_lengths = [example["input_tensor"].size(1)]
target_lengths = [example["target_tensor"].size(1)]
situation = example["situation_tensor"]
situation_spec = [example["situation_representation"]]
derivation_spec = [example["derivation_representation"]]
agent_positions = example["agent_position"]
target_positions = example["target_position"]
input_sequence = input_sequence.to(device)
target_sequence = target_sequence.to(device)
situation = situation.to(device)
# Encode the input sequence.
encoded_input = model.encode_input(commands_input=input_sequence,
commands_lengths=input_lengths,
situations_input=situation)
# For efficiency
projected_keys_visual = model.visual_attention.key_layer(
encoded_input["encoded_situations"]) # [bsz, situation_length, dec_hidden_dim]
projected_keys_textual = model.textual_attention.key_layer(
encoded_input["encoded_commands"]["encoder_outputs"]) # [max_input_length, bsz, dec_hidden_dim]
# Iteratively decode the output.
output_sequence = []
contexts_situation = []
hidden = model.attention_decoder.initialize_hidden(
model.tanh(model.enc_hidden_to_dec_hidden(encoded_input["hidden_states"])))
token = torch.tensor([sos_idx], dtype=torch.long, device=device)
decoding_iteration = 0
attention_weights_commands = []
attention_weights_situations = []
while token != eos_idx and decoding_iteration <= max_decoding_steps:
(output, hidden, context_situation, attention_weights_command,
attention_weights_situation) = model.decode_input(
target_token=token, hidden=hidden, encoder_outputs=projected_keys_textual,
input_lengths=input_lengths, encoded_situations=projected_keys_visual)
output = F.log_softmax(output, dim=-1)
token = output.max(dim=-1)[1]
output_sequence.append(token.data[0].item())
attention_weights_commands.append(attention_weights_command.tolist())
attention_weights_situations.append(attention_weights_situation.tolist())
contexts_situation.append(context_situation.unsqueeze(1))
decoding_iteration += 1
if output_sequence[-1] == eos_idx:
output_sequence.pop()
attention_weights_commands.pop()
attention_weights_situations.pop()
if model.auxiliary_task:
target_position_scores = model.auxiliary_task_forward(torch.cat(contexts_situation, dim=1).sum(dim=1))
auxiliary_accuracy_target = model.get_auxiliary_accuracy(target_position_scores, target_positions)
else:
auxiliary_accuracy_agent, auxiliary_accuracy_target = 0, 0
return (input_sequence, derivation_spec, situation_spec, output_sequence, target_sequence,
attention_weights_commands, attention_weights_situations, auxiliary_accuracy_target)
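# Hedged usage sketch (assumed): predict_single expects a single processed example (see
# DummyGroundedScanDataset.process below) plus the target vocabulary's special token indices:
#   outputs = predict_single(example, model=model, max_decoding_steps=30,
#                            pad_idx=pad_idx, sos_idx=sos_idx, eos_idx=eos_idx, device=device)
# It returns the decoded output token ids along with the command/situation attention weights.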
def make_corrupt_example(raw_example, corrupt_methods="systematic"):
if corrupt_methods == "systematic":
ret_example = copy.deepcopy(raw_example)
new_command = ret_example['input_command']
if "while" in ret_example['input_command'][-1]:
# we move while into the front
new_command = ret_example['input_command'][-1:] + ret_example['input_command'][:-1]
elif ret_example['input_command'][-1][-2:] == "ly":
# this is the adv
new_command = ret_example['input_command'][-1:] + ret_example['input_command'][:-1]
# we can also switch words in the middle
# circle, square, cylinder
        # use "a" as a marker
start_index = new_command.index('a')
if "circle" in new_command:
end_index = new_command.index('circle')
elif "square" in new_command:
end_index = new_command.index('square')
elif "cylinder" in new_command:
end_index = new_command.index('cylinder')
if end_index - start_index > 2:
# there are two adj then
new_command[start_index+1:end_index] = new_command[start_index+1:end_index][::-1]
ret_example['input_command'] = new_command
elif corrupt_methods == "random":
ret_example = copy.deepcopy(raw_example)
random.shuffle(ret_example['input_command'])
return ret_example
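# Illustrative example (input command is assumed): with corrupt_methods="systematic" a command
# such as ['walk', 'to', 'a', 'red', 'big', 'circle', 'cautiously'] has its trailing adverb moved
# to the front and the adjectives between 'a' and the shape reversed, giving
# ['cautiously', 'walk', 'to', 'a', 'big', 'red', 'circle'];
# corrupt_methods="random" simply shuffles the tokens in place.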
def levenshteinDistance(s1, s2):
"""
The Levenshtein distance allows deletion, insertion and substitution:
https://en.wikipedia.org/wiki/Edit_distance
Implementation reference:
https://stackoverflow.com/questions/2460177/edit-distance-in-python
"""
if len(s1) > len(s2):
s1, s2 = s2, s1
distances = range(len(s1) + 1)
for i2, c2 in enumerate(s2):
distances_ = [i2+1]
for i1, c1 in enumerate(s1):
if c1 == c2:
distances_.append(distances[i1])
else:
distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))
distances = distances_
norm_dist = distances[-1]/max(len(s1), len(s2))
return norm_dist
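# Worked example: levenshteinDistance("walk", "walks") is one edit over a longest length of 5,
# so it returns 0.2; the raw edit distance is normalised by max(len(s1), len(s2)).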
class DummyGroundedScanDataset(object):
"""
Loads a GroundedScan instance from a specified location.
"""
def __init__(self, path_to_data: str, save_directory: str, k: int, split="train", input_vocabulary_file="",
target_vocabulary_file="", generate_vocabulary=False):
logger.info("Initializing dummy gSCAN dataset for adverserial experiments...")
assert os.path.exists(path_to_data), "Trying to read a gSCAN dataset from a non-existing file {}.".format(
path_to_data)
if not generate_vocabulary:
assert os.path.exists(os.path.join(save_directory, input_vocabulary_file)) and os.path.exists(
os.path.join(save_directory, target_vocabulary_file)), \
"Trying to load vocabularies from non-existing files."
if split == "test" and generate_vocabulary:
logger.warning("WARNING: generating a vocabulary from the test set.")
# self.dataset = GroundedScan.load_dataset_from_file(path_to_data, save_directory=save_directory, k=k)
# pre-load just to get the grid size
with open(path_to_data, 'r') as infile:
all_data = json.load(infile)
self.image_dimensions = all_data["grid_size"]
self.image_channels = 3
self.split = split
self.directory = save_directory
# Keeping track of data.
self._examples = np.array([])
self._input_lengths = np.array([])
self._target_lengths = np.array([])
if generate_vocabulary:
logger.info("Generating vocabularies...")
self.input_vocabulary = Vocabulary()
self.target_vocabulary = Vocabulary()
self.read_vocabularies()
logger.info("Done generating vocabularies.")
else:
logger.info("Loading vocabularies...")
self.input_vocabulary = Vocabulary.load(os.path.join(save_directory, input_vocabulary_file))
self.target_vocabulary = Vocabulary.load(os.path.join(save_directory, target_vocabulary_file))
logger.info("Done loading vocabularies.")
def read_vocabularies(self) -> {}:
"""
Loop over all examples in the dataset and add the words in them to the vocabularies.
"""
logger.info("Populating vocabulary...")
for i, example in enumerate(self.dataset.get_examples_with_image(self.split)):
self.input_vocabulary.add_sentence(example["input_command"])
self.target_vocabulary.add_sentence(example["target_command"])
def save_vocabularies(self, input_vocabulary_file: str, target_vocabulary_file: str):
self.input_vocabulary.save(os.path.join(self.directory, input_vocabulary_file))
self.target_vocabulary.save(os.path.join(self.directory, target_vocabulary_file))
def get_vocabulary(self, vocabulary: str) -> Vocabulary:
if vocabulary == "input":
vocab = self.input_vocabulary
elif vocabulary == "target":
vocab = self.target_vocabulary
else:
raise ValueError("Specified unknown vocabulary in sentence_to_array: {}".format(vocabulary))
return vocab
def shuffle_data(self) -> {}:
"""
Reorder the data examples and reorder the lengths of the input and target commands accordingly.
"""
random_permutation = np.random.permutation(len(self._examples))
self._examples = self._examples[random_permutation]
self._target_lengths = self._target_lengths[random_permutation]
self._input_lengths = self._input_lengths[random_permutation]
def get_data_iterator(self, batch_size=10) -> Tuple[torch.Tensor, List[int], torch.Tensor, List[dict],
torch.Tensor, List[int], torch.Tensor, torch.Tensor]:
"""
Iterate over batches of example tensors, pad them to the max length in the batch and yield.
:param batch_size: how many examples to put in each batch.
:param auxiliary_task: if true, also batches agent and target positions (flattened, so
agent row * agent columns = agent_position)
:return: tuple of input commands batch, corresponding input lengths, situation image batch,
list of corresponding situation representations, target commands batch and corresponding target lengths.
"""
for example_i in range(0, len(self._examples), batch_size):
if example_i + batch_size > len(self._examples):
batch_size = len(self._examples) - example_i
examples = self._examples[example_i:example_i + batch_size]
input_lengths = self._input_lengths[example_i:example_i + batch_size]
target_lengths = self._target_lengths[example_i:example_i + batch_size]
max_input_length = np.max(input_lengths)
max_target_length = np.max(target_lengths)
input_batch = []
target_batch = []
situation_batch = []
situation_representation_batch = []
derivation_representation_batch = []
agent_positions_batch = []
target_positions_batch = []
for example in examples:
to_pad_input = max_input_length - example["input_tensor"].size(1)
to_pad_target = max_target_length - example["target_tensor"].size(1)
padded_input = torch.cat([
example["input_tensor"],
torch.zeros(int(to_pad_input), dtype=torch.long, device=device).unsqueeze(0)], dim=1)
# padded_input = torch.cat([
# torch.zeros_like(example["input_tensor"], dtype=torch.long, device=device),
            # torch.zeros(int(to_pad_input), dtype=torch.long, device=device).unsqueeze(0)], dim=1) # TODO: change back
padded_target = torch.cat([
example["target_tensor"],
torch.zeros(int(to_pad_target), dtype=torch.long, device=device).unsqueeze(0)], dim=1)
input_batch.append(padded_input)
target_batch.append(padded_target)
situation_batch.append(example["situation_tensor"])
situation_representation_batch.append(example["situation_representation"])
derivation_representation_batch.append(example["derivation_representation"])
agent_positions_batch.append(example["agent_position"])
target_positions_batch.append(example["target_position"])
yield (torch.cat(input_batch, dim=0), input_lengths, derivation_representation_batch,
torch.cat(situation_batch, dim=0), situation_representation_batch, torch.cat(target_batch, dim=0),
target_lengths, torch.cat(agent_positions_batch, dim=0), torch.cat(target_positions_batch, dim=0))
def process(self, example):
empty_example = {}
input_commands = example["input_command"]
target_commands = example["target_command"]
#equivalent_target_commands = example["equivalent_target_command"]
situation_image = example["situation_image"]
self.image_dimensions = situation_image.shape[0]
self.image_channels = situation_image.shape[-1]
situation_repr = example["situation_representation"]
input_array = self.sentence_to_array(input_commands, vocabulary="input")
target_array = self.sentence_to_array(target_commands, vocabulary="target")
#equivalent_target_array = self.sentence_to_array(equivalent_target_commands, vocabulary="target")
empty_example["input_tensor"] = torch.tensor(input_array, dtype=torch.long, device=device).unsqueeze(
dim=0)
empty_example["target_tensor"] = torch.tensor(target_array, dtype=torch.long, device=device).unsqueeze(
dim=0)
#empty_example["equivalent_target_tensor"] = torch.tensor(equivalent_target_array, dtype=torch.long,
# device=device).unsqueeze(dim=0)
empty_example["situation_tensor"] = torch.tensor(situation_image, dtype=torch.float, device=device
).unsqueeze(dim=0)
empty_example["situation_representation"] = situation_repr
empty_example["derivation_representation"] = example["derivation_representation"]
empty_example["agent_position"] = torch.tensor(
(int(situation_repr["agent_position"]["row"]) * int(situation_repr["grid_size"])) +
int(situation_repr["agent_position"]["column"]), dtype=torch.long,
device=device).unsqueeze(dim=0)
empty_example["target_position"] = torch.tensor(
(int(situation_repr["target_object"]["position"]["row"]) * int(situation_repr["grid_size"])) +
int(situation_repr["target_object"]["position"]["column"]),
dtype=torch.long, device=device).unsqueeze(dim=0)
return empty_example
def read_dataset(self, max_examples=None, simple_situation_representation=True) -> {}:
"""
Loop over the data examples in GroundedScan and convert them to tensors, also save the lengths
for input and target sequences that are needed for padding.
:param max_examples: how many examples to read maximally, read all if None.
:param simple_situation_representation: whether to read the full situation image in RGB or the simplified
smaller representation.
"""
logger.info("Converting dataset to tensors...")
for i, example in enumerate(self.dataset.get_examples_with_image(self.split, simple_situation_representation)):
if max_examples:
if len(self._examples) > max_examples:
return
empty_example = {}
input_commands = example["input_command"]
target_commands = example["target_command"]
#equivalent_target_commands = example["equivalent_target_command"]
situation_image = example["situation_image"]
if i == 0:
self.image_dimensions = situation_image.shape[0]
self.image_channels = situation_image.shape[-1]
situation_repr = example["situation_representation"]
input_array = self.sentence_to_array(input_commands, vocabulary="input")
target_array = self.sentence_to_array(target_commands, vocabulary="target")
#equivalent_target_array = self.sentence_to_array(equivalent_target_commands, vocabulary="target")
empty_example["input_tensor"] = torch.tensor(input_array, dtype=torch.long, device=device).unsqueeze(
dim=0)
empty_example["target_tensor"] = torch.tensor(target_array, dtype=torch.long, device=device).unsqueeze(
dim=0)
#empty_example["equivalent_target_tensor"] = torch.tensor(equivalent_target_array, dtype=torch.long,
# device=device).unsqueeze(dim=0)
empty_example["situation_tensor"] = torch.tensor(situation_image, dtype=torch.float, device=device
).unsqueeze(dim=0)
empty_example["situation_representation"] = situation_repr
empty_example["derivation_representation"] = example["derivation_representation"]
empty_example["agent_position"] = torch.tensor(
(int(situation_repr["agent_position"]["row"]) * int(situation_repr["grid_size"])) +
int(situation_repr["agent_position"]["column"]), dtype=torch.long,
device=device).unsqueeze(dim=0)
empty_example["target_position"] = torch.tensor(
(int(situation_repr["target_object"]["position"]["row"]) * int(situation_repr["grid_size"])) +
int(situation_repr["target_object"]["position"]["column"]),
dtype=torch.long, device=device).unsqueeze(dim=0)
self._input_lengths = np.append(self._input_lengths, [len(input_array)])
self._target_lengths = np.append(self._target_lengths, [len(target_array)])
self._examples = np.append(self._examples, [empty_example])
def sentence_to_array(self, sentence: List[str], vocabulary: str) -> List[int]:
"""
Convert each string word in a sentence to the corresponding integer from the vocabulary and append
a start-of-sequence and end-of-sequence token.
:param sentence: the sentence in words (strings)
:param vocabulary: whether to use the input or target vocabulary.
:return: the sentence in integers.
"""
vocab = self.get_vocabulary(vocabulary)
sentence_array = [vocab.sos_idx]
for word in sentence:
sentence_array.append(vocab.word_to_idx(word))
sentence_array.append(vocab.eos_idx)
return sentence_array
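    # Example (indices depend on the loaded vocabulary): ["walk", "twice"] maps to
    # [sos_idx, idx("walk"), idx("twice"), eos_idx].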
def array_to_sentence(self, sentence_array: List[int], vocabulary: str) -> List[str]:
"""
Translate each integer in a sentence array to the corresponding word.
:param sentence_array: array with integers representing words from the vocabulary.
:param vocabulary: whether to use the input or target vocabulary.
:return: the sentence in words.
"""
vocab = self.get_vocabulary(vocabulary)
return [vocab.idx_to_word(word_idx) for word_idx in sentence_array]
@property
def num_examples(self):
return len(self._examples)
@property
def input_vocabulary_size(self):
return self.input_vocabulary.size
@property
def target_vocabulary_size(self):
return self.target_vocabulary.size
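# Hedged usage sketch (paths and filenames are placeholders): this dummy dataset is built from
# previously saved vocabularies and then fed raw examples one at a time, roughly
#   data = DummyGroundedScanDataset("data/dataset.txt", "output/", k=0, split="test",
#                                   input_vocabulary_file="training_input_vocab.txt",
#                                   target_vocabulary_file="training_target_vocab.txt")
#   example = data.process(raw_example)   # tensors ready for predict_single above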
| 2.015625 | 2 |
djangoproj/djangoapp/crawler/b_crawler.py | pbarton666/buzz_bot | 0 | 12793311 | <reponame>pbarton666/buzz_bot
#!/usr/bin/env python
from __future__ import with_statement
import logging
# Library path
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# Standard libraries
import atexit
import random
import re
import socket
import threading
import time
import traceback
import weakref
import xmlrpclib
import Queue
import SimpleXMLRPCServer
#import multiprocessing
# Import third-party libraries
import turbogears
# Custom libraries
#import buzzbot.searcher
#import buzzbot.visitor
#import buzzbot.model
#import buzzbot.bot
#import buzzbot.botUtilities
import buzzbot
try:
from buzzbot import *
print "importing all buzzbot modules"
except:
from buzzbot import cpu_core_counter
from buzzbot import searcher
from buzzbot import visitor
from buzzbot import model
from buzzbot import bot
from buzzbot import botUtilities
from buzzbot import commands
myBotRoutines = bot.BotRoutines()
myBotUtilities = botUtilities.Utilities()
print "importing some buzzbot modules"
try:
myBotRoutines = buzzbot.bot.BotRoutines()
myBotUtilities = buzzbot.botUtilities.Utilities()
except:
pass
# I haven't quite grokked the differences in namespaces between the dev and production boxes;
# this ensures the visitor module is available
try:
import bot
import botUtilities
import visitor
myBotRoutines = bot.BotRoutines()
myBotUtilities = botUtilities.Utilities()
except:
pass
DEBUG_RUN_SERIALLY = False
import logging
class CrawlerBase(object):
"""
Methods provided to CrawlerClient and CrawlerServer.
"""
def host(self):
"""
Return the connection host.
"""
return turbogears.config.get("crawler.socket_host", "localhost")
def port(self):
"""
Return the connection port.
"""
return int(turbogears.config.get("crawler.socket_port", 50015))
def logger(self):
import logging
name = 'crawler'
fname = '/var/log/buzz/crawler.log'
logger = logging.getLogger(name)
logger.setLevel(logging.INFO)
handler = logging.handlers.RotatingFileHandler(
fname, maxBytes=100000, backupCount=5)
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger.addHandler(handler)
return logger
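# Note (observation, not original commentary): logger() attaches a fresh RotatingFileHandler on
# every call, so repeated calls duplicate handlers, and the formatter it builds is never attached
# to the handler; the module calls it exactly once just below.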
cb = CrawlerBase()
logger = cb.logger()
def has_fork():
"""
Does this OS have the `fork` system call?
"""
return "fork" in os.__dict__
class CrawlerServerFunctions(object):
"""
Set of functions exposed by the CrawlerServer to the CrawlerClient.
    In English: the crawler server is a simple XML-RPC server, and these methods are handed to
    a proxy attached to it. Things are a little strange because not everything can be passed
    between the proxy and the real server: XML or HTML strings travel fine, but objects do not
    (they could be repr-ed or pickled, of course).
    So, for instance, we can ask the proxy server to "enqueue" a request. This involves passing
    the proxy server a dict holding the search_id and other details such as a deletion flag. The
    proxy relays that to the real server, which processes it: running the search, visit, and
    scoring processes/threads and reporting the results.
"""
def enqueue(self, item):
"""
Enqueue the item into the Crawler.
"""
#print "enqueue method of class CrawlerServerFunctions called"
# NOTE: The XML-RPC proxy can't accept a bare **kwargs, so it's passed in as a `dict`.
global server
server.crawler.enqueue(**item)
def stop(self):
"""
Stop the Crawler.
"""
global server
server.keep_running = False
def prepare_results(self):
"""
Prepare scored results.
"""
global server
server.crawler.prepare_results()
def results_for(self, search_id):
"""
Return list of scored results for the `search_id`.
"""
global server
return server.crawler.results_for(search_id)
def evaluate(self, code):
"""
Return the result of eval'ing the string of `code`.
"""
global server
if self.allow_dangerous_operations():
return eval(code)
else:
raise SecurityError("Dangerous operations not allowed on server")
def execute(self, code):
"""
Pass `code` to Python's `exec`.
"""
global server
if self.allow_dangerous_operations():
exec code
else:
raise SecurityError("Dangerous operations not allowed on server")
def allow_dangerous_operations(self):
"""
Does this server allow dangerous operations? Returns true if the
`DANGEROUS` environmental variable has a value.
"""
global server
if os.environ.has_key('DANGEROUS'):
return True
else:
return False
def ping(self):
"""
Respond with true, to indicate server is alive.
"""
print "pinging crawlerServiceFunctions.ping"
return server.crawler.ping()
def dogma(self):
return "hello from crawlerServiceFunctions.dogma"
def dogmaFromServer(self):
return server.crawler.dogma()
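# Hedged sketch (field names are assumed): because XML-RPC cannot marshal **kwargs or arbitrary
# objects, callers hand enqueue() a plain dict, e.g. from the client side
#   CrawlerClient().enqueue(search_id=42, delete_first=True)
# which arrives at CrawlerServerFunctions.enqueue as {'search_id': 42, 'delete_first': True}.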
class proxyServerError(StandardError):
pass
class SecurityError(StandardError):
pass
class WrapperError(StandardError):
pass
class CrawlerServer(CrawlerBase):
def __init__(self, items_completed=True, concurrency_library=None, run_as_cron = False):
"""
Instantiate a server that hosts a Crawler.
"""
#added option to change the xmlrpc port if the crawler is running as an independent
# process, as in the case of a nightly cron job
logger.debug( "crawler.CrawlerServer.__init__ xmlrpc server initiated on %s:%s" % (str(self.host()), str(self.port()) ))
self.service = SimpleXMLRPCServer.SimpleXMLRPCServer(addr=(self.host(), self.port()), logRequests=False, allow_none=True)
self.service.register_instance(CrawlerServerFunctions())
        # keep_running = True turns it on; keep_running = False shuts it down
        self.keep_running = True
        # this instantiates a crawler process manager and its processes
logger.debug("crawler.CrawlerServer.__init__ instansiating a crawler process server")
self.crawler = Crawler(items_completed=items_completed, concurrency_library=concurrency_library)
logger.debug("crawler.CrawlerServer.__init__ Success in instansiating a crawler process server")
def start(self):
"""
Start the server.
"""
logger.debug( "starting the crawler process server")
self.crawler.start()
while self.keep_running:
self.service.handle_request()
self.stop()
logger.debug("crawler_runner (crawler/proxy thread) stopped")
def stop(self):
"""
Stop the server.
"""
self.keep_running = False
self.crawler.stop()
class ResilentXmlRpcServerProxy(object):
"""
Provides a wrapper around the XmlRpc proxy that retries the connection.
"""
def __init__(self, proxy, timeout_seconds=5.0, pause_seconds=0.25):
#logger.debug("ResilentXmlRpcServerProxy __init--")
self.proxy = proxy
#logger.debug("proxy is of type %s" % type (proxy))
self.timeout_seconds = timeout_seconds
self.pause_seconds = pause_seconds
def __getattr__(self, name):
DEBUG = False
if DEBUG: print "GA: %s" % name
proxy = self.proxy
def wrapper(*args):
init_time = time.time()
deadline = init_time + self.timeout_seconds
while deadline > time.time():
try:
#logger.debug("returning function %s from the xmlrpc proxy wrapper with args: %s" %( name, repr(args) ))
return proxy.__getattr__(name)(*args)
except Exception, e:
if DEBUG: print "WA: retry"
logger.error("xmlrpc server returned error: %s" %e)
time.sleep(self.pause_seconds)
#if this process is too slow, give it a bit more priority (nice is the priority -20 = highest +20 = lowest )
#if time.time() > init_time + 3:
##TODO: fix this niceness thing ;it's a good idea, but too quickly sets *something* to -20 which freezes the system
#try:
#nice = os.nice(0)
#os.nice(nice-1)
#newnice = os.nice(0)
#print "changed nice from %i to %i" %(nice, newnice)
#except:
# pass
logger.info("gave up trying to connect to the XML-RPC server")
raise TimeoutError("Couldn't connect to XML-RPC server, no response after %s seconds" % str(self.timeout_seconds))
return wrapper
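# Note (observation): the wrapper above retries any failed XML-RPC call every pause_seconds until
# timeout_seconds has elapsed, then gives up; e.g.
#   service = ResilentXmlRpcServerProxy(xmlrpclib.ServerProxy("http://localhost:50015/"))
#   service.ping()   # retried transparently while the server is still coming up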
class CrawlerClient(CrawlerBase):
"""
Client that connects to the CrawlerServer.
"""
def __init__(self, run_as_cron = False):
"""
Instantiate a client.
"""
        #note, don't use a logger in __init__ unless it's installed in __init__
self.raw_service = xmlrpclib.ServerProxy(uri="http://%s:%s/" % (self.host(), self.port()), allow_none=True)
self.service = ResilentXmlRpcServerProxy(proxy=self.raw_service)
def enqueue(self, **item):
"""
Enqueue an item for crawling. The `item` is a dict with the same
parameters accepted by `Crawler.enqueue`.
"""
        logger.debug("crawler client method enqueue called for item: %s" % str(item['search_id']))
        # NOTE: The XML-RPC proxy can't accept a bare **kwargs, so pass them as a `dict`.
        return self.service.enqueue(item)
def stop(self):
"""
Stop the CrawlerServer.
"""
print "stopping the xmlrpc server proxy"
        try:
            return self.raw_service.stop()
        except:
            # The server may already be down, in which case the XML-RPC call fails.
            return None
def dogma(self):
#print "hello from CrawlerClient.dogma"
pass
def evaluate(self, code):
"""
Return the result of evaluating the code on the server.
"""
#print "evaluating " + code
return self.service.evaluate(code)
def execute(self, code):
"""
Pass `code` to Python's `exec`.
"""
self.service.execute(code)
def prepare_results(self):
"""
Prepare scored results.
"""
#logger.debug( "initiating CrawlerClient prepare results process")
self.service.prepare_results()
def results_for(self, search_id):
"""
Return list of scored results for the `search_id`.
"""
#logger.debug( "initiating CrawlerClient results for search id %s" %str(search_id))
return self.service.results_for(search_id)
def ping(self):
"""
Is the server responding?
"""
try:
print "pinging crawler client"
return self.raw_service.ping()
except:
return False
class Crawler(object):
    '''Sentinel value used to flag workers to stop.'''
STOP = 'STOP'
def __init__(self, items_completed=True, searchers_count=None, visitors_count=None,
scorers_count=None, concurrency_library=None):
self.debug = False
self.debug_run_serially = DEBUG_RUN_SERIALLY
cb = CrawlerBase()
self.logger = cb.logger()
#get the concurrency library
##TODO: set this in the turbogears.config files
self._concurrency_library = concurrency_library
        #import the concurrency library by name; __import__ returns the module object directly
        self._concurrency_library_module = __import__(self._concurrency_library)
#figure out how many CPUs we have to work with
try:
self.cpu_core_count = self._concurrency_library_module.cpu_count() #works with multiprocessing
except:
try: self.cpu_core_count = cpu_core_counter.cpu_core_count() # works with threading and pyprocessing
except: self.cpu_core_count = 1
'''
        the manager herds the cats (processes), providing locks, semaphores and such;
it runs on its own process
'''
self.manager = self._concurrency_library_module.Manager()
self.lock = self.manager.Lock()
'''
These objects are queues to be managed within - you guessed it - the manager; it runs on its own process.
If we ever switch to a theading library, these would just be Queue.Queue() objects.
'''
self.items_to_search = self.manager.Queue()
self.items_to_visit= self.manager.Queue()
self.items_to_score= self.manager.Queue()
self.items_to_finalize= self.manager.Queue()
self.items_completed = None
if items_completed:
self.items_completed = self.manager.dict()
'''
the following is a bit convoluted but produces a dict (queue) with three items (searcher, visitor, scorer);
Each of these three items is in itself a dict with the same two items (input, output).
        searcher = queue.get('searcher') evaluates to: { input : AutoProxy[Queue], output : AutoProxy[Queue] }
myinput = searcher.get('input') evaluates to: AutoProxy[Queue] object
'''
self.queue = {}
self.queue['searcher'] = {}
self.queue['searcher']['input'] = self.items_to_search
self.queue['searcher']['output'] = self.items_to_visit
self.queue['visitor'] = {}
self.queue['visitor']['input'] = self.items_to_visit
self.queue['visitor']['output'] = self.items_to_score
self.queue['scorer'] = {}
self.queue['scorer']['input'] = self.items_to_score
self.queue['scorer']['output'] = self.items_to_finalize
'''
Figure out how many processes to spawn as a function of the CPUs available; the optimal number
is at least partly a function of the real time performance desired - a smaller number provides
faster response
'''
# Worker counts
self.searchers_count = searchers_count or max(2, self.cpu_core_count)
#TODO: experiment with the visitor counts
self.visitors_count = visitors_count or min(5, self.cpu_core_count * 5)
self.scorers_count = scorers_count or min(2, self.cpu_core_count)
# Workers pools
self.searchers = []
self.visitors = []
self.scorers = []
def __del__(self):
#this is the destructor method called after object is killed, so we need to re-import logger
try:
print ("trying to stop crawler process manager")
self.stop()
except Exception, e:
print ("crawler.Crawler failed to stop normally: %s" % e)
pass
finally:
#logger.debug("destroyed")
pass
def start(self, kind=None):
"""
Start the crawler. It will begin processing any entries in the queues
immediately. This starts all types of processes, unless we ask it only to
run one (kind = "searcher", say)
"""
#this logic is to run the program serially for debugging purposes (independent
# processes are hideous to work with).
if not DEBUG_RUN_SERIALLY:
if kind:
logger.info ("crawler start method called for kind = " + kind)
'''
                the following statements use somewhat arcane pythonic syntax to dig out and evaluate
                variables; they are compact but worth reading carefully.
                self.__dict__ is a dict of name:value pairs known to self (i.e., this class)
the term ['%ss' % kind] uses text formatting strings: %s is replaced with the value for kind
self.__dict__['%ss' % kind], then pulls the value for "kind" from the dictionary
...
so, say "kind" is searcher
so, count = searcher_count and workers = searcher
...
this makes it work with any type of process. but *who cares*?
'''
count = self.__dict__['%ss_count' % kind] #this is dict of all objects known to self
workers = self.__dict__['%ss' % kind]
for i in range(count):
worker = None
'''
Here, the "target" i.e., the thing executed by the process; this will be a search process, a
visitor processs, or whatever TBD by the process_wrapper routine. The list of "workers" gets
the process appended to it. The last step is to actually run the process(using the start method).
'''
if self._concurrency_library == "processing":
worker = self._concurrency_library_module.Process(target=self._processes_wrapper, args=[kind])
elif self._concurrency_library == "threading":
worker = self._concurrency_library_module.Thread(target=self._processes_wrapper, args=[kind])
elif self._concurrency_library == "multiprocessing":
worker = self._concurrency_library_module.Process(target=self._processes_wrapper, args=[kind])
else:
raise NotImplementedError("Unknown concurrency_library: %s" % self._concurrency_library)
workers.append(worker)
logger.info("starting %s process" % (kind))
worker.start()
logger.info("started as %s " % (worker.name))
else:
'''
            Recursively calls the logic above to initiate "worker processes" for the requested number of
searchers, visitors, and scorers (invoked when no worker type is specified in the call).
'''
logger.info( "starting processes for all - searcher, visitor, and scorer")
self.start("searcher")
self.start("visitor")
self.start("scorer")
else:
logger.debug("running serially for debugging")
def dogma(self):
return "hello from Crawler.dogma"
def testQueues(self):
#having an issue with being able to add items to the queues
pass
def _processes_wrapper(self, kind): #line 449
"""
This routine serves as a container for the worker (searcher, visitor, scorer) processes. The idea is
that the calling routine can iterate over all processes using the same logic, because the statement
        in the calling routine can be agnostic as to exactly which process it's calling.
The calling routine has loaded up a set of queues, one each for the searchers, visitors, and scorers. These
queues are stored in a dict structure called queue. The "queue" dict has three objects, each of which is another
dict: searcher, visitor, and scorer. Each of these secondary objects has two entries: input and output (both Queues).
The logic uses the input argument "kind" to find the correct input/output queue combination, and also to figure
out which processing routine to pass control to. For instance, if the "kind" argument is "searcher", it digs out
the searcher input and output queues from the "queue" dict object. Then, using the self._worker_name routine,
discovers that it needs to pass control to the self._searcher_process method embedded in a Process object. Then, by
invoking the "target" method, it launches the process, passing along the specifications for this particular search
and the right output queue (in this case "scorer" input queue dug out of the "queue" dict mentioned above)
"""
#discern the name of the process to be invoked from the "kind" input argument
tagline = "%s %s:" % (kind, self._worker_name())
        #create an alias for the worker method to run (e.g. self.searcher_process)
target = self.__getattribute__('%s_process' % kind)
#create aliases for the correct input/output Queue objects to be used
input = self.queue[kind]['input']
output = self.queue[kind]['output']
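        # For example, when kind == 'searcher' the aliases above resolve (per the structures
        # built in __init__) to:
        #   target = self.searcher_process
        #   input  = self.queue['searcher']['input']   (i.e. self.items_to_search)
        #   output = self.queue['searcher']['output']  (i.e. self.items_to_visit)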
logger.debug("process wrapper processing a %s queue; currently " %kind)
logger.debug("input queue is %s long; output queue is %s long" %(str(input.qsize()), str(output.qsize())))
#self.debug_run_serially is used for debugging only
logger.info("self.debug_run_serially is set to : %s" %str(self.debug_run_serially) )
if not self.debug_run_serially:
#iterate over the input queue ...
for item in iter(input.get, self.STOP):
logger.debug("input dict to %s is: %s" %(kind, repr(item)))
stop_requested = False
##TODO: implement graceful stop for runaway bots using search.stop_requested
try:
search_id = item.get('search_id')
search = model.Search.get(int(search_id))
stop_requested = search.stoprequested
except:
logger.error("process_wrapper failed to get search_id for handoff to %s" %kind)
logger.error(traceback.print_stack())
if not stop_requested:
#result = None
logger.info("launching process: %s" %kind)
result = target(item=item, output=output)
if result:
logger.debug("process %s found a result" %kind)
else:
logger.debug("process %s result is None" %kind)
if not output:
logger.debug("output is %s" %repr(output))
if result and output != None:
output.put(result)
if kind == 'scorer':
logger.info ("outputting %s to results_for" %result)
else:
try:
logger.debug("couldn't place item in queue; this would clear queue")
#logger.debug(traceback.print_stack())
input.queue.clear()
except:
pass
else:
logger.debug("stop requested in processes_wrapper")
def _worker_name(self):
"""
Returns string name uniquely identifying this worker. Actual name will
depend on the underlying concurrency library.
"""
if self._concurrency_library == "processing":
return self._concurrency_library_module.currentProcess().getName()
elif self._concurrency_library == "threading":
return self._concurrency_library_module.currentThread().getName()
elif self._concurrency_library == "multiprocessing":
return self._concurrency_library_module.current_process().name
else:
raise NotImplementedError("Unknown concurrency_library: %s" % self._concurrency_library)
def searcher_process(self, item, output=None):
"""
This process is invoked by the _process_wrapper routine. It runs the search engine queries via the
routine "searcher.SearcherRunner". It's run as a self-contained process/thread. It's important that
any changes be thoroughly vetted because if it has problems, it will likely die silently.
"""
#check to see if we're running serially for debugging
if item.has_key('debug_run_serially'):
self.debug_run_serially=item.get('debug_run_serially')
serial_return = []
logger.info( "running searcher process for item %s" % repr(item))
#PB if user requested stop, don't bother
search = model.Search.get(item['search_id'])
stop_requested = search.stoprequested
#logger.debug("checked stoprequested")
targetword = search.targetword
#logger.debug("entering stoprequested loop")
if not stop_requested:
print "deploying searcher.SearchRunner"
myresult = searcher.SearchRunner(
#each "result" is a raw url returned from a search engine
delete_existing = item['delete_existing'],
search_id = item['search_id'],
max_results = item['max_results'],
debug_run_serially = self.debug_run_serially
)
logger.debug("myresult is %s" % repr(myresult))
for result in searcher.SearchRunner(
#each "result" is a raw url returned from a search engine
delete_existing = item['delete_existing'],
search_id = item['search_id'],
max_results = item['max_results']):
urlid = None
logger.debug("searcher_process found: %s" % repr(result))
##TODO: move this processing logic outside the crawler
#clean the url up (first-order validity checks, etc.) Below returns a list or nothing
logger.debug("evaluating %s" %result)
fixedUrl = myBotUtilities.fixUrls(targetword, urlList = [result])
#logger.info("fixed url is %s" %fixedUrl)
cleanResult = ""
if len(fixedUrl)>0: #cleanResult is null if the url failed our tests
cleanResult= fixedUrl[0]
#logger.debug("checking if url %s is sponsor site %s" %(str(fixedUrl), str(targetword)))
if myBotUtilities.isTargetWordSite(cleanResult, targetword): #sponsor site
cleanResult = ""
logger.debug("%s is from the sponsor site" % str(fixedUrl))
if not myBotUtilities.goodUrl(cleanResult): #known junk, videos, etc
cleanResult = ""
logger.debug("%s is known junk" % str(fixedUrl))
if len(cleanResult) > 0:
#if we have this id for this search, we'll grab its id (content specs may have changed)
                    dupReturn = myBotUtilities.isDupUrl(item['search_id'], cleanResult) #returns 0 or the ID of the dup
if dupReturn > 0:
urlid = dupReturn
logger.debug("we already have url %s" %str(fixedUrl))
else:
try:
urlid = myBotUtilities.addUrl(item['search_id'], cleanResult)
except:
logger.debug("tried but failed to have botUtilites add this url %s" %cleanResult)
if urlid:
logger.debug("attempting to output this url to visitor process queue: %s" %str(cleanResult))
subitem = dict(
delete_existing = item['delete_existing'],
search_id = item['search_id'],
max_results = item['max_results'],
url_id = urlid,
                            parseFast = item.get('parseFast', True)
)
output.put(subitem)
logger.debug("visitor process queue fed searchid: %s and urlid: %s " %(str(item['search_id']), str(urlid)))
if self.debug_run_serially:
return serial_return
def visitor_process(self, item, output=None):
        '''
        This process is invoked by the _process_wrapper routine. It runs the visitors (they read the
        web sites) via the routine "visitor.Visitor". It's run as a self-contained process/thread.
        It's important that any changes be thoroughly vetted because if it has problems, it will
        likely die silently.
        '''
logger.debug("visitor_process invoked ... working on item: %s" %repr(item))
return_dict = None; search = None
#check to see if we're running serially for debugging
if item.has_key('debug_run_serially'):
self.debug_run_serially=item.get('debug_run_serially')
serial_return=[]
if item.has_key('parseFast'):
parseFast = item.get('parseFast')
else:
parseFast = True
logger.debug("trying to retrieve search " + str(item['search_id']))
#make sure we can find the search in the database
try:
search = model.Search.get(item['search_id'])
stop_requested = search.stoprequested
except:
logger.error("crawler.visitor_process couldn't load search")
pass
if not search:
logger.error("visitor_process can't find a search")
else:
#we *do have a valid search
logger.debug( "visitor process checking for URLs to visit")
visitorContent = None; url_record = None
try:
url_record = model.URLS.get(item['url_id'])
except:
pass
#visitor.Visitor returns a list object containing model.Content objects
logger.debug("pinging visitor.Visitor")
if url_record:
visitorContent = visitor.Visitor(search, url_record, parseFast)
if visitorContent:
logger.debug("**enqueing a visitor.Visitor object")
for content in visitorContent:
logger.debug("crawler.visitor_process viewing content: %s" %repr(content))
                    subitem = None
                    try:
logger.info("we have content for search %s : content: %s" %(str(item['search_id']), str(content.id)))
subitem = dict(
delete_existing = item['delete_existing'],
search_id = item['search_id'],
max_results = item['max_results'],
content_id = content.id,
parseFast = parseFast
)
except:
logger.warn("crawler.visitor_process couldn't parse the input dict")
#debug_run_serially is for debugging - allows serial processing
if self.debug_run_serially and subitem:
serial_return.append(subitem)
#for production - passes this on to the scorer
                    else:
                        try:
                            if subitem:
                                output.put(subitem)
                        except:
                            logger.error("scorer not loaded for url %s, content %s" % (str(item.get('url_id')), str(content.id)))
        if self.debug_run_serially:
            return serial_return
        return None
def enqueue(self, search_id, max_results=8, delete_existing=False, queue_name="items_to_search", **kwargs):
"""
Add a job to the crawler.
Keyword arguments:
* search_id: Crawl this search record.
* max_results: Return approximately this many results. Default is to
let the searcher decide how many to return.
* delete_existing: Delete existing records for this search record?
Defaults to False.
* queue_name: Name of queue to use. Defaults to "items_to_search".
"""
queue = self.__getattribute__(queue_name)
item = kwargs
item['search_id'] = search_id
item['max_results'] = max_results
item['delete_existing'] = delete_existing
logger.debug("enqueued into `%s`: %s" % (queue_name, item))
queue.put(item)
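        # For illustration (hypothetical values), a typical call to this method looks like:
        #   crawler.enqueue(search_id=42, max_results=8, delete_existing=False, parseFast=True)
        # Extra keyword arguments such as parseFast simply ride along inside the item dict.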
def scorer_process(self, item, output=None):
"""
Score a single item.
"""
logger.info( "scorer process started")
content = None; stop_requested = None; search = None
try:
search_id = item['search_id']
content_id = item['content_id']
except Exception, e:
logger.error("bad item passed to crawler.scorer")
try:
#these may be null or placeholder objects
search = model.Search.get(item['search_id'])
except:
logger.debug("scorer couldn't retrieve search")
try:
content = model.Content.get(content_id) #a search object (db record)
except:
logger.info("scorer couldn't retrieve content")
try:
stop_requested = search.stoprequested
except:
logger.debug("scorer couldn't retrieve stop_requested")
#TODO: implement the "stop_requested" feature to kill runaway bots
if content:
try:
myBotRoutines.addScoreToContent(content)
logger.info("adding score to content %s" %content)
except Exception, e:
logger.debug( "bot.addScoreToContent has a problem")
logger.error(traceback.format_exc(e))
raise
return item
def prepare_results(self):
"""
load the scored results from the output of the scorer process (a Queue object
called items_to_finalize) into a dict called items completed
"""
while True:
item = None
try:
item = self.items_to_finalize.get_nowait()
except Queue.Empty:
pass # Handle below
if not item:
#logger.debug("results_for: no items")
break
leaf = None
self.lock.acquire()
if self.items_completed.has_key(item['search_id']):
leaf = self.items_completed[item['search_id']]
else:
#logger.debug("results_for: creating array for item:" % repr(item))
leaf = []
#logger.debug("results_for: appending Search#%s/Content#%s" % (item['search_id'], item['content_id']))
try:
leaf.append(item['content_id'])
self.items_completed[item['search_id']] = leaf
#logger.debug("leaf (items completed list) is %s" %repr(leaf))
except:
pass
self.lock.release()
def results_for(self, search_id):
"""
Calls the prepare_results method to unload the scorer output queue. When
finished, it calls the destructor for items_completed
"""
#logger.debug("results_for called")
#logger.debug("prepare_results search %s" %str(search_id))
self.prepare_results()
#logger.debug("prepare_results returned")
#logger.debug("self.items_completed: %s" %repr(self.items_completed))
if self.items_completed.has_key(search_id):
results = self.items_completed[search_id]
del self.items_completed[search_id]
#logger.debug("results_for: returning results for Search#%s: %s" % (str(search_id), repr(results)))
return results
else:
#logger.debug("results_for returned no results for Search#%s" % str(search_id))
return []
def ping(self):
"""
Is the server alive? Yes, always because this is a local object.
"""
return True
def stop(self, kind=None):
"""
This is a generic routine to stop processes. If no "kind" argument is provided, it iterates over the
top block of logic for searcher, visitor and scorer process types. The syntax is a bit convoluted here
and noted in the comments
"""
print ("crawler.Crawler.stop called")
cb = CrawlerBase()
logger = cb.logger()
if kind:
#this aliases the variable called <kind>s_count e.g., count = searchers_count
count = self.__dict__['%ss_count' % kind]
#alias for the input queue associated with this process
queue = self.queue[kind]['input']
stopped = False
            #throws a "stop" sentinel into the queue
for i in range(count):
try:
logger.info("stopping queue %s" %kind)
#traceback()
queue.put(self.STOP)
except Exception, e:
# Ignore if the queue is already stopped
pass
"""
The next equation assigns an alias for the variable that represents the "kind"
of process we're going to stop. If we passed in "scorer", the variable workers
would be set to "scorers".
"""
workers = self.__dict__['%ss' % kind]
for worker in workers:
try:
                    #wait for the worker to finish what it's currently doing
worker.join()
except Exception, e:
# Ignore if worker is already dead
pass
#clear the stack of active workers
while len(workers) != 0:
logger.debug("clearing worker stack in crawler.stop")
workers.pop()
stopped = True
if stopped:
try:
import logging
cb = CrawlerBase()
logger = cb.logger()
logger.info("stopped %i %s processes" % (count, kind))
except:
# Logging and logger aren't available otherwise if stop() is called from destructor.
print ("Crawler: stopped %i %s processes" % (count, kind))
pass
else:
"""
If this routine is called without a "kind" it recursively calls itself
to stop each type of active process; this is sort of the main loop for the
method.
"""
self.stop("searcher")
self.stop("visitor")
self.stop("scorer")
class CrawlerRunner(object):
"""
This is the main entry point for the crawler module.
"""
_instance = None
_instance_lock = threading.Lock()
#grab the logger from the server base
# TODO collapse container_location and concurrency_library to single value
def __init__(self, concurrency_library=None, container_location=None, manager=True, run_as_cron = False, **kwargs):
        #note, don't use a logger in __init__ unless it's installed in __init__
self._concurrency_library = self._get_concurrency_library(concurrency_library)
self._container_location = self._get_container_location(container_location)
self._manager = self._container_location == "local" or manager
self._lock = threading.Lock()
#run_as_cron will spawn a completely new instance of the crawler, hosted on a different
# xmlrpc server than the mainline web app
crawler_kwargs = dict(
concurrency_library=self._concurrency_library
)
crawler_kwargs.update(kwargs)
self.crawler_kwargs = crawler_kwargs
        if self._container_location == "local":
            # Crawler() doesn't accept run_as_cron, so it is not forwarded here.
            self.crawler = Crawler(**crawler_kwargs)
elif self._container_location == "remote":
self.crawler = CrawlerClient(run_as_cron = run_as_cron)
else:
raise NotImplementedError("Unknown container_location: %s" % self._container_location)
def __del__(self):
"""
The destructor method for a CrawlerRunner object
"""
self._crawler = None
self._lock = None
def run_visitor_serially(self, **kwargs):
self.run_serially = True
        self.crawler = Crawler(**self.crawler_kwargs)
item = kwargs
item.update(debug_run_serially = True)
visitReturn = self.crawler.visitor_process(kwargs)
if visitReturn:
aFewMore = 3
for j in range(0, min(len(visitReturn), aFewMore-1)):
v = visitReturn[j]
v.update(debug_run_serially = True)
scoreReturn = self.crawler.scorer_process(v)
def run_serially(self, **kwargs):
'''
        this is for debugging, and is used the same as enqueue. Instead of directing processing to
the process queues, it runs them serially i.e. the searcher routine hands off to the visitor
routine then the scorer routine. It's much slower, but allows access to the running code.
'''
self.run_serially = True
self.crawler = Crawler(**self.crawler_kwargs)
item = kwargs
item.update(debug_run_serially = True)
searchReturn = self.crawler.searcher_process(item) #a list
if searchReturn:
#try one to see if it works generally
s =searchReturn[0]
s.update(debug_run_serially = True)
s.update(parseFast = kwargs['parseFast'])
visitReturn = self.crawler.visitor_process(s)
if len(visitReturn) >0 :
#visitReturn.update(debug_run_serially = True)
v=visitReturn[0]
v.update(debug_run_serially = True)
scoreReturn = self.crawler.scorer_process(v)
#try a few more
aFewMore = 10
if searchReturn:
for i in range(0, min(len(searchReturn), aFewMore-1)):
s=searchReturn[i]
s.update(debug_run_serially = True)
visitReturn = self.crawler.visitor_process(s)
if visitReturn:
for j in range(0, min(len(visitReturn), aFewMore-1)):
v = visitReturn[j]
v.update(debug_run_serially = True)
scoreReturn = self.crawler.scorer_process(v)
def start(self):
print "%s.start" % self
if self._manager:
"""
The next line signs up this object for garbage collection if (and only if)
            the program terminates normally. If it crashes, or is stopped during debugging,
            there may be an orphaned process. If so, the process may need to be killed manually;
            use `sudo netstat -tap` to look for network connections (host/port specifications
are set in CrawlerBase).
"""
atexit.register(self.stop) #sets up to kill be object upon normal termination
if self._container_location == "remote":
pass
'''
*** We'll start the crawler server from a terminal window - at least for debugging; when
the main (client) program shuts down ungracefully, it doesn't kill the server. This
means we have to kill it manually.
killing_crawler = False
try:
pause_seconds = 0.5
#pat - why are we trying to kill the xmlrpc proxy server?
killCrawler= False
if killCrawler:
while True:
logger.debug("stopping crawler (this is normal)")
logger.debug("for debugging, don't stop the server")
#self.crawler.stop() # Will throw exception when down to end loop
#logger.info("CrawlerRunner.start: killing stale remote crawler...")
killing_crawler = True
time.sleep(pause_seconds)
except Exception, e:
if killing_crawler:
print "killing crawler"
logger.info("CrawlerRunner.start: killed stale remote crawler")
pass # Ignore because service may not be running already
logger.info("CrawlerRunner.start: launching remote xmlrpc server in os")
filename = re.sub("\.pyc$", ".py", __file__, 1)
# TODO safely quote paths
cmd = "'%s' --server --config '%s'" % (filename, commands.configuration)
logger.info(cmd)
#logger.debug("not starting the server from crawler - relying on externally-started one")
os.system("%s &" % cmd)
'''
elif self._container_location == "local":
logger.info("CrawlerRunner.start: launching local crawler")
return self.crawler.start()
else:
raise NotImplementedError("Unknown container_location: %s" % self._container_location)
def stop(self):
print "%s.stop" % self
if self._manager:
with self._lock:
if self.crawler:
try:
return self.crawler.stop()
except Exception, e:
print "CrawlerRunner.stop failed: %s" % e
def enqueue(self, **item):
#logger.debug("CrawlerRunner enqueueing item %s into a %s object" %(repr(item), type(self.crawler)))
return self.crawler.enqueue(**item)
def results_for(self, search_id):
#logger.debug("CrawlerRunner.results_for for search %s" % str(search_id))
return self.crawler.results_for(search_id)
def ping(self):
return self.crawler.ping()
@classmethod
def _get_concurrency_library(self, kind=None):
if kind:
return kind
else:
return turbogears.config.get("crawler.concurrency_library", has_fork() and "processing" or "threading")
@classmethod
def _get_container_location(self, kind=None):
if kind:
return kind
else:
return turbogears.config.get("crawler.container_location", has_fork() and "remote" or "local")
@classmethod
def get_instance(self):
with self._instance_lock:
if not self._instance:
self._instance = self()
self._instance.start()
return self._instance
class SearcherError(StandardError):
pass
class TypeError(StandardError):
pass
class TimeoutError(StandardError):
"""
Raised when a timeout is reached.
"""
pass
if __name__ == "__main__":
import logging
cb = CrawlerBase()
logger = cb.logger()
from optparse import OptionParser
parser = OptionParser()
parser.add_option("-f", "--config", dest="configfile", help="Optional configuration file", metavar="FILE")
parser.add_option("-c", "--client", action="store_true", dest="client", help="Start client")
parser.add_option("-s", "--server", action="store_true", dest="server", help="Start server")
#parser.add_option("-n", "--nightly", action="store_true", dest="run_as_cron", help="Run as cron")
parser.add_option("-k", "--concurrency", dest="concurrency_library", help="threading OR processing OR multiprocessing", metavar="LIBRARY")
(options, args) = parser.parse_args()
    #set up two possibilities for logging so contemporaneously-executing real-time
    # and cron jobs won't step on each other. Simultaneity shouldn't be a problem with
    # the stuff running as processes because each is on its own thread
cb = CrawlerBase()
logger = cb.logger()
logger.debug("booting configfile")
if options.configfile:
commands.boot(options.configfile)
else:
logger.debug("booting commands")
commands.boot()
if options.client:
logger.info("Starting client...")
        client = CrawlerClient(run_as_cron=False)
try:
from ipdb import set_trace
except:
from pdb import set_trace
#set_trace()
# TODO figure out how to make session exit without exceptions
else:
logger.info("Starting server from crawler.__main__")
global server
logger.debug("forcing concurrency library to be multiprocessing")
server = CrawlerServer(concurrency_library='multiprocessing')
#server = CrawlerServer(concurrency_library=options.concurrency_library)
try:
# pat - don't need to start the server here server.start()
server.start()
pass
except KeyboardInterrupt:
logger.info("Shutting down crawler process server due to keyboard interrupt...")
server.stop()
            logger.debug("crawler process server shut down successfully")
logger.info("Stopped server")
| 2.03125 | 2 |
trybox_django/tutorial.py | sophilabs/trybox-django | 0 | 12793312 | <reponame>sophilabs/trybox-django
# -*- coding: utf-8 -*-
from trybox.model import Tutorial
from step_01 import step as step01
tutorial = Tutorial(
title='Django',
description='Build a web application step by step using an awesome interactive tutorial for Django',
steps=[
step01,
]
)
| 1.679688 | 2 |
Leak #5 - Lost In Translation/windows/Resources/Dsz/PyScripts/Lib/dsz/mca/core/cmd/cprpc/data/dsz/__init__.py | bidhata/EquationGroupLeaks | 9 | 12793313 | <reponame>bidhata/EquationGroupLeaks
# uncompyle6 version 2.9.10
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10)
# [GCC 6.2.0 20161005]
# Embedded file name: __init__.py
import dsz
import dsz.cmd
import dsz.data
import dsz.lp
class CpRpc(dsz.data.Task):
def __init__(self, cmd=None):
dsz.data.Task.__init__(self, cmd)
def _LoadData(self):
try:
self.Rpc = CpRpc.Rpc(dsz.cmd.data.Get('Rpc', dsz.TYPE_OBJECT)[0])
except:
self.Rpc = None
try:
self.Result = CpRpc.Result(dsz.cmd.data.Get('Result', dsz.TYPE_OBJECT)[0])
except:
self.Result = None
return
class Rpc(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Id = dsz.cmd.data.ObjectGet(obj, 'Id', dsz.TYPE_INT)[0]
except:
self.Id = None
return
class Result(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Id = dsz.cmd.data.ObjectGet(obj, 'Id', dsz.TYPE_INT)[0]
except:
self.Id = None
try:
self.GroupTag = dsz.cmd.data.ObjectGet(obj, 'GroupTag', dsz.TYPE_INT)[0]
except:
self.GroupTag = None
try:
self.Status = dsz.cmd.data.ObjectGet(obj, 'Status', dsz.TYPE_INT)[0]
except:
self.Status = None
try:
self.StatusString = dsz.cmd.data.ObjectGet(obj, 'StatusString', dsz.TYPE_STRING)[0]
except:
self.StatusString = None
try:
self.Address = dsz.cmd.data.ObjectGet(obj, 'Address', dsz.TYPE_STRING)[0]
except:
self.Address = None
try:
self.Output = CpRpc.Result.Output(dsz.cmd.data.ObjectGet(obj, 'Output', dsz.TYPE_OBJECT)[0])
except:
self.Output = None
return
class Output(dsz.data.DataBean):
def __init__(self, obj):
try:
self.Length = dsz.cmd.data.ObjectGet(obj, 'Length', dsz.TYPE_INT)[0]
except:
self.Length = None
try:
self.Data = dsz.cmd.data.ObjectGet(obj, 'Data', dsz.TYPE_STRING)[0]
except:
self.Data = None
return
dsz.data.RegisterCommand('CpRpc', CpRpc)
CPRPC = CpRpc
cprpc = CpRpc | 2.203125 | 2 |
connection_speed.py | r-xela/lm_connection_speed | 2 | 12793314 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import requests
import speedtest
requests.packages.urllib3.disable_warnings()
speedtester = speedtest.Speedtest()
best_server = speedtester.get_best_server()
DL = speedtester.download()
UL = speedtester.upload()
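# download()/upload() report speeds in bits per second; dividing by 1000*1000 below converts
# them to Mbit/s for the two notification frames posted at the end of the script.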
dl_rate = "DL {:.2f}".format(DL / 1000 / 1000)
dl_icon = "i402"
ul_rate = "UL {:.2f}".format(UL / 1000 / 1000)
ul_icon = "i120"
access_token = "YOUR <PASSWORD>"
url = "ENDPOINT_URL"
headers = {'Accept': 'application/json', 'Cache-Control': 'no-cache', 'X-Access-Token': access_token}
data = {
'frames': [
{
'index': 0,
'text': dl_rate,
'icon': dl_icon
},
{
'index': 1,
'text': ul_rate,
'icon': ul_icon
}
]
}
r = requests.post(url, headers=headers, data=json.dumps(data), verify=False)
| 2.3125 | 2 |
src/blockchain/data/mempool.py | ParisNeo/blockchain | 0 | 12793315 | <reponame>ParisNeo/blockchain
"""
File : mempool.py
Author : ParisNeo
Description :
Here are stored the pending transactions
"""
import pickle
import time
from blockchain.data import transaction
class MemPool():
def __init__(self):
self.transactions=[]
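# A minimal usage sketch (hypothetical -- the pool currently only exposes the raw list):
#   pool = MemPool()
#   pool.transactions.append(some_pending_transaction)
#   pool.transactions.clear()  # e.g. once the transactions have been mined into a block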
| 2.21875 | 2 |
src/broAnalyzer/plots/subjectalternatenames.py | maveeee/passive-tls | 0 | 12793316 | from os.path import join
import pandas as pd
import matplotlib.pyplot as plt
from util.plot import Plot, plotDataFrame, formatXAxisDate
class SubjectAlternateNamesPlot(Plot):
def __init__(self):
super(SubjectAlternateNamesPlot, self).__init__('Subject Alternate Names', 'SubjectAlternateNames.csv', 'subjectAltNames')
self.__output_file_name = "SubjectAlternateNames.png"
def add_args(self, parser):
parser.add_argument('-san', '--subjectAltNames', action='store_true',
help='Plot subject alternate names from certificates')
def parse_args(self, args):
pass
def plot(self, input_file, output_folder):
df = pd.read_csv(input_file,
sep='\x09', usecols=[0, 1, 2], parse_dates=[0], converters={"SubjectAltNames": lambda x: x.strip("[]").split(", ")})
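        # The SubjectAltNames column is serialized like "[a.example.com, b.example.com]", so the
        # converter above strips the brackets and splits on ", " to recover one Python list per row.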
df.dropna(inplace=True)
df['SANLength'] = df['SubjectAltNames'].apply(lambda x:len(x) if isinstance(x, list) else None)
df = df.groupby('Day')['SANLength'].agg(['mean', 'median', 'max', 'min'])
df.columns.name = None
df.index.name = None
fig = plotDataFrame(df, "Length of Subject Alternate Name List")
fig.legend(loc='center left', bbox_to_anchor=(1, 0.5))
formatXAxisDate(fig)
plt.tight_layout()
plt.savefig(join(output_folder, self.__output_file_name), bbox_inches='tight')
| 2.828125 | 3 |
FewShotPreprocessing.py | ahirsharan/MTL_Segmentation | 9 | 12793317 |
import os
import os.path as osp
from PIL import Image
PATH='../Fewshot/Fewshot/'
classes= os.listdir(PATH)
trainp='../Fewshot/train/'
valp='../Fewshot/val/'
testp='../Fewshot/test/'
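# Split logic below: for every class folder the files are taken in os.listdir() order --
# indices 0-7 go to train, 8-15 to val and 16-19 to test. Within each block, even indices are
# saved under images/ and odd indices under labels/, which assumes the listing interleaves
# image/label pairs in a stable order.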
for classv in classes:
if classv[0]=='.':
continue
pathn=osp.join(PATH,classv)
pathn=pathn+'/'
folders=os.listdir(pathn)
path1=osp.join(trainp,'images/')
path1=osp.join(path1,classv)
os.mkdir(path1)
path1 =path1 +'/'
path2=osp.join(trainp,'labels/')
path2=osp.join(path2,classv)
os.mkdir(path2)
path2=path2+'/'
for i in range(0,8,1):
p=osp.join(pathn,folders[i])
im=Image.open(p)
if(i%2==0):
p1=osp.join(path1,folders[i])
im.save(p1)
else:
p2=osp.join(path2,folders[i])
im.save(p2)
path1=osp.join(valp,'images/')
path1=osp.join(path1,classv)
os.mkdir(path1)
path1 =path1 +'/'
path2=osp.join(valp,'labels/')
path2=osp.join(path2,classv)
os.mkdir(path2)
path2=path2+'/'
for i in range(8,16,1):
p=osp.join(pathn,folders[i])
im=Image.open(p)
if(i%2==0):
p1=osp.join(path1,folders[i])
im.save(p1)
else:
p2=osp.join(path2,folders[i])
im.save(p2)
path1=osp.join(testp,'images/')
path1=osp.join(path1,classv)
os.mkdir(path1)
path1=path1+'/'
path2=osp.join(testp,'labels/')
path2=osp.join(path2,classv)
os.mkdir(path2)
path2=path2+'/'
for i in range(16,20,1):
p=osp.join(pathn,folders[i])
im=Image.open(p)
if(i%2==0):
p1=osp.join(path1,folders[i])
im.save(p1)
else:
p2=osp.join(path2,folders[i])
im.save(p2)
| 2.53125 | 3 |
distributed_dp/dme_run.py | garyxcheng/federated | 330 | 12793318 | <filename>distributed_dp/dme_run.py
# Copyright 2021, Google LLC. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run script for distributed mean estimation."""
import os
import pprint
from absl import app
from absl import flags
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats
import tensorflow as tf
import tensorflow_privacy as tfp
from distributed_dp import accounting_utils
from distributed_dp import ddpquery_utils
from distributed_dp import dme_utils
flags.DEFINE_boolean('show_plot', False, 'Whether to plot the results.')
flags.DEFINE_boolean('print_output', False, 'Whether to print the outputs.')
flags.DEFINE_integer(
'run_id', 1, 'ID of the run, useful for identifying '
'the run when parallelizing this script.')
flags.DEFINE_integer('repeat', 5, 'Number of times to repeat (sequentially).')
flags.DEFINE_string('output_dir', '/tmp/ddp_dme_outputs', 'Output directory.')
flags.DEFINE_string('tag', '', 'Extra subfolder for the output result files.')
flags.DEFINE_enum('mechanism', 'ddgauss', ['ddgauss'], 'DDP mechanism to use.')
flags.DEFINE_float('norm', 10.0, 'Norm of the randomly generated vectors.')
flags.DEFINE_integer(
'k_stddevs', 2, 'Number of standard deviations of the '
'noised, quantized, aggregated siginal to bound.')
flags.DEFINE_boolean(
'sqrtn_norm_growth', False, 'Whether to assume the bound '
'norm(sum_i x_i) <= sqrt(n) * c.')
FLAGS = flags.FLAGS
def experiment(bits,
clip,
beta,
client_data,
epsilons,
delta,
mechanism,
k_stddevs=2,
sqrtn_norm_growth=False):
"""Run a distributed mean estimation experiment.
Args:
bits: A list of compression bits to use.
clip: The initial L2 norm clip.
beta: A hyperparameter controlling the concentration inequality for the
probabilistic norm bound after randomized rounding.
client_data: A Python list of `n` np.array vectors, each with shape (d,).
epsilons: A list of target epsilon values for comparison (serve as x-axis).
delta: The delta for approximate DP.
mechanism: A string specifying the mechanism to compare against Gaussian.
k_stddevs: The number of standard deviations to keep for modular clipping.
Defaults to 2.
sqrtn_norm_growth: Whether to assume the norm of the sum of the vectors grow
at a rate of `sqrt(n)` (i.e. norm(sum_i x_i) <= sqrt(n) * c). If `False`,
we use the upper bound `norm(sum_i x_i) <= n * c`.
Returns:
Experiment results as lists of MSE.
"""
def mse(a, b):
assert a.shape == b.shape
return np.square(a - b).mean()
# Initial fixed params.
num_clients = len(client_data)
d = len(client_data[0])
padded_dim = np.math.pow(2, np.ceil(np.log2(d)))
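  # For example, d = 250 pads up to 256; the DDP queries built below presumably rely on a
  # power-of-two dimension for their rotation step, hence the padding.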
client_template = tf.zeros_like(client_data[0])
# `client_data` has shape (n, d).
true_avg_vector = np.mean(client_data, axis=0)
# 1. Baseline: central continuous Gaussian.
gauss_mse_list = []
for eps in epsilons:
# Analytic Gaussian.
gauss_stddev = accounting_utils.analytic_gauss_stddev(eps, delta, clip)
gauss_query = tfp.GaussianSumQuery(l2_norm_clip=clip, stddev=gauss_stddev)
gauss_avg_vector = dme_utils.compute_dp_average(
client_data, gauss_query, is_compressed=False, bits=None)
gauss_mse_list.append(mse(gauss_avg_vector, true_avg_vector))
# 2. Distributed DP: try each `b` separately.
ddp_mse_list_per_bit = []
for bit in bits:
discrete_mse_list = []
for eps in epsilons:
if mechanism == 'ddgauss':
gamma, local_stddev = accounting_utils.ddgauss_params(
q=1,
epsilon=eps,
l2_clip_norm=clip,
bits=bit,
num_clients=num_clients,
dim=padded_dim,
delta=delta,
beta=beta,
steps=1,
k=k_stddevs,
sqrtn_norm_growth=sqrtn_norm_growth)
scale = 1.0 / gamma
else:
raise ValueError(f'Unsupported mechanism: {mechanism}')
ddp_query = ddpquery_utils.build_ddp_query(
mechanism,
local_stddev,
l2_norm_bound=clip,
beta=beta,
padded_dim=padded_dim,
scale=scale,
client_template=client_template)
distributed_avg_vector = dme_utils.compute_dp_average(
client_data, ddp_query, is_compressed=True, bits=bit)
discrete_mse_list.append(mse(distributed_avg_vector, true_avg_vector))
ddp_mse_list_per_bit.append(discrete_mse_list)
# Convert to np arrays and do some checks
gauss_mse_list = np.array(gauss_mse_list)
ddp_mse_list_per_bit = np.array(ddp_mse_list_per_bit)
assert gauss_mse_list.shape == (len(epsilons),)
assert ddp_mse_list_per_bit.shape == (len(bits), len(epsilons))
return gauss_mse_list, ddp_mse_list_per_bit
def experiment_repeated(bits,
clip,
beta,
client_data_list,
repeat,
epsilons,
delta,
mechanism,
k_stddevs=2,
sqrtn_norm_growth=False):
"""Sequentially repeat the experiment (see `experiment()` for parameters)."""
assert len(client_data_list) == repeat
n, d = len(client_data_list[0]), len(client_data_list[0][0])
print(f'Sequentially repeating the experiment {len(client_data_list)} times '
f'for n={n}, d={d}, mechanism={mechanism}, c={clip}, bits={bits}, beta='
f'{beta:.3f}, eps={epsilons}, k={k_stddevs}, sng={sqrtn_norm_growth}')
repeat_results = []
for client_data in client_data_list:
repeat_results.append(
experiment(
bits=bits,
clip=clip,
beta=beta,
client_data=client_data,
epsilons=epsilons,
delta=delta,
mechanism=mechanism,
k_stddevs=k_stddevs,
sqrtn_norm_growth=sqrtn_norm_growth))
repeat_gauss_mse_list, repeat_ddp_mse_list_per_bit = zip(*repeat_results)
repeat_gauss_mse_list = np.array(repeat_gauss_mse_list)
repeat_ddp_mse_list_per_bit = np.array(repeat_ddp_mse_list_per_bit)
assert len(repeat_results) == repeat
assert repeat_gauss_mse_list.shape == (repeat, len(epsilons))
assert (repeat_ddp_mse_list_per_bit.shape == (repeat, len(bits),
len(epsilons)))
return repeat_gauss_mse_list, repeat_ddp_mse_list_per_bit
def mean_confidence_interval(data, confidence=0.95):
# `data` should have shape (repeat, len(x-axis)).
n = len(data)
m, se = np.mean(data, axis=0), scipy.stats.sem(data, axis=0)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n - 1)
return m, m - h, m + h
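# Example reading (hypothetical numbers): with repeat=5 MSE samples per epsilon, this returns
# the per-epsilon sample mean plus a t-distribution confidence band of mean +/- t * SEM.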
def plot_curve(subplot, epsilons, data, label):
assert len(data.shape) == 2, 'data should be (repeat, len(x-axis))'
means, lower, upper = mean_confidence_interval(data)
subplot.plot(epsilons, means, label=label)
subplot.fill_between(epsilons, lower, upper, alpha=0.2, edgecolor='face')
def main(_):
"""Run distributed mean estimation experiments."""
clip = FLAGS.norm
delta = 1e-5
use_log = True # Whether to use log-scale for y-axis.
k_stddevs = FLAGS.k_stddevs
sqrtn_norm_growth = FLAGS.sqrtn_norm_growth
repeat = FLAGS.repeat
# Parallel subplots for different n=num_clients and d=dimension.
nd_zip = [(100, 250), (1000, 250)]
# nd_zip = [(10000, 2000)]
# Curves within a subplot.
bits = [10, 12, 14, 16]
# bits = [14, 16, 18, 20]
# X-axis: epsilons.
epsilons = [0.75] + list(np.arange(1, 6.5, 0.5))
_, ax = plt.subplots(1, max(2, len(nd_zip)), figsize=(20, 5))
results = []
for j, (n, d) in enumerate(nd_zip):
client_data_list = [
dme_utils.generate_client_data(d, n, l2_norm=clip)
for _ in range(repeat)
]
beta = np.exp(-0.5)
# Run experiment with repetition.
rep_gauss_mse_list, rep_ddp_mse_list_per_bit = experiment_repeated(
bits,
clip,
beta,
client_data_list,
repeat,
epsilons,
delta,
mechanism=FLAGS.mechanism,
k_stddevs=k_stddevs,
sqrtn_norm_growth=sqrtn_norm_growth)
# Generate some basic plots here. Use the saved results to generate plots
# with custom style if needed.
if FLAGS.show_plot:
subplot = ax[j]
# Continuous Gaussian.
plot_curve(
subplot, epsilons, rep_gauss_mse_list, label='Continuous Gaussian')
# Distributed DP.
for index, bit in enumerate(bits):
plot_curve(
subplot,
epsilons,
rep_ddp_mse_list_per_bit[:, index],
label=f'{FLAGS.mechanism} (B = {bit})')
subplot.set(xlabel='Epsilon', ylabel='MSE')
subplot.set_title(f'(n={n}, d={d}, k={k_stddevs})')
subplot.set_yscale('log' if use_log else 'linear')
subplot.legend()
result_dic = {
'n': n,
'd': d,
'rep': repeat,
'c': clip,
'bits': bits,
'k_stddevs': k_stddevs,
'epsilons': epsilons,
'mechanism': FLAGS.mechanism,
'sqrtn_norm_growth': sqrtn_norm_growth,
'gauss': rep_gauss_mse_list,
FLAGS.mechanism: rep_ddp_mse_list_per_bit,
}
results.append(result_dic)
if FLAGS.print_output:
print(f'n={n}, d={d}:')
pprint.pprint(result_dic)
# Save to file.
fname = f'rp={repeat},rid={FLAGS.run_id}.txt'
fname = fname.replace(' ', '')
result_str = pprint.pformat(results)
dirname = os.path.join(FLAGS.output_dir, FLAGS.tag)
if not os.path.exists(dirname):
os.makedirs(dirname)
out_path = os.path.join(dirname, fname)
with open(out_path, 'w') as f:
f.write(result_str)
print('Results saved to', out_path)
if FLAGS.print_output:
print('*' * 80)
print(fname)
print('*' * 10 + 'Results (copy and `eval()` in Python):')
print(result_str)
print('*' * 80)
print('Copy the above results and `eval()` them as a string in Python.')
if FLAGS.show_plot:
plt.show()
print(f'Run {FLAGS.run_id} done.')
if __name__ == '__main__':
app.run(main)
| 2.078125 | 2 |
zasim/cagen/compatibility.py | timo/zasim | 2 | 12793319 | """The compatibility module offers a way for `StepFuncVisitor` objects
to express what combinations are acceptable and what combinations are going to
break, allowing the constructor of the `StepFunc` to bail out early
instead of causing an unexpected result during execution.
Each `StepFuncVisitor` has three attributes:
- requires_features
A list of compatibility features, that are required for operation.
- provides_features
A list of features, that are offered by this class.
- incompatible_features
A list of features that are incompatible with this class.
"""
# This file is part of zasim. zasim is licensed under the BSD 3-clause license.
# See LICENSE.txt for details.
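# A minimal sketch (hypothetical class, not part of zasim) of how a StepFuncVisitor could
# declare its compatibility using the feature constants defined further below:
#
#     class HistogramVisitor(StepFuncVisitor):
#         requires_features = [two_dimensions]
#         provides_features = [histogram]
#         incompatible_features = [one_dimension]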
class CompatibilityException(Exception):
def __init__(self, conflicts, missing):
self.conflicts = conflicts
self.missing = missing
#def __repr__(self):
#return "<CompatibilityException(conflicts=%s, missing=%s)>" % (self.conflicts, self.missing)
def __str__(self):
return """\
<Compatibility Exception:
feature conflicts:
%s
missing features:
%s
>""" % ("\n ".join(map(str, self.conflicts)),
"\n ".join(map(str, self.missing)))
class NoCodeGeneratedException(Exception):
"""When both the no_python_code and the no_weave_code feature are present,
no valid code has actually been generated."""
one_dimension = "one_dimension"
"""The configuration has one dimension."""
two_dimensions = "two_dimensions"
"""The configuration has two dimensions."""
beta_async_neighbourhood = "beta_async_neighbourhood"
beta_async_accessor = "beta_async_accessor"
histogram = "histogram"
"""This StepFunc has a histogram."""
activity = "activity"
"""This StepFunc calculates the Activity."""
no_python_code = "no_python_code"
"""This StepFunc doesn't generate pure python code."""
no_weave_code = "no_weave_code"
"""This StepFunc doesn't generate weave code."""
random_generator = "random_generator"
| 2.625 | 3 |
Bin/init.py | mfneirae/GrupLAC-Complete | 0 | 12793320 | #
#
# #############################################################################
# Copyright (c) 2018 Universidad Nacional de Colombia All Rights Reserved.
#
# This work was made as a development to improve data collection
# for self-assessment and accreditation processes in the Vicedeanship
# of academic affairs in the Engineering Faculty of the Universidad
# Nacional de Colombia and is licensed under a Creative Commons
# Attribution-NonCommercial - ShareAlike 4.0 International License
# and MIT Licence.
#
# by <NAME>.
#
# For more information write me to <EMAIL>
# Or visit my webpage at https://mfneirae.com/
# #############################################################################
#
#
def inicio():
global GP_DATOS_BASE
global GP_DATOS_BASE_CSV
global GP_DATOS_INSTITUCIONES
global GP_DATOS_INSTITUCIONES_CSV
global GP_DATOS_LINEAS
global GP_DATOS_LINEAS_CSV
global GP_DATOS_SECTORES
global GP_DATOS_SECTORES_CSV
global GP_DATOS_INTEGRANTES
global GP_DATOS_INTEGRANTES_CSV
global REL_GRUPO_PRODUCTO
global REL_GRUPO_PRODUCTO_CSV
global GP_PROD_BIB
global GP_PROD_BIB_CSV
global GP_PROD_TEC
global GP_PROD_TEC_CSV
global GP_APROPIACION
global GP_APROPIACION_CSV
global GP_OBRAS
global GP_OBRAS_CSV
global GP_ACTIVIDADES
global GP_ACTIVIDADES_CSV
global v_colciencias_tipo_producto
global inv_colciencias_tipo_producto
GP_DATOS_BASE = []
GP_DATOS_INSTITUCIONES = []
GP_DATOS_LINEAS = []
GP_DATOS_SECTORES = []
GP_DATOS_INTEGRANTES = []
REL_GRUPO_PRODUCTO = []
GP_PROD_BIB = []
GP_PROD_TEC = []
GP_APROPIACION = []
GP_OBRAS = []
GP_ACTIVIDADES = []
GP_PROD_BIB_CSV=["CODGP_PROD_BIB; \
    CODGP_PROD; \
Revista; \
Autor Original; \
Nombre Libro; \
ISBN/ISSN; \
Medio de Divulgación; \
URL; \
Fasciculos; \
Idioma Original; \
Idioma Traduccion; \
Edición; \
Serie; \
Página Inicial; \
Página Final ; \
\n"]
GP_PROD_TEC_CSV=["CODGP_PROD_TEC; \
CODGP_PROD; \
Tema; \
Nombre Comerial; \
Nombre Proyecto; \
Tipo de Ciclo; \
NIT; \
Fecha de Registro; \
Tiene Productos; \
Disponibilidad; \
Objeto; \
Fecha Publicación; \
Número de Contrato; \
Acto Administrativo; \
\n"]
GP_APROPIACION_CSV=["CODGP_PROD_APROPIACION; \
CODGP_PROD; \
Tipos de Participación; \
Fecha Inicio; \
Fecha Fin; \
Proyecto de Inv; \
Medio de publicación; \
Emisora; \
Número de Participantes; \
\n"]
GP_OBRAS_CSV=["CODGP_PROD_OBRAS; \
CODGP_PROD; \
Fecha Creación; \
Disiplina de origen; \
Institución Licencia; \
Fecha Licencia; \
Distinciones; \
Selección Distinción; \
Productos Asociados; \
Número Derechos Autor/NIT; \
\n"]
GP_ACTIVIDADES_CSV=["CODGP_PROD_FORM; \
CODGP_PROD; \
Nombre de Ferias; \
Fecha Inicio Curso; \
Tipo Orientación; \
Nombre Estudiante; \
Programa Académico; \
Valoración; \
Fecha fin Curso; \
Finalidad; \
Duración; \
\n"]
REL_GRUPO_PRODUCTO_CSV =["CODGP_PROD; \
CODGP; \
GP_TIPO_PROD; \
Nombre Producto; \
Lugar; \
Año; \
Idioma; \
Páginas; \
Volumen; \
Editorial; \
Ambito; \
DOI; \
Descripción; \
Instituciones; \
Tipo Vincula Institu; \
Autores\n"]
GP_DATOS_BASE_CSV = ["CODGP;\
Año Formación;\
Mes Formación;\
Lugar;\
Nombre Lider;\
Información Certificada;\
Página Web;\
Correo;\
Clasificación;\
Área del Conocimiento;\
Programa Nacional;\
Programa Nacional 2;\
Plan de trabajo;\
Estado del Arte;\
Objetivos;\
Retos;\
Visión\n"]
GP_DATOS_INSTITUCIONES_CSV = ["CODGP_INSTI;\
CODGP;\
Nombre Institución\n"]
GP_DATOS_LINEAS_CSV = ["CODGP_LINEA;\
CODGP;\
Línea de Investigación\n"]
GP_DATOS_SECTORES_CSV = ["CODGP_SECTOR;\
CODGP;\
Sector\n"]
GP_DATOS_INTEGRANTES_CSV = ["CODGP_INTEGRANTE;\
CODGP;\
COD_RG;\
CVLAC;\
NOMBRE COMPLETO;\
Tipo Vinculación;\
Horas de Dedicación;\
Duración Vinculación;\
Inicio Vinculación;\
Fin Vinculación;\
Fin Vinculación\n"]
v_colciencias_tipo_producto = [ "COD_TIPO_PRODUCTO; \
TIPO_PRODUCTO_COL; \
SUB_TIPO_PRODUCTO_COL; \
TIPO_UAPA\n\
0; \
Evento sin producto asociado; \
Evento sin producto asociado; \
Evento sin producto asociado\n\
1; \
Redes de conocimiento; \
Redes de conocimiento; \
Redes de conocimiento\n\
2; \
Producción bibliográfica - Trabajos en eventos (Capítulos de memoria) - Completo; \
Capítulos de memoria; \
Capítulos de memoria\n\
3; \
Producción técnica - Presentación de trabajo - Comunicación; \
Presentación de trabajo; \
Trabajo de Comunicación\n\
4; \
Demás trabajos - Demás trabajos - Póster; \
Demás trabajos; \
Poster\n\
5; \
Producción técnica - Presentación de trabajo - Conferencia; \
Presentación de trabajo; \
Conferencia\n\
6; \
Producción técnica - Presentación de trabajo - Ponencia; \
Presentación de trabajo; \
Ponencia\n\
7; \
Estrategias pedagógicas para el fomento a la CTI; \
Estrategias pedagógicas; \
Estrategias pedagógicas\n\
8; \
Producción bibliográfica - Artículo - Publicado en revista especializada; \
Publicado en revista especializada; \
Artículo\n\
9; \
Producción bibliográfica - Artículo - Corto (Resumen); \
Corto (Resumen); \
Artículo\n\
10; \
Estrategias pedagógicas para el fomento a la CTI; \
Estrategias pedagógicas; \
Estrategias pedagógicas\n\
11; \
Producción bibliográfica - Artículo - Caso clínico; \
Caso Clínico; \
Artículo\n\
12; \
Producción bibliográfica - Trabajos en eventos (Capítulos de memoria) - Resumen; \
Capítulo de Memoria; \
Resumen\n\
13; \
Producción técnica - Presentación de trabajo - Congreso; \
Congreso; \
Congreso\n\
14; \
Producción técnica - Presentación de trabajo - Simposio; \
Simposio; \
Simposio\n\
15; \
Producción técnica - Presentación de trabajo - Seminario; \
Seminario; \
Seminario\n\
16; \
Producción técnica - Presentación de trabajo - Otro; \
Otro; \
Otro\n\
17; \
Producción bibliográfica - Libro - Libro resultado de investigación; \
Libro resultado de investigación; \
Libro\n\
18; \
Producción bibliográfica - Libro - Otro libro publicado; \
Otro libro publicado; \
Libro - Otro\n\
19; \
Producción bibliográfica - Libro - Libro pedagógico y/o de divulgación; \
Libro pedagógico y/o de divulgación; \
Libro - pedagógico\n\
20; \
Otro capítulo de libro publicado; \
Otro capítulo de libro; \
Capítulo de libro - Otro\n\
21; \
Capítulo de libro; \
Capítulo de libro; \
Capítulo de libro\n\
22; \
Producción bibliográfica - Otro artículo publicado - Periódico de noticias; \
Periódico de noticias; \
Otro\n\
23; \
Producción bibliográfica - Otro artículo publicado - Revista de divulgación; \
Revista de divulgación; \
Otro\n\
24; \
Producción bibliográfica - Otro artículo publicado - Cartas al editor; \
Cartas al editor; \
Otro\n\
25; \
Producción bibliográfica - Otro artículo publicado - Reseñas de libros; \
Reseñas de libros; \
Otro\n\
26; \
Producción bibliográfica - Otro artículo publicado - Columna de opinión; \
Columnas de opinión; \
Otro\n\
27; \
Producción bibliográfica - Documento de trabajo (Working Paper); \
Documento de trabajo (Working Paper); \
Otro\n\
28; \
Producción bibliográfica - Traducciones - Artículo; \
Traducciones - Artículo; \
Traducciones\n\
29; \
Producción bibliográfica - Traducciones - Libro; \
Traducciones - Libro; \
Traducciones\n\
30; \
Producción bibliográfica - Traducciones - Otra; \
Traducciones - Otra; \
Traducciones\n\
31; \
Producción bibliográfica - Otra producción bibliográfica - Introducción; \
Introducción; \
Otro\n\
32; \
Producción bibliográfica - Otra producción bibliográfica - Prólogo; \
Prólogo; \
Otro\n\
33; \
Producción bibliográfica - Otra producción bibliográfica - Epílogo; \
Epílogo; \
Otro\n\
34; \
Producción bibliográfica - Otra producción bibliográfica - Otra; \
Otra; \
Otro\n\
35; \
Producción técnica - Softwares - Computacional; \
Software; \
Software\n\
36; \
Producción técnica - Productos tecnológicos - Gen Clonado; \
Productos tecnológicos - Gen Clonado; \
Productos tecnológicos\n\
37; \
Producción técnica - Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada; \
Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada; \
Productos tecnológicos\n\
38; \
Producción técnica - Productos tecnológicos - Otro; \
Productos tecnológicos - Otro; \
Productos tecnológicos\n\
39; \
Producción técnica - Productos tecnológicos - Base de datos de referencia para investigación; \
Productos tecnológicos - Base de datos de referencia para investigación; \
Productos tecnológicos\n\
40; \
Producción técnica - Diseño Industrial; \
Diseño Industrial; \
Otro\n\
41; \
Producción técnica - Esquema de circuito integrado; \
Esquema de circuito integrado; \
Otro\n\
42; \
Producción técnica - Innovaciones generadas de producción empresarial - Organizacional; \
Innovaciones generadas de producción empresarial - Organizacional; \
Innovaciones\n\
43; \
Producción técnica - Innovaciones generadas de producción empresarial - Empresarial; \
Innovaciones generadas de producción empresarial - Empresarial; \
Innovaciones\n\
44; \
Producción técnica - Variedad animal; \
Variedad animal; \
Otro\n\
45; \
Producción técnica - Innovación de proceso o procedimiento; \
Innovación de proceso o procedimiento; \
Innovación\n\
46; \
Producción técnica - Cartas, mapas o similares - Aerofotograma; \
Aerofotograma; \
Otro\n\
47; \
Producción técnica - Cartas, mapas o similares - Carta; \
Carta; \
Otro\n\
48; \
Producción técnica - Cartas, mapas o similares - Fotograma; \
Fotograma; \
Otro\n\
49; \
Producción técnica - Cartas, mapas o similares - Mapa; \
Mapa; \
Otro\n\
50; \
Producción técnica - Cartas, mapas o similares - Otra; \
Otra; \
Otro\n\
51; \
Producción técnica - Variedad vegetal; \
Variedad vegetal; \
Otro\n\
52; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de proyectos de IDI; \
Servicios de proyectos de IDI; \
Otro\n\
53; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Comercialización de tecnología; \
Comercialización de tecnología; \
Otro\n\
54; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Análisis de competitividad; \
Análisis de competitividad; \
Otro\n\
55; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Informe técnico; \
Informe técnico; \
Otro\n\
56; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Otro; \
Otro; \
Otro\n\
57; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Acciones de transferencia tecnológica; \
Acciones de transferencia tecnológica; \
Otro\n\
58; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Desarrollo de productos; \
Desarrollo de productos; \
Otro\n\
59; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Implementación de sistemas de análisis; \
Implementación de sistemas de análisis; \
Otro\n\
60; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Consultoría en artes,arquitectura y diseño; \
Consultoría en artes,arquitectura y diseño; \
Otro\n\
61; \
Producción técnica - Regulación, norma, reglamento o legislación - Ambiental o de Salud; \
Regulación, norma, reglamento o legislación - Ambiental o de Salud; \
Otro\n\
62; \
Producción técnica - Regulación, norma, reglamento o legislación - Educativa; \
Regulación, norma, reglamento o legislación - Educativa; \
Otro\n\
63; \
Producción técnica - Regulación, norma, reglamento o legislación - Social; \
Regulación, norma, reglamento o legislación - Social; \
Otro\n\
64; \
Producción técnica - Regulación, norma, reglamento o legislación - Técnica; \
Regulación, norma, reglamento o legislación - Técnica; \
Otro\n\
65; \
Producción técnica - Regulación, norma, reglamento o legislación - Guía de práctica clínica; \
Regulación, norma, reglamento o legislación - Guía de práctica clínica; \
Otro\n\
66; \
Producción técnica - Regulación, norma, reglamento o legislación - Proyecto de ley; \
Regulación, norma, reglamento o legislación - Proyecto de ley; \
Otro\n\
67; \
Producción técnica - Reglamento Técnico; \
Reglamento Técnico; \
Otro\n\
68; \
Producción técnica - Empresa de base tecnológica - Spin-off; \
Empresa de base tecnológica - Spin-off; \
Otro\n\
69; \
Producción técnica - Empresa de base tecnológica - Start-up; \
Empresa de base tecnológica - Start-up; \
Otro\n\
70; \
Demás trabajos - Demás trabajos; \
Demás trabajos; \
Otro\n\
71; \
Producción técnica - Signos; \
Signos; \
Otro\n\
72; \
Producción técnica - Softwares - Multimedia; \
Multimedia; \
Software\n\
73; \
Producción técnica - Softwares - Otra; \
Softwares - Otra; \
Software\n\
74; \
Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Básica; \
Técnica - Básica; \
Otro\n\
75; \
Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Ensayo; \
Técnica - Ensayo; \
Otro\n\
76; \
Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de Proyectos de I+D+I; \
Servicios de Proyectos de I+D+I; \
Otro\n\
77; \
Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Proceso; \
Técnica - Proceso; \
Otro\n\
78; \
Datos complementarios - Participación en comités de evaluación - Profesor titular; \
Participación en comités de evaluación - Profesor titular; \
Comités\n\
79; \
Datos complementarios - Participación en comités de evaluación - Concurso docente; \
Participación en comités de evaluación - Concurso docente; \
Comités\n\
80; \
Datos complementarios - Participación en comités de evaluación - Jefe de cátedra; \
Participación en comités de evaluación - Jefe de cátedra; \
Comités\n\
81; \
Datos complementarios - Participación en comités de evaluación - Evaluación de cursos; \
Participación en comités de evaluación - Evaluación de cursos; \
Comités\n\
82; \
Datos complementarios - Participación en comités de evaluación - Acreditación de programas; \
Participación en comités de evaluación - Acreditación de programas; \
Comités\n\
83; \
Datos complementarios - Participación en comités de evaluación - Asignación de becas; \
Participación en comités de evaluación - Asignación de becas; \
Comités\n\
84; \
Datos complementarios - Participación en comités de evaluación - Otra; \
Participación en comités de evaluación - Otra; \
Comités\n\
85; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Pregrado; \
Jurado Pregrado; \
Comités\n\
86; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialización; \
Jurado Especialización; \
Comités\n\
87; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialidad Médica; \
Jurado Especialidad Médica; \
Comités\n\
88; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Maestría; \
Jurado Maestría; \
Comités\n\
89; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Doctorado; \
Jurado Doctorado; \
Comités\n\
90; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Otra; \
Jurado Otra; \
Comités\n\
91; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Curso de perfeccionamiento/especialización; \
Jurado Especializaciones; \
Comités\n\
96; \
Producción técnica - Signos Distintivos - Nombres comerciales; \
Signos Distintivos - Nombres comerciales; \
Nombres comerciales\n\
92; \
Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Curso de perfeccionamiento/especialización; \
Jurado Especializaciones; \
Comités\n\
93; \
Producción técnica - Plantas piloto - Planta piloto; \
Plantas piloto - Planta piloto; \
Planta piloto\n\
94; \
Producción técnica - Prototipo - Industrial; \
Prototipo - Industrial; \
Industrial\n\
95; \
Producción técnica - Signos Distintivos - Marcas; \
Signos Distintivos - Marcas; \
Marcas\n\
96; \
Producción técnica - Signos Distintivos - Nombres comerciales; \
Signos Distintivos - Nombres comerciales; \
Nombres comerciales\n\
97; \
Apropiación social y circularción del conocimiento - Ediciones - Anales; \
Ediciones - Anales; \
Anales\n\
98; \
Apropiación social y circularción del conocimiento - Ediciones - Libro; \
Ediciones - Libro; \
Libro\n\
92; \
Producción técnica - Prototipo - Servicios; \
Prototipo - Servicios; \
Servicios\n"]
#***************************************************************************
#Insert
#***************************************************************************
inv_colciencias_tipo_producto = [ "REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`cod_tipo_producto`,\
`tipo_producto_col`,\
`sub_tipo_producto_col`,\
`tipo_uapa`) VALUES (\
0,\
'Evento sin producto asociado',\
'Evento sin producto asociado',\
'Evento sin producto asociado');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
1,\
'Redes de conocimiento',\
'Redes de conocimiento',\
'Redes de conocimiento');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
2,\
'Producción bibliográfica - Trabajos en eventos (Capítulos de memoria) - Completo',\
'Capítulos de memoria',\
'Capítulos de memoria');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
3,\
'Producción técnica - Presentación de trabajo - Comunicación',\
'Presentación de trabajo',\
'Trabajo de Comunicación');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
4,\
'Demás trabajos - Demás trabajos - Póster',\
'Demás trabajos',\
'Poster');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
5,\
'Producción técnica - Presentación de trabajo - Conferencia',\
'Presentación de trabajo',\
'Conferencia');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
6,\
'Producción técnica - Presentación de trabajo - Ponencia',\
'Presentación de trabajo',\
'Ponencia');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
7,\
'Estrategias pedagógicas para el fomento a la CTI',\
'Estrategias pedagógicas',\
'Estrategias pedagógicas');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
8,\
'Producción bibliográfica - Artículo - Publicado en revista especializada',\
'Publicado en revista especializada',\
'Artículo');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
9,\
'Producción bibliográfica - Artículo - Corto (Resumen)',\
'Corto (Resumen)',\
'Artículo');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
10,\
'Estrategias pedagógicas para el fomento a la CTI',\
'Estrategias pedagógicas',\
'Estrategias pedagógicas');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
11,\
'Producción bibliográfica - Artículo - Caso clínico',\
'Caso Clínico',\
'Artículo');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
12,\
'Producción bibliográfica - Trabajos en eventos (Capítulos de memoria) - Resumen',\
'Capítulo de Memoria',\
'Resumen');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
13,\
'Producción técnica - Presentación de trabajo - Congreso',\
'Congreso',\
'Congreso');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
14,\
'Producción técnica - Presentación de trabajo - Simposio',\
'Simposio',\
'Simposio');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
15,\
'Producción técnica - Presentación de trabajo - Seminario',\
'Seminario',\
'Seminario');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
16,\
'Producción técnica - Presentación de trabajo - Otro',\
'Otro',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
17,\
'Producción bibliográfica - Libro - Libro resultado de investigación',\
'Libro resultado de investigación',\
'Libro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
18,\
'Producción bibliográfica - Libro - Otro libro publicado',\
'Otro libro publicado',\
'Libro - Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
19,\
'Producción bibliográfica - Libro - Libro pedagógico y/o de divulgación',\
'Libro pedagógico y/o de divulgación',\
'Libro - pedagógico');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
20,\
'Otro capítulo de libro publicado',\
'Otro capítulo de libro',\
'Capítulo de libro - Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
21,\
'Capítulo de libro',\
'Capítulo de libro',\
'Capítulo de libro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
22,\
'Producción bibliográfica - Otro artículo publicado - Periódico de noticias',\
'Periódico de noticias',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
23,\
'Producción bibliográfica - Otro artículo publicado - Revista de divulgación',\
'Revista de divulgación',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
24,\
'Producción bibliográfica - Otro artículo publicado - Cartas al editor',\
'Cartas al editor',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
25,\
'Producción bibliográfica - Otro artículo publicado - Reseñas de libros',\
'Reseñas de libros',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
26,\
'Producción bibliográfica - Otro artículo publicado - Columna de opinión',\
'Columnas de opinión',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
27,\
'Producción bibliográfica - Documento de trabajo (Working Paper)',\
'Documento de trabajo (Working Paper)',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
28,\
'Producción bibliográfica - Traducciones - Artículo',\
'Traducciones - Artículo',\
'Traducciones');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
29,\
'Producción bibliográfica - Traducciones - Libro',\
'Traducciones - Libro',\
'Traducciones');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
30,\
'Producción bibliográfica - Traducciones - Otra',\
'Traducciones - Otra',\
'Traducciones');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
31,\
'Producción bibliográfica - Otra producción bibliográfica - Introducción',\
'Introducción',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
32,\
'Producción bibliográfica - Otra producción bibliográfica - Prólogo',\
'Prólogo',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
33,\
'Producción bibliográfica - Otra producción bibliográfica - Epílogo',\
'Epílogo',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
34,\
'Producción bibliográfica - Otra producción bibliográfica - Otra',\
'Otra',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
35,\
'Producción técnica - Softwares - Computacional',\
'Software',\
'Software');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
36,\
'Producción técnica - Productos tecnológicos - Gen Clonado',\
'Productos tecnológicos - Gen Clonado',\
'Productos tecnológicos');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
37,\
'Producción técnica - Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada',\
'Productos tecnológicos - Coleccion biologica de referencia con informacion sistematizada',\
'Productos tecnológicos');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
38,\
'Producción técnica - Productos tecnológicos - Otro',\
'Productos tecnológicos - Otro',\
'Productos tecnológicos');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
39,\
'Producción técnica - Productos tecnológicos - Base de datos de referencia para investigación',\
'Productos tecnológicos - Base de datos de referencia para investigación',\
'Productos tecnológicos');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
40,\
'Producción técnica - Diseño Industrial',\
'Diseño Industrial',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
41,\
'Producción técnica - Esquema de circuito integrado',\
'Esquema de circuito integrado',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
42,\
'Producción técnica - Innovaciones generadas de producción empresarial - Organizacional',\
'Innovaciones generadas de producción empresarial - Organizacional',\
'Innovaciones');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
43,\
'Producción técnica - Innovaciones generadas de producción empresarial - Empresarial',\
'Innovaciones generadas de producción empresarial - Empresarial',\
'Innovaciones');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
44,\
'Producción técnica - Variedad animal',\
'Variedad animal',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
45,\
'Producción técnica - Innovación de proceso o procedimiento',\
'Innovación de proceso o procedimiento',\
'Innovación');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
46,\
'Producción técnica - Cartas, mapas o similares - Aerofotograma',\
'Aerofotograma',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
47,\
'Producción técnica - Cartas, mapas o similares - Carta',\
'Carta',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
48,\
'Producción técnica - Cartas, mapas o similares - Fotograma',\
'Fotograma',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
49,\
'Producción técnica - Cartas, mapas o similares - Mapa',\
'Mapa',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
50,\
'Producción técnica - Cartas, mapas o similares - Otra',\
'Otra',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
51,\
'Producción técnica - Variedad vegetal',\
'Variedad vegetal',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
52,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de proyectos de IDI',\
'Servicios de proyectos de IDI',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
53,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Comercialización de tecnología',\
'Comercialización de tecnología',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
54,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Análisis de competitividad',\
'Análisis de competitividad',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
55,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Informe técnico',\
'Informe técnico',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
56,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Otro',\
'Otro',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
57,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Acciones de transferencia tecnológica',\
'Acciones de transferencia tecnológica',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
58,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Desarrollo de productos',\
'Desarrollo de productos',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
59,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Implementación de sistemas de análisis',\
'Implementación de sistemas de análisis',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
60,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Consultoría en artes,arquitectura y diseño',\
'Consultoría en artes,arquitectura y diseño',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
61,\
'Producción técnica - Regulación, norma, reglamento o legislación - Ambiental o de Salud',\
'Regulación, norma, reglamento o legislación - Ambiental o de Salud',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
62,\
'Producción técnica - Regulación, norma, reglamento o legislación - Educativa',\
'Regulación, norma, reglamento o legislación - Educativa',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
63,\
'Producción técnica - Regulación, norma, reglamento o legislación - Social',\
'Regulación, norma, reglamento o legislación - Social',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
64,\
'Producción técnica - Regulación, norma, reglamento o legislación - Técnica',\
'Regulación, norma, reglamento o legislación - Técnica',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
65,\
'Producción técnica - Regulación, norma, reglamento o legislación - Guía de práctica clínica',\
'Regulación, norma, reglamento o legislación - Guía de práctica clínica',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
66,\
'Producción técnica - Regulación, norma, reglamento o legislación - Proyecto de ley',\
'Regulación, norma, reglamento o legislación - Proyecto de ley',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
67,\
'Producción técnica - Reglamento Técnico',\
'Reglamento Técnico',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
68,\
'Producción técnica - Empresa de base tecnológica - Spin-off',\
'Empresa de base tecnológica - Spin-off',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
69,\
'Producción técnica - Empresa de base tecnológica - Start-up',\
'Empresa de base tecnológica - Start-up',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
70,\
'Demás trabajos - Demás trabajos',\
'Demás trabajos',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
71,\
'Producción técnica - Signos',\
'Signos',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
72,\
'Producción técnica - Softwares - Multimedia',\
'Multimedia',\
'Software');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
73,\
'Producción técnica - Softwares - Otra',\
'Softwares - Otra',\
'Software');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
74,\
'Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Básica',\
'Técnica - Básica',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
75,\
'Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Ensayo',\
'Técnica - Ensayo',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
76,\
'Producción técnica - Consultoría Científico Tecnológica e Informe Técnico - Servicios de Proyectos de I+D+I',\
'Servicios de Proyectos de I+D+I',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
77,\
'Producción técnica - Regulación, norma, reglamento o legislación - Técnica - Proceso',\
'Técnica - Proceso',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
78,\
'Datos complementarios - Participación en comités de evaluación - Profesor titular',\
'Participación en comités de evaluación - Profesor titular',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
79,\
'Datos complementarios - Participación en comités de evaluación - Concurso docente',\
'Participación en comités de evaluación - Concurso docente',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
80,\
'Datos complementarios - Participación en comités de evaluación - Jefe de cátedra',\
'Participación en comités de evaluación - Jefe de cátedra',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
81,\
'Datos complementarios - Participación en comités de evaluación - Evaluación de cursos',\
'Participación en comités de evaluación - Evaluación de cursos',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
82,\
'Datos complementarios - Participación en comités de evaluación - Acreditación de programas',\
'Participación en comités de evaluación - Acreditación de programas',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
83,\
'Datos complementarios - Participación en comités de evaluación - Asignación de becas',\
'Participación en comités de evaluación - Asignación de becas',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
84,\
'Datos complementarios - Participación en comités de evaluación - Otra',\
'Participación en comités de evaluación - Otra',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
85,\
'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Pregrado',\
'Jurado Pregrado',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
86,\
'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialización',\
'Jurado Especialización',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
87,\
'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Especialidad Médica',\
'Jurado Especialidad Médica',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
88,\
'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Maestría',\
'Jurado Maestría',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
89,\
'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Doctorado',\
'Jurado Doctorado',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
90, \
'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Otra',\
'<NAME>',\
'Comités');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
92, \
'Producción técnica - Prototipo - Servicios',\
'Prototipo - Servicios',\
'Servicios');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
93, \
'Producción técnica - Plantas piloto - Planta piloto',\
'Plantas piloto - Planta piloto',\
'Planta piloto');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
94, \
'Producción técnica - Prototipo - Industrial',\
'Prototipo - Industrial',\
'Industrial');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
95, \
'Producción técnica - Signos Distintivos - Marcas',\
'Signos Distintivos - Marcas',\
'Marcas');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
96, \
'Producción técnica - Signos Distintivos - Nombres comerciales',\
'Signos Distintivos - Nombres comerciales',\
'Nombres comerciales');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
97, \
'Apropiación - Eventos Cientificos - Otro',\
'Eventos Cientificos - Otro',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
98, \
'Apropiación - Eventos Cientificos - Taller',\
'Eventos Cientificos - Taller',\
'Taller');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
99, \
'Apropiación - Eventos Cientificos - Congreso',\
'Eventos Cientificos - Congreso',\
'Congreso');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
100, \
'Apropiación - Eventos Cientificos - Encuentro',\
'Eventos Cientificos - Encuentro',\
'Encuentro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
101, \
'Apropiación - Eventos Cientificos - Seminario',\
'Eventos Cientificos - Seminario',\
'Seminario');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
102, \
'Apropiación - Eventos Cientificos - Simposio',\
'Eventos Cientificos - Simposio',\
'Simposio');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
103, \
'Apropiación - Eventos Cientificos - Informes de investigación',\
'Eventos Cientificos - Informes de investigación',\
'Informes de investigación');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
104, \
'Apropiación - Impresos - Manual',\
'Impresos - Manual',\
'Manual');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
105, \
'Apropiación - Impresos - Boletín',\
'Impresos - Boletín',\
'Boletín');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
106, \
'Apropiación - Contenido Multimedia - Comentario',\
'Contenido Multimedia - Comentario',\
'Comentario');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
107, \
'Apropiación - Contenido Multimedia - Entrevista',\
'Contenido Multimedia - Entrevista',\
'Entrevista');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
108, \
'Apropiación - Contenido Virtual - Página Web',\
'Contenido Virtual - Página Web',\
'Página Web');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
109, \
'Apropiación - Estrategias de Comunicación - Estrategias de Comunicación',\
'Estrategias de Comunicación - Estrategias de Comunicación',\
'Estrategias de Comunicación');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
110, \
'Apropiación - Estrategias Pedagógicas - Estrategias Pedagógicas para el fomento a la CTI',\
'Estrategias Pedagógicas - Estrategias Pedagógicas para el fomento a la CTI',\
'Estrategias Pedagógicas para el fomento a la CTI');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
111, \
'Apropiación - Participación Ciudadana - Participación Ciudadana en Proyectos de CTI',\
'Participación Ciudadana - Participación Ciudadana en Proyectos de CTI',\
'Participación Ciudadana en Proyectos de CTI');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
112, \
'Apropiación - Participación Ciudadana - Espacios de Participación Ciudadana',\
'Participación Ciudadana - Espacios de Participación Ciudadana',\
'Espacios de Participación Ciudadana');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
113, \
'Producción en arte, arquitectura y diseño - Obras o productos - Obras o productos',\
'Obras o productos - Obras o productos',\
'Obras o productos');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
114, \
'Actividades de Formación - Actividades de Formación - Asesorías al Programa Ondas',\
'Actividades de Formación - Asesorías al Programa Ondas',\
'Asesorías al Programa Ondas');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
115, \
'Actividades de Formación - Curso de Corta Duración Dictados - Perfeccionamiento',\
'Curso de Corta Duración Dictados - Perfeccionamiento',\
'Perfeccionamiento');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
116, \
'Actividades de Formación - Curso de Corta Duración Dictados - Extensión Extracurricular',\
'Curso de Corta Duración Dictados - Extensión Extracurricular',\
'Extensión Extracurricular');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
117, \
'Actividades de Formación - Trabajos dirigidos/turorías - Monografía de conclusión de curso',\
'Trabajos dirigidos/turorías - Monografía de conclusión de curso',\
'Monografía de conclusión de curso');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
118, \
'Actividades de Formación - Curso de Corta Duración Dictados - Otro',\
'Curso de Corta Duración Dictados - Otro',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
119, \
'Proyectos - Investigación, desarrollo e innovación - Proyectos',\
'Investigación, desarrollo e innovación - Proyectos',\
'Proyectos');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
120, \
'Apropiación social y circularción del conocimiento - Revista',\
'Investigación, desarrollo e innovación - Revista',\
'Revista');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
121, \
'Apropiación social y circularción del conocimiento - Cartilla',\
'Contenidos Impresos - Cartilla',\
'Cartilla');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
122, \
'Actividades de Formación - Cursos de Corta Duración - Especialización',\
'Cursos de Corta Duración - Especialización',\
'Especialización');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
123, \
'Apropiación - Contenidos Multimedia - Otro',\
'Contenidos Multimedia - Otro',\
'Otro');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
124, \
'Apropiación - Contenidos Virtuales - Blog',\
'Contenidos Virtuales - Blog',\
'Blog');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
125, \
'Apropiación - Contenidos Virtuales - Aplicativo',\
'Contenidos Virtuales - Aplicativo',\
'Aplicativo');\n\
REPLACE INTO `uapa_db`.`v_colciencias_tipo_producto` ( \
`COD_TIPO_PRODUCTO`,\
`TIPO_PRODUCTO_COL`,\
`SUB_TIPO_PRODUCTO_COL`,\
`TIPO_UAPA`) VALUES (\
91, \
'Datos complementarios - Jurado/Comisiones evaluadoras de trabajo de grado - Curso de perfeccionamiento/especialización',\
'Jurado Especializaciones',\
'Comités');\n"]
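#***************************************************************************
#Example (hypothetical sketch, not part of the original module): one way the
#REPLACE statements above could be executed. mysql.connector and the
#connection parameters are illustrative assumptions only.
#***************************************************************************
#import mysql.connector
#cnx = mysql.connector.connect(user='uapa_user', password='...', database='uapa_db')
#cur = cnx.cursor()
#for stmt in inv_colciencias_tipo_producto[0].split(';\n'):
#    if stmt.strip():
#        cur.execute(stmt)  # each REPLACE upserts one row keyed by cod_tipo_producto
#cnx.commit()
#cnx.close()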
| 1.742188 | 2 |
src/models/scraping_session.py | magnublo/msc-darkweb-scraping | 0 | 12793321 | <gh_stars>0
import datetime
from sqlalchemy import Column, Integer, DateTime, CHAR, Boolean, String
from definitions import MARKET_NAME_COLUMN_LENGTH, Base
TABLE_NAME = 'scraping_session'
PRIMARY_KEY = 'id'
TABLE_NAME_AND_PRIMARY_KEY = TABLE_NAME+"."+PRIMARY_KEY
class ScrapingSession(Base):
__tablename__ = TABLE_NAME
id = Column(PRIMARY_KEY, Integer, primary_key=True)
market = Column(CHAR(MARKET_NAME_COLUMN_LENGTH))
duplicates_encountered = Column(Integer)
nr_of_threads = Column(Integer)
initial_queue_size = Column(Integer)
time_started = Column(DateTime, default=datetime.datetime.utcnow)
time_finished = Column(DateTime)
exited_gracefully = Column(Boolean, default=False, index=True)
host_system_fqdn = Column(String(32)) | 2.40625 | 2 |
autoio-interfaces/autorun/pac99.py | lpratalimaffei/autoio | 0 | 12793322 | <gh_stars>0
""" Runners for PAC99 program
"""
import os
import automol
import pac99_io.reader
from autorun._run import from_input_string
# Read the new groups file stored with src
def _new_groups_str():
""" Read the new groups string
"""
src_path = os.path.dirname(os.path.realpath(__file__))
new_groups_path = os.path.join(src_path, 'aux', NEW_GROUPS_NAME)
with open(new_groups_path) as fobj:
new_groups_str = fobj.read()
return new_groups_str
NEW_GROUPS_NAME = 'new.groups'
INPUT_NAME = '{}.i97'
OUTPUT_NAMES = ('{}.o97', '{}.c97')
# Specialized runner
def nasa_polynomial(script_str, run_dir, input_str, name, formula,
convert=False):
""" Generates NASA polynomial from run
        :param convert: convert the polynomial to the more standard CHEMKIN format
        :type convert: bool
"""
# Run PAC99 to get the output file
formula_str = automol.formula.string(formula)
output_strs = direct(script_str, run_dir, input_str, formula_str)
# Obtain the NASA polynomial, convert if necessary
if output_strs is not None:
c97_str = output_strs[1]
poly_str = pac99_io.reader.nasa_polynomial(c97_str)
if convert:
poly_str = pac99_io.pac2ckin_poly(name, formula, poly_str)
else:
poly_str = None
return poly_str
# Generalized runners
def direct(script_str, run_dir, input_str, formula_str):
""" Generates an input file for a ThermP job runs it directly.
Need formula input to run the script
:param input_str: string of input file with .i97 suffix
"""
aux_dct = {NEW_GROUPS_NAME: _new_groups_str()}
input_name = INPUT_NAME.format(formula_str)
output_names = tuple(name.format(formula_str) for name in OUTPUT_NAMES)
output_strs = from_input_string(
script_str, run_dir, input_str,
aux_dct=aux_dct,
input_name=input_name,
output_names=output_names)
if not _check(output_strs):
output_strs = None
return output_strs
def _check(output_strs):
""" assess the output (.o97, .c97 fileS)
"""
o97_output_str, c97_output_str = output_strs
success = True
if 'INSUFFICIENT DATA' in o97_output_str:
print('*ERROR: PAC99 fit failed, maybe increase temperature ranges?')
success = False
if not c97_output_str:
print('No polynomial produced from PAC99 fits, check for errors')
success = False
return success
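# Hypothetical usage sketch (the paths, formula dict and script string below are
# illustrative assumptions, not values shipped with this module):
#
#     formula = {'C': 1, 'O': 2}                    # automol-style formula dict
#     poly_str = nasa_polynomial(pac99_script_str,  # shell script that launches PAC99
#                                '/tmp/pac99_run',  # scratch run directory
#                                i97_input_str,     # contents of the .i97 input file
#                                name='CO2', formula=formula, convert=True)
#
# Internally, direct() writes {formula}.i97 alongside the bundled new.groups file,
# runs the script via from_input_string, and returns the (.o97, .c97) output strings.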
| 2.515625 | 3 |
main.py | Kerono4ka/modified-genetic-algorithm-for-calculating-the-chromatic-number-of-a-graph | 0 | 12793323 | <filename>main.py
from random import choice, randint
from time import perf_counter
from time import sleep
import operator
import json
from genetic.gene import Gene
from genetic.population import Population
from graph.graph import Graph
from util.Stats import Stats
from util.params import Params
from util.util import get_random_points, get_random_edges
from graph.point import Point
from graph.edge import Edge
# graph = None
# this function also exists in population.py, so it would not have to be duplicated;
# in population.py it is used by another internal function, while here it is used for this module's own needs
def get_random_parent(no_points, colors_used):
sample_numbers = [0]
for i in range(no_points - 1):
sample_numbers.append(randint(0, colors_used - 1))
return Gene(sample_numbers)
def initialize_population(graph, colors_used):
# n = Params.initial_population_size
population = []
for i in range(Params.initial_population_size):
population.append(get_random_parent(graph.no_points, colors_used))
return Population(population)
def do_genetic(population, graph, colors_used):
""" крутится цикл создания потомства, оценки, опять. Пока не будет stop_genetic_after_count
повторов """
iterations = 0
    last_n = [float('Inf')] * Params.stop_genetic_after_count # a list of stop_genetic_after_count elements,
    # initially filled with infinities
best_gene = None
while True:
iterations += 1
        max_evaluation = population.get_max_evaluation(graph) # highest fitness score in the current population
        last_n.pop(0) # drop the oldest score from the window
        last_n.append(max_evaluation) # append the current max fitness score to the end of the window
flag = False
        # the flag stays False only if all stop_genetic_after_count stored scores are identical,
        # and in that case the loop breaks: the GA is considered to have reached its best result
for i in range(len(last_n)):
            if i != 0 and last_n[i] != last_n[i - 1]: # two consecutive stored scores still differ
                flag = True # so the search has not converged yet and the loop continues
if not flag:
break
print(max_evaluation)
        best_gene = population.best_n(1, graph)[0] # returns the single best chromosome
# print(best_gene)
new_population = population_propogation_default(population, graph, colors_used)
population = Population(new_population)
Graph.no_colours = len(set(best_gene.array))
eval = best_gene.evaluate(graph)
no_colors = len(set(best_gene.array))
conflicts = -1 * (eval + (no_colors * Params.penalty_per_color_used)) / Params.penalty_same_color
return iterations, best_gene, conflicts
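# Illustration of the stopping rule in do_genetic (hypothetical numbers, assuming
# Params.stop_genetic_after_count == 3): the last_n window evolves like
#   [inf, inf, inf] -> [inf, inf, -40.0] -> [inf, -40.0, -38.5]
#   -> [-40.0, -38.5, -38.5] -> [-38.5, -38.5, -38.5]
# and the while-loop only exits once every score kept in the window is identical.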
def population_propogation_default(population, graph, colors_used):
"""создание популяции детишек из популяции родителей"""
parents_crossover = population.best_n(Params.crossover_parents, graph)
parents_mutation = population.best_n(Params.mutation_parents, graph)
new_population = []
new_population.extend(population.best_n(Params.propogation_count, graph))
new_population.extend(population.crossover(parents_crossover))
new_population.extend(population.mutate(parents_mutation, graph))
new_population.extend(population.random(Params.random_count, graph.no_points, colors_used))
return new_population
def find_out_chorom_num(graph):
diction = {}
for i in range(graph.no_points):
diction.update({i: 0})
for edge in graph.edges:
diction[edge.start] += 1
diction[edge.end] += 1
return max(diction.items(), key=operator.itemgetter(1))[1]
def work(points, edges):
t1 = perf_counter()
graph = Graph(points, edges)
chrom_num = find_out_chorom_num(graph)
min = 1
save_result = None
iterations_sum = 0
while chrom_num > min:
result = int((chrom_num + min) / 2)
population = initialize_population(graph, result)
iterations, best_gene, conflicts = do_genetic(population, graph, result)
colors_used = len(set(best_gene.array))
iterations_sum += iterations
time = perf_counter() - t1
Stats(iterations, time, colors_used, best_gene, conflicts, graph)
if conflicts == 0:
chrom_num = result
save_result = result
else:
min = result + 1
time = perf_counter() - t1
print("result: ")
print("colors: ", save_result, " time ", time, "iterations ", iterations_sum)
def in_file():
no_points = 178
no_edges = 1484
if no_edges > no_points * (no_points - 1) / 2:
raise ValueError('There are too many edges {}'.format(no_edges))
points, points_list = get_random_points(no_points)
edges, edges_list = get_random_edges(no_points, no_edges)
with open('points', 'w') as f:
json.dump(points_list, f)
with open('edges', 'w') as f:
json.dump(edges_list, f)
def out_file():
with open('points', 'r') as f:
points_list = json.load(f)
with open('edges', 'r') as f:
edges_list = json.load(f)
points = []
for point in points_list:
x = point[0]
y = point[1]
new_point = Point(x, y)
points.append(new_point)
edges = []
for edge in edges_list:
start = edge[0]
end = edge[1]
new_edge = Edge(start, end)
edges.append(new_edge)
work(points, edges)
#in_file()
out_file() | 2.78125 | 3 |
mikidown/sandbox.py | ckolumbus/mikidown | 0 | 12793324 | <gh_stars>0
import os
import shutil
from PyQt4.QtCore import QSettings
from PyQt4.QtGui import QApplication
from .mikiwindow import MikiWindow
from .mikibook import Mikibook
from .config import Setting
class Sandbox():
def __init__(self):
path = os.path.join(os.getcwd(), "test_notebook")
Mikibook.initialise("test", path)
settings = Setting([["test", path]])
self.window = MikiWindow(settings)
self.window.show()
print("...Create notebook works")
self.newPage()
self.setText()
self.pageLink()
self.delPage()
self.window.readmeHelp()
print("Start manual testing in sandbox")
def newPage(self):
self.window.notesTree.newPage('pageOne')
self.window.notesTree.newSubpage('subpageOne')
itemOne = self.window.notesTree.pageToItem('pageOne')
self.window.notesTree.setCurrentItem(itemOne)
self.window.notesTree.newPage('pageTwo')
print("...newPage works")
def setText(self):
self.window.liveView(True)
self.window.notesEdit.setText("# head1\n\n"
"## head2\n"
"[subpageOne](pageOne/subpageOne)")
self.window.saveCurrentNote()
self.window.notesView.updateView()
#self.window.notesView.setVisible(True)
elemCol = self.window.notesView.page(
).mainFrame().findAllElements("a")
element = elemCol.at(2)
element.evaluateJavaScript("this.click()")
noteName = self.window.notesTree.currentItem().text(0)
assert(noteName == "subpageOne")
print("...setText works")
def pageLink(self):
self.window.notesEdit.setText("[head2](pageTwo#head2)")
self.window.saveCurrentNote()
self.window.notesView.updateView()
element = self.window.notesView.page(
).mainFrame().findFirstElement("a")
element.evaluateJavaScript("this.click()")
noteName = self.window.notesTree.currentItem().text(0)
assert(noteName == "pageTwo")
print("...pageLink works")
def delPage(self):
# This will delete both pageOne and subpageOne
item = self.window.notesTree.pageToItem('pageOne')
self.window.notesTree.delPage(item)
item = self.window.notesTree.pageToItem('pageTwo')
self.window.notesTree.delPage(item)
print("...delPage works")
def cleanUp(self):
""" When quitting mikidown, the whooshProcess may take time to finish.
Terminate whooshProcess to ensure shutil.rmtree success.
"""
shutil.rmtree("test_notebook")
print("...Cleaned up")
| 2.328125 | 2 |
projecteuler/p0012_test.py | mccxj/online-judge-code-example | 2 | 12793325 | import unittest
import code_helper
class Test0012(unittest.TestCase):
def test_problem(self):
primes = list(code_helper.range_prime(10000))
triangle_number = -1
for n in range(7000, 20000):
triangle_number = n * (n + 1) / 2
divisors = 1
s = triangle_number
for prime in primes:
if s < prime:
break
if s % prime == 0:
time = 1
while s % prime == 0:
s /= prime
time += 1
divisors *= time
if divisors > 500:
break
self.assertEqual(triangle_number, 76576500) | 3.25 | 3 |
ds-sdk-mini/DeepSecurity/antimalware.py | zachwhaley/thus | 24 | 12793326 | <filename>ds-sdk-mini/DeepSecurity/antimalware.py
# Copyright (c) 2020. <NAME>. All Rights Reserved.
#import connect
#import config
class Antimalware:
def __init__(self, config, connection):
self._config=config
self._connection = connection
##Antimalware config
def list(self):
return self._connection.get(url='/antimalwareconfigurations')
def create(self, payload):
return self._connection.post(url='/antimalwareconfigurations', data=payload)
def describe(self, antimalwareID):
return self._connection.get(url='/antimalwareconfigurations/{antiMalwareID}'.format(antiMalwareID=antimalwareID))
def modify(self, antimalwareID, payload):
return self._connection.post(url='/antimalwareconfigurations/{antiMalwareID}'.format(antiMalwareID=antimalwareID), data=payload)
def delete(self, antimalwareID):
return self._connection.delete(url='/antimalwareconfigurations/{antiMalwareID}'.format(antiMalwareID=antimalwareID))
def search(self, payload):
return self._connection.post(url='/antimalwareconfigurations/search', data=payload)
##Directory lists
def listdirectorylists(self):
return self._connection.get(url='/directorylists')
def createdirectorylist(self, payload):
return self._connection.post(url='/directorylists', data=payload)
def describedirectorylist(self, directoryListID):
return self._connection.get(url='/directorylists/{directoryListID}'.format(directoryListID=directoryListID))
def modifydirectorylist(self, directoryListID, payload):
return self._connection.post(url='/directorylists/{directoryListID}'.format(directoryListID=directoryListID), data=payload)
def deletedirectorylist(self, directoryListID):
return self._connection.delete(url='/directorylists/{directoryListID}'.format(directoryListID=directoryListID))
def searchdirectorylist(self, payload):
return self._connection.post(url='/directorylists/search', data=payload)
##File Extension
def listfileExtensionlists(self):
return self._connection.get(url='/fileextensionlists')
def createdfileExtensionlist(self, payload):
return self._connection.post(url='/fileextensionlists', data=payload)
def describefileExtensionlist(self, fileExtensionListID):
return self._connection.get(url='/fileextensionlists/{fileExtensionListID}'.format(fileExtensionListID=fileExtensionListID))
def modifyfileExtensionlist(self, fileExtensionListID, payload):
return self._connection.post(url='/fileextensionlists/{fileExtensionListID}'.format(fileExtensionListID=fileExtensionListID), data=payload)
def deletefileExtensionlist(self, fileExtensionListID):
return self._connection.delete(url='/fileextensionlists/{fileExtensionListID}'.format(fileExtensionListID=fileExtensionListID))
def searchfileExtensionlist(self, payload):
return self._connection.post(url='/fileextensionlists/search', data=payload)
##File
def listfilelists(self):
return self._connection.get(url='/filelists')
def createdfilelist(self, payload):
return self._connection.post(url='/filelists', data=payload)
def describefilelist(self, fileListID):
return self._connection.get(url='/filelists/{fileListID}'.format(fileListID=fileListID))
def modifyfilelist(self, fileListID, payload):
return self._connection.post(url='/filelists/{fileListID}'.format(fileListID=fileListID), data=payload)
def deletefilelist(self, fileListID):
return self._connection.delete(url='/filelists/{fileListID}'.format(fileListID=fileListID))
def searchfilelist(self, payload):
return self._connection.post(url='/filelists/search', data=payload)
##Schedules
def listschedules(self):
return self._connection.get(url='/schedules')
def createdschedules(self, payload):
return self._connection.post(url='/schedules', data=payload)
def describeschedule(self, schedulesID):
return self._connection.get(url='/schedules/{scheduleID}'.format(scheduleID=schedulesID))
def modifyschedule(self, schedulesID, payload):
return self._connection.post(url='/schedules/{scheduleID}'.format(scheduleID=schedulesID), data=payload)
def deleteschedule(self, schedulesID):
return self._connection.delete(url='/schedules/{scheduleID}'.format(scheduleID=schedulesID))
def searchschedule(self, payload):
return self._connection.post(url='/schedules/search', data=payload)
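    # Illustrative usage sketch (the client variable and payload fields below are
    # assumptions, not part of the original module). Each helper simply wraps one
    # "common objects" endpoint on the shared connection, e.g.:
    #
    #     client.createdirectorylist({"name": "excluded-dirs", "items": ["C:\\Temp\\"]})
    #     client.searchschedule({"maxItems": 10})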
| 2.390625 | 2 |
setup.py | espdev/scikit-curve | 3 | 12793327 | <filename>setup.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import pathlib
from setuptools import setup, find_packages
ROOT_DIR = pathlib.Path(__file__).parent
ROOT_PKG = 'skcurve'
def get_version():
version_info = {}
version_file = ROOT_DIR / ROOT_PKG / '_version.py'
with version_file.open() as f:
exec(f.read(), version_info)
return version_info['__version__']
def get_long_description():
readme_file = ROOT_DIR / 'README.md'
return readme_file.read_text(encoding='utf-8')
setup(
name='scikit-curve',
version=get_version(),
python_requires='>=3.6, <4',
install_requires=[
'numpy',
'scipy',
'networkx',
'csaps >=0.9.0, <1',
'cached_property',
'typing-extensions',
],
extras_require={
'plot': [
'matplotlib',
],
'docs': [
'sphinx >=2.3',
'numpydoc',
'matplotlib',
],
'examples': [
'jupyter',
'matplotlib',
],
'tests': [
'pytest',
'coverage',
],
},
packages=find_packages(exclude=['tests', 'examples']),
url='https://github.com/espdev/scikit-curve',
project_urls={
'Documentation': 'https://scikit-curve.readthedocs.io',
'Code': 'https://github.com/espdev/scikit-curve',
'Issue tracker': 'https://github.com/espdev/scikit-curve/issues',
},
license='BSD 3-Clause',
author='<NAME>',
author_email='<EMAIL>',
description='A toolkit to manipulate n-dimensional geometric curves in Python',
long_description=get_long_description(),
long_description_content_type='text/markdown',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
],
)
| 1.554688 | 2 |
config.py | Utekhin/cats_bot | 0 | 12793328 | <gh_stars>0
# Api key for developers giphy api
GIPHY_API_KEY = '<KEY>'
#
BOT_API_TOKEN = '<KEY>'
# Text blocks
WELCOME_TEXT = "Hi there!\nI'm catlover bot\nUse buttons or type anything to get your cats"
RENDER_TEXT = "Input your text to place it over random cat pic for your gif"
RENDER_TEXT_WAIT = 'Wait while the cats are preparing a gif for you'
ABOUT_TEXT = 'Get a cat gif using cataas.com'
RENDER_TEXT_URL = 'https://cataas.com/cat/gif/says/{text}?filter=sepia&color=orange&size=40&type=or'
| 2.234375 | 2 |
impute/decomposition/tests/conftest.py | nimily/low-rank-impute | 0 | 12793329 | <filename>impute/decomposition/tests/conftest.py
import numpy as np
import numpy.linalg as npl
import numpy.random as npr
import pytest
@pytest.fixture(params=[
(1, 50, 40, 2),
(2, 90, 100, 5),
(3, 1500, 1000, 10),
], name='low_rank_matrix')
def low_rank_matrix_fixture(request):
seed, n, m, r = request.param
npr.seed(seed)
u = npl.qr(npr.randn(n, r))[0]
s = np.sort(npr.uniform(0, 1, r))[::-1]
v = npl.qr(npr.randn(m, r))[0]
b = u @ np.diag(s) @ v.T
return b, r, u, s, v
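# Illustrative sketch (not part of the original suite): a test consuming this fixture
# can unpack the factorization and check basic invariants, e.g.
#
#     def test_low_rank_matrix_shape(low_rank_matrix):
#         b, r, u, s, v = low_rank_matrix
#         assert b.shape == (u.shape[0], v.shape[0])
#         assert npl.matrix_rank(b) == r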
| 2.046875 | 2 |
paginator/paginators.py | pydanny/dj-paginator | 30 | 12793330 | from django.conf import settings
DEFAULT_WINDOW = getattr(settings, 'PAGINATOR_DEFAULT_WINDOW', 4)
def paginate(context, window=DEFAULT_WINDOW, hashtag=''):
"""
Renders the ``pagination/pagination.html`` template, resulting in a
Digg-like display of the available pages, given the current page. If there
are too many pages to be displayed before and after the current page, then
    ellipses will be used to indicate the undisplayed gap between page numbers.
Requires one argument, ``context``, which should be a dictionary-like data
structure and must contain the following keys:
``paginator``
A ``Paginator`` or ``QuerySetPaginator`` object.
``page_obj``
This should be the result of calling the page method on the
aforementioned ``Paginator`` or ``QuerySetPaginator`` object, given
the current page.
This same ``context`` dictionary-like data structure may also include:
``getvars``
A dictionary of all of the **GET** parameters in the current request.
This is useful to maintain certain types of state, even when requesting
a different page.
"""
try:
paginator = context['paginator']
page_obj = context['page_obj']
page_suffix = context.get('page_suffix', '')
page_range = paginator.page_range
# Calculate the record range in the current page for display.
records = {'first': 1 + (page_obj.number - 1) * paginator.per_page}
records['last'] = records['first'] + paginator.per_page - 1
if records['last'] + paginator.orphans >= paginator.count:
records['last'] = paginator.count
# First and last are simply the first *n* pages and the last *n* pages,
# where *n* is the current window size.
first = set(page_range[:window])
last = set(page_range[-window:])
# Now we look around our current page, making sure that we don't wrap
# around.
current_start = page_obj.number-1-window
if current_start < 0:
current_start = 0
current_end = page_obj.number-1+window
if current_end < 0:
current_end = 0
current = set(page_range[current_start:current_end])
pages = []
# If there's no overlap between the first set of pages and the current
        # set of pages, then there's a possible need for elision.
if len(first.intersection(current)) == 0:
first_list = list(first)
first_list.sort()
second_list = list(current)
second_list.sort()
pages.extend(first_list)
diff = second_list[0] - first_list[-1]
# If there is a gap of two, between the last page of the first
# set and the first page of the current set, then we're missing a
# page.
if diff == 2:
pages.append(second_list[0] - 1)
# If the difference is just one, then there's nothing to be done,
            # as the pages need no elision and are correct.
elif diff == 1:
pass
# Otherwise, there's a bigger gap which needs to be signaled for
            # elision, by pushing a None value to the page list.
else:
pages.append(None)
pages.extend(second_list)
else:
unioned = list(first.union(current))
unioned.sort()
pages.extend(unioned)
# If there's no overlap between the current set of pages and the last
        # set of pages, then there's a possible need for elision.
if len(current.intersection(last)) == 0:
second_list = list(last)
second_list.sort()
diff = second_list[0] - pages[-1]
# If there is a gap of two, between the last page of the current
# set and the first page of the last set, then we're missing a
# page.
if diff == 2:
pages.append(second_list[0] - 1)
# If the difference is just one, then there's nothing to be done,
            # as the pages need no elision and are correct.
elif diff == 1:
pass
# Otherwise, there's a bigger gap which needs to be signaled for
            # elision, by pushing a None value to the page list.
else:
pages.append(None)
pages.extend(second_list)
else:
differenced = list(last.difference(current))
differenced.sort()
pages.extend(differenced)
to_return = {
'MEDIA_URL': settings.MEDIA_URL,
'request': context['request'],
'pages': pages,
'records': records,
'page_obj': page_obj,
'paginator': paginator,
'hashtag': hashtag,
'is_paginated': paginator.count > paginator.per_page,
'page_suffix': page_suffix,
}
if 'request' in context:
getvars = context['request'].GET.copy()
if 'page%s' % page_suffix in getvars:
del getvars['page%s' % page_suffix]
if len(getvars.keys()) > 0:
to_return['getvars'] = "&%s" % getvars.urlencode()
else:
to_return['getvars'] = ''
return to_return
except (KeyError, AttributeError):
return {}
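# Illustrative usage sketch (an assumption, not part of this module): inside a
# configured Django project this helper is typically wired up as an inclusion tag,
# so templates with ``paginator``/``page_obj`` in context can render the
# ``pagination/pagination.html`` template, e.g. in a templatetags module:
#
#     from django import template
#     from paginator.paginators import paginate
#
#     register = template.Library()
#     register.inclusion_tag('pagination/pagination.html', takes_context=True)(paginate)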
| 3.15625 | 3 |
aux/backend.py | bischjer/auxiliary | 0 | 12793331 | from twisted.internet import reactor, threads
import threading
import functools
import aux.protocol as protocol_module
class Backend(object):
def __init__(self):
self.thread = None
self.reactor = reactor
self.event = threading.Event()
        self.protocols = protocol_module
def start(self):
self.thread = threading.Thread(name='BackendThread',
target=self.start_reactor)
self.thread.start()
#The event.set is called when the reactor
#is completely initialized.
self.event.wait()
def stop(self):
self.reactor.callFromThread(self.reactor.stop)
while self.thread.is_alive():
# Do not just do .join() as this will block the mainthread
# in such a way that C-c will not work.
self.thread.join(timeout=0.01)
def start_reactor(self):
self.reactor.callWhenRunning(lambda: self.event.set())
self.reactor.run(installSignalHandlers=0)
def make_proxy(self, obj):
if isinstance(obj, Proxy):
raise AssertionError('Wrapping a Proxy in a Proxy will deadlock')
return Proxy(obj)
class Proxy(object):
def __init__(self, wrapped_obj):
self.__dict__['wrapped_obj'] = wrapped_obj
def __getattr__(self, attr):
if attr in ['wrapped_obj']:
return self.__dict__['wrapped_obj']
if hasattr(self.wrapped_obj, attr):
attr = getattr(self.wrapped_obj, attr)
if callable(attr):
return self.create_blocking_wrapper(attr)
return attr
raise KeyError('%s does not exist in %s' % (attr, self))
def __setattr__(self, attr, value):
setattr(self.wrapped_obj, attr, value)
def create_blocking_wrapper(self, callable_):
@functools.wraps(callable_)
def _blocked(*args, **kwargs):
return threads.blockingCallFromThread(reactor,
callable_,
*args,
**kwargs)
return _blocked
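# Illustrative usage sketch (SomeTwistedService is a placeholder assumption): the
# backend runs the reactor in its own thread, and make_proxy() turns reactor-side
# calls into blocking calls from the main thread via blockingCallFromThread.
#
#     backend = Backend()
#     backend.start()
#     proxy = backend.make_proxy(SomeTwistedService())
#     result = proxy.fetch()   # executed in the reactor thread, blocks here
#     backend.stop()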
| 2.59375 | 3 |
PyNeuralNetwork/PyNet/old/LoadNetwork.py | mattkjames7/PyNeuralNetwork | 0 | 12793332 | <filename>PyNeuralNetwork/PyNet/old/LoadNetwork.py
import numpy as np
import os
from .NeuralNetwork import NeuralNetwork
def LoadNetwork(FileName):
if os.path.isfile(FileName) == False:
print('file not found')
return None
f = open(FileName,'rb')
Trained = np.fromfile(f,dtype='bool8',count=1)[0]
L = np.fromfile(f,dtype='int32',count=1)[0]
s = np.fromfile(f,dtype='int32',count=L)
Lambda = np.fromfile(f,dtype='float32',count=1)[0]
    Range = np.fromfile(f,dtype='float32',count=1)[0]
mt = np.fromfile(f,dtype='int32',count=1)[0]
mcv = np.fromfile(f,dtype='int32',count=1)[0]
if mt > 0:
Xt = np.fromfile(f,dtype='float32',count=mt*(s[0]+1)).reshape((mt,s[0]+1))
yt = np.fromfile(f,dtype='float32',count=mt)
else:
Xt = np.array([],dtype='float32')
yt = np.array([],dtype='float32')
if mcv > 0:
Xcv = np.fromfile(f,dtype='float32',count=mcv*(s[0]+1)).reshape((mcv,s[0]+1))
ycv = np.fromfile(f,dtype='float32',count=mcv)
else:
Xcv = np.array([],dtype='float32')
ycv = np.array([],dtype='float32')
Theta = []
for i in range(0,L-1):
dim = [s[i+1],s[i]+1]
Theta.append(np.fromfile(f,dtype='float32',count=dim[0]*dim[1]).reshape((dim[0],dim[1])))
nSteps = np.fromfile(f,dtype='int32',count=1)[0]
nJ = np.fromfile(f,dtype='int32',count=1)[0]
if nSteps > 0:
Jt = np.fromfile(f,dtype='float32',count=nJ)
Jcv = np.fromfile(f,dtype='float32',count=nJ)
Acct = np.fromfile(f,dtype='float32',count=nJ)
Acccv = np.fromfile(f,dtype='float32',count=nJ)
else:
Jt = np.array([],dtype='float32')
Jcv = np.array([],dtype='float32')
Acct = np.array([],dtype='float32')
Acccv= np.array([],dtype='float32')
f.close()
net = NeuralNetwork(s,Lambda)
net.Trained = Trained
net.mt = mt
    net.mcv = mcv
net.Xt = Xt
net.yt = yt
net.Xcv = Xcv
net.ycv = ycv
net.Theta = Theta
net.Jt = Jt
net.Jcv = Jcv
net.Acct = Acct
net.Acccv = Acccv
return net
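# Illustrative usage sketch (the file name is an assumption):
#
#     net = LoadNetwork('trained_network.bin')
#     if net is not None:
#         print(net.Trained, net.mt, net.mcv)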
| 2.4375 | 2 |
python/models/apps.py | thejoeejoee/UPA-MIT-VUT-2020-2021 | 0 | 12793333 | from django.apps import AppConfig
class ModelsAppConfig(AppConfig):
name = 'models'
__all__ = ['ModelsAppConfig']
| 1.367188 | 1 |
scripts/numerics/test_fixedpoint_1D.py | jhwnkim/nanopores | 8 | 12793334 | <reponame>jhwnkim/nanopores<gh_stars>1-10
"""test (linearized) Scharfetter-Gummel-inspired fixed point PNP.
surprising conclusion: linearized is more robust numerically,
probably due to the exponential terms in nonlinear version.
for small applied voltage (bV=-0.1), both versions almost coincide.
the linear version converges for bV < 1.0. """
from nanopores import *
from nanopores.physics.simplepnps import *
add_params(
bV = -0.1, # [V]
rho = -0.0,
bulkcon = 300.,
imax = 10,
linearize = True,
inewton = 10,
)
# --- create 1D geometry ---
h = 20.
hmem = 3.
domain = Interval(-h/2, h/2)
membrane = Interval(-hmem/2, hmem/2)
lowerb = domain.boundary("left")
upperb = domain.boundary("right")
domain.addsubdomains(
fluid = domain - membrane,
membrane = membrane
)
domain.addboundaries(
lowerb = lowerb,
upperb = upperb,
chargedmembraneb = membrane.boundary(),
)
domain.params["lscale"] = 1e9
domain.synonymes = dict(
solid = "membrane",
bulkfluid = "fluid",
pore = set()
)
geo = domain.create_geometry(lc=.1)
phys_params = dict(
Membraneqs = rho,
bulkcon = bulkcon,
v0 = dict(upperb = 0., lowerb = bV),
)
phys = Physics("pore", geo, **phys_params)
# --- define and solve PDE ---
PNP = PNPFixedPoint if linearize else PNPFixedPointNonlinear
pnp = PNP(geo, phys, inewton=inewton, ipicard=imax, tolnewton=1e-4,
verbose=True, nverbose=True)
#t = Timer("solve")
pnp.solve()
#print "CPU time (solve): %s [s]" % (t.stop(),)
#pnp.visualize()
v, cp, cm = pnp.solutions()
plot1D({"potential": v}, (-h/2, h/2, 101), "x", dim=1, axlabels=("z [nm]", "potential [V]"))
plot1D({"c+": cp, "c-":cm}, (hmem/2, h/2, 101), "x", dim=1, axlabels=("z [nm]", "concentrations [mol/m^3]"))
showplots()
| 2.109375 | 2 |
examples/select.py | dduong42/gzuro | 0 | 12793335 | <reponame>dduong42/gzuro
from gzuro import Grid, SelectList, Text
root = Grid(cols=2)
select_list = SelectList(choices=['blue', 'black', 'grey'], default='blue')
text = Text('Selected: blue')
root.append(select_list)
root.append(text)
@select_list.on_selection
def change_text():
text.content = f'Selected: {select_list.selected}'
root.run()
| 2.765625 | 3 |
lunr/storage/helper/utils/jobs.py | PythonGirlSam/lunr | 6 | 12793336 | # Copyright (c) 2011-2016 Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import logging
import os
import resource as unix
import threading
from lunr.common import logger
from lunr.storage.helper.utils.worker import SaveFailedInvalidCow
log = logger.get_logger()
def spawn(resource, job, *args, **kwargs):
"""
Attempt to start job_name if not already running for those args.
    :param job: job to run
    :param args: args for job's run method
    :keyword callback: callback function to pass to job
    :keyword error_callback: error_callback function to pass to job
"""
callback = kwargs.pop('callback', None)
error_callback = kwargs.pop('error_callback', None)
interruptible = kwargs.pop('interruptible', False)
# If we asked to skip fork for testing
if kwargs.pop('skip_fork', False):
return run(resource, job, callback, error_callback, args)
# Fork Once to create a child
pid = os.fork()
if pid:
# wait on the child to fork and exit to prevent zombie
os.waitpid(pid, 0)
# Our child now owns the resource, this avoids resource
# file clean up when we the controller returns 200
resource.owned = False
return
# Fork Twice to orphan the child
pid = os.fork()
if pid:
# exit to orphan child, and release waiting parent
os._exit(0)
# Lock resource prior to read/write
with resource:
if interruptible:
# Add the interruptible flag if process can be interrupted
resource.acquire({'pid': os.getpid(), 'interruptible': True})
else:
# Re-assign the owner of the resource to us
resource.acquire({'pid': os.getpid()})
    # NOTE: explicit close of syslog handler to force reconnect and suppress
    # traceback when the next log message goes and finds its socket's fd is
    # inexplicably no longer valid, this is obviously jank
# Manually nuking the logging global lock is the best thing ever.
logging._lock = threading.RLock()
log = logger.get_logger()
root = getattr(log, 'logger', log).root
for handler in root.handlers:
try:
# Re-create log handlers RLocks incase we forked during a locked
# write operation; Not doing this may result in a deadlock the
# next time we write to a log handler
handler.createLock()
handler.close()
except AttributeError:
pass
# Become Session leader
os.setsid()
# chdir root
os.chdir('/')
# Prevent GC close() race condition
gc.collect()
# close fd for api server's socket
os.closerange(3, unix.getrlimit(unix.RLIMIT_NOFILE)[1])
# Run the job and exit
os._exit(run(resource, job, callback, error_callback, args))
def run(lock, job, callback, error_callback, args):
# Start the Job
try:
log.info("starting job '%s'" % job.__name__)
try:
job(*args)
except SaveFailedInvalidCow:
log.exception("Save job failed!")
if error_callback:
try:
error_callback()
except Exception, e:
log.exception("unknown exception '%s' while executing "
"error_callback for '%s'" %
(e, job.__name__))
return 1
except Exception, e:
log.exception("unknown exception '%s' while "
"executing job '%s'" % (e, job.__name__))
return 1
# If callback defined, execute the callback
if callback:
log.info("executing callback for '%s'" % job.__name__)
try:
callback()
except Exception, e:
log.exception("unknown exception '%s' while executing "
"callback for '%s'" % (e, job.__name__))
return 1
return 0
finally:
log.info('finished %s' % job.__name__)
lock.remove()
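# Illustrative usage sketch (helper, volume_id and the callbacks are assumptions):
# callers hand spawn() a resource lock plus the long-running job and optional
# callbacks; the job is double-forked so the calling request can return immediately.
#
#     spawn(resource, helper.create_volume, volume_id, size,
#           callback=notify_success, error_callback=notify_failure)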
| 1.976563 | 2 |
Codewars/7kyu/form-the-largest/Python/solution1.py | RevansChen/online-judge | 7 | 12793337 | # Python - 3.6.0
max_number = lambda n: int(''.join(sorted(str(n), reverse = True)))
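# e.g. max_number(213) == 321 and max_number(7389) == 9873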
| 2.796875 | 3 |
project/data/make_features.py | Sanger2000/Predicting-Lung-Cancer-Disease-Progression-from-CT-reports | 0 | 12793338 | <reponame>Sanger2000/Predicting-Lung-Cancer-Disease-Progression-from-CT-reports
import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
import project.data.preprocess_data as preprocess
import torch
from sklearn.preprocessing import LabelEncoder
from pytorch_pretrained_bert import BertTokenizer
def tokenize_input(baseline_text, context_text, split, tokenizer=None, max_len=509):
if tokenizer == None:
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
baseline = tokenizer.tokenize(baseline_text)
context = tokenizer.tokenize(context_text)
baseline_size = int(split*max_len)
context_size = max_len - baseline_size
baseline = preprocess.preprocess_tokens(baseline, baseline_size)
context = preprocess.preprocess_tokens(context, context_size)
final_tokens = ["[CLS]"]
classifications = [0, 0]
for token in baseline:
final_tokens.append(token)
classifications.append(0)
final_tokens.append("[SEP]")
classifications.append(1)
for token in context:
final_tokens.append(token)
classifications.append(1)
final_tokens.append("[SEP]")
for i in range(max_len-(len(context) + len(baseline))):
final_tokens.append("[MASK]")
classifications.append(0)
return tokenizer.convert_tokens_to_ids(final_tokens), classifications
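# Illustrative sketch (the report texts are made up): the function returns BERT input
# ids (with [CLS]/[SEP]/[MASK] padding) and matching segment ids of equal length.
#
#     ids, segments = tokenize_input("baseline ct report text",
#                                    "follow-up ct report text", split=0.4)
#     assert len(ids) == len(segments)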
def one_hot_encode(labels):
out = np.zeros((labels.shape[0], 4))
out[np.arange(labels.shape[0]), labels] = 1
return out
def learn_bow(reports, min_df=1, ngram_range=(1, 3), max_features=5000):
stopwords = ['mm', 'dd', '2017', '2016', '2015', '2014', '2013', '2012', 'date', 'md']
countVec = CountVectorizer(min_df = min_df, \
ngram_range = ngram_range, \
max_features = max_features, \
stop_words = stopwords)
countVec.fit(reports)
return countVec.transform(reports)
def prepare_y(data_y):
label_enc = LabelEncoder()
label_enc_y = label_enc.fit(data_y)
return label_enc_y.transform(data_y)
def createTextFeatures(reports, max_base_feats, max_prog_feats):
baseline_text, progress_text, _, __ = reports
baseline_bow = np.array(learn_bow(baseline_text['clean_report_text'].tolist(), max_features=max_base_feats).todense())
progress_bow = np.array(learn_bow(progress_text['clean_report_text'].tolist(), max_features=max_prog_feats).todense())
print(baseline_bow.shape)
print(progress_bow.shape)
overallTextFeatures = np.hstack([baseline_bow, progress_bow])
return overallTextFeatures
def make_id(patient_id):
if patient_id < 10:
return "MSK_00" + str(patient_id)
elif patient_id < 100:
return "MSK_0" + str(patient_id)
else:
return "MSK_" + str(patient_id)
def pad_vectors(feats, max_len, feat_lens):
for val in (True, False):
for i in range(len(feats[val])):
for j in range(max_len):
if j >= len(feats[val][i]):
feats[val][i].append(np.zeros(feat_lens))
return feats
def setupFeatureVectors(df, desired_features, max_before, max_after):
FEAT_LENS = len(desired_features) + max_before + max_after
patients = df.groupby("Patient ID")
max_len = 0
train_feats = {True: [], False: []}
train_labels = []
id_list = set()
count = -1
before_text = np.array(learn_bow(df["before_text"], max_features = max_before).todense())
after_text = np.array(learn_bow(df["after_text"], max_features = max_after).todense())
train_features = {True: [], False: []}
for patient_id in sorted([int(key[-3:]) for key in patients.groups.keys()]):
count += 1
patient = make_id(patient_id)
context = {True: [], False: []}
checker = {True: False, False: False}
len_counter = {True: 0, False: 0}
count2 = -1
for i in patients.groups[patient]:
count2 += 1
checker[df["is_baseline"][i]] = True
len_counter[df["is_baseline"][i]] += 1
context[df["is_baseline"][i]].append(np.concatenate((np.array([df[desired_feat][i] for desired_feat in desired_features]), \
before_text[i], after_text[i])))
if not(checker[True] or checker[False]):
continue
elif not checker[True]:
context[True].append(np.zeros(FEAT_LENS))
elif not checker[False]:
context[False].append(np.zeros(FEAT_LENS))
max_len = max(max_len, len_counter[True], len_counter[False])
id_list.add(patient)
for val in (True, False):
train_features[val].append(context[val])
train_labels.append(df["labels"][i])
train_features = pad_vectors(train_features, max_len, FEAT_LENS)
return np.array(train_features[False]), np.array(train_features[True]), one_hot_encode(prepare_y(train_labels)), id_list
def create_data(max_base, max_prog, max_before, max_after, desired_features):
df = preprocess.load_reports()
df_extraction = preprocess.extractFeatures(df)
baseX, progX, labs, id_list = setupFeatureVectors(df_extraction, desired_features, max_before, max_after)
reports = preprocess.extractText(df, id_list)
df_text = createTextFeatures(reports, max_base, max_prog)
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
id_vals = torch.tensor(list(map(lambda x: tokenize_input(x[0], x[1], split=0.4, tokenizer=tokenizer), zip(reports[2]['bert_text'], \
reports[3]['bert_text']))))
return torch.from_numpy(baseX), torch.from_numpy(progX), torch.from_numpy(df_text), torch.from_numpy(labs), id_vals[:,0,:], id_vals[:, 1,:]
| 2.765625 | 3 |
datetoken/objects.py | sonirico/datetoken | 5 | 12793339 | <filename>datetoken/objects.py
from functools import reduce
from .ast import get_utc_now
from .ast import NowExpression, ModifierExpression, SnapExpression
class Token(object):
def __init__(self, nodes=None, at=None):
self._at = at
if not nodes:
self._nodes = [NowExpression()]
elif not isinstance(nodes[0], NowExpression):
self._nodes = nodes
self._nodes.insert(0, NowExpression())
else:
self._nodes = nodes
@property
def is_snapped(self):
"""
:rtype: bool
:return: Whether the token has been snapped, either to the beginning
or end.
"""
return any((isinstance(node, SnapExpression) for node in self._nodes))
@property
def is_calculated(self):
"""
:rtype: bool
:return: Whether the token is modified, meaning it suffers from
additions or subtractions.
"""
return any((isinstance(node, ModifierExpression) for node in self._nodes))
def refresh_at(self, new_at=None):
self._at = new_at or get_utc_now()
@property
def at(self):
return self._at
def to_date(self):
"""
Evaluate ast nodes sequentially, starting with the current
value of `_at`
:return:
"""
return reduce(
lambda accumulated, node: node.get_value(accumulated), self._nodes, self._at
)
def __str__(self):
return "".join([str(node) for node in self._nodes])
| 2.875 | 3 |
stack/eb.py | engineervix/aws-web-stacks | 83 | 12793340 | <filename>stack/eb.py
from awacs import ecr
from awacs.aws import Allow, Policy, Principal, Statement
from awacs.sts import AssumeRole
from troposphere import FindInMap, GetAtt, Join, Output, Ref, iam
from troposphere.elasticbeanstalk import (
Application,
Environment,
OptionSettings
)
from troposphere.iam import InstanceProfile, Role
from . import USE_NAT_GATEWAY
from .assets import assets_management_policy
from .certificates import application as application_certificate
from .containers import container_instance_type
from .environment import environment_variables
from .logs import logging_policy
from .security_groups import (
container_security_group,
load_balancer_security_group
)
from .template import template
from .utils import ParameterWithDefaults as Parameter
from .vpc import (
private_subnet_a,
private_subnet_b,
public_subnet_a,
public_subnet_b,
vpc
)
solution_stack = template.add_parameter(
Parameter(
"SolutionStack",
Description="Elastic Beanstalk solution stack name (do NOT change after "
"stack creation). You most likely want to copy the italicized "
"text from: http://docs.aws.amazon.com/elasticbeanstalk/latest"
"/dg/concepts.platforms.html#concepts.platforms.mcdocker",
Type="String",
Default="",
),
group="Application Server",
label="Solution Stack",
)
key_name = template.add_parameter(
Parameter(
"KeyName",
Description="Name of an existing EC2 KeyPair to enable SSH access to "
"the AWS Elastic Beanstalk instance",
Type="AWS::EC2::KeyPair::KeyName",
ConstraintDescription="must be the name of an existing EC2 KeyPair."
),
group="Application Server",
label="SSH Key Name",
)
template.add_mapping("Region2Principal", {
'ap-northeast-1': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'ap-southeast-1': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'ap-southeast-2': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'cn-north-1': {
'EC2Principal': 'ec2.amazonaws.com.cn',
'OpsWorksPrincipal': 'opsworks.amazonaws.com.cn'},
'eu-central-1': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'eu-west-1': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'sa-east-1': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'us-east-1': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'us-west-1': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'},
'us-west-2': {
'EC2Principal': 'ec2.amazonaws.com',
'OpsWorksPrincipal': 'opsworks.amazonaws.com'}
}
)
web_server_role = Role(
"WebServerRole",
template=template,
AssumeRolePolicyDocument=Policy(
Statement=[
Statement(
Effect=Allow, Action=[AssumeRole],
Principal=Principal(
"Service", [
FindInMap(
"Region2Principal",
Ref("AWS::Region"), "EC2Principal")
]
)
)
]
),
Path="/",
Policies=[
assets_management_policy,
logging_policy,
iam.Policy(
PolicyName="EBBucketAccess",
PolicyDocument=dict(
Statement=[dict(
Effect="Allow",
Action=[
"s3:Get*",
"s3:List*",
"s3:PutObject",
],
Resource=[
"arn:aws:s3:::elasticbeanstalk-*",
"arn:aws:s3:::elasticbeanstalk-*/*",
],
)],
),
),
iam.Policy(
PolicyName="EBXRayAccess",
PolicyDocument=dict(
Statement=[dict(
Effect="Allow",
Action=[
"xray:PutTraceSegments",
"xray:PutTelemetryRecords",
],
Resource="*",
)],
),
),
iam.Policy(
PolicyName="EBCloudWatchLogsAccess",
PolicyDocument=dict(
Statement=[dict(
Effect="Allow",
Action=[
"logs:PutLogEvents",
"logs:CreateLogStream",
],
Resource="arn:aws:logs:*:*:log-group:/aws/elasticbeanstalk*",
)],
),
),
iam.Policy(
PolicyName="ECSManagementPolicy",
PolicyDocument=dict(
Statement=[dict(
Effect="Allow",
Action=[
"ecs:*",
"elasticloadbalancing:*",
],
Resource="*",
)],
),
),
iam.Policy(
PolicyName='ECRManagementPolicy',
PolicyDocument=dict(
Statement=[dict(
Effect='Allow',
Action=[
ecr.GetAuthorizationToken,
ecr.GetDownloadUrlForLayer,
ecr.BatchGetImage,
ecr.BatchCheckLayerAvailability,
],
Resource="*",
)],
),
),
]
)
web_server_instance_profile = InstanceProfile(
"WebServerInstanceProfile",
template=template,
Path="/",
Roles=[Ref(web_server_role)],
)
eb_application = Application(
"EBApplication",
template=template,
Description="AWS Elastic Beanstalk Application"
)
# eb_application_version = ApplicationVersion(
# "EBApplicationVersion",
# template=template,
# Description="Version 1.0",
# ApplicationName=Ref(eb_application),
# SourceBundle=SourceBundle(
# S3Bucket=Join("-", ["elasticbeanstalk-samples", Ref("AWS::Region")]),
# S3Key="nodejs-sample.zip"
# )
# )
template.add_resource(Environment(
"EBEnvironment",
Description="AWS Elastic Beanstalk Environment",
ApplicationName=Ref(eb_application),
SolutionStackName=Ref(solution_stack),
OptionSettings=[
# VPC settings
OptionSettings(
Namespace="aws:ec2:vpc",
OptionName="VPCId",
Value=Ref(vpc),
),
OptionSettings(
Namespace="aws:ec2:vpc",
OptionName="AssociatePublicIpAddress",
# instances need a public IP if we're not using a NAT gateway
Value=str(not USE_NAT_GATEWAY).lower(),
),
OptionSettings(
Namespace="aws:ec2:vpc",
OptionName="Subnets",
Value=Join(",", [
Ref(private_subnet_a),
Ref(private_subnet_b),
]),
),
OptionSettings(
Namespace="aws:ec2:vpc",
OptionName="ELBSubnets",
Value=Join(",", [
Ref(public_subnet_a),
Ref(public_subnet_b),
]),
),
# Launch config settings
OptionSettings(
Namespace="aws:autoscaling:launchconfiguration",
OptionName="InstanceType",
Value=container_instance_type,
),
OptionSettings(
Namespace="aws:autoscaling:launchconfiguration",
OptionName="EC2KeyName",
Value=Ref(key_name),
),
OptionSettings(
Namespace="aws:autoscaling:launchconfiguration",
OptionName="IamInstanceProfile",
Value=Ref(web_server_instance_profile),
),
OptionSettings(
Namespace="aws:autoscaling:launchconfiguration",
OptionName="SecurityGroups",
Value=Join(",", [
Ref(container_security_group),
]),
),
# Load balancer settings
OptionSettings(
Namespace="aws:elb:loadbalancer",
OptionName="SecurityGroups",
Value=Join(",", [
Ref(load_balancer_security_group),
]),
),
# HTTPS Listener (note, these will not appear in the console -- only
# the deprecated options which we are not using will appear there).
OptionSettings(
Namespace="aws:elb:listener:443",
OptionName="ListenerProtocol",
Value="HTTPS",
),
OptionSettings(
Namespace="aws:elb:listener:443",
OptionName="SSLCertificateId",
Value=application_certificate,
),
OptionSettings(
Namespace="aws:elb:listener:443",
OptionName="InstanceProtocol",
Value="HTTP",
),
OptionSettings(
Namespace="aws:elb:listener:443",
OptionName="InstancePort",
Value="80",
),
# OS management options
# OptionSettings(
# Namespace="aws:elasticbeanstalk:environment",
# # allows AWS to reboot our instances with security updates
# OptionName="ServiceRole",
# # should be created by EB by default
# Value="${aws_iam_role.eb_service_role.name),",
# ),
# OptionSettings(
# Namespace="aws:elasticbeanstalk:healthreporting:system",
# OptionName="SystemType", # required for managed updates
# Value="enhanced",
# ),
# OptionSettings(
# Namespace="aws:elasticbeanstalk:managedactions",
# # required for managed updates
# OptionName="ManagedActionsEnabled",
# Value="true",
# ),
# OptionSettings(
# Namespace="aws:elasticbeanstalk:managedactions",
# OptionName="PreferredStartTime",
# Value="Sun:02:00",
# ),
# OptionSettings(
# Namespace="aws:elasticbeanstalk:managedactions:platformupdate",
# OptionName="UpdateLevel",
# Value="minor", # or "patch", ("minor", provides more updates)
# ),
# OptionSettings(
# Namespace="aws:elasticbeanstalk:managedactions:platformupdate",
# OptionName="InstanceRefreshEnabled",
# Value="true", # refresh instances weekly
# ),
# Logging configuration
OptionSettings(
Namespace="aws:elasticbeanstalk:cloudwatch:logs",
OptionName="StreamLogs",
Value="true",
),
OptionSettings(
Namespace="aws:elasticbeanstalk:cloudwatch:logs",
OptionName="DeleteOnTerminate",
Value="false",
),
OptionSettings(
Namespace="aws:elasticbeanstalk:cloudwatch:logs",
OptionName="RetentionInDays",
Value="365",
),
        # Environment variables
] + [
OptionSettings(
Namespace="aws:elasticbeanstalk:application:environment",
OptionName=k,
Value=v,
) for k, v in environment_variables
],
))
template.add_output(
Output(
"URL",
Description="URL of the AWS Elastic Beanstalk Environment",
Value=Join("", ["http://", GetAtt("EBEnvironment", "EndpointURL")])
)
)
| 2.09375 | 2 |
hypertrack/tests/tests.py | hypertrack/hypertrack-python | 9 | 12793341 | <filename>hypertrack/tests/tests.py
import os
import unittest
from hypertrack.rest import Client
from hypertrack.exceptions import HyperTrackException
DEVICE_ID = os.getenv("HT_EXISTING_DEVICE_ID")
ACCOUNT_ID = os.getenv("HT_ACCOUNT_ID")
SECRET_KEY = os.getenv("HT_SECRET_KEY")
hypertrack = Client(ACCOUNT_ID, SECRET_KEY)
class TestDevicesAPI(unittest.TestCase):
def test_get_device(self):
device = hypertrack.devices.get(DEVICE_ID)
self.assertTrue('device_id' in device)
self.assertTrue(isinstance(device, dict))
def test_not_existing_device(self):
try:
hypertrack.devices.get('AAAAAAAA-AAAA-AAAA-AAAA-AAAAAAAAAAAA')
# Should not go to the next line
print("Devices API did not throw exception.")
self.assertTrue(False)
except HyperTrackException as e:
self.assertEqual(e.status, 404)
def test_get_all_device(self):
pass
# AEK: 05/14/2020
# We will make it work once needed by a customer
# - the plan is to make the API paginate response
# devices = hypertrack.devices.get_all()
# self.assertTrue(isinstance(devices, list))
def test_start_tracking(self):
response = hypertrack.devices.start_tracking(DEVICE_ID)
self.assertTrue(response is None)
def test_stop_tracking(self):
response = hypertrack.devices.stop_tracking(DEVICE_ID)
self.assertTrue(response is None)
def test_change_name(self):
device = hypertrack.devices.get(DEVICE_ID)
# Save initial device name
old_name = device['device_info']['name']
response = hypertrack.devices.change_name(DEVICE_ID, 'Test Name')
self.assertTrue(response is None)
device = hypertrack.devices.get(DEVICE_ID)
self.assertEqual(device['device_info']['name'], 'Test Name')
# Change name back
response = hypertrack.devices.change_name(DEVICE_ID, old_name)
self.assertTrue(response is None)
# Check that name was changed back
device = hypertrack.devices.get(DEVICE_ID)
self.assertEqual(device['device_info']['name'], old_name)
class TestTripsAPI(unittest.TestCase):
def test_get_create_complete_trip(self):
# Create trip
trip = hypertrack.trips.create({
'device_id': DEVICE_ID,
'geofences': [{
"geometry": {
"type": "Point",
"coordinates": [
35.105761016637075,
47.856801319070776
]
},
"radius": 65,
"metadata": {"id": "dec43d3c-766c-4f6a-bd78-dfe873556782"}
}, {
"geometry": {
"type": "Point",
"coordinates": [
35.10460766676067,
47.85663214471151
]
},
"radius": 55,
"metadata": {"id": "f2e56252-53e3-4194-8d53-d946716618e7"}
}]
})
self.assertEqual(trip['status'], 'active')
self.assertEqual(len(trip['geofences']), 2)
# Get trip geofences
geofence_id = trip['geofences'][0]['geofence_id']
geofence = hypertrack.trips.get_geofence(trip['trip_id'], geofence_id)
self.assertEqual(geofence['radius'], 65)
self.assertEqual(geofence['metadata']['id'], 'dec43d3c-766c-4f6a-bd78-dfe873556782')
# Change geofence metadata
hypertrack.trips.patch_geofence_metadata(trip['trip_id'], geofence_id, {'id': '123'})
geofence = hypertrack.trips.get_geofence(trip['trip_id'], geofence_id)
self.assertEqual(geofence['metadata']['id'], '123')
# Complete Trip
hypertrack.trips.complete(trip['trip_id'])
# Get Trip
get_trip = hypertrack.trips.get(trip['trip_id'])
self.assertTrue(get_trip['status'] in ['completed', 'processing_completion'])
def test_get_all_trips(self):
trips = hypertrack.trips.get_all()
self.assertTrue(isinstance(trips, dict))
self.assertTrue('data' in trips)
if __name__ == '__main__':
unittest.main()
| 2.484375 | 2 |
removepii.py | p-zach/Remove-PII | 1 | 12793342 | # Author: <NAME>
# Python 3.9
import argparse
import nltk
import re
import os
import pathlib
def extract(filePath):
"""Extracts the textual information from a file.
Args:
filePath (str): The path to the file to extract text from.
Raises:
ValueError: If the information could not be extracted due to unsupported file type.
Returns:
str: The text in the provided file.
"""
# get the file extension
ext = pathlib.Path(filePath).suffix
# extract all data from pure text files
if ext == ".txt" or ext == ".md":
text = None
with open(filePath) as file:
text = file.read()
return text
# get the text from PDFs
if ext == ".pdf":
from pdfminer.high_level import extract_text
return extract_text(filePath)
# get the text minus tags from HTML files
if ext == ".html" or ext == ".htm":
from bs4 import BeautifulSoup
with open(filePath) as file:
soup = BeautifulSoup(file, "html.parser")
return soup.get_text()
raise ValueError(f"Text from file {filePath} could not be extracted. Supported types are TXT, PDF, HTML.")
def getNE(text, piiNE):
"""Gets the named entities classified as PII in the text.
Args:
text (str): The text to analyze.
        piiNE (list): The types of named entities classified as PII that should be removed. Options: PERSON, ORGANIZATION, GPE, LOCATION.
Returns:
set: The set of strings holding named entity PII.
"""
# search for NLTK required data in this directory so the user doesn't need to download it separately
nltk.data.path.append(os.getcwd())
# gets all of the named entities in the text
ne = nltk.ne_chunk(nltk.pos_tag(nltk.word_tokenize(text)))
pii = []
# checks if a subtree contains PII (i.e. it should be removed)
def filterPII(x):
return x.label() in piiNE
# loops through all subtrees with a PII label
for sub in ne.subtrees(filter = filterPII):
# gets the PII's full text string from the subtree's leaves
# ex: [('Google', 'NNP'), ('Science', 'NNP'), ('Fair', 'NNP')] -> Google Science Fair
piiStr = " ".join(pair[0] for pair in sub.leaves())
# adds the PII string to the list
if piiStr not in pii:
pii.append(piiStr)
# converts to a set before returning to remove duplicates
return set(pii)
def getIDInfo(text, types):
"""Gets the ID info classified as PII in the text.
Args:
text (str): The text to analyze.
types (list): The types of ID info classified as PII that should be removed. Options: EMAIL, PHONE, SSN
Returns:
set: The set of strings holding ID info PII.
"""
# gets whether each ID info type should be removed.
phone = "PHONE" in types
email = "EMAIL" in types
ssn = "SSN" in types
# return an empty set if we're not looking for any ID info PII
if not(phone or email or ssn):
return set([])
# initialize the phone number regex
if phone: phoneRegex = re.compile(r'''(
(\d{3}|\(\d{3}\))? # area code
(\s|-|\.)? # separator
(\d{3}) # first 3 digits
(\s|-|\.) # separator
(\d{4}) # last 4 digits
(\s*(ext|x|ext.)\s*(\d{2,5}))? # optional extension
)''', re.VERBOSE)
# initialize the email address regex
if email: emailRegex = re.compile(r'''(
[a-zA-Z0-9._%+-] + # username
@ # @symbol
[a-zA-Z0-9.-] + # domain
(\.[a-zA-Z]{2,4}) # .something
)''', re.VERBOSE)
# initialize the social security number regex
if ssn: ssnRegex = re.compile(r'''(
(?!666|000|9\d{2})\d{3} # SSN can't begin with 666, 000 or anything between 900-999
- # explicit dash (separating Area and Group numbers)
(?!00)\d{2} # don't allow the Group Number to be "00"
- # another dash (separating Group and Serial numbers)
(?!0{4})\d{4} # don't allow last four digits to be "0000"
)''', re.VERBOSE)
pii = []
# utility method for getting PII matches
def getMatches(pattern, t):
# for each match, return the match itself if it is a string or the first member of a tuple match
# this is because matches are sometimes tuples of multiple groups, like a phone number match being:
# ("800-999-2222", "800", "-", "999", "-", "2222")
# However, sometimes the matches are just strings (no groups), so accessing the element at [0] would get the first char, which is not desirable.
return [(match if type(match) is str else match[0]) for match in pattern.findall(t)]
# adds the found phone #s, emails, and SSNs to the PII list
if phone: pii += getMatches(phoneRegex, text)
if email: pii += getMatches(emailRegex, text)
if ssn: pii += getMatches(ssnRegex, text)
# converts to a set before returning to remove duplicates
return set(pii)
def writeFile(text, path):
"""Writes text to the file path.
Args:
text (str): The text to write.
path (str): The path to write the file to.
"""
with open(path, "w") as file:
file.write(text)
def cleanString(text,
verbose = False,
piiNE = ["PERSON", "ORGANIZATION", "GPE", "LOCATION"],
piiNums = ["PHONE", "EMAIL", "SSN"]):
"""Cleans a string of PII.
Args:
text (str): The text to clean.
verbose (bool, optional): Whether status updates should be printed to the console. Defaults to False.
piiNE (list, optional): The types of named entity PII to remove. Defaults to all types: ["PERSON", "ORGANIZATION", "GPE", "LOCATION"].
piiNums (list, optional): The types of ID info PII to remove. Defaults to all types: ["PHONE", "EMAIL", "SSN"].
Returns:
str: The cleaned text string with PII replaced with XXXXX values.
"""
if verbose: print("Cleaning text: getting named entities and identifiable information...")
# combines the NE and ID info PII string sets
piiSet = set.union(getNE(text, piiNE), getIDInfo(text, piiNums))
if verbose: print(str(len(piiSet)) + " PII strings found.")
if verbose: print("Removing PII.")
# replaces PII with XXXXX values
cleaned = text
for pii in piiSet:
cleaned = cleaned.replace(pii, "XXXXX")
# return the cleaned text string
return cleaned
def cleanFile(filePath, outputPath,
verbose = False,
piiNE = ["PERSON", "ORGANIZATION", "GPE", "LOCATION"],
piiNums = ["PHONE", "EMAIL", "SSN"]):
"""Reads a file with PII and saves a copy of that file with PII removed.
Args:
filePath (str): The path to the file with PII.
outputPath (str): The path to the cleaned file to be saved.
verbose (bool, optional): Whether status updates should be printed to the console. Defaults to False.
piiNE (list, optional): The types of named entity PII to remove. Defaults to all types: ["PERSON", "ORGANIZATION", "GPE", "LOCATION"].
piiNums (list, optional): The types of ID info PII to remove. Defaults to all types: ["PHONE", "EMAIL", "SSN"].
"""
if verbose: print("Extracting text from " + filePath + "...")
# gets the file's text
text = extract(filePath)
if verbose: print("Text extracted.")
# gets the text without PII
cleaned = cleanString(text, verbose, piiNE, piiNums)
if verbose: print("Writing clean text to " + outputPath + ".")
# write the cleaned text to the output file
writeFile(cleaned, outputPath)
# if this file is being executed on the command line, parse arguments and process the user's file or text
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Removes personally identifiable information (PII) like names and phone numbers from text strings and files.")
parser.add_argument("-f", nargs=2, dest="path", default=[], metavar=("inputPath","outputPath"), help="the file to remove PII from and the clean output file path")
parser.add_argument("-s", dest="text", default=None, help="input a text string to clean")
args = parser.parse_args()
# cleans the user's provided file
if len(args.path) == 2:
cleanFile(args.path[0], args.path[1], verbose=True)
# cleans the user's provided text
elif args.text is not None:
s = cleanString(args.text, verbose=True)
print("Text with PII removed:\n" + s)
else:
print("No action specified.") | 3.3125 | 3 |
tests/test_flask_cdn.py | local-projects/flask-cdn | 0 | 12793343 | <reponame>local-projects/flask-cdn
import unittest
import os
from flask import Flask, render_template_string
from flask.ext.cdn import CDN
class DefaultsTest(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.testing = True
CDN(self.app)
def test_domain_default(self):
""" Tests CDN_DOMAIN default value is correctly set. """
self.assertEquals(self.app.config['CDN_DOMAIN'], None)
def test_debug_default(self):
""" Tests CDN_DEBUG default value is correctly set. """
self.assertEquals(self.app.config['CDN_DEBUG'], False)
def test_https_default(self):
""" Tests CDN_HTTPS default value is correctly set. """
self.assertEquals(self.app.config['CDN_HTTPS'], False)
def test_timestamp_default(self):
""" Tests CDN_TIMESTAMP default value is correctly set. """
self.assertEquals(self.app.config['CDN_TIMESTAMP'], True)
class UrlTests(unittest.TestCase):
def setUp(self):
self.app = Flask(__name__)
self.app.testing = True
self.app.config['CDN_DOMAIN'] = 'mycdnname.cloudfront.net'
self.app.config['CDN_TIMESTAMP'] = False
@self.app.route('/<url_for_string>')
def a(url_for_string):
return render_template_string(url_for_string)
@self.app.route('/')
def b():
return render_template_string("{{ url_for('b') }}")
def client_get(self, ufs):
CDN(self.app)
client = self.app.test_client()
return client.get('/%s' % ufs)
def test_url_for(self):
""" Tests static endpoint correctly affects generated URLs. """
# non static endpoint url_for in template
self.assertEquals(self.client_get('').data, '/')
# static endpoint url_for in template
ufs = "{{ url_for('static', filename='bah.js') }}"
exp = 'http://mycdnname.cloudfront.net/static/bah.js'
self.assertEquals(self.client_get(ufs).data, exp)
def test_url_for_debug(self):
""" Tests CDN_DEBUG correctly affects generated URLs. """
self.app.debug = True
ufs = "{{ url_for('static', filename='bah.js') }}"
self.app.config['CDN_DEBUG'] = True
exp = 'http://mycdnname.cloudfront.net/static/bah.js'
self.assertEquals(self.client_get(ufs).data, exp)
self.app.config['CDN_DEBUG'] = False
exp = '/static/bah.js'
self.assertEquals(self.client_get(ufs).data, exp)
def test_url_for_https(self):
""" Tests CDN_HTTPS correctly affects generated URLs. """
ufs = "{{ url_for('static', filename='bah.js') }}"
self.app.config['CDN_HTTPS'] = True
exp = 'https://mycdnname.cloudfront.net/static/bah.js'
self.assertEquals(self.client_get(ufs).data, exp)
self.app.config['CDN_HTTPS'] = False
exp = 'http://mycdnname.cloudfront.net/static/bah.js'
self.assertEquals(self.client_get(ufs).data, exp)
def test_url_for_timestamp(self):
""" Tests CDN_TIMESTAMP correctly affects generated URLs. """
ufs = "{{ url_for('static', filename='bah.js') }}"
self.app.config['CDN_TIMESTAMP'] = True
path = os.path.join(self.app.static_folder, 'bah.js')
ts = int(os.path.getmtime(path))
exp = 'http://mycdnname.cloudfront.net/static/bah.js?t={}'.format(ts)
self.assertEquals(self.client_get(ufs).data, exp)
self.app.config['CDN_TIMESTAMP'] = False
exp = 'http://mycdnname.cloudfront.net/static/bah.js'
self.assertEquals(self.client_get(ufs).data, exp)
if __name__ == '__main__':
unittest.main()
| 2.6875 | 3 |
experiments/generator.py | usc-sail/mica-violence-ratings-predictions-from-movie-scripts | 3 | 12793344 | <filename>experiments/generator.py
import os
import threading
from random import Random
from glob import iglob as glob
from keras.utils import Sequence
class Generator(Sequence):
def __init__(self, batch_dir, feat_func, shuffle = True, shuffler = Random(42)):
self.batch_dir = batch_dir
self.shuffler = shuffler
self.shuffle = shuffle
self.feat_func = feat_func
self.files = list(glob(os.path.join(self.batch_dir, "*_labels.npz")))
self.shuffler.shuffle(self.files)
self.length = len(self.files)
self.on_epoch_end()
# print('generator initiated')
def on_epoch_end(self):
if self.shuffle:
self.shuffler.shuffle(self.files)
def __getitem__(self, index):
"""Generates one batch of data"""
# print(f'generator: {index}')
label_f = self.files[index % self.length]
return self.feat_func(label_f, self.batch_dir)
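    # Illustrative usage sketch (load_batch, the directory and `model` are assumptions):
    # feat_func receives (label_file, batch_dir) and must return one batch in the form
    # Keras expects, so the generator can be passed straight to fit():
    #
    #     gen = Generator("batches/train", load_batch)
    #     model.fit(gen, epochs=10)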
def __len__(self):
return self.length | 2.578125 | 3 |
FirebaseManager.py | ksolanoj/GenderAge-Recognition | 0 | 12793345 | import pyrebase
import time
from FaceRecognitionManager import *
firebaseConfig = {
"apiKey": "<KEY>",
"authDomain": "iaproject-29018.firebaseapp.com",
"projectId": "iaproject-29018",
"storageBucket": "iaproject-29018.appspot.com",
"messagingSenderId": "817053540910",
"appId": "1:817053540910:web:423251c3f6691e27fd75bf",
"databaseURL" : ""
}
email = '<EMAIL>'
password = '<PASSWORD>'
firebase = pyrebase.initialize_app(firebaseConfig)
auth = firebase.auth()
storage = firebase.storage()
user = auth.sign_in_with_email_and_password(email, password)
def uploadImage(imageName):
globalPath = "detected/{0}.jpg".format(imageName)
storage.child(globalPath).put(globalPath)
url = storage.child(globalPath).get_url(user['idToken'])
return url
def downloadImage(imageName):
globalPath = "uploaded/{0}.jpg".format(imageName)
downloadPath = 'downloaded/{0}.jpg'.format(imageName)
storage.child(globalPath).download(downloadPath)
return detectImage(downloadPath, imageName)
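# Illustrative usage sketch (image names are assumptions):
#
#     url = uploadImage("frame_001")       # pushes detected/frame_001.jpg and returns its URL
#     result = downloadImage("frame_001")  # pulls uploaded/frame_001.jpg and runs detectImage on it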
| 2.4375 | 2 |
tests/core/test_base_types.py | balancap/arrowbic | 4 | 12793346 | <reponame>balancap/arrowbic
import numpy as np
import pyarrow as pa
import pytest
import arrowbic.extensions
from arrowbic.core.base_types import (
from_arrow_to_numpy_dtype,
from_arrow_to_python_class,
from_numpy_to_arrow_type,
is_supported_base_type,
)
def test__is_supported_base_type__proper_result() -> None:
assert not is_supported_base_type(arrowbic.extensions.IntEnumType())
assert not is_supported_base_type(arrowbic.extensions.TensorType())
def test__from_numpy_to_arrow_type__np_dtype__proper_coverage() -> None:
assert from_numpy_to_arrow_type(None) == pa.null()
assert from_numpy_to_arrow_type(type(None)) == pa.null()
assert from_numpy_to_arrow_type(np.bool_) == pa.bool_()
assert from_numpy_to_arrow_type(np.int8) == pa.int8()
assert from_numpy_to_arrow_type(np.float32) == pa.float32()
assert from_numpy_to_arrow_type(np.dtype(str)) == pa.string()
assert from_numpy_to_arrow_type(np.dtype(bytes)) == pa.binary(-1)
assert from_numpy_to_arrow_type(np.dtype("datetime64[s]")) == pa.timestamp("s")
assert from_numpy_to_arrow_type(np.dtype("timedelta64[ns]")) == pa.duration("ns")
with pytest.raises(TypeError):
from_numpy_to_arrow_type(np.dtype("O"))
def test__from_numpy_to_arrow_type__python_class__proper_coverage() -> None:
assert from_numpy_to_arrow_type(None) == pa.null()
assert from_numpy_to_arrow_type(type(None)) == pa.null()
assert from_numpy_to_arrow_type(bool) == pa.bool_()
assert from_numpy_to_arrow_type(int) == pa.int64()
assert from_numpy_to_arrow_type(float) == pa.float64()
assert from_numpy_to_arrow_type(str) == pa.string()
assert from_numpy_to_arrow_type(bytes) == pa.binary(-1)
def test__from_arrow_to_numpy_dtype__proper_coverage() -> None:
assert from_arrow_to_numpy_dtype(None) == type(None) # noqa: E721
assert from_arrow_to_numpy_dtype(type(None)) == type(None) # noqa: E721
assert from_arrow_to_numpy_dtype(pa.null()) == type(None) # noqa: E721
assert from_arrow_to_numpy_dtype(pa.bool_()) == np.bool_
assert from_arrow_to_numpy_dtype(pa.uint8()) == np.uint8
assert from_arrow_to_numpy_dtype(pa.float32()) == np.float32
assert from_arrow_to_numpy_dtype(pa.string()) == np.dtype(str)
assert from_arrow_to_numpy_dtype(pa.binary(-1)) == np.dtype(bytes)
assert from_arrow_to_numpy_dtype(pa.timestamp("us")) == np.dtype("datetime64[us]")
assert from_arrow_to_numpy_dtype(pa.duration("ns")) == np.dtype("timedelta64[ns]")
def test__from_arrow_to_python_class__proper_coverage() -> None:
assert from_arrow_to_python_class(pa.null()) == type(None) # noqa: E721
assert from_arrow_to_python_class(pa.float32()) == float # noqa: E721
assert from_arrow_to_python_class(pa.int32()) == int # noqa: E721
assert from_arrow_to_python_class(pa.string()) == str # noqa: E721
assert from_arrow_to_python_class(pa.binary(-1)) == bytes # noqa: E721
assert from_arrow_to_python_class(pa.timestamp("us")) == np.dtype("datetime64[us]")
assert from_arrow_to_python_class(pa.duration("ns")) == np.dtype("timedelta64[ns]")
| 2.28125 | 2 |
gallery/migrations/0008_shopitem.py | jeffykle/kf-public | 0 | 12793347 | <filename>gallery/migrations/0008_shopitem.py
# Generated by Django 3.1 on 2020-09-06 15:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('gallery', '0007_auto_20200905_1239'),
]
operations = [
migrations.CreateModel(
name='ShopItem',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('gallery.galleryitem',),
),
]
| 1.40625 | 1 |
metadata-ingestion/src/datahub/metadata/com/linkedin/pegasus2avro/timeseries/__init__.py | zhoxie-cisco/datahub | 1 | 12793348 | # flake8: noqa
# This file is autogenerated by /metadata-ingestion/scripts/avro_codegen.py
# Do not modify manually!
# fmt: off
from .....schema_classes import CalendarIntervalClass
from .....schema_classes import PartitionSpecClass
from .....schema_classes import TimeWindowClass
from .....schema_classes import TimeWindowSizeClass
CalendarInterval = CalendarIntervalClass
PartitionSpec = PartitionSpecClass
TimeWindow = TimeWindowClass
TimeWindowSize = TimeWindowSizeClass
# fmt: on
| 1.414063 | 1 |
hnn_core/parallel_backends.py | chenghuzi/hnn-core | 0 | 12793349 | """Parallel backends"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
import os
import sys
import re
import multiprocessing
import shlex
import pickle
import base64
from warnings import warn
from subprocess import Popen, PIPE, TimeoutExpired
import binascii
from queue import Queue, Empty
from threading import Thread, Event
from .cell_response import CellResponse
from .dipole import Dipole
from .network_builder import _simulate_single_trial
_BACKEND = None
def _thread_handler(event, out, queue):
while not event.isSet():
line = out.readline()
if line == '':
break
queue.put(line)
def _gather_trial_data(sim_data, net, n_trials, postproc):
"""Arrange data by trial
To be called after simulate(). Returns list of Dipoles, one for each trial,
and saves spiking info in net (instance of Network).
"""
dpls = []
# Create array of equally sampled time points for simulating currents
cell_type_names = list(net.cell_types.keys())
cell_response = CellResponse(times=sim_data[0]['times'],
cell_type_names=cell_type_names)
net.cell_response = cell_response
for idx in range(n_trials):
# cell response
net.cell_response._spike_times.append(sim_data[idx]['spike_times'])
net.cell_response._spike_gids.append(sim_data[idx]['spike_gids'])
net.cell_response.update_types(net.gid_ranges)
net.cell_response._vsoma.append(sim_data[idx]['vsoma'])
net.cell_response._isoma.append(sim_data[idx]['isoma'])
# extracellular array
for arr_name, arr in net.rec_arrays.items():
# voltages is a n_trials x n_contacts x n_samples array
arr._data.append(sim_data[idx]['rec_data'][arr_name])
arr._times = sim_data[idx]['rec_times'][arr_name]
# dipole
dpl = Dipole(times=sim_data[idx]['times'],
data=sim_data[idx]['dpl_data'])
N_pyr_x = net._params['N_pyr_x']
N_pyr_y = net._params['N_pyr_y']
dpl._baseline_renormalize(N_pyr_x, N_pyr_y) # XXX cf. #270
dpl._convert_fAm_to_nAm() # always applied, cf. #264
if postproc:
window_len = net._params['dipole_smooth_win'] # specified in ms
fctr = net._params['dipole_scalefctr']
if window_len > 0: # param files set this to zero for no smoothing
dpl.smooth(window_len=window_len)
if fctr > 0:
dpl.scale(fctr)
dpls.append(dpl)
return dpls
def _get_mpi_env():
"""Set some MPI environment variables."""
my_env = os.environ.copy()
if 'win' not in sys.platform:
my_env["OMPI_MCA_btl_base_warn_component_unused"] = '0'
if 'darwin' in sys.platform:
my_env["PMIX_MCA_gds"] = "^ds12" # open-mpi/ompi/issues/7516
my_env["TMPDIR"] = "/tmp" # open-mpi/ompi/issues/2956
return my_env
def run_subprocess(command, obj, timeout, proc_queue=None, *args, **kwargs):
"""Run process and communicate with it.
Parameters
----------
command : list of str | str
Command to run as subprocess (see subprocess.Popen documentation).
obj : object
The object to write to stdin after starting child process
with MPI command.
timeout : float
The number of seconds to wait for a process without output.
*args, **kwargs : arguments
Additional arguments to pass to subprocess.Popen.
Returns
-------
child_data : object
The data returned by the child process.
"""
proc_data_bytes = b''
# each loop while waiting will involve two Queue.get() timeouts, each
    # 0.01s. This calculation will err on the side of a longer timeout
    # than is specified because more is done in each loop than just Queue.get()
timeout_cycles = timeout / 0.02
pickled_obj = base64.b64encode(pickle.dumps(obj))
# non-blocking adapted from https://stackoverflow.com/questions/375427/non-blocking-read-on-a-subprocess-pipe-in-python#4896288 # noqa: E501
out_q = Queue()
err_q = Queue()
threads_started = False
try:
proc = Popen(command, stdin=PIPE, stdout=PIPE, stderr=PIPE, *args,
**kwargs)
# now that the process has started, add it to the queue
# used by MPIBackend.terminate()
if proc_queue is not None:
proc_queue.put(proc)
# set up polling first so all of child's stdout/stderr
# gets captured
event = Event()
out_t = Thread(target=_thread_handler,
args=(event, proc.stdout, out_q))
err_t = Thread(target=_thread_handler,
args=(event, proc.stderr, err_q))
out_t.start()
err_t.start()
threads_started = True
data_received = False
sent_network = False
count_since_last_output = 0
# loop while the process is running the simulation
while True:
child_terminated = proc.poll() is not None
if not data_received:
if _echo_child_output(out_q):
count_since_last_output = 0
else:
count_since_last_output += 1
# look for data in stderr and print child stdout
data_len, proc_data_bytes = _get_data_from_child_err(err_q)
if data_len > 0:
data_received = True
_write_child_exit_signal(proc.stdin)
elif child_terminated:
# child terminated early, and we already
# captured output left in queues
warn("Child process failed unexpectedly")
kill_proc_name('nrniv')
break
if not sent_network:
# Send network object to child so it can start
try:
_write_net(proc.stdin, pickled_obj)
except BrokenPipeError:
# child failed during _write_net(). get the
# output and break out of loop on the next
# iteration
warn("Received BrokenPipeError exception. "
"Child process failed unexpectedly")
continue
else:
sent_network = True
# This is not the same as "network received", but we
# assume it was successful and move on to waiting for
# data in the next loop iteration.
if child_terminated and data_received:
# both exit conditions have been met (also we know that
# the network has been sent)
break
if not child_terminated and \
count_since_last_output > timeout_cycles:
warn("Timeout exceeded while waiting for child process output"
". Terminating...")
kill_proc_name('nrniv')
break
except KeyboardInterrupt:
warn("Received KeyboardInterrupt. Stopping simulation process...")
if threads_started:
# stop the threads
event.set() # close signal
out_t.join()
err_t.join()
# wait for the process to terminate. we need use proc.communicate to
# read any output at its end of life.
try:
outs, errs = proc.communicate(timeout=1)
except TimeoutExpired:
proc.kill()
# wait for output again after kill signal
outs, errs = proc.communicate(timeout=1)
sys.stdout.write(outs)
sys.stdout.write(errs)
if proc.returncode is None:
# It's theoretically possible that we have received data
# and exited the loop above, but the child process has not
        # yet terminated. This is unexpected unless KeyboardInterrupt
# is caught
proc.terminate()
try:
proc.wait(1) # wait maximum of 1s
except TimeoutExpired:
warn("Could not kill python subprocess: PID %d" % proc.pid)
if not proc.returncode == 0:
# simulation failed with a numeric return code
raise RuntimeError("MPI simulation failed. Return code: %d" %
proc.returncode)
child_data = _process_child_data(proc_data_bytes, data_len)
# clean up the queue
try:
proc_queue.get_nowait()
except Empty:
pass
return proc, child_data
def _process_child_data(data_bytes, data_len):
"""Process the data returned by child process.
Parameters
----------
data_bytes : str
The data bytes
Returns
-------
data_unpickled : object
The unpickled data.
"""
if not data_len == len(data_bytes):
# This is indicative of a failure. For debugging purposes.
warn("Length of received data unexpected. Expecting %d bytes, "
"got %d" % (data_len, len(data_bytes)))
if len(data_bytes) == 0:
raise RuntimeError("MPI simulation didn't return any data")
# decode base64 byte string
try:
data_pickled = base64.b64decode(data_bytes, validate=True)
except binascii.Error:
# This is here for future debugging purposes. Unit tests can't
# reproduce an incorrectly padded string, but this has been an
# issue before
raise ValueError("Incorrect padding for data length %d bytes" %
                         len(data_bytes) + " (mod 4 = %d)" %
                         (len(data_bytes) % 4))
# unpickle the data
return pickle.loads(data_pickled)
def _echo_child_output(out_q):
out = ''
while True:
try:
out += out_q.get(timeout=0.01)
except Empty:
break
if len(out) > 0:
sys.stdout.write(out)
return True
return False
def _get_data_from_child_err(err_q):
err = ''
data_length = 0
data_bytes = b''
while True:
try:
err += err_q.get(timeout=0.01)
except Empty:
break
# check for data signal
extracted_data = _extract_data(err, 'data')
if len(extracted_data) > 0:
# _extract_data only returns data when signals on
# both sides were seen
err = err.replace('@start_of_data@', '')
err = err.replace(extracted_data, '')
data_length = _extract_data_length(err, 'data')
err = err.replace('@end_of_data:%d@\n' % data_length, '')
data_bytes = extracted_data.encode()
# print the rest of the child's stderr to our stdout
sys.stdout.write(err)
return data_length, data_bytes
def _has_mpi4py():
"""Determine if mpi4py is present."""
try:
import mpi4py # noqa
except ImportError:
return False
else:
return True
def _has_psutil():
"""Determine if psutil is present."""
try:
import psutil # noqa
except ImportError:
return False
else:
return True
def requires_mpi4py(function):
"""Decorator for testing functions that require MPI."""
import pytest
try:
import mpi4py
assert hasattr(mpi4py, '__version__')
skip = False
except (ImportError, ModuleNotFoundError) as err:
if "TRAVIS_OS_NAME" not in os.environ:
skip = True
else:
raise ImportError(err)
reason = 'mpi4py not available'
return pytest.mark.skipif(skip, reason=reason)(function)
def requires_psutil(function):
"""Decorator for testing functions that require psutil."""
import pytest
try:
import psutil
assert hasattr(psutil, '__version__')
skip = False
except (ImportError, ModuleNotFoundError) as err:
if "TRAVIS_OS_NAME" not in os.environ:
skip = True
else:
raise ImportError(err)
reason = 'psutil not available'
return pytest.mark.skipif(skip, reason=reason)(function)
def _extract_data_length(data_str, object_name):
data_len_match = re.search('@end_of_%s:' % object_name + r'(\d+)@',
data_str)
if data_len_match is not None:
return int(data_len_match.group(1))
else:
raise ValueError("Couldn't find data length in string")
def _extract_data(data_str, object_name):
start_idx = 0
end_idx = 0
start_match = re.search('@start_of_%s@' % object_name, data_str)
if start_match is not None:
start_idx = start_match.end()
else:
# need start signal
return ''
end_match = re.search('@end_of_%s:' % object_name + r'\d+@', data_str)
if end_match is not None:
end_idx = end_match.start()
return data_str[start_idx:end_idx]
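# Illustrative example (annotation added for clarity, not part of the
# original module): with the framing used above, a child stderr string such as
#     s = 'log text@start_of_data@SGVsbG8=@end_of_data:8@\n'
# gives _extract_data(s, 'data') == 'SGVsbG8=' and
# _extract_data_length(s, 'data') == 8.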
# Next 3 functions are from HNN. Will move here. They require psutil
def _kill_procs(procs):
"""Tries to terminate processes in a list before sending kill signal"""
from psutil import wait_procs, NoSuchProcess
# try terminate first
for p in procs:
try:
p.terminate()
except NoSuchProcess:
pass
_, alive = wait_procs(procs, timeout=3)
# now try kill
for p in alive:
p.kill()
_, alive = wait_procs(procs, timeout=3)
return alive
def _get_procs_running(proc_name):
"""Return a list of processes currently running"""
from psutil import process_iter
process_list = []
for p in process_iter(attrs=["name", "exe", "cmdline"]):
if proc_name == p.info['name'] or \
(p.info['exe'] is not None and
os.path.basename(p.info['exe']) == proc_name) or \
(p.info['cmdline'] and
p.info['cmdline'][0] == proc_name):
process_list.append(p)
return process_list
def kill_proc_name(proc_name):
"""Make best effort to kill processes
Parameters
----------
proc_name : str
A string to match process names against and kill all matches
Returns
-------
killed_procs : bool
True if any processes were killed
"""
killed_procs = False
procs = _get_procs_running(proc_name)
if len(procs) > 0:
running = _kill_procs(procs)
if len(running) > 0:
if len(running) < len(procs):
killed_procs = True
pids = [str(proc.pid) for proc in running]
warn("Failed to kill nrniv process(es) %s" %
','.join(pids))
else:
killed_procs = True
return killed_procs
def _write_net(stream, pickled_net):
stream.flush()
stream.write('@start_of_net@')
stream.write(pickled_net.decode())
stream.write('@end_of_net:%d@\n' % len(pickled_net))
stream.flush()
def _write_child_exit_signal(stream):
stream.flush()
stream.write('@data_received@\n')
stream.flush()
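# Protocol sketch (annotation, hedged): based on the helpers above, the
# exchange with the spawned mpi_child.py process appears to be
#   parent -> child (stdin)  : '@start_of_net@' + base64(pickle(obj)) +
#                              '@end_of_net:<n_bytes>@\n'
#   child  -> parent (stderr): '@start_of_data@' + base64(pickle(results)) +
#                              '@end_of_data:<n_bytes>@\n'
#   parent -> child (stdin)  : '@data_received@\n'
# The child side lives in mpi_child.py and is assumed, not shown, here.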
class JoblibBackend(object):
"""The JoblibBackend class.
Parameters
----------
n_jobs : int | None
The number of jobs to start in parallel. If None, then 1 trial will be
started without parallelism
Attributes
----------
n_jobs : int
The number of jobs to start in parallel
"""
def __init__(self, n_jobs=1):
self.n_jobs = n_jobs
print("joblib will run over %d jobs" % (self.n_jobs))
def _parallel_func(self, func):
if self.n_jobs != 1:
try:
from joblib import Parallel, delayed
except ImportError:
warn('joblib not installed. Cannot run in parallel.')
self.n_jobs = 1
if self.n_jobs == 1:
my_func = func
parallel = list
else:
parallel = Parallel(self.n_jobs)
my_func = delayed(func)
return parallel, my_func
def __enter__(self):
global _BACKEND
self._old_backend = _BACKEND
_BACKEND = self
return self
def __exit__(self, type, value, traceback):
global _BACKEND
_BACKEND = self._old_backend
def simulate(self, net, tstop, dt, n_trials, postproc=False):
"""Simulate the HNN model
Parameters
----------
net : Network object
The Network object specifying how cells are
connected.
n_trials : int
Number of trials to simulate.
tstop : float
The simulation stop time (ms).
dt : float
The integration time step of h.CVode (ms)
postproc : bool
If False, no postprocessing applied to the dipole
Returns
-------
dpl: list of Dipole
The Dipole results from each simulation trial
"""
parallel, myfunc = self._parallel_func(_simulate_single_trial)
sim_data = parallel(myfunc(net, tstop, dt, trial_idx) for
trial_idx in range(n_trials))
dpls = _gather_trial_data(sim_data, net=net, n_trials=n_trials,
postproc=postproc)
return dpls
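# Usage sketch (annotation; names outside this module are assumptions): both
# backends are context managers that swap the module-level _BACKEND for the
# duration of a simulation driven by the wider hnn-core package, e.g.
#     from hnn_core import jones_2009_model, simulate_dipole
#     net = jones_2009_model()
#     with JoblibBackend(n_jobs=2):
#         dpls = simulate_dipole(net, tstop=170., n_trials=2)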
class MPIBackend(object):
"""The MPIBackend class.
Parameters
----------
n_procs : int | None
The number of MPI processes requested by the user. If None, then will
attempt to detect number of cores (including hyperthreads) and start
parallel simulation over all of them.
mpi_cmd : str
The name of the mpi launcher executable. Will use 'mpiexec'
(openmpi) by default.
Attributes
----------
n_procs : int
The number of processes MPI will actually use (spread over cores). If 1
is specified or mpi4py could not be loaded, the simulation will be run
with the JoblibBackend
mpi_cmd : list of str
The mpi command with number of procs and options to be passed to Popen
expected_data_length : int
Used to check consistency between data that was sent and what
MPIBackend received.
    proc_queue : queue.Queue
A Queue object to hold process handles from Popen in a thread-safe way.
        There will be a valid process handle present in the queue when an MPI
        simulation is running.
"""
def __init__(self, n_procs=None, mpi_cmd='mpiexec'):
self.expected_data_length = 0
self.proc = None
self.proc_queue = Queue()
n_logical_cores = multiprocessing.cpu_count()
if n_procs is None:
self.n_procs = n_logical_cores
else:
self.n_procs = n_procs
# did user try to force running on more cores than available?
oversubscribe = False
if self.n_procs > n_logical_cores:
oversubscribe = True
hyperthreading = False
if _has_mpi4py() and _has_psutil():
import psutil
n_physical_cores = psutil.cpu_count(logical=False)
# detect if we need to use hwthread-cpus with mpiexec
if self.n_procs > n_physical_cores:
hyperthreading = True
else:
packages = list()
if not _has_mpi4py():
packages += ['mpi4py']
if not _has_psutil():
packages += ['psutil']
packages = ' and '.join(packages)
warn(f'{packages} not installed. Will run on single processor')
self.n_procs = 1
self.mpi_cmd = mpi_cmd
if self.n_procs == 1:
print("Backend will use 1 core. Running simulation without MPI")
return
else:
print("MPI will run over %d processes" % (self.n_procs))
if hyperthreading:
self.mpi_cmd += ' --use-hwthread-cpus'
if oversubscribe:
self.mpi_cmd += ' --oversubscribe'
self.mpi_cmd += ' -np ' + str(self.n_procs)
self.mpi_cmd += ' nrniv -python -mpi -nobanner ' + \
sys.executable + ' ' + \
os.path.join(os.path.dirname(sys.modules[__name__].__file__),
'mpi_child.py')
# Split the command into shell arguments for passing to Popen
if 'win' in sys.platform:
use_posix = True
else:
use_posix = False
self.mpi_cmd = shlex.split(self.mpi_cmd, posix=use_posix)
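        # Resulting command (illustrative; the exact paths and flags depend
        # on the platform and core count detected above), e.g.
        #   mpiexec --use-hwthread-cpus -np 4 nrniv -python -mpi -nobanner
        #       /path/to/python /path/to/hnn_core/mpi_child.py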
def __enter__(self):
global _BACKEND
self._old_backend = _BACKEND
_BACKEND = self
return self
def __exit__(self, type, value, traceback):
global _BACKEND
_BACKEND = self._old_backend
# always kill nrniv processes for good measure
if self.n_procs > 1:
kill_proc_name('nrniv')
def simulate(self, net, tstop, dt, n_trials, postproc=False):
"""Simulate the HNN model in parallel on all cores
Parameters
----------
net : Network object
The Network object specifying how cells are
connected.
tstop : float
The simulation stop time (ms).
dt : float
The integration time step of h.CVode (ms)
n_trials : int
Number of trials to simulate.
        postproc : bool
If False, no postprocessing applied to the dipole
Returns
-------
dpl: list of Dipole
The Dipole results from each simulation trial
"""
# just use the joblib backend for a single core
if self.n_procs == 1:
return JoblibBackend(n_jobs=1).simulate(net, tstop=tstop,
dt=dt,
n_trials=n_trials,
postproc=postproc)
print("Running %d trials..." % (n_trials))
dpls = []
env = _get_mpi_env()
self.proc, sim_data = run_subprocess(
command=self.mpi_cmd, obj=[net, tstop, dt, n_trials], timeout=30,
proc_queue=self.proc_queue, env=env, cwd=os.getcwd(),
universal_newlines=True)
dpls = _gather_trial_data(sim_data, net, n_trials, postproc)
return dpls
def terminate(self):
"""Terminate running simulation on this MPIBackend
Safe to call from another thread from the one `simulate_dipole`
was called from.
"""
proc = None
try:
proc = self.proc_queue.get(timeout=1)
except Empty:
warn("No currently running process to terminate")
if proc is not None:
proc.terminate()
try:
proc.wait(5) # wait maximum of 5s
except TimeoutExpired:
warn("Could not kill python subprocess: PID %d" %
proc.pid)
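# Usage sketch (annotation; `net` and `simulate_dipole` are assumed to come
# from the wider hnn-core package):
#     with MPIBackend(n_procs=4, mpi_cmd='mpiexec'):
#         dpls = simulate_dipole(net, tstop=170., n_trials=1)
# terminate() can then be called from another thread to stop a running
# simulation early.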
| 2.15625 | 2 |
Platforms/Web/Processing/Api/Discord/Commands/main.py | HeapUnderfl0w/Phaazebot | 0 | 12793350 | from typing import TYPE_CHECKING
if TYPE_CHECKING:
from Platforms.Discord.main_discord import PhaazebotDiscord
from Platforms.Web.index import WebIndex
from aiohttp.web import Response, Request
from .get import apiDiscordCommandsGet
from .create import apiDiscordCommandsCreate
from .list import apiDiscordCommandsList
from .delete import apiDiscordCommandsDelete
from .edit import apiDiscordCommandsEdit
from Platforms.Web.Processing.Api.errors import apiMissingValidMethod, apiNotAllowed
async def apiDiscordCommands(cls:"WebIndex", WebRequest:Request) -> Response:
"""
Default url: /api/discord/commands
"""
PhaazeDiscord:"PhaazebotDiscord" = cls.Web.BASE.Discord
if not PhaazeDiscord: return await apiNotAllowed(cls, WebRequest, msg="Discord module is not active")
method:str = WebRequest.match_info.get("method", "")
if not method: return await apiMissingValidMethod(cls, WebRequest)
elif method == "get":
return await apiDiscordCommandsGet(cls, WebRequest)
elif method == "delete":
return await apiDiscordCommandsDelete(cls, WebRequest)
elif method == "create":
return await apiDiscordCommandsCreate(cls, WebRequest)
elif method == "edit":
return await apiDiscordCommandsEdit(cls, WebRequest)
elif method == "list":
return await apiDiscordCommandsList(cls, WebRequest)
else: return await apiMissingValidMethod(cls, WebRequest, msg=f"'{method}' is not a known method")
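# Example (annotation; the exact route pattern is defined elsewhere in
# Platforms/Web and is assumed here): a request to /api/discord/commands/list
# reaches this coroutine with WebRequest.match_info["method"] == "list" and is
# dispatched to apiDiscordCommandsList.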
| 2.59375 | 3 |