max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
test_autolens/plot/grids/regular_grid.py | rakaar/PyAutoLens | 0 | 12790151 | import autolens as al
import autolens.plot as aplt  # aplt is used below but was never imported; autolens.plot is the assumed source
grid = al.Grid.uniform(shape_2d=(10, 10), pixel_scales=1.0)
aplt.Grid(grid=grid)
grid = al.Grid.uniform(shape_2d=(10, 10), pixel_scales=1.0, origin=(5.0, 5.0))
aplt.Grid(grid=grid, symmetric_around_centre=False)
| 2.234375 | 2 |
data_utils.py | camille1874/ads_classify | 0 | 12790152 | <reponame>camille1874/ads_classify
import tensorflow as tf
import pandas as pd
from collections import Counter
from imblearn.combine import SMOTEENN
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import NearMiss
import numpy as np
def scale(df, column):
df[column] = (df[column] - df[column].min()) / (df[column].max() - df[column].min())
def read_data(data_file, num_epochs, shuffle):
df_data = pd.read_csv(
tf.gfile.Open(data_file),
header=0,
skipinitialspace=True,
engine="python")
process(df_data)
if "test" in data_file:
labels = None
else:
labels = df_data["y_buy"]
inputs = tf.estimator.inputs.pandas_input_fn(
x=df_data,
y=labels,
batch_size=128,
num_epochs=num_epochs,
shuffle=shuffle,
queue_capacity=50000,
num_threads=1)
return inputs
# 采样
def read_data_with_sampling(data_file, num_epochs, shuffle):
df_data = pd.read_csv(
tf.gfile.Open(data_file),
header=0,
skipinitialspace=True,
engine="python")
process(df_data)
X = df_data.drop(["y_buy"], axis = 1)
y = df_data["y_buy"]
X_column = X.columns
y_column = ["y_buy"]
print(sorted(Counter(y).items()))
# 直接欠采样
# nm = NearMiss(random_state=0, version=3)
# X_resampled, y_resampled = nm.fit_sample(X, y)
# print(sorted(Counter(y_resampled).items()))
# 欠采样和过采样结合1
smote_enn = SMOTEENN(sampling_strategy=0.5, random_state=0)
X_resampled, y_resampled = smote_enn.fit_sample(X, y)
print(sorted(Counter(y_resampled).items()))
## 欠采样和过采样结合2
##smote_tomek = SMOTETomek(sampling_strategy=0.1, random_state=0)
##X_resampled, y_resampled = smote_tomek.fit_resample(X, y)
##print(sorted(Counter(y_resampled).items()))
X_resampled = pd.DataFrame(X_resampled, columns=X_column)
y_resampled = pd.DataFrame(y_resampled, columns=y_column)
process(X_resampled)
inputs = tf.estimator.inputs.pandas_input_fn(
X_resampled,
y_resampled,
batch_size=128,
num_epochs=num_epochs,
shuffle=shuffle,
queue_capacity=70000,
num_threads=1)
return inputs
def process(df):
df.fillna(0, inplace=True)
df["buy_freq"] = df["buy_freq"].astype(int)
df["visit_freq"] = df["visit_freq"].astype(int)
df["last_buy"] = df["last_buy"].astype(int)
df["last_visit"] = df["last_visit"].astype(int)
df["multiple_buy"] = df["multiple_buy"].astype(int)
df["multiple_visit"] = df["multiple_visit"].astype(int)
df["uniq_urls"] = df["uniq_urls"].astype(int)
df["num_checkins"] = df["num_checkins"].astype(int)
| 2.484375 | 2 |
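A minimal, self-contained sketch (assuming imbalanced-learn and scikit-learn are installed) of the combined over/under-sampling step used in read_data_with_sampling above, run on made-up synthetic data; newer imblearn releases spell the method fit_resample.

import numpy as np
from collections import Counter
from imblearn.combine import SMOTEENN

rng = np.random.RandomState(0)
X_toy = rng.normal(size=(1000, 4))                    # 1000 samples, 4 numeric features
y_toy = np.array([0] * 950 + [1] * 50)                # heavily imbalanced labels
print(sorted(Counter(y_toy).items()))                 # [(0, 950), (1, 50)]

smote_enn = SMOTEENN(sampling_strategy=0.5, random_state=0)
X_res, y_res = smote_enn.fit_resample(X_toy, y_toy)   # SMOTE oversampling followed by ENN cleaning
print(sorted(Counter(y_res).items()))                 # minority class boosted toward a 1:2 ratio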
dolo/algos/dtmscc/accuracy.py | zlqs1985/dolo | 0 | 12790153 | from __future__ import division
import numpy
def compute_residuals(model, dr, grid, i_mc=0):  # i_mc: markov-state index; defaulting to the first state is an assumption
ff = model.functions['arbitrage']
gg = model.functions['transition']
aa = model.functions['auxiliary']
f = lambda m,s,x,M,S,X,p: ff(m,s,x,aa(m,s,x,p),M,S,X,aa(M,S,X,p),p)
g = lambda m,s,x,M,p: gg(m,s,x,aa(m,s,x,p),M,p)
P,Q = model.markov_chain
parms = model.calibration['parameters']
controls = dr(grid)
    res = residuals(f, g, i_mc, grid, controls, dr, P, Q, parms)
return res
def residuals(f, g, i_ms, s, x, dr, P, Q, parms):
N = s.shape[0]
n_s = s.shape[1]
n_ms = P.shape[0] # number of markov states
n_mv = P.shape[1] # number of markov variable
res = numpy.zeros_like(x)
XM = numpy.zeros_like(x)
import time
# solving on grid for markov index i_ms
# m = P[i_ms,:][None,:]
m = numpy.tile(P[i_ms,:],(N,1))
xm = x[:,:]
for I_ms in range(n_ms):
# M = P[I_ms,:][None,:]
M = numpy.tile(P[I_ms,:], (N,1))
prob = Q[i_ms, I_ms]
S = g(m, s, xm, M, parms)
XM[:,:] = dr(I_ms, S)
rr = f(m,s,xm,M,S,XM,parms)
res[:,:] += prob*rr
return res
| 2.21875 | 2 |
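A toy, NumPy-only illustration of the aggregation pattern in residuals() above: the residual at each grid point is the transition-probability-weighted sum of arbitrage errors over future markov states. All sizes and values below are made up.

import numpy as np

N, n_x, n_ms = 4, 2, 3                     # grid points, controls, markov states
Q = np.array([[0.6, 0.3, 0.1],             # markov transition probabilities
              [0.2, 0.5, 0.3],
              [0.1, 0.4, 0.5]])
i_ms = 0                                   # current markov state index
rng = np.random.RandomState(0)

res = np.zeros((N, n_x))
for I_ms in range(n_ms):
    rr = rng.normal(size=(N, n_x))         # stand-in for f(m, s, xm, M, S, XM, parms)
    res += Q[i_ms, I_ms] * rr              # expectation over next-period markov states
print(res.shape)                           # (4, 2)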
app/views/members/routes.py | sagarkaurav/worktable | 3 | 12790154 | import datetime
from app import db, mail
from app.models import Member, MemberInvite
from flask import Blueprint, flash, redirect, render_template, request, url_for
from flask_login import current_user, login_required, login_user
from flask_mail import Message
from .forms import MemberInviteForm, MemberJoinForm
members = Blueprint(
"members", __name__, subdomain="<org_username>", template_folder="templates"
)
@members.route("/")
@login_required
def index(org_username):
member_page = request.args.get("member_page", 1)
members = Member.query.filter_by(organization=current_user.organization).paginate(
int(member_page), 5
)
all_invites = MemberInvite.query.filter_by(
organization=current_user.organization
).all()
return render_template(
"members/index.html", all_invites=all_invites, members=members
)
@members.route("/invite/<invite_id>/remove", methods=["POST"])
@login_required
def remove_invite(org_username, invite_id):
invite = MemberInvite.query.filter_by(
organization=current_user.organization, id=invite_id
).first()
if invite is None:
flash("Unable to find invite to remove", "error")
else:
db.session.delete(invite)
db.session.commit()
flash("Invite removed successfully", "success")
return redirect(
url_for(
"members.index",
org_username=current_user.organization.username,
)
)
@members.route("/invite/", methods=["GET", "POST"])
@login_required
def invite(org_username):
member_invite = MemberInviteForm()
if member_invite.validate_on_submit():
email = member_invite.email.data.lower()
member = Member.query.filter_by(
email=email, organization=current_user.organization
).first()
if member:
flash("Email is already a member", "error")
return redirect(
url_for(
"members.index",
org_username=current_user.organization.username,
)
)
invite = MemberInvite.query.filter_by(
organization=current_user.organization, email=email
).first()
if invite:
flash("Invite already sent.", "warning")
return redirect(
url_for(
"members.index",
org_username=current_user.organization.username,
)
)
new_invite = MemberInvite(email=email, organization=current_user.organization)
db.session.add(new_invite)
db.session.commit()
invite_link = url_for(
".join",
org_username=current_user.organization.username,
token=new_invite.token,
_external=True,
)
msg = Message(
"Worktable organization join invite",
sender="<EMAIL>",
recipients=[new_invite.email],
)
msg.body = render_template(
"members/invite_mail.txt",
join_link=invite_link,
org_name=current_user.organization.name,
)
msg.html = render_template(
"members/invite_mail.html",
join_link=invite_link,
org_name=current_user.organization.name,
)
mail.send(msg)
flash("New member invite has been sent", "success")
return redirect(
url_for("members.index", org_username=current_user.organization.username)
)
return render_template("members/invite.html", form=member_invite)
@members.route("/join/<token>", methods=["GET", "POST"])
def join(org_username, token):
invite = MemberInvite.query.filter_by(token=token).first()
if not invite:
flash("Invalid invite link", "error")
return redirect(url_for("auth.login", org_username=org_username))
member_join = MemberJoinForm()
if member_join.validate_on_submit():
new_member = Member(
first_name=member_join.first_name.data,
last_name=member_join.last_name.data,
email=invite.email,
organization=invite.organization,
)
new_member.password = member_join.password.data
db.session.add(new_member)
db.session.delete(invite)
db.session.commit()
login_user(new_member)
flash("New member invite has been sent", "success")
return redirect(
url_for("dashboard.index", org_username=invite.organization.username)
)
return render_template(
"members/join.html", invite=invite, form=member_join, token=token
)
@members.route("/disable/", methods=["POST"])
@login_required
def disable_account(org_username):
member_id = request.form.get("member_id")
member = Member.query.filter_by(
id=member_id, organization=current_user.organization
).first()
if member is None:
flash("Member account is not found", "error")
elif member.disabled_at:
flash("Member account is already disabled", "error")
elif member == current_user:
flash("You can not disabled your own account", "error")
else:
member.disabled_at = datetime.datetime.utcnow()
db.session.commit()
flash("Member account has been disabled", "success")
redirect_url = url_for(".index", org_username=current_user.organization.username)
return redirect(redirect_url)
@members.route("/enable/", methods=["POST"])
@login_required
def enable_account(org_username):
member_id = request.form.get("member_id")
member = Member.query.filter_by(
id=member_id, organization=current_user.organization
).first()
if member is None:
flash("Member account is not found", "error")
elif member.disabled_at is None:
flash("Member account is already enabled", "error")
elif member == current_user:
flash("You can not enable your own account", "error")
else:
member.disabled_at = None
db.session.commit()
flash("Member account has been enabled", "success")
redirect_url = url_for(".index", org_username=current_user.organization.username)
return redirect(redirect_url)
| 2.390625 | 2 |
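A minimal, hypothetical app-factory sketch showing how a subdomain-scoped blueprint like members above would be registered. Flask subdomain routing requires SERVER_NAME to be set; the host name and module path used here are assumptions, not part of the original project.

from flask import Flask


def create_app():
    app = Flask(__name__)
    app.config["SERVER_NAME"] = "worktable.localhost:5000"   # assumed dev host; needed for subdomain routing
    from app.views.members.routes import members             # assumed module path for the blueprint above
    app.register_blueprint(members)
    return app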
HLTrigger/Configuration/python/HLT_75e33/tasks/pfClusteringECALTask_cfi.py | PKUfudawei/cmssw | 1 | 12790155 | <reponame>PKUfudawei/cmssw
import FWCore.ParameterSet.Config as cms
from ..modules.particleFlowClusterECALUncorrected_cfi import *
from ..modules.particleFlowRecHitECAL_cfi import *
from ..tasks.particleFlowClusterECALTask_cfi import *
pfClusteringECALTask = cms.Task(
particleFlowClusterECALTask,
particleFlowClusterECALUncorrected,
particleFlowRecHitECAL
)
| 1.0625 | 1 |
examples/dataflow-python-examples/streaming-examples/slowlychanging-sideinput/sideinput_refresh/dofns.py | ruchirjain86/professional-services | 2,116 | 12790156 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import List
import apache_beam as beam
from apache_beam.io.filesystems import FileSystems
from sideinput_refresh import util
@beam.typehints.with_input_types(bytes)
@beam.typehints.with_output_types(beam.pvalue.TaggedOutput)
class SplitToMultiple(beam.DoFn):
"""Generates a base path for each side input type combining root path received via file notification subscription
and side input type. PCollection recieved will contain only single element representing base path and will
be fired once every x hours matching the side input refresh frequency
Attributes:
sideinput_types: List of Side input types
file_prefix: file_prefix matching required files. Default is * indicating all files
"""
def __init__(self, sideinput_types: List[str], file_prefix: str = "*"):
self.sideinput_types = sideinput_types
self.file_prefix = file_prefix
def process(self,
element,
timestamp=beam.DoFn.TimestampParam,
window=beam.DoFn.WindowParam,
pane_info=beam.DoFn.PaneInfoParam):
# Logging to audit triggering of side input refresh process. Statement will be logged only whenever the pubsub notification
# triggers side input refresh process (i.e normally once in every x hours)
if isinstance(window, beam.transforms.window.GlobalWindow):
logging.info(
f"(Re)loading side input data from basepath {element.decode()} for global window: {timestamp} - {window}"
)
else:
logging.info(
f"(Re)loading side input data from basepath {element.decode()} for window: {util.get_formatted_time(window.start)} - {util.get_formatted_time(window.end)}"
)
for sideinput_type in self.sideinput_types:
yield beam.pvalue.TaggedOutput(
sideinput_type,
FileSystems.join(element.decode(), sideinput_type,
self.file_prefix))
| 2.140625 | 2 |
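A hypothetical wiring sketch for SplitToMultiple above, showing how its tagged outputs are typically consumed via Beam's with_outputs. The side input type names, bucket path, and import path are placeholders.

import apache_beam as beam

from sideinput_refresh.dofns import SplitToMultiple   # assumed import path

sideinput_types = ["bonuspoints", "discountpct"]       # illustrative side input types

with beam.Pipeline() as p:
    tagged = (
        p
        | "FakeNotification" >> beam.Create([b"gs://example-bucket/sideinput/20200101"])
        | "SplitPerType" >> beam.ParDo(
            SplitToMultiple(sideinput_types, file_prefix="*")
        ).with_outputs(*sideinput_types)
    )
    # tagged.bonuspoints and tagged.discountpct are now per-type PCollections of glob paths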
backend/apps/mails/views.py | KuanWeiLee/froggy-service | 174 | 12790157 | <gh_stars>100-1000
from django.shortcuts import redirect
from django.urls import reverse
from rest_framework.viewsets import ModelViewSet
from rest_framework.decorators import action
from rest_framework.permissions import IsAdminUser
from .models import SendGridMail
from .serializers import SendGridMailSerializer
class MailViewSet(ModelViewSet):
queryset = SendGridMail.objects.all()
serializer_class = SendGridMailSerializer
permission_classes = [IsAdminUser]
http_method_names = ['get', 'post', 'retrieve']
@action(methods=['GET'], detail=True)
def resend(self, request, pk=None):
mail = SendGridMail.objects.get(id=pk)
mail.send()
return redirect(reverse("admin:cases_case_change", args=(mail.case.id,)))
| 1.789063 | 2 |
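A minimal, hypothetical URL configuration for MailViewSet above using a DRF router; the route prefix and import path are assumptions. The detail action resend is exposed at /mails/{pk}/resend/.

from rest_framework.routers import DefaultRouter

from apps.mails.views import MailViewSet   # assumed import path

router = DefaultRouter()
router.register(r"mails", MailViewSet, basename="mail")
urlpatterns = router.urls                  # /mails/, /mails/{pk}/ and /mails/{pk}/resend/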
Collection/ms/01 Arrays/01_two_sum.py | kmanadkat/leetcode-101 | 0 | 12790158 | from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
dt = {}
for index, num in enumerate(nums):
# Check If Complement Exists in dictionary
complement = target - num
if complement in dt and dt[complement] != index:
return [index, dt[complement]]
# Add entry for future checks
dt[num] = index
return []
| 3.421875 | 3 |
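A quick usage check for the Solution class above; the input lists and target values are illustrative.

sol = Solution()
print(sol.twoSum([2, 7, 11, 15], 9))   # [1, 0]  -> indices of 7 and 2
print(sol.twoSum([3, 3], 6))           # [1, 0]
print(sol.twoSum([1, 2], 7))           # []      -> no pair sums to the target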
app/auth_app/views.py | ganggas95/simdus_app | 0 | 12790159 | from flask import (
render_template,
request,
redirect,
url_for)
from flask_login import login_user, logout_user
from app.helpers import BaseView
from .models import LoginForm, User
class AuthView(BaseView):
def dispatch_request(self):
form = LoginForm(request.form)
if request.method == 'GET':
return render_template(
self.template_name,
form=form)
        user = User.get_username(form.username.data)
        if user and user.check_password(form.password.data):
login_user(user, form.remember.data)
return redirect(
url_for('dashboard_bp.dashboard_page')
)
return redirect(url_for('auth_bp.auth_view'))
class LogoutView(BaseView):
def dispatch_request(self):
logout_user()
return redirect(
url_for('auth_bp.auth_view')
)
| 2.5 | 2 |
curiosity/interaction/update_step.py | neuroailab/curiosity_deprecated | 0 | 12790160 | <reponame>neuroailab/curiosity_deprecated
'''
Defines the training step.
'''
import sys
sys.path.append('tfutils')
import tensorflow as tf
from tfutils.base import get_optimizer, get_learning_rate
import numpy as np
import cv2
from curiosity.interaction import models
import h5py
import json
class RawDepthDiscreteActionUpdater:
'''
Provides the training step.
This is probably where we can put parallelization.
Not finished!
'''
    def __init__(self, world_model, rl_model, data_provider, eta):
self.data_provider = data_provider
self.world_model = world_model
self.rl_model = rl_model
self.eta = eta
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
        self.action = tf.placeholder(tf.float32, [None] + world_model.action_one_hot.get_shape().as_list()[1:])
self.adv = tf.placeholder(tf.float32, [None])
self.r = tf.placeholder(tf.float32, [None])
log_prob_tf = tf.nn.log_softmax(rl_model.logits)
prob_tf = tf.nn.softmax(rl_model.logits)
        pi_loss = -tf.reduce_sum(tf.reduce_sum(log_prob_tf * self.action, [1]) * self.adv)
vf_loss = .5 * tf.reduce_sum(tf.square(rl_model.vf - self.r))
entropy = -tf.reduce_sum(prob_tf * log_prob_tf)
self.rl_loss = pi_loss + 0.5 * vf_loss - entropy * 0.01
rl_opt_params, rl_opt = get_optimizer(learning_rate, self.rl_loss, )
def replace_the_nones(my_list):
'''
Assumes my_list[-1] is np array
'''
return [np.zeros(my_list[-1].shape, dtype = my_list[-1].dtype) if elt is None else elt for elt in my_list]
def postprocess_batch_depth(batch, state_desc):
obs, msg, act, act_post = batch
depths = replace_the_nones(obs[state_desc])
obs_past = np.array([depths[:-1]])
obs_fut = np.array([depths[1:]])
actions = np.array([replace_the_nones(act)])
actions_post = np.array([replace_the_nones(act_post)])
return obs_past, actions, actions_post, obs_fut
# def postprocess_batch_depth(batch):
# depths = np.array([[timepoint if timepoint is not None else np.zeros(obs['depths1'][-1].shape, dtype = obs['depths1'][-1].dtype) for timepoint in obs['depths1']] for obs in batch.states])
# actions = np.array(batch.actions)
# next_depth = np.array([batch.next_state['depths1']])
# return depths, actions, next_depth
def postprocess_batch_for_actionmap(batch, state_desc):
obs, msg, act = batch
prepped = {}
depths = replace_the_nones(obs[state_desc])
depths_past = np.array([depths[:-1]])
depths_fut = np.array([depths[:1]])
objects = np.array([replace_the_nones(obs[state_desc])[:-1]])
actions = np.array([replace_the_nones(act)])
action_ids_list = []
for i in range(2):
action_msg = msg[i]['msg']['actions'] if msg[i] is not None else []
if len(action_msg):
idx = int(action_msg[0]['id'])
else:
idx = -10000#just something that's not an id seen
action_ids_list.append(idx)
action_ids = np.array([action_ids_list])
return depths_past, objects, actions, action_ids, depths_fut
# def postprocess_batch_for_actionmap(batch):
# prepped = {}
# for desc in ['depths1', 'objects1']:
# prepped[desc] = np.array([[timepoint if timepoint is not None else np.zeros(obs[desc][-1].shape, dtype = obs[desc][-1].dtype) for timepoint in obs[desc]] for obs in batch.states])
# actions = np.array([[np.zeros(batch.next_state['action'][-1].shape, batch.next_state['action'][-1].dtype) if timepoint is None else timepoint for timepoint in batch.next_state['action']]])
# print('actions shape')
# print(actions.shape)
# print(len(batch.next_state['action']))
# action_ids_list = []
# for i in range(2):
# action_msg = batch.next_state['msg'][i]['msg']['actions'] if batch.next_state['msg'][i] is not None else []
# if len(action_msg):
# idx = int(action_msg[0]['id'])
# action_ids_list.append(idx)
# action_ids = np.array([action_ids_list])
# next_depths = np.array([batch.next_state['depths1']])
# return prepped['depths1'], prepped['objects1'], actions, action_ids, next_depths
class ExperienceReplayPostprocessor:
def __init__(self, big_save_keys = None, little_save_keys = None, big_save_len = None, big_save_freq = None, state_descriptor = None):
self.big_save_keys = big_save_keys
self.little_save_keys = little_save_keys
self.big_save_len = big_save_len
self.big_save_freq = big_save_freq
self.state_descriptor = state_descriptor
self.big_save_keys.append('map_draw')
self.little_save_keys.append('map_draw')
self.big_save_keys.extend(['act_lr', 'um_lr'])
self.little_save_keys.extend(['act_lr', 'um_lr'])
def postprocess(self, training_results, batch):
global_step = training_results['global_step']
res = {}
if (global_step) % self.big_save_freq < self.big_save_len:
save_keys = self.big_save_keys
#est_losses = [other[1] for other in batch['other']]
#action_sample = [other[2] for other in batch['other']]
res['batch'] = {}
for desc, val in batch.iteritems():
if desc not in ['recent', 'depths1', 'objects1', 'images1']:
res['batch'][desc] = val
res['recent'] = batch['recent']
else:
save_keys = self.little_save_keys
res.update(dict(pair for pair in training_results.iteritems() if pair[0] in save_keys))
#if 'other' in batch['recent']:
# entropies = [other[0] for other in batch['recent']['other']]
# entropies = np.mean(entropies)
# res['entropy'] = entropies
if 'msg' in batch['recent']:
looking_at_obj = [1 if msg is not None and msg['msg']['action_type'] == 'OBJ_ACT' else 0 for msg in batch['recent']['msg']]
res['obj_freq'] = np.mean(looking_at_obj)
elif type(batch['recent']) == list and len(batch['recent'][0]) > 0:
mean_per_provider = []
for provider_recent in batch['recent']:
looking_at_obj = [1 if msg is not None and msg['msg']['action_type'] == 'OBJ_ACT' else 0 for msg in provider_recent['msg']]
mean_per_provider.append(np.mean(looking_at_obj))
res['obj_freq'] = np.mean(mean_per_provider)
res['obj_freq_per_provider_noprint'] = mean_per_provider
return res
class UncertaintyPostprocessor:
def __init__(self, big_save_keys = None, little_save_keys = None, big_save_len = None, big_save_freq = None, state_descriptor = None):
self.big_save_keys = big_save_keys
self.little_save_keys = little_save_keys
self.big_save_len = big_save_len
self.big_save_freq = big_save_freq
self.state_descriptor = state_descriptor
def postprocess(self, training_results, batch):
global_step = training_results['global_step']
res = {}
print('postprocessor deets')
print(global_step)
print(self.big_save_freq)
print(self.big_save_len)
if (global_step) % self.big_save_freq < self.big_save_len:
print('big time')
save_keys = self.big_save_keys
est_losses = [other[1] for other in batch['recent']['other']]
action_sample = [other[2] for other in batch['recent']['other']]
res['batch'] = {'obs' : batch['depths1'], 'act' : batch['action'], 'act_post' : batch['action_post'], 'est_loss' : est_losses, 'action_sample' : action_sample}
res['msg'] = batch['recent']['msg']
else:
print('little time')
save_keys = self.little_save_keys
res.update(dict((k, v) for (k, v) in training_results.iteritems() if k in save_keys))
#res['msg'] = batch['msg'][-1]
entropies = [other[0] for other in batch['recent']['other']]
entropies = np.mean(entropies)
res['entropy'] = entropies
        looking_at_obj = [1 if msg is not None and msg['msg']['action_type'] == 'OBJ_ACT' else 0 for msg in batch['recent']['msg']]
res['obj_freq'] = np.mean(looking_at_obj)
return res
class DataWriteUpdater:
def __init__(self, data_provider, updater_params):
self.data_provider = data_provider
fn = updater_params['hdf5_filename']
N = updater_params['N_save']
height, width = updater_params['image_shape']
act_dim = updater_params['act_dim']
print('setting up save loc')
self.hdf5 = hdf5 = h5py.File(fn, mode = 'a')
dt = h5py.special_dtype(vlen = str)
self.handles = {'msg' : hdf5.require_dataset('msg', shape = (N,), dtype = dt),
'depths1' : hdf5.require_dataset('depths1', shape = (N, height, width, 3), dtype = np.uint8),
'objects1' : hdf5.require_dataset('objects1', shape = (N, height, width, 3), dtype = np.uint8),
'images1': hdf5.require_dataset('images1', shape = (N, height, width, 3), dtype = np.uint8),
'action' : hdf5.require_dataset('action', shape = (N, act_dim), dtype = np.float32),
'action_post' : hdf5.require_dataset('action_post', shape = (N, act_dim), dtype = np.float32)}
print('save loc set up')
self.start = 0
def update(self):
batch = self.data_provider.dequeue_batch()
bs = len(batch['recent']['msg'])
end = self.start + bs
for k in ['depths1', 'objects1', 'images1', 'action', 'action_post']:
tosave = batch['recent'][k]
if k in ['action', 'action_post']:
tosave = tosave.astype(np.float32)
self.handles[k][self.start : end] = batch['recent'][k]
self.handles['msg'][self.start : end] = [json.dumps(msg) for msg in batch['recent']['msg']]
self.start = end
def close(self):
self.hdf5.close()
class LatentUncertaintyValidator:
def __init__(self, models, data_provider):
self.um = models['uncertainty_model']
self.wm = models['world_model']
self.targets = {
'act_pred' : self.wm.act_pred,
'fut_loss' : self.wm.fut_loss, 'act_loss' : self.wm.act_loss, 'um_loss' : self.um.uncertainty_loss,
'estimated_world_loss' : self.um.estimated_world_loss, 'loss_per_example' : self.um.true_loss,
'act_loss_per_example' : self.wm.act_loss_per_example
}
self.dp = data_provider
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch['depths1'],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post'],
self.wm.obj_there : batch['obj_there']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res['batch'] = {}
for desc, val in batch.iteritems():
print(desc)
if desc == 'obj_there':
res['batch'][desc] = val
elif desc != 'recent':
res['batch'][desc] = val[:, -1]
res['recent'] = batch['recent']
class ObjectThereValidater:
def __init__(self, models, data_provider):
self.um = models['uncertainty_model']
self.wm = models['world_model']
self.targets = {'um_loss' : self.um.uncertainty_loss, 'loss_per_example' : self.um.true_loss,
'estimated_world_loss' : self.um.estimated_world_loss}
self.dp = data_provider
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch['depths1'],
self.wm.action : batch['action'],
self.wm.obj_there : batch['obj_there']
}
return sess.run(self.targets, feed_dict = feed_dict)
class ActionUncertaintyValidator:
def __init__(self, models, data_provider):
self.um = um = models['uncertainty_model']
self.wm = wm = models['world_model']
self.targets = {'act_pred' : self.wm.act_pred, 'act_loss' : self.wm.act_loss,
'estimated_world_loss' : self.um.estimated_world_loss,
'um_loss' : self.um.uncertainty_loss, 'loss_per_example' : self.um.true_loss}
self.dp = data_provider
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch['depths1'],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res['batch'] = batch
return res
class ActionUncertaintyValidatorWithReadouts:
def __init__(self, model, data_provider):
self.dp = data_provider
self.wm = model['world_model']
self.um = model['uncertainty_model']
self.targets = {}
self.targets.update({k : v for k, v in self.wm.readouts.items() if k not in self.wm.save_to_gfs})
self.targets.update({k : v for k, v in self.um.readouts.items() if k not in self.um.save_to_gfs})
#this should be changed for an online data provider, set to do nothing
self.map_draw_mode = 'specified_indices'
#relies on there being just one obs type
self.state_desc = data_provider.data_lengths['obs'].keys()[0]
self.insert_objthere = False if data_provider.num_objthere is None else True
def run(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch ['action_post']
}
if self.insert_objthere:
feed_dict[self.wm.obj_there_via_msg] = batch['obj_there']
res = sess.run(self.targets, feed_dict = feed_dict)
#TODO case it for online
res['recent'] = {}
#if self.map_draw_mode == 'specified_indices':
# map_draw_res = []
# for idx in self.map_draw_example_indices:
# obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
# action_samples = self.action_sampler.sample_actions()
# action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
# to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
# 'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
# 'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
# map_draw_res.append(to_add)
#res['map_draw'] = map_draw_res
return res
class ObjectThereUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider
self.wm = world_model
self.um = uncertainty_model
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets = {'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss,
'estimated_world_loss' : self.um.estimated_world_loss
}
self.state_desc = updater_params['state_desc']
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
state_desc = self.state_desc
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.obj_there : batch['obj_there']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class SquareForceMagUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.dp = data_provider
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets = {'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss,
'estimated_world_loss' : self.um.estimated_world_loss
}
if self.um.exactly_whats_needed:
self.targets['oh_my_god'] = self.um.oh_my_god
self.state_desc = updater_params['state_desc']
def update(self, sess, visualize = False):
batch = self.dp.dequeue_batch()
state_desc = self.state_desc
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
if self.um.insert_obj_there:
print('adding obj_there to feed dict')
feed_dict[self.um.obj_there] = batch['obj_there']
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class DebuggingForceMagUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.dp = data_provider
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0, dtype = tf.int32))
print(learning_rate_params.keys())
um_lr_params, um_lr = get_learning_rate(self.global_step, **learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'])
self.targets = {'um_loss' : self.um.uncertainty_loss, 'um_optimizer' : um_opt, 'global_step' : self.global_step,
'loss_per_example' : self.um.true_loss, 'estimated_world_loss' : self.um.estimated_world_loss, 'ans' : self.um.ans,
'oh_my_god' : self.um.oh_my_god, 'model_parameters' : self.um.var_list}
def update(self, sess):
batch = self.dp.dequeue_batch()
feed_dict = {
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post'],
self.um.obj_there : batch['obj_there']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class LatentFreezeUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider\
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
freeze_wm = updater_params['freeze_wm']
freeze_um = updater_params['freeze_um']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_step = tf.get_variable('act_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.fut_step = tf.get_variable('fut_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_step = tf.get_variable('ext_uncertainty_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.targets = {}
self.state_desc = updater_params.get('state_desc', 'depths1')
if not freeze_wm:
act_lr_params, act_lr = get_learning_rate(self.act_step, **learning_rate_params['world_model']['act_model'])
fut_lr_params, fut_lr = get_learning_rate(self.fut_step, **learning_rate_params['world_model']['fut_model'])
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.act_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
fut_opt_params, fut_opt = get_optimizer(fut_lr, self.wm.fut_loss, self.fut_step, optimizer_params['world_model']['fut_model'], var_list = self.wm.fut_var_list)
self.targets['act_opt'] = act_opt
self.targets['fut_opt'] = fut_opt
self.targets['act_lr'] = act_lr
self.targets['fut_lr'] = fut_lr
if not freeze_um:
um_lr_params, um_lr = get_learning_rate(self.um_step, **learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.um_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets['um_opt'] = um_opt
self.targets['um_lr'] = um_lr
self.targets['global_step'] = self.global_step
global_increment = tf.assign_add(self.global_step, 1)
um_increment = tf.assign_add(self.um.step, 1)
self.targets.update({'global_increment' : global_increment, 'um_increment' : um_increment})
self.targets.update(self.wm.readouts)
self.targets.update(self.um.readouts)
assert set(self.wm.readouts.keys()) != set(self.um.readouts.keys())
def update(self, sess, visualize = False):
if self.um.just_random:
print('Selecting action at random')
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', self.state_desc]:
batch[k] = np.concatenate(batch[k], axis=0)
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res.pop('um_increment')
res.pop('global_increment')
global_step = res['global_step']
#if self.map_draw_mode is not None and global_step % self.map_draw_freq == 0:
# if self.map_draw_mode == 'specified_indices':
# map_draw_res = []
# for idx in self.map_draw_example_indices:
# obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
# action_samples = self.action_sampler.sample_actions()
# action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
# to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
# 'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
# 'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
# map_draw_res.append(to_add)
# res['map_draw'] = map_draw_res
res = self.postprocessor.postprocess(res, batch)
return res, global_step
class FreezeUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider \
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
freeze_wm = updater_params['freeze_wm']
freeze_um = updater_params['freeze_um']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_lr_params, act_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['act_model'])
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
num_not_frozen = 0
self.targets = {}
self.state_desc = updater_params.get('state_desc', 'depths1')
if not freeze_wm:
num_not_frozen += 1
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.global_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
self.targets['act_opt'] = act_opt
if not freeze_um:
num_not_frozen += 1
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets['um_opt'] = um_opt
if num_not_frozen == 0:
self.targets['global_step'] = self.global_step
self.targets['increment'] = tf.assign_add(self.global_step, 1)
else:
self.global_step = self.global_step / num_not_frozen
self.targets['global_step'] = self.global_step
self.targets.update({'act_lr' : act_lr, 'um_lr' : um_lr})
assert set(self.wm.readouts.keys()) != set(self.um.readouts.keys())
self.targets.update(self.wm.readouts)
self.targets.update(self.um.readouts)
um_increment = tf.assign_add(self.um.step, 1)
assert 'um_increment' not in self.targets
self.targets['um_increment'] = um_increment
self.obj_there_supervision = updater_params.get('include_obj_there', False)
#self.map_draw_mode = None
#Map drawing. Meant to have options, but for now just assuming one sort of specification
#self.state_desc = updater_params.get('state_desc', 'depths1')
#self.map_draw_mode = updater_params['map_draw_mode']
#this specification specifices batch example indices for which we do a forward pass.
#need to do one forward pass each index because action sampling is the 'batch.'
#self.action_sampler = action_sampler
#assert self.map_draw_mode == 'specified_indices' and self.action_sampler is not None, (self.map_draw_mode, action_sampler)
#self.map_draw_example_indices = updater_params['map_draw_example_indices']
#self.map_draw_timestep_indices = updater_params['map_draw_timestep_indices']
#self.map_draw_freq = updater_params['map_draw_freq']
def update(self, sess, visualize = False):
if self.um.just_random:
print('Selecting action at random')
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', self.state_desc]:
batch[k] = np.concatenate(batch[k], axis=0)
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
if self.obj_there_supervision:
batch['obj_there'] = np.concatenate(batch['obj_there'], axis = 0)
feed_dict[self.wm.obj_there_via_msg] = batch['obj_there']
print('state desc! ' + self.state_desc)
res = sess.run(self.targets, feed_dict = feed_dict)
res.pop('um_increment')
global_step = res['global_step']
#if self.map_draw_mode is not None and global_step % self.map_draw_freq == 0:
# if self.map_draw_mode == 'specified_indices':
# map_draw_res = []
# for idx in self.map_draw_example_indices:
# obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
# action_samples = self.action_sampler.sample_actions()
# action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
# to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
# 'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
# 'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
# map_draw_res.append(to_add)
# res['map_draw'] = map_draw_res
res = self.postprocessor.postprocess(res, batch)
return res, global_step
class JustUncertaintyUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params, action_sampler = None):
self.data_provider = data_provider \
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.targets = {'global_step' : self.global_step, 'um_optimizer' : um_opt}
assert set(self.wm.readouts.keys()) != set(self.um.readouts.keys())
self.targets.update(self.wm.readouts)
self.targets.update(self.um.readouts)
#self.targets = {
# 'fut_pred' : self.wm.fut_pred, 'act_pred' : self.wm.act_pred,
# 'fut_loss' : self.wm.fut_loss, 'act_loss' : self.wm.act_loss,
# 'estimated_world_loss' : self.um.estimated_world_loss,
# ''
# }
#self.targets.update({'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
# 'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss})
self.map_draw_mode = None
#Map drawing. Meant to have options, but for now just assuming one sort of specification
self.state_desc = updater_params.get('state_desc', 'depths1')
self.map_draw_mode = updater_params['map_draw_mode']
#this specification specifices batch example indices for which we do a forward pass.
#need to do one forward pass each index because action sampling is the 'batch.'
self.action_sampler = action_sampler
assert self.map_draw_mode == 'specified_indices' and self.action_sampler is not None, (self.map_draw_mode, action_sampler)
self.map_draw_example_indices = updater_params['map_draw_example_indices']
self.map_draw_timestep_indices = updater_params['map_draw_timestep_indices']
self.map_draw_freq = updater_params['map_draw_freq']
def update(self, sess, visualize = False):
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', 'depths1']:
batch[k] = np.concatenate(batch[k], axis=0)
feed_dict = {
self.wm.states : batch[self.state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
self.targets['global_step'] = self.global_step
res = sess.run(self.targets, feed_dict = feed_dict)
global_step = res['global_step']
if self.map_draw_mode is not None and global_step % self.map_draw_freq == 0:
if self.map_draw_mode == 'specified_indices':
map_draw_res = []
for idx in self.map_draw_example_indices:
obs_for_actor = [batch[self.state_desc][idx][t] for t in self.map_draw_timestep_indices]
action_samples = self.action_sampler.sample_actions()
action, entropy, estimated_world_loss = self.um.act(sess, action_samples, obs_for_actor)
to_add = {'example_id' : idx, 'action_sample' : action, 'estimated_world_loss' : estimated_world_loss,
'action_samples' : action_samples, 'depths1' : batch[self.state_desc][idx],
'action' : batch['action'][idx], 'action_post' : batch['action_post'][idx]}
map_draw_res.append(to_add)
res['map_draw'] = map_draw_res
res = self.postprocessor.postprocess(res, batch)
return res, global_step
class ActionUncertaintyUpdater:
def __init__(self, models, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params):
self.data_provider = data_provider \
if isinstance(data_provider, list) else [data_provider]
self.wm = models['world_model']
self.um = models['uncertainty_model']
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_lr_params, act_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['act_model'])
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.global_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.global_step = self.global_step / 2
self.targets = {'act_pred' : self.wm.act_pred, 'act_loss' : self.wm.act_loss,
'act_optimizer' : act_opt, 'um_optimizer' : um_opt,
'estimated_world_loss' : self.um.estimated_world_loss,
'um_loss' : self.um.uncertainty_loss, 'loss_per_example' : self.um.true_loss,
'global_step' : self.global_step}
def update(self, sess, visualize = False):
batch = {}
for i, dp in enumerate(self.data_provider):
provider_batch = dp.dequeue_batch()
for k in provider_batch:
if k in batch:
batch[k].append(provider_batch[k])
else:
batch[k] = [provider_batch[k]]
for k in ['action', 'action_post', 'depths1']:
batch[k] = np.concatenate(batch[k], axis=0)
state_desc = 'depths1'
#depths, actions, actions_post, next_depth = postprocess_batch_depth(batch, state_desc)
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
self.targets['global_step'] = self.global_step
res = sess.run(self.targets, feed_dict = feed_dict)
glstep = res['global_step']
res = self.postprocessor.postprocess(res, batch)
return res, glstep
class LatentUncertaintyUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor, updater_params = None):
self.data_provider = data_provider
self.wm = world_model
self.um = uncertainty_model
self.postprocessor = postprocessor
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.act_lr_params, act_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['act_model'])
self.fut_lr_params, fut_lr = get_learning_rate(self.global_step, ** learning_rate_params['world_model']['fut_model'])
self.um_lr_params, um_lr = get_learning_rate(self.global_step, ** learning_rate_params['uncertainty_model'])
act_opt_params, act_opt = get_optimizer(act_lr, self.wm.act_loss, self.global_step, optimizer_params['world_model']['act_model'], var_list = self.wm.act_var_list + self.wm.encode_var_list)
fut_opt_params, fut_opt = get_optimizer(fut_lr, self.wm.fut_loss, self.global_step, optimizer_params['world_model']['fut_model'], var_list = self.wm.fut_var_list)
um_opt_params, um_opt = get_optimizer(um_lr, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'], var_list = self.um.var_list)
self.global_step = self.global_step / 3
self.targets = {'encoding_i' : self.wm.encoding_i, 'encoding_f' : self.wm.encoding_f,
'fut_pred' : self.wm.fut_pred, 'act_pred' : self.wm.act_pred,
'act_optimizer' : act_opt, 'fut_optimizer' : fut_opt,
'act_lr' : act_lr, 'fut_lr' : fut_lr,
'fut_loss' : self.wm.fut_loss, 'act_loss' : self.wm.act_loss,
'estimated_world_loss' : self.um.estimated_world_loss
}
self.targets.update({'um_loss' : self.um.uncertainty_loss, 'um_lr' : um_lr, 'um_optimizer' : um_opt,
'global_step' : self.global_step, 'loss_per_example' : self.um.true_loss})
self.state_desc = updater_params['state_desc']
#checking that we don't have repeat names
def start(self, sess):
self.data_provider.start_runner(sess)
sess.run(tf.global_variables_initializer())
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
state_desc = self.state_desc
#depths, actions, actions_post, next_depth = postprocess_batch_depth(batch, state_desc)
feed_dict = {
self.wm.states : batch[state_desc],
self.wm.action : batch['action'],
self.wm.action_post : batch['action_post']
}
res = sess.run(self.targets, feed_dict = feed_dict)
res = self.postprocessor.postprocess(res, batch)
return res
class UncertaintyUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor):
self.data_provider = data_provider
self.world_model = world_model
self.um = uncertainty_model
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.wm_lr_params, wm_learning_rate = get_learning_rate(self.global_step, ** learning_rate_params['world_model'])
self.wm_opt_params, wm_opt = get_optimizer(wm_learning_rate, self.world_model.loss, self.global_step, optimizer_params['world_model'])
self.world_model_targets = {'given' : self.world_model.processed_input, 'loss' : self.world_model.loss, 'loss_per_example' : self.world_model.loss_per_example, 'learning_rate' : wm_learning_rate, 'optimizer' : wm_opt, 'prediction' : self.world_model.pred, 'tv' : self.world_model.tv}
self.inc_step = self.global_step.assign_add(1)
self.um_lr_params, um_learning_rate = get_learning_rate(self.global_step, **learning_rate_params['uncertainty_model'])
self.um_lr_params, um_opt = get_optimizer(um_learning_rate, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'])
self.global_step = self.global_step / 2
self.um_targets = {'loss' : self.um.uncertainty_loss, 'learning_rate' : um_learning_rate, 'optimizer' : um_opt, 'global_step' : self.global_step}
self.postprocessor = postprocessor
self.world_action_time = self.world_model.action.get_shape().as_list()[1]
def start(self, sess):
self.data_provider.start_runner(sess)
sess.run(tf.global_variables_initializer())
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
state_desc = self.um.state_descriptor
wm_feed_dict = {
self.world_model.states : batch[state_desc],
self.world_model.action : batch['action'][:, -self.world_action_time : ]
}
world_model_res = sess.run(self.world_model_targets, feed_dict = wm_feed_dict)
um_feed_dict = {
self.um.s_i : batch[state_desc][:, :-1],
self.um.action_sample : batch['action'][:, -1],
self.um.true_loss : world_model_res['loss_per_example']
}
um_res = sess.run(self.um_targets, feed_dict = um_feed_dict)
wm_res_new = dict(('wm_' + k, v) for k, v in world_model_res.iteritems())
um_res_new = dict(('um_' + k, v) for k, v in um_res.iteritems())
wm_res_new.update(um_res_new)
res = wm_res_new
res['global_step'] = res.pop('um_global_step')
res = self.postprocessor.postprocess(wm_res_new, batch)
return res
class DamianWMUncertaintyUpdater:
def __init__(self, world_model, uncertainty_model, data_provider, optimizer_params, learning_rate_params, postprocessor):
self.data_provider = data_provider
self.world_model = world_model
self.um = uncertainty_model
self.global_step = tf.get_variable('global_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
self.wm_lr_params, wm_learning_rate = get_learning_rate(self.global_step, ** learning_rate_params['world_model'])
self.wm_opt_params, wm_opt = get_optimizer(wm_learning_rate, self.world_model.loss, self.global_step, optimizer_params['world_model'])
self.world_model_targets = {'given' : self.world_model.processed_input, 'loss' : self.world_model.loss, 'learning_rate' : wm_learning_rate, 'optimizer' : wm_opt, 'prediction' : self.world_model.pred, 'tv' : self.world_model.tv}
self.inc_step = self.global_step.assign_add(1)
self.wm_lr_params, um_learning_rate = get_learning_rate(self.global_step, **learning_rate_params['uncertainty_model'])
self.wm_lr_params, um_opt = get_optimizer(um_learning_rate, self.um.uncertainty_loss, self.global_step, optimizer_params['uncertainty_model'])
self.um_targets = {'loss' : self.um.uncertainty_loss, 'learning_rate' : um_learning_rate, 'optimizer' : um_opt, 'global_step' : self.global_step}
self.postprocessor = postprocessor
def start(self, sess):
self.data_provider.start_runner(sess)
sess.run(tf.global_variables_initializer())
def update(self, sess, visualize = False):
batch = self.data_provider.dequeue_batch()
        depths, objects, actions, action_ids, next_depth = postprocess_batch_for_actionmap(batch, 'depths1')  # state descriptor assumed to be 'depths1'
wm_feed_dict = {
self.world_model.s_i : depths,
self.world_model.s_f : next_depth,
self.world_model.action : actions,
self.world_model.action_id : action_ids,
self.world_model.objects : objects
}
world_model_res = sess.run(self.world_model_targets, feed_dict = wm_feed_dict)
if visualize:
cv2.imshow('pred', world_model_res['prediction'][0] / 4.)#TODO clean up w colors
cv2.imshow('tv', world_model_res['tv'][0] / 4.)
cv2.imshow('processed0', world_model_res['given'][0, 0] / 4.)
cv2.imshow('processed1', world_model_res['given'][0, 1] / 4.)
cv2.waitKey(1)
print('wm loss: ' + str(world_model_res['loss']))
um_feed_dict = {
self.um.s_i : depths,
self.um.action_sample : actions[:, -1],
self.um.true_loss : np.array([world_model_res['loss']])
}
um_res = sess.run(self.um_targets, feed_dict = um_feed_dict)
wm_res_new = dict(('wm_' + k, v) for k, v in world_model_res.iteritems())
um_res_new = dict(('um_' + k, v) for k, v in um_res.iteritems())
        wm_res_new.update(um_res_new)
        res = wm_res_new
        res['global_step'] = res.pop('um_global_step')
        res = self.postprocessor.postprocess(res, batch)
return res
| 2.078125 | 2 |
tests/conftest.py | alex-torok/pybuildkite | 1 | 12790161 | <filename>tests/conftest.py
import pytest
from unittest.mock import Mock
@pytest.fixture
def fake_client() -> Mock:
"""
Build a fake API client
"""
return Mock(get=Mock())
| 2.28125 | 2 |
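A hypothetical test showing how the fake_client fixture above is typically consumed; the endpoint and the configured return value are made up.

def test_fake_client_get(fake_client):
    fake_client.get.return_value = {"name": "my-pipeline"}

    result = fake_client.get("/pipelines/my-pipeline")

    assert result == {"name": "my-pipeline"}
    fake_client.get.assert_called_once_with("/pipelines/my-pipeline")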
ImgOperator/Oporation.py | keyofdeath/AugmentedReality | 0 | 12790162 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
class Oporation():
def __init__(self):
pass
def apply(self, img):
pass
if __name__ == "__main__":
pass | 1.398438 | 1 |
worknoteBookHelpers.py | JanKrAppel/worknoteBook | 0 | 12790163 | <reponame>JanKrAppel/worknoteBook
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 2 18:06:23 2015
@author: appel
"""
def parse_index(index):
from worknote.items import parse_index
return parse_index(index)[0:2]
def gen_index(index):
try:
return u':'.join([str(i) for i in index[0:2]])
except TypeError:
return unicode(index)
def zip_worknote(src_dir, target_fn):
from os.path import split, join
from os import listdir
from zipfile import ZipFile
wn_dir = split(src_dir)[1]
zf = ZipFile(target_fn, 'w')
zf.write(src_dir, wn_dir)
for fn in listdir(src_dir):
zf.write(join(src_dir, fn), join(wn_dir, fn))
zf.close()
def unzip_worknote(src_fn, target_dir = None):
from zipfile import ZipFile, BadZipfile
from os.path import split, exists, join
from os import makedirs, listdir
from shutil import move
from tempfile import gettempdir
try:
with ZipFile(src_fn, 'r') as zipfile:
files = zipfile.namelist()
wn_dir = split(files[0])[0]
if target_dir is None:
target_dir = wn_dir
if exists(target_dir):
raise OSError('The target directory already exists')
else:
makedirs(target_dir)
tmpdir = gettempdir()
zipfile.extractall(tmpdir)
for fn in listdir(join(tmpdir, wn_dir)):
move(join(join(tmpdir, wn_dir), fn), target_dir)
except BadZipfile, e:
raise IOError(str(e))
class Configuration(object):
def __init__(self, cfg_file, default_cfg = None):
from ConfigParser import SafeConfigParser
self.cfg_file = cfg_file
self.config = SafeConfigParser()
if not default_cfg is None:
for section in default_cfg:
for option in default_cfg[section]:
self.__put_item([section, option], default_cfg[section][option])
self.read_cfg_file()
def __getitem__(self, indices):
indices = indices[0:2]
section, option = indices
if not section in self.config.sections():
return None
if not option in self.config.options(section):
return None
val = self.config.get(section, option)
try:
val = int(val)
except ValueError:
pass
else:
return val
try:
val = float(val)
except ValueError:
pass
else:
return val
if val == 'False':
return False
elif val == 'True':
return True
return val
def __put_item(self, indices, value):
indices = indices[0:2]
value = str(value)
section, option = indices
if not section in self.config.sections():
self.config.add_section(section)
if not option in self.config.options(section):
self.config.set(section, option, value)
def __setitem__(self, indices, value):
self.__put_item(indices, value)
self.update_cfg_file()
def update_cfg_file(self):
with open(self.cfg_file, 'w') as outfile:
self.config.write(outfile)
def read_cfg_file(self):
from os.path import exists
if exists(self.cfg_file):
self.config.read(self.cfg_file)
def get_sections(self):
return self.config.sections()
def get_options(self, section):
return self.config.options(section)
| 2.421875 | 2 |
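Illustrative usage of the Configuration helper above; the file name, section, and option names are made up. Values read back through __getitem__ are parsed into int, float, or bool where possible.

defaults = {'display': {'items_per_page': 25, 'show_preview': True}}
cfg = Configuration('worknoteBook.cfg', default_cfg=defaults)

print(cfg['display', 'items_per_page'])    # 25, parsed back to int
print(cfg['display', 'show_preview'])      # True, parsed back to bool
print(cfg['display', 'missing_option'])    # None for unknown options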
gpytorch/utils/grid.py | yushangdi/gpytorch | 0 | 12790164 | <filename>gpytorch/utils/grid.py<gh_stars>0
#!/usr/bin/env python3
import math
import torch
def scale_to_bounds(x, lower_bound, upper_bound):
"""
Scale the input data so that it lies in between the lower and upper bounds.
Args:
:attr:`x` (Tensor `n` or `b x n`):
the input
:attr:`lower_bound` (float)
:attr:`upper_bound` (float)
Returns:
:obj:`torch.Tensor`
"""
# Scale features so they fit inside grid bounds
min_val = x.min()
max_val = x.max()
diff = max_val - min_val
x = (x - min_val) * (0.95 * (upper_bound - lower_bound) / diff) + 0.95 * lower_bound
return x
def choose_grid_size(train_inputs, ratio=1.0):
"""
Given some training inputs, determine a good grid size for KISS-GP.
Args:
:attr:`train_inputs` (Tensor `n` or `n x d` or `b x n x d`):
training data
:attr:`ratio` (float, optional):
Ratio - number of grid points to the amount of data (default: 1.)
Returns:
:obj:`int`
"""
# Scale features so they fit inside grid bounds
num_data = train_inputs.numel() if train_inputs.dim() == 1 else train_inputs.size(-2)
num_dim = 1 if train_inputs.dim() == 1 else train_inputs.size(-1)
return int(ratio * math.pow(num_data, 1.0 / num_dim))
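# Worked example (illustrative): with 1000 training points in 2 dimensions and
# the default ratio of 1.0, choose_grid_size returns int(1000 ** (1/2)) == 31
# grid points per dimension; with ratio=2.0 it returns int(2.0 * 31.62...) == 63.
#
#   x = torch.randn(1000, 2)
#   choose_grid_size(x)         # -> 31
#   choose_grid_size(x, 2.0)    # -> 63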
def create_data_from_grid(grid):
grid_size = grid.size(-2)
grid_dim = grid.size(-1)
grid_data = torch.zeros(int(pow(grid_size, grid_dim)), grid_dim, device=grid.device)
prev_points = None
for i in range(grid_dim):
for j in range(grid_size):
grid_data[j * grid_size ** i : (j + 1) * grid_size ** i, i].fill_(grid[j, i])
if prev_points is not None:
grid_data[j * grid_size ** i : (j + 1) * grid_size ** i, :i].copy_(prev_points)
prev_points = grid_data[: grid_size ** (i + 1), : (i + 1)]
return grid_data
| 2.78125 | 3 |
cookbook/c07/p11_inline_callback.py | Xiao-jiuguan/python3-cookbook | 3 | 12790165 | <reponame>Xiao-jiuguan/python3-cookbook
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: Inline callback functions
Desc :
"""
from queue import Queue
from functools import wraps
def apply_async(func, args, *, callback):
# Compute the result
result = func(*args)
# Invoke the callback with the result
callback(result)
class Async:
def __init__(self, func, args):
self.func = func
self.args = args
def inlined_async(func):
@wraps(func)
def wrapper(*args):
f = func(*args)
result_queue = Queue()
result_queue.put(None)
while True:
print('1' * 15)
result = result_queue.get()
print('2' * 15)
try:
print('3' * 15)
print('result={}'.format(result))
a = f.send(result)
print('4' * 15)
apply_async(a.func, a.args, callback=result_queue.put)
print('5' * 15)
except StopIteration:
break
return wrapper
def add(x, y):
return x + y
@inlined_async
def test():
print('start'.center(20, '='))
r = yield Async(add, (2, 3))
print('last={}'.format(r))
r = yield Async(add, ('hello', 'world'))
print('last={}'.format(r))
# for n in range(10):
# r = yield Async(add, (n, n))
# print(r)
# print('Goodbye')
print('end'.center(20, '='))
if __name__ == '__main__':
test() | 3.625 | 4 |
huggingface_transformers/preprocessing_fn.py | ML3ngiRNErT/funniness-regression | 0 | 12790166 | import pandas as pd
import re
import nltk
import spacy
import torch
from nltk.corpus import stopwords
from cleantext import clean
from ekphrasis.classes.preprocessor import TextPreProcessor
from torch.utils.data import Dataset
# Params for cleantext.clean() (kept here for reference; not used directly in the code below)
clean_text_param = {
"lower":False, # lowercase text
"no_line_breaks":True, # fully strip line breaks as opposed to only normalizing them
"no_urls":False, # replace all URLs with a special token
"no_emails":False, # replace all email addresses with a special token
"no_phone_numbers":False, # replace all phone numbers with a special token
"no_numbers":False, # replace all numbers with a special token
"no_digits":False, # replace all digits with a special token
"no_currency_symbols":True, # replace all currency symbols with a special token
"no_punct":True, # remove punctuations
"replace_with_punct":"", # instead of removing punctuations you may replace them
"replace_with_number":"",
"replace_with_digit":"",
"replace_with_currency_symbol":"",
"lang":"en" # set to 'de' for German special handling
}
nlp = spacy.load('en_core_web_sm')
class Task1Dataset(Dataset):
def __init__(self, train_data, labels):
self.x_train = train_data
self.y_train = labels
def __len__(self):
return len(self.y_train)
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.x_train.items()}
item['labels'] = torch.tensor(self.y_train[idx], dtype=torch.float)
return item
class Preprocessor:
@staticmethod
def PreprocessorBuilder():
return Preprocessor()
def __init__(self):
self.transformations = []
self.text_processor = TextPreProcessor(
fix_html=True, # fix HTML tokens
# corpus from which the word statistics are going to be used
# for word segmentation
segmenter="english",
# corpus from which the word statistics are going to be used
# for spell correction
corrector="english",
unpack_hashtags=False, # perform word segmentation on hashtags
unpack_contractions=False, # Unpack contractions (can't -> can not)
spell_correct=True, # spell correction for elongated words
)
self.punct = "[\.,:;\(\)\[\]@\-\$£]"
nltk.download('stopwords')
self.stops = stopwords.words('english')
self.nlp = spacy.load('en_core_web_lg')
def _capitalisation_by_ner(self, sentence, entities=['GPE', 'ORG', 'NORP', 'PERSON']):
edited_row = []
trial_doc = self.nlp(sentence)
for tok in trial_doc:
if tok.ent_type_ in entities:
edited_row.append(tok.text)
else:
edited_row.append(tok.text.lower())
return ' '.join(edited_row)
def with_word_replacement(self):
self.transformations.append(("apply", {"func": (lambda x: re.subn("<.*/>", x[1], x[0])[0]), "axis":1}))
return self
def with_capitalisation_by_ner(self):
self.transformations.append(("apply", {"func": (lambda x: self._capitalisation_by_ner(x))}))
return self
def with_joining_contraction_tokens(self):
self.transformations.append(("str.replace", {"pat": " (?P<one>\w*'\w+)", "repl": (lambda x: x.group("one"))}))
return self
def with_spell_check(self):
self.transformations.append(("apply", {"func": (lambda x: self.text_processor.pre_process_doc(x))}))
return self
def with_space_after_hashtags(self):
self.transformations.append(("str.replace", {"pat": "#", "repl": "# "}))
return self
def with_ascii_quotes_replacement(self):
self.transformations.append(("str.replace", {"pat": "[‘’]", "repl": "'"}))
return self
def with_possessive_elimination(self):
self.transformations.append(("str.replace", {"pat": "'s", "repl": ""}))
return self
def with_punct_removal(self):
self.transformations.append(("str.replace", {"pat": self.punct, "repl": "'"}))
return self
def with_digit_removal(self):
self.transformations.append(("str.replace", {"pat": "[0-9]", "repl": ""}))
return self
def with_stopwords_removal(self):
self.transformations.append(("apply", {"func": (lambda x: " ".join([w for w in x.split(" ") if w not in self.stops]))}))
return self
def build(self):
return self
def preprocess(self, df, clean_col_name='edited_sentence'):
_df = pd.DataFrame(index=df.index, columns=[clean_col_name, 'meanGrade'])
_df['meanGrade'] = df.meanGrade
transformed_cols = df[['original', 'edit']]
for (func, params) in self.transformations:
func_to_apply = transformed_cols
for f in func.split("."):
print(f)
func_to_apply = getattr(func_to_apply, f)
transformed_cols = func_to_apply(**params)
_df[clean_col_name] = transformed_cols
return _df, clean_col_name
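# Illustrative usage of the builder-style API above (the CSV path is a
# placeholder, not part of the original code). preprocess() expects a frame
# with 'original', 'edit' and 'meanGrade' columns, as used inside the method.
#
#   df = pd.read_csv("train.csv")
#   pre = (Preprocessor.PreprocessorBuilder()
#          .with_word_replacement()
#          .with_space_after_hashtags()
#          .with_stopwords_removal()
#          .build())
#   clean_df, col_name = pre.preprocess(df)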
| 2.6875 | 3 |
projecto/urls.py | bfaguiar/Venda-d-Garagem | 0 | 12790167 | """projecto URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('index/', views.index, name='index'),
path('', views.index, name='index'),
path('login/', views.login, name='login'),
path('signup/', views.signup, name='signup'),
path('profile/', views.profile, name='profile'),
path('wishlist/', views.wishlist, name='wishlist'),
path('about/', views.about, name='about'),
path('model/', views.model, name='model'),
path('add_announce/', views.add_announce, name='add_announce'),
path('deleteAccount/', views.deleteAccount, name='deleteAccount'),
path('remove_announcement/', views.remove_announcement, name="remove_announcement"),
path('fav/', views.fav, name="fav"),
path('rev/', views.rev, name="rev"),
]
| 2.75 | 3 |
models/support_vector_machine_regression_poly3.py | jtbai/glo-7027 | 0 | 12790168 | from models.basemodel import BaseModel
from sklearn import model_selection, svm
class SupportVectorMachineRegressionPoly3(BaseModel):
def _train(self, X_train, y_train):
parametres = {'gamma': [0.01, 0.1, 1], 'C': [1, 10, 100], 'degree': [2,3,4,5,6]}
grid_search = model_selection.GridSearchCV(svm.SVR(kernel="poly" ), parametres, n_jobs=6)
grid_search = grid_search.fit(X_train, y_train)
return grid_search
def compute_and_output_r2_metric(self, trained_grid_search, y_train, y_train_pred, y_test, y_test_pred):
self._printResults(y_train, y_train_pred, y_test, y_test_pred, str(trained_grid_search.best_params_))
| 2.5 | 2 |
tests/integration/load_balancer_types/test_load_balancer_types.py | cenkalti/hcloud-python | 0 | 12790169 | <filename>tests/integration/load_balancer_types/test_load_balancer_types.py
class TestLoadBalancerTypesClient(object):
def test_get_by_id(self, hetzner_client):
load_balancer_type = hetzner_client.load_balancer_types.get_by_id(1)
assert load_balancer_type.id == 1
assert load_balancer_type.name == "lb11"
def test_get_by_name(self, hetzner_client):
load_balancer_type = hetzner_client.load_balancer_types.get_by_name("lb11")
assert load_balancer_type.id == 1
assert load_balancer_type.name == "lb11"
def test_get_list(self, hetzner_client):
result = hetzner_client.load_balancer_types.get_list()
load_balancer_types = result.load_balancer_types
assert load_balancer_types[0].id == 1
assert load_balancer_types[0].name == "lb11"
| 2.28125 | 2 |
nets/zoo/hrnet_config.py | hin1115/building_extraction_in_satellite_image | 7 | 12790170 | <reponame>hin1115/building_extraction_in_satellite_image
import yaml
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname("__file__"))))
def parse(path):
with open(path, 'r') as f:
config = yaml.safe_load(f)
f.close()
return config
| 2.546875 | 3 |
src/player.py | thacd/tennis-scoring-system | 0 | 12790171 | <filename>src/player.py<gh_stars>0
class Player:
def __init__(self, name, set_point=0, game_point=0):
self.name = name
self.set_point = set_point
self.game_point = game_point
@property
def set_point(self):
return self.__set_point
@set_point.setter
def set_point(self, value):
if value < 0:
raise ValueError("Negative set point is impossible!")
self.__set_point = value
@property
def game_point(self):
return self.__game_point
@game_point.setter
def game_point(self, value):
if value < 0:
raise ValueError("Negative game point is impossible!")
self.__game_point = value
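# Minimal usage sketch (names are illustrative): the property setters reject
# negative scores, so invalid updates fail loudly.
#
#   p = Player("Serena")
#   p.game_point = 15    # ok
#   p.set_point = -1     # raises ValueError("Negative set point is impossible!")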
| 3.34375 | 3 |
example.py | jlenain/PhotALPsConv | 1 | 12790172 | #Example script to show use of PhotALPsConv.calc_conversion module.
# ---- Imports ------------------------------------- #
import numpy as np
import matplotlib.pyplot as plt
import PhotALPsConv.calc_conversion as CC
from PhotALPsConv.tools import median_contours
from optparse import OptionParser
# -------------------------------------------------- #
# read in the config file
parser=OptionParser()
parser.add_option("-c","--config",dest="c",help="config yaml file",action="store")
(opt, args) = parser.parse_args()
# init the conversion calculation
cc = CC.Calc_Conv(config = opt.c)
# the energy array in GeV
EGeV = np.logspace(cc.log10Estart,cc.log10Estop,cc.Estep)
# init matrices that will store the conversion probabilities
# for the different photon (t,u) and ALP (a) polarization states
Pt = np.zeros((cc.nsim,EGeV.shape[0]))
Pu = np.zeros((cc.nsim,EGeV.shape[0]))
Pa = np.zeros((cc.nsim,EGeV.shape[0]))
# calculate the mixing, nsim > 0 only if ICM or IGM are included
for i in range(cc.nsim):
try:
cc.scenario.index('Jet')
new_angles = False
except ValueError:
new_angles = True
# calculate the mixing for all energies. If new_angles = True,
# new random angles will be generated for each random realizatioin
Pt[i],Pu[i],Pa[i] = cc.calc_conversion(EGeV, new_angles = new_angles)
# calculate the median and 68% and 95% confidence contours
MedCon = median_contours(Pt + Pu)
# plot the results
plt.figure()
ax = plt.subplot(111)
ax.set_xscale("log")
ax.set_yscale("log")
# plot the contours if we are dealing with many realizations
if cc.nsim > 1:
plt.fill_between(EGeV,MedCon['conf_95'][0],y2 = MedCon['conf_95'][1], color = plt.cm.Greens(0.8))
plt.fill_between(EGeV,MedCon['conf_68'][0],y2 = MedCon['conf_68'][1], color = plt.cm.Greens(0.5))
plt.plot(EGeV,MedCon['median'], ls = '-', color = 'gold', label = 'median')
label = 'one realization'
else:
label = 'w/ ALPs'
# plot the standard attenuation
plt.plot(EGeV,np.exp(-1. * cc.ebl_norm * cc.tau.opt_depth_array(cc.z,EGeV / 1e3)[0]), ls = '--', color = 'red', label = r'w/o ALPs', lw = 3.)
# plot the photon survival probability including ALPs
plt.plot(EGeV,Pt[0] + Pu[0], ls = '-', color = '0.', label = label, lw = 3.)
plt.xlabel("Energy (GeV)", size = 'x-large')
plt.ylabel("Photon survival probability", size = 'x-large')
plt.legend(loc = 3, fontsize = 'x-large')
plt.axis([EGeV[0],EGeV[-1],1e-1,1.1])
plt.show()
| 2 | 2 |
daps/utils/deprecated/video.py | escorciav/deep-action-proposals | 28 | 12790173 | <filename>daps/utils/deprecated/video.py
import cv2
def dump_video(filename, clip, fourcc_str='X264', fps=30.0):
"""Write video on disk from a stack of images
Parameters
----------
filename : str
Fullpath of video-file to generate
clip : ndarray
ndarray where first dimension is used to refer to i-th frame
fourcc_str : str
str to retrieve fourcc from opencv
fps : float
frame rate of create video-stream
"""
    fourcc = cv2.cv.CV_FOURCC(*fourcc_str)
    # VideoWriter expects the (width, height) of a single frame, not (num_frames, height)
    fid = cv2.VideoWriter(filename, fourcc, fps, (clip.shape[2], clip.shape[1]))
if fid.isOpened():
for i in xrange(clip.shape[0]):
fid.write(clip[i, ...])
return True
else:
return False
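# Illustrative call (assumes the legacy OpenCV 2.x cv2.cv API used above and a
# uint8 BGR clip shaped (num_frames, height, width, 3)):
#
#   import numpy as np
#   clip = (np.random.rand(30, 240, 320, 3) * 255).astype(np.uint8)
#   dump_video('/tmp/demo.avi', clip, fourcc_str='XVID', fps=30.0)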
| 3.046875 | 3 |
mathematics/number-theory/john-and-gcd-list.py | PingHuskar/hackerrank | 41 | 12790174 | # Mathematics > Number Theory > John and GCD list
# Help John in making a list from GCD list
#
# https://www.hackerrank.com/challenges/john-and-gcd-list/problem
#
import math
import functools
def gcd(*numbers):
""" greatest common divisor """
return functools.reduce(math.gcd, numbers)
def lcm(*numbers):
""" least common multiple """
return functools.reduce(lambda a, b: (a * b) // gcd(a, b), numbers, 1)
# the answer is the "sliding" LCM of the A[i]
for _ in range(int(input())):
n = int(input())
A = list(map(int, input().split()))
x = A[0]
B = [x]
i = 1
while i < n:
y = A[i]
B.append(lcm(x, y))
x = y
i += 1
B.append(A[-1])
print(*B)
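# Worked example of the sliding-LCM construction above (illustrative): for the
# GCD list A = [1, 2, 4, 2, 5, 4] the program prints
#   1 2 4 4 10 20 4
# i.e. B[0] = A[0], B[i] = lcm(A[i-1], A[i]) for 1 <= i < n, and B[n] = A[-1].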
| 3.78125 | 4 |
Python Advanced/Advanced/Exams/19 August 2020/Task03.py | IvanTodorovBG/SoftUni | 1 | 12790175 | def numbers_searching(*args):
answer = []
duplicate_nums = []
min_num, max_num = min(args), max(args)
for search_num in range(min_num, max_num + 1):
if search_num not in args:
answer.append(search_num)
for number in set(args):
if args.count(number) > 1:
duplicate_nums.append(number)
answer.append(sorted(duplicate_nums))
return answer
print(numbers_searching(1, 2, 4, 2, 5, 4))
print(numbers_searching(5, 5, 9, 10, 7, 8, 7, 9))
print(numbers_searching(50, 50, 47, 47, 48, 45, 49, 44, 47, 45, 44, 44, 48, 44, 48)) | 3.703125 | 4 |
examples/eeprom_example.py | melopero/Melopero_RV-3028 | 1 | 12790176 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: <NAME>
"""
import melopero_RV_3028 as mp
import time
def main():
# First initialize and create the rtc device
rtc = mp.RV_3028()
# setup the rtc to use the eeprom memory (disables the automatic configuration refresh)
rtc.use_eeprom()
my_reg_address = 0x00
my_data = 0x42
# to write to ram registers you must use rtc.write_register
# to write to eeprom you must use rtc.write_eeprom_register
# user eeprom address space : [0x00 - 0x2A]
# configuration eeprom address space : [0x30 - 0x37]
rtc.write_eeprom_register(register_address=my_reg_address, value=my_data)
print("Saved {} at address {} in eeprom".format(my_data, my_reg_address))
# give some time to execute writing operation
time.sleep(1)
# to read from ram registers you must use rtc.read_register
# to write to eeprom you must use rtc.read_eeprom_register
# user eeprom address space : [0x00 - 0x2A]
# configuration eeprom address space : [0x30 - 0x37]
my_saved_data = rtc.read_eeprom_register(register_address=my_reg_address)
print("Read {} from eeprom address {}".format(my_saved_data, my_reg_address))
if __name__ == "__main__":
main() | 3.109375 | 3 |
promort/clinical_annotations_manager/migrations/0018_auto_20211128_1525.py | mdrio/ProMort | 3 | 12790177 | # Generated by Django 3.1.13 on 2021-11-28 15:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('clinical_annotations_manager', '0017_auto_20210424_1321'),
]
operations = [
migrations.RenameField(
model_name='coreannotation',
old_name='creation_start_date',
new_name='action_start_time',
),
migrations.RenameField(
model_name='focusregionannotation',
old_name='creation_start_date',
new_name='action_start_time',
),
migrations.RenameField(
model_name='gleasonelement',
old_name='creation_start_date',
new_name='action_start_time',
),
migrations.RenameField(
model_name='sliceannotation',
old_name='creation_start_date',
new_name='action_start_time',
),
]
| 1.601563 | 2 |
ChallengeJinja/challengejinja.py | subash-kc/2022-01-04-Python | 1 | 12790178 | """
192.168.30.22 hostA.localdomain # hostA
192.168.30.33 hostB.localdomain # hostB
192.168.30.44 hostC.localdomain # hostB
"""
"""
groups = [{"hostname": "hostA","ip": "192.168.30.22", "fqdn": "hostA.localdomain"},
{"hostname": "hostB", "ip": "192.168.30.33", "fqdn": "hostB.localdomain"},
{"hostname": "hostC", "ip": "192.168.30.44", "fqdn": "hostC.localdomain"}]
"""
from flask import Flask, request, redirect, url_for, session, render_template
# from flask import request
# from flask import redirect
# from flask import url_for
# from flask import session
# from flask import render_template
app = Flask(__name__)
app.secret_key= "random random RANDOM!"
groups = [{"hostname": "hostA","ip": "192.168.30.22", "fqdn": "hostA.localdomain"},
{"hostname": "hostB", "ip": "192.168.30.33", "fqdn": "hostB.localdomain"},
{"hostname": "hostC", "ip": "192.168.30.44", "fqdn": "hostC.localdomain"}]
@app.route("/", methods= ["GET","POST"])
def hosts():
# GET returns the rendered hosts
# POST adds new hosts, then returns rendered hosts
if "username" in session and session["username"] == "admin":
if request.method == "POST":
# pull all values from posted form
hostname = request.form.get("hostname")
ip = request.form.get("ip")
fqdn = request.form.get("fqdn")
# create a new dictionary with values, add to groups
groups.append({"hostname": hostname, "ip": ip, "fqdn": fqdn})
return render_template("hosts.j2", groups=groups)
@app.route("/form", methods=["GET","POST"])
def form():
# HTML form that collects hostname, ip, and fqdn values
if request.method == "POST":
session["username"] = request.form.get("username")
if "username" in session and session["username"] == "admin":
return render_template("formcollector.html.j2")
else:
return """
<form action = "" method = "post">
<p>Invalid Login.</p>
<p><input type = text name = username></p>
<p><input type = submit value = Login></p>
</form>
"""
@app.route("/logout")
def logout():
# accessing this page pops the value of username of the session
session.pop("username", None)
return redirect("/")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=2224)
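# The Jinja templates referenced above are not included in this file. A minimal
# sketch of what templates/hosts.j2 could look like (an assumption, not the
# original template) - it renders one /etc/hosts style line per dict in groups:
#
#   {% for host in groups %}
#   {{ host.ip }} {{ host.fqdn }} # {{ host.hostname }}
#   {% endfor %}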
| 2.8125 | 3 |
CNN.py | tahanakabi/Deep-Reinforcenment-learning-for-TCL-control | 6 | 12790179 | import numpy as np
import tensorflow as tf
from read_data import get_X_y
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import pickle
class NN():
def __init__(self, batch_size = 300, graph = tf.get_default_graph(),test_size = 0.1, steps_back=8, num_TCL=30):
self.num_TCL = num_TCL
with graph.as_default():
# Training Parameters
self.learning_rate = 0.1
self.num_steps = 100000
self.steps_back = steps_back
self.batch_size = batch_size
if batch_size==1:
self.test_proportion = 0
else:
self.test_proportion = test_size
self.batch_tr_size = int(self.batch_size * (1 - self.test_proportion))
self.test_size = int(self.test_proportion*self.batch_size)
self.keep_prob = tf.placeholder(tf.float32, name='keep_prob') # dropout (keep probability)
# display_step = 10
# Network Parameters
            self.cnn_num_input = num_TCL  # width of the TCL state input to the CNN branch
            self.fc_num_input = 4  # exogenous features for the fully connected branch
            self.num_output = 1  # single regression output
self.dropout = 0.85 # Dropout, probability to keep units
# Placeholders
self.Xb = tf.placeholder(tf.float32, [self.batch_tr_size, self.steps_back, self.cnn_num_input],name='Xb')
self.Xe = tf.placeholder(tf.float32, [self.batch_tr_size, 1, 4], name='Xe')
self.Y = tf.placeholder(tf.float32, [self.batch_tr_size, self.num_output], name='Y')
if self.test_proportion != 0:
# Test Placeholders
self.Xb_test = tf.placeholder(tf.float32, [self.test_size, self.steps_back, self.cnn_num_input],name='Xb_test')
self.Xe_test = tf.placeholder(tf.float32, [self.test_size, 1, 4], name='Xe_test')
self.Y_test = tf.placeholder(tf.float32, [self.test_size, self.num_output], name='Y_test')
# Store layers weight & bias
self.weights = {
# 5x5 conv
'wc1': tf.Variable(tf.random_normal([2, 8, 1, 32])),
# 5x5 conv, 32 inputs, 64 outputs
'wc2': tf.Variable(tf.random_normal([2, 8, 32, 64])),
# fully connected for cnn
'wd1': tf.Variable(tf.random_normal([self.steps_back*self.cnn_num_input*64//4, 1024])),
'wd11': tf.Variable(tf.random_normal([1024, 20])),
# fully connected for fl_net,
'wd2': tf.Variable(tf.random_normal([4, 20])),
# 1024+10 inputs, 1 output (class prediction)
'out': tf.Variable(tf.random_normal([20+20, 50])),
# second fuly connected layer 100 inputs and 1 output
'out2': tf.Variable(tf.random_normal([50, self.num_output]))
}
self.biases = {
'bc1': tf.Variable(tf.random_normal([32])),
'bc2': tf.Variable(tf.random_normal([64])),
'bd1': tf.Variable(tf.random_normal([1024])),
'bd11': tf.Variable(tf.random_normal([20])),
'bd2': tf.Variable(tf.random_normal([20])),
'out': tf.Variable(tf.random_normal([50])),
'out2': tf.Variable(tf.random_normal([self.num_output]))
}
# Create some wrappers for simplicity
def conv2d(self, x, W, b, strides=1):
# Conv2D wrapper, with bias and relu activation
x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')
x = tf.nn.bias_add(x, b)
return tf.nn.relu(x)
def maxpool2d(self, x, k=2):
# MaxPool2D wrapper
return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1],
padding='SAME')
# Create model
def conv_net(self,xb):
xb = tf.reshape(xb, shape=[-1, self.steps_back, self.num_TCL, 1])
# Convolution Layer
conv1 = self.conv2d(xb, self.weights['wc1'],self.biases['bc1'])
# Max Pooling (down-sampling)
conv1 = self.maxpool2d(conv1, k=2)
# Convolution Layer
conv2 = self.conv2d(conv1, self.weights['wc2'], self.biases['bc2'])
# Max Pooling (down-sampling)
# conv2 = self.maxpool2d(conv2, k=2)
# Fully connected layer
# Reshape conv2 output to fit fully connected layer input
conv2_reshaped = tf.reshape(conv2, [-1, self.weights['wd1'].get_shape().as_list()[0]])
fc1 = tf.add(tf.matmul(conv2_reshaped, self.weights['wd1']), self.biases['bd1'])
fc1_relued = tf.nn.relu(fc1)
fc11 = tf.add(tf.matmul(fc1_relued, self.weights['wd11']), self.biases['bd11'])
fc11_relued = tf.nn.relu(fc11)
## Apply Dropout
return tf.nn.dropout(fc11_relued, self.keep_prob)
def fc_net(self,xe):
xe = tf.reshape(xe, shape=[-1, self.weights['wd2'].get_shape().as_list()[0]])
fc2 = tf.add(tf.matmul(xe, self.weights['wd2']), self.biases['bd2'])
return tf.nn.relu(fc2)
def combined_net(self, graph = tf.get_default_graph()):
with graph.as_default():
conv_component = self.conv_net(self.Xb)
fc_component = self.fc_net(self.Xe)
# concatenate the to components
fc = tf.concat([conv_component,fc_component], axis=1)
# another fc net with sigmoid
fc3 = tf.add(tf.matmul(fc, self.weights['out']), self.biases['out'])
fc3_sigmoided = tf.nn.sigmoid(fc3)
#linear fc
prediction = tf.add(tf.matmul(fc3_sigmoided, self.weights['out2']), self.biases['out2'], name="prediction")
# Define loss and optimizer
loss_op = tf.losses.mean_squared_error(predictions = prediction ,labels = self.Y)
optimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)
train_op = optimizer.minimize(loss_op,name="train_op")
if self.test_proportion != 0:
# Test graph
conv_component_test = self.conv_net(graph.get_tensor_by_name("Xb_test:0"))
fc_component_test = self.fc_net(graph.get_tensor_by_name("Xe_test:0"))
# concatenate the to components
fc_test = tf.concat([conv_component_test, fc_component_test], axis=1)
# another fc net with sigmoid
fc3_test = tf.add(tf.matmul(fc_test, self.weights['out']), self.biases['out'])
fc3_sigmoided_test = tf.nn.sigmoid(fc3_test)
# linear fc
prediction_test = tf.add(tf.matmul(fc3_sigmoided_test, self.weights['out2']), self.biases['out2'], name="prediction_test")
loss_op_test = tf.losses.mean_squared_error(predictions=prediction_test, labels=self.Y_test)
def run_sess(self, sess, batch_xb, batch_xe, batch_y, saver, name):
graph = sess.graph
batch_xe = np.reshape(batch_xe,[-1,1,self.fc_num_input])
batch_xb = np.reshape(batch_xb, [-1, self.steps_back, self.cnn_num_input])
batch_y = np.reshape(batch_y,[-1,self.num_output])
batch_tr_xe = batch_xe[:self.batch_tr_size]
batch_test_xe = batch_xe[self.batch_tr_size:]
batch_tr_xb = batch_xb[:self.batch_tr_size]
batch_test_xb = batch_xb[self.batch_tr_size:]
batch_tr_y = batch_y[:self.batch_tr_size]
batch_test_y = batch_y[self.batch_tr_size:]
overfitting=0
for step in range(1, self.num_steps + 1):
# Run optimization op (backprop)
sess.run("train_op", feed_dict={graph.get_tensor_by_name("Xb:0"): batch_tr_xb,
graph.get_tensor_by_name("Xe:0"): batch_tr_xe,
graph.get_tensor_by_name("Y:0"): batch_tr_y,
graph.get_tensor_by_name("keep_prob:0"): self.dropout})
# Calculate batch loss
training_l = sess.run("mean_squared_error/value:0",
feed_dict={graph.get_tensor_by_name("Xb:0"): batch_tr_xb,
graph.get_tensor_by_name("Xe:0"): batch_tr_xe,
graph.get_tensor_by_name("Y:0"): batch_tr_y,
graph.get_tensor_by_name("keep_prob:0"): 1.0})
test_l = sess.run("mean_squared_error_1/value:0",
feed_dict={graph.get_tensor_by_name("Xb_test:0"): batch_test_xb,
graph.get_tensor_by_name("Xe_test:0"): batch_test_xe,
graph.get_tensor_by_name("Y_test:0"): batch_test_y,
graph.get_tensor_by_name("keep_prob:0"): 1.0})
if step % 10 == 0 or step == 1:
print("Step " + str(step) + ", Minibatch training Loss= " + str(training_l))
print("Step " + str(step) + ", Minibatch validation Loss= " + str(test_l))
if test_l - training_l> 0.015:
overfitting += 1
else: overfitting = 0
if overfitting >= 30 and training_l <= 0.01 :
print("condition satisfied")
break
if test_l < 0.009 and training_l < 0.009 :
print("condition satisfied")
break
# self.training_loss.append(training_l)
# self.validation_loss.append(test_l)
print("Optimization Finished!")
# Save the variables to disk.
save_path = saver.save(sess, name)
print("Model saved in path: %s" % save_path)
def train(self,xb, xe, y, name = "./model0.ckpt", graph = tf.get_default_graph() ):
self.training_loss = []
self.validation_loss = []
with tf.Session(graph=graph) as sess:
saver = tf.train.Saver()
try:
saver.restore(sess, name)
except:
sess.run(tf.global_variables_initializer())
for i in range(xb.shape[0]//self.batch_size):
# Run the initializer
index = i*self.batch_size
self.run_sess(sess, xb[index:index+self.batch_size],xe[index:index+self.batch_size],y[index:index+self.batch_size], saver, name= name)
# plt.plot(range(len(self.training_loss)), self.training_loss, label='Training')
# plt.plot(range(len(self.validation_loss)), self.validation_loss, label='Validation')
# plt.xlabel('Steps')
# # plt.ylabel('Loss')
#
# plt.title("Loss function")
#
# plt.legend()
#
# plt.show()
# def retrain(self,xb, xe, y,sess):
# saver.restore(sess, "./model.ckpt")
# self.run_sess(sess,xb,xe,y)
def predict(self, xb, xe, sess):
# tf Graph input
graph = sess.graph
xb = np.reshape(xb, [-1, self.steps_back, self.cnn_num_input])
xe = np.reshape(xe, [-1, 1, self.fc_num_input])
p = sess.run("prediction:0", feed_dict={graph.get_tensor_by_name("Xb:0"): xb, graph.get_tensor_by_name("Xe:0"): xe, graph.get_tensor_by_name("keep_prob:0"): 1.0})
return p
if __name__ == '__main__':
xb, xe, y = get_X_y(steps_back=7, filename="Q_data0.csv")
neural_net = NN(batch_size = 100, steps_back=8)
scaler1 = {}
for i in range(xb.shape[1]):
scaler1[i] = MinMaxScaler(feature_range=(0,1), copy=True)
xb[:,i,:] = scaler1[i].fit_transform(xb[:,i,:])
scaler2 = MinMaxScaler(feature_range=(0,1), copy=True).fit(xe)
scaler3 = MinMaxScaler(feature_range=(0, 1), copy=True).fit(y.reshape(-1,1))
xe= scaler2.transform(xe)
y= scaler3.transform(y.reshape(-1,1))
# graph = tf.Graph()
neural_net.combined_net()
# saver = tf.train.Saver()
# keep_prob = neural_net.keep_prob
# init = tf.global_variables_initializer()
# graph = tf.get_default_graph()
neural_net.train(xb, xe, y)
| 2.75 | 3 |
_untuned_modeling.py | mcvenkat/Python-Programs | 1 | 12790180 | <reponame>mcvenkat/Python-Programs<filename>_untuned_modeling.py
######################################################
# _untuned_modeling.py
# author: <NAME>, <EMAIL>
# licence: FreeBSD
"""
Copyright (c) 2015, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
"""
#first run feature_extraction.py
#then run this file from the same directory
######################################################
# import dependencies
import csv
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.ensemble import GradientBoostingClassifier,ExtraTreesClassifier
from sklearn.metrics import log_loss
######################################################
# list ids and labels
trainids=[]
labels=[]
with open('trainLabels.csv','r') as f:
r=csv.reader(f)
r.next() # skip header
for row in r:
trainids.append(row[0])
labels.append(float(row[1]))
testids=[]
with open('sampleSubmission.csv','r') as f:
r=csv.reader(f)
r.next()
for row in r:
testids.append(row[0])
######################################################
# general functions
def readdata(fname,header=True,selectedcols=None):
with open(fname,'r') as f:
r=csv.reader(f)
names = r.next() if header else None
if selectedcols:
assert header==True
data = [[float(e) for i,e in enumerate(row) if names[i] in selectedcols] for row in r]
names = [name for name in names if name in selectedcols]
else:
data = [[float(e) for e in row] for row in r]
return data,names
def writedata(data,fname,header=None):
with open(fname,'w') as f:
w=csv.writer(f)
if header:
w.writerow(header)
for row in data:
w.writerow(row)
######################################################
# cross validation
"""
function docv
input: classifier, kfolds object, features, labels, number of data rows
output: holdout-set-predictions for all rows
* run cross validation
"""
def docv(clf,kf,x,y,nrow,nlab=9):
pred = np.zeros((nrow,nlab))
for trainidx, testidx in kf:
clf.fit(x[trainidx],y[trainidx])
pred[testidx] = clf.predict_proba(x[testidx])
return pred
"""
function runcv
input: name of train/ test file, classifier 1 and 2 to be used
output: writes holdout-set-predictions for all rows to file
* run cross validation by calling docv for both classifiers, combine and save results
"""
def runcv(filename,c1,c2):
y=np.array(labels)
nrow=len(y)
x,_=readdata('train_%s'%filename)
x=np.array(x)
kf = KFold(nrow,10,shuffle=True)
p1=docv(c1,kf,x,y,nrow)
p2=docv(c2,kf,x,y,nrow)
pcombi=0.667*p1+0.333*p2
print '%.4f %.4f %.4f'%(log_loss(y,p1),log_loss(y,p2),log_loss(y,pcombi))
with open('pred_%s'%filename,'w') as f:
w=csv.writer(f)
for row in pcombi:
w.writerow(row)
######################################################
# submit and print feature importance
"""
function writesubm
input: name of train/ test file, classifier 1 and 2 to be used
output: writes testset predictions to file
* train classifiers using all traindata, create testset predictions, combine and save results
"""
def writesubm(filename,c1,c2):
xtrain,names=readdata('train_%s'%filename)
xtest,_=readdata('test_%s'%filename)
c1.fit(xtrain,labels)
c2.fit(xtrain,labels)
p1=c1.predict_proba(xtest)
p2=c2.predict_proba(xtest)
p=0.667*p1+0.333*p2
with open('subm_%s'%filename,'w') as f:
w=csv.writer(f)
w.writerow(['Id']+['Prediction%d'%num for num in xrange(1,10)])
for inum,i in enumerate(testids):
w.writerow([i]+list(p[inum]))
######################################################
# go
if __name__ == '__main__':
gbm=GradientBoostingClassifier(
n_estimators=400, max_features=5)
xtr=ExtraTreesClassifier(
n_estimators=400,max_features=None,
min_samples_leaf=2,min_samples_split=3,
n_jobs=7)
for filename in [
'45c.csv',
]:
print filename
runcv(filename,gbm,xtr)
writesubm(filename,gbm,xtr)
print ''
"""
45c.csv
0.0117 0.0168 0.0101
public LB: 0.008071379
private LB: 0.007615772
""" | 1.757813 | 2 |
tests/test_key.py | dajiaji/pyseto | 25 | 12790181 | <filename>tests/test_key.py
from secrets import token_bytes
import pytest
from pyseto import DecryptError, Key, NotSupportedError
from pyseto.key_interface import KeyInterface
from pyseto.utils import base64url_decode
from .utils import load_jwk, load_key
class TestKey:
"""
Tests for Key.
"""
@pytest.mark.parametrize(
"version, purpose, key",
[
(1, "local", token_bytes(32)),
(2, "local", token_bytes(32)),
(3, "local", token_bytes(32)),
(4, "local", token_bytes(32)),
],
)
def test_key_new_local(self, version, purpose, key):
k = Key.new(version, purpose, key)
assert isinstance(k, KeyInterface)
assert k.version == version
assert k.purpose == purpose
with pytest.raises(NotSupportedError) as err:
k.sign(b"Hello world!")
pytest.fail("Key.sign() should fail.")
assert "A key for local does not have sign()." in str(err.value)
with pytest.raises(NotSupportedError) as err:
k.verify(b"xxxxxx")
pytest.fail("Key.verify() should fail.")
assert "A key for local does not have verify()." in str(err.value)
@pytest.mark.parametrize(
"version, purpose, key",
[
(1, "public", load_key("keys/private_key_rsa.pem")),
(1, "public", load_key("keys/public_key_rsa.pem")),
(2, "public", load_key("keys/private_key_ed25519.pem")),
(2, "public", load_key("keys/public_key_ed25519.pem")),
(3, "public", load_key("keys/private_key_ecdsa_p384.pem")),
(3, "public", load_key("keys/public_key_ecdsa_p384.pem")),
(4, "public", load_key("keys/private_key_ed25519.pem")),
(4, "public", load_key("keys/public_key_ed25519.pem")),
],
)
def test_key_new_public(self, version, purpose, key):
k = Key.new(version, purpose, key)
assert isinstance(k, KeyInterface)
assert k.version == version
assert k.purpose == purpose
with pytest.raises(NotSupportedError) as err:
k.encrypt(b"Hello world!")
pytest.fail("Key.sign() should fail.")
assert "A key for public does not have encrypt()." in str(err.value)
with pytest.raises(NotSupportedError) as err:
k.decrypt(b"xxxxxx")
pytest.fail("Key.verify() should fail.")
assert "A key for public does not have decrypt()." in str(err.value)
@pytest.mark.parametrize(
"version, key, msg",
[
(1, load_key("keys/private_key_ed25519.pem"), "The key is not RSA key."),
(1, load_key("keys/public_key_ed25519.pem"), "The key is not RSA key."),
(
1,
load_key("keys/private_key_ecdsa_p384.pem"),
"The key is not RSA key.",
),
(
1,
load_key("keys/public_key_ecdsa_p384.pem"),
"The key is not RSA key.",
),
(2, load_key("keys/private_key_rsa.pem"), "The key is not Ed25519 key."),
(2, load_key("keys/public_key_rsa.pem"), "The key is not Ed25519 key."),
(
2,
load_key("keys/private_key_ecdsa_p384.pem"),
"The key is not Ed25519 key.",
),
(
2,
load_key("keys/public_key_ecdsa_p384.pem"),
"The key is not Ed25519 key.",
),
(
3,
load_key("keys/private_key_ed25519.pem"),
"The key is not ECDSA key.",
),
(
3,
load_key("keys/public_key_ed25519.pem"),
"The key is not ECDSA key.",
),
(3, load_key("keys/private_key_rsa.pem"), "The key is not ECDSA key."),
(3, load_key("keys/public_key_rsa.pem"), "The key is not ECDSA key."),
(4, load_key("keys/private_key_rsa.pem"), "The key is not Ed25519 key."),
(4, load_key("keys/public_key_rsa.pem"), "The key is not Ed25519 key."),
(
4,
load_key("keys/private_key_ecdsa_p384.pem"),
"The key is not Ed25519 key.",
),
(
4,
load_key("keys/public_key_ecdsa_p384.pem"),
"The key is not Ed25519 key.",
),
],
)
def test_key_new_public_with_wrong_key(self, version, key, msg):
with pytest.raises(ValueError) as err:
Key.new(version, "public", key)
pytest.fail("Key.new should fail.")
assert msg in str(err.value)
@pytest.mark.parametrize(
"version, purpose, key, msg",
[
("v*", "local", token_bytes(32), "Invalid version: v*."),
("v0", "local", token_bytes(32), "Invalid version: v0."),
(0, "local", token_bytes(32), "Invalid version: 0."),
(
"v*",
"public",
load_key("keys/private_key_rsa.pem"),
"Invalid version: v*.",
),
(
"v0",
"public",
load_key("keys/private_key_rsa.pem"),
"Invalid version: v0.",
),
(
0,
"public",
load_key("keys/private_key_rsa.pem"),
"Invalid version: 0.",
),
(1, "xxx", token_bytes(32), "Invalid purpose: xxx."),
(1, "public", "-----BEGIN BAD", "Invalid or unsupported PEM format."),
],
)
def test_key_new_with_invalid_arg(self, version, purpose, key, msg):
with pytest.raises(ValueError) as err:
Key.new(version, purpose, key)
pytest.fail("Key.new should fail.")
assert msg in str(err.value)
@pytest.mark.parametrize(
"version, key",
[
# (1, load_jwk("keys/private_key_rsa.json")),
# (1, load_jwk("keys/public_key_rsa.json")),
(2, load_jwk("keys/private_key_ed25519.json")),
(2, load_jwk("keys/public_key_ed25519.json")),
(3, load_jwk("keys/private_key_ecdsa_p384.json")),
(3, load_jwk("keys/public_key_ecdsa_p384.json")),
(4, load_jwk("keys/private_key_ed25519.json")),
(4, load_jwk("keys/public_key_ed25519.json")),
],
)
def test_key_from_asymmetric_params(self, version, key):
k = Key.from_asymmetric_key_params(version, x=key["x"], y=key["y"], d=key["d"])
assert isinstance(k, KeyInterface)
assert k.version == version
assert k.purpose == "public"
@pytest.mark.parametrize(
"paserk",
[
"k1.local.AAAAAAAAAAAAAAAA",
"k1.public.AAAAAAAAAAAAAAAA",
"k2.local.AAAAAAAAAAAAAAAA",
"k2.public.AAAAAAAAAAAAAAAA",
"k3.local.AAAAAAAAAAAAAAAA",
"k3.public.AAAAAAAAAAAAAAAA",
"k4.local.AAAAAAAAAAAAAAAA",
"k4.public.AAAAAAAAAAAAAAAA",
],
)
def test_key_from_paserk_with_wrapping_key_and_password(self, paserk):
with pytest.raises(ValueError) as err:
Key.from_paserk(paserk, wrapping_key="xxx", password="<PASSWORD>")
pytest.fail("Key.from_paserk should fail.")
assert "Only one of wrapping_key or password should be specified." in str(err.value)
@pytest.mark.parametrize(
"paserk, msg",
[
("k1.local.AAAAAAAAAAAAAAAA", "Invalid PASERK type: local."),
("k1.public.AAAAAAAAAAAAAAAA", "Invalid PASERK type: public."),
("k2.local.AAAAAAAAAAAAAAAA", "Invalid PASERK type: local."),
("k2.public.AAAAAAAAAAAAAAAA", "Invalid PASERK type: public."),
("k3.local.AAAAAAAAAAAAAAAA", "Invalid PASERK type: local."),
("k3.public.AAAAAAAAAAAAAAAA", "Invalid PASERK type: public."),
("k4.local.AAAAAAAAAAAAAAAA", "Invalid PASERK type: local."),
("k4.public.AAAAAAAAAAAAAAAA", "Invalid PASERK type: public."),
],
)
def test_key_from_paserk_with_password_for_wrong_paserk(self, paserk, msg):
with pytest.raises(ValueError) as err:
Key.from_paserk(paserk, password="<PASSWORD>")
pytest.fail("Key.from_paserk should fail.")
assert msg in str(err.value)
@pytest.mark.parametrize(
"paserk, msg",
[
("v1.local.AAAAAAAAAAAAAAAA", "Invalid PASERK version: v1."),
("*.local.AAAAAAAAAAAAAAAA", "Invalid PASERK version: *."),
("k1.xxx.AAAAAAAAAAAAAAAA", "Invalid PASERK key type: xxx."),
],
)
def test_key_from_paserk_with_invalid_args(self, paserk, msg):
with pytest.raises(ValueError) as err:
Key.from_paserk(paserk)
pytest.fail("Key.from_paserk should fail.")
assert msg in str(err.value)
@pytest.mark.parametrize(
"version",
[
1,
2,
3,
4,
],
)
def test_key_from_paserk_for_local_with_wrong_wrapping_key(self, version):
k = Key.new(version, "local", token_bytes(32))
wk1 = token_bytes(32)
wk2 = token_bytes(32)
wpk = k.to_paserk(wrapping_key=wk1)
with pytest.raises(DecryptError) as err:
Key.from_paserk(wpk, wrapping_key=wk2)
pytest.fail("Key.from_paserk() should fail.")
assert "Failed to unwrap a key." in str(err.value)
@pytest.mark.parametrize(
"version",
[
1,
2,
3,
4,
],
)
def test_key_from_paserk_for_local_with_wrong_password(self, version):
k = Key.new(version, "local", token_bytes(32))
wk1 = token_bytes(32)
wk2 = token_bytes(32)
wpk = k.to_paserk(password=wk1)
with pytest.raises(DecryptError) as err:
Key.from_paserk(wpk, password=<PASSWORD>)
pytest.fail("Key.from_paserk() should fail.")
assert "Failed to unwrap a key." in str(err.value)
@pytest.mark.parametrize(
"version, key",
[
(1, load_key("keys/private_key_rsa.pem")),
(2, load_key("keys/private_key_ed25519.pem")),
(3, load_key("keys/private_key_ecdsa_p384.pem")),
(4, load_key("keys/private_key_ed25519.pem")),
],
)
def test_key_from_paserk_for_private_key_with_wrong_wrapping_key(self, version, key):
k = Key.new(version, "public", key)
wk1 = token_bytes(32)
wk2 = token_bytes(32)
wpk = k.to_paserk(wrapping_key=wk1)
with pytest.raises(DecryptError) as err:
Key.from_paserk(wpk, wrapping_key=wk2)
pytest.fail("Key.from_paserk() should fail.")
assert "Failed to unwrap a key." in str(err.value)
@pytest.mark.parametrize(
"version, key",
[
(1, load_key("keys/public_key_rsa.pem")),
(2, load_key("keys/public_key_ed25519.pem")),
(3, load_key("keys/public_key_ecdsa_p384.pem")),
(4, load_key("keys/public_key_ed25519.pem")),
],
)
def test_key_from_paserk_for_public_key_with_wrapping_key(self, version, key):
k = Key.new(version, "public", key)
wk = token_bytes(32)
with pytest.raises(ValueError) as err:
k.to_paserk(wrapping_key=wk)
pytest.fail("to_paserk() should fail.")
assert "Public key cannot be wrapped." in str(err.value)
@pytest.mark.parametrize(
"version, key",
[
(1, load_key("keys/public_key_rsa.pem")),
(2, load_key("keys/public_key_ed25519.pem")),
(3, load_key("keys/public_key_ecdsa_p384.pem")),
(4, load_key("keys/public_key_ed25519.pem")),
],
)
def test_key_from_paserk_for_public_key_with_password(self, version, key):
k = Key.new(version, "public", key)
wk = token_bytes(32)
with pytest.raises(ValueError) as err:
k.to_paserk(password=wk)
pytest.fail("to_paserk() should fail.")
assert "Public key cannot be wrapped." in str(err.value)
@pytest.mark.parametrize(
"version, key, msg",
[
(
1,
load_jwk("keys/private_key_rsa.json"),
"v1.public is not supported on from_key_parameters.",
),
(999, load_jwk("keys/private_key_ed25519.json"), "Invalid version: 999."),
(0, load_jwk("keys/private_key_ed25519.json"), "Invalid version: 0."),
(
2,
{"x": b"xxx", "y": b"", "d": b"ddd"},
"Only one of x or d should be set for v2.public.",
),
(2, {"x": b"xxx", "y": b"", "d": b""}, "Failed to load key."),
(2, {"x": b"", "y": b"", "d": b"ddd"}, "Failed to load key."),
(
2,
{"x": b"", "y": b"", "d": b""},
"x or d should be set for v2.public.",
),
(
3,
{"x": b"xxx", "y": b"", "d": b"ddd"},
"x and y (and d) should be set for v3.public.",
),
(
3,
{"x": b"", "y": b"yyy", "d": b"ddd"},
"x and y (and d) should be set for v3.public.",
),
(3, {"x": b"xxx", "y": b"yyy", "d": b""}, "Failed to load key."),
(
3,
{
"x": base64url_decode("_XyN9woHaS0mPimSW-etwJMEDSzxIMjp4PjezavU8SHJoClz1bQrcmPb1ZJxHxhI"),
"y": base64url_decode("<KEY>"),
"d": b"ddd",
},
"Failed to load key.",
),
(
4,
{"x": b"xxx", "y": b"", "d": b"ddd"},
"Only one of x or d should be set for v4.public.",
),
(4, {"x": b"xxx", "y": b"", "d": b""}, "Failed to load key."),
(4, {"x": b"", "y": b"", "d": b"ddd"}, "Failed to load key."),
(
4,
{"x": b"", "y": b"", "d": b""},
"x or d should be set for v4.public.",
),
],
)
def test_key_from_asymmetric_params_with_invalid_arg(self, version, key, msg):
with pytest.raises(ValueError) as err:
Key.from_asymmetric_key_params(version, x=key["x"], y=key["y"], d=key["d"])
pytest.fail("Key.from_asymmetric_key_params() should fail.")
assert msg in str(err.value)
@pytest.mark.parametrize(
"version, purpose, key",
[
(1, "public", load_key("keys/public_key_rsa.pem")),
(2, "public", load_key("keys/public_key_ed25519.pem")),
(3, "public", load_key("keys/public_key_ecdsa_p384.pem")),
(4, "public", load_key("keys/public_key_ed25519.pem")),
],
)
def test_key_to_paserk_public(self, version, purpose, key):
k = Key.new(version, purpose, key)
assert k.to_paserk().startswith(f"k{k.version}.public.")
@pytest.mark.parametrize(
"version, purpose, key",
[
(1, "public", load_key("keys/private_key_rsa.pem")),
(2, "public", load_key("keys/private_key_ed25519.pem")),
(3, "public", load_key("keys/private_key_ecdsa_p384.pem")),
(4, "public", load_key("keys/private_key_ed25519.pem")),
],
)
def test_key_to_paserk_secret(self, version, purpose, key):
k = Key.new(version, purpose, key)
assert k.to_paserk().startswith(f"k{k.version}.secret.")
@pytest.mark.parametrize(
"version, purpose, key",
[
(1, "local", token_bytes(32)),
(2, "local", token_bytes(32)),
(3, "local", token_bytes(32)),
(4, "local", token_bytes(32)),
(1, "public", load_key("keys/private_key_rsa.pem")),
(2, "public", load_key("keys/private_key_ed25519.pem")),
(3, "public", load_key("keys/private_key_ecdsa_p384.pem")),
(4, "public", load_key("keys/private_key_ed25519.pem")),
],
)
def test_key_to_paserk_secret_with_wrapping_key_and_password(self, version, purpose, key):
k = Key.new(version, purpose, key)
with pytest.raises(ValueError) as err:
k.to_paserk(wrapping_key="xxx", password="<PASSWORD>")
pytest.fail("to_paserk() should fail.")
assert "Only one of wrapping_key or password should be specified." in str(err.value)
| 2.3125 | 2 |
week4_2.py | SwapnilNair/Algorithms | 0 | 12790182 | def DFSInit(adjMAT):
(rows,cols) = adjMAT.shape
(visited,parents) = ({},{})
    for i in range(rows):
visited[i] = False
parents[i] = -1
return (visited,parents)
def DFS(adjMAT,visited,parent,v):
visited[v] = True
    for k in neighbours(adjMAT, v):  # neighbours() was not defined in the original; a sketch follows below
if (not visited[k]):
parent[k] = v
(visited,parent) = DFS(adjMAT,visited,parent,k)
return (visited,parent)
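# neighbours() is not defined in the original snippet; the call in DFS above was
# adjusted to pass adjMAT so the sketch below can use it. This assumes adjMAT is
# a numpy 0/1 adjacency matrix.
def neighbours(adjMAT, v):
    return [k for k in range(adjMAT.shape[1]) if adjMAT[v, k] == 1]

# Illustrative driver:
#   import numpy as np
#   adjMAT = np.array([[0, 1, 0],
#                      [1, 0, 1],
#                      [0, 1, 0]])
#   (visited, parent) = DFSInit(adjMAT)
#   (visited, parent) = DFS(adjMAT, visited, parent, 0)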
| 3.359375 | 3 |
solutions/067_add_binary.py | abawchen/leetcode | 0 | 12790183 | # Given two binary strings, return their sum (also a binary string).
# For example,
# a = "11"
# b = "1"
# Return "100".
class Solution:
# @param {string} a
# @param {string} b
# @return {string}
def addBinary(self, a, b):
if len(a) > len(b):
b = "0" * (len(a)-len(b)) + b
else:
a = "0" * (len(b)-len(a)) + a
ans = ""
c = 0
for i in xrange(len(a)-1, -1, -1):
val = int(a[i]) + int(b[i]) + c
r, c = val % 2, int(val >= 2)
ans = str(r) + ans
if c == 1:
ans = "1" + ans
return ans
| 3.546875 | 4 |
Stack/Tower_of_Hanoi.py | harshil2004/Data-Structures-and-Algorithms | 14 | 12790184 | <gh_stars>10-100
#Contribiuted by <NAME> [github/YA12SHYAM]
#implementing recusion solution of Tower of Hanoi in python
def solve_hanoi(n,from_rod,to_rod,use_rod):
if(n==1):
print("Move disk 1 from rod {} to_rod {}".format(from_rod,to_rod))
else:
#solve top n-1 disc from source rod to auxillary/Using rod
solve_hanoi(n-1,from_rod,use_rod,to_rod)
#Move remaining largest disc from Source rod to target rod
print("Move disk {} from rod {} to rod {}".format(n,from_rod,to_rod))
#solve n-1 disc from auxillary rod to target rod
solve_hanoi(n-1,use_rod,to_rod,from_rod)
def main():
print("Enter the Number of disc")
# the number of disc is n
n=int(input())
print()
# Source rod is rod 'a',Target rod is rod 'b',Using/auxillary rod is rod 'c'
solve_hanoi(n,'a','b','c')
if __name__ == "__main__":
main()
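# Expected output for an input of 2 discs (illustrative trace of the recursion above):
#   Move disk 1 from rod a to rod c
#   Move disk 2 from rod a to rod b
#   Move disk 1 from rod c to rod b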
| 4.03125 | 4 |
scripts/resources/config.py | KoMaTo3/Biominator | 0 | 12790185 | <gh_stars>0
config_platform = 'win32'
#config_platform = 'android'
#config_platform = 'linux'
| 1.101563 | 1 |
17B-162/HI/imaging/feather_comparisons.py | e-koch/VLA_Lband | 1 | 12790186 |
'''
Compare the data where they overlap in the uv plane.
No offset correction is needed.
'''
from spectral_cube import SpectralCube
import numpy as np
import astropy.units as u
import matplotlib.pyplot as plt
import os
import scipy.ndimage as nd
from uvcombine.scale_factor import find_scale_factor
from cube_analysis.feather_cubes import feather_compare_cube
from paths import (seventeenB_HI_data_02kms_path,
seventeenB_HI_data_1kms_path,
data_path, allfigs_path)
from constants import hi_freq
from plotting_styles import onecolumn_figure
# Compare with the 1 km/s cube. Higher S/N
# vla_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.image.pbcor.fits"))
vla_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.image.pbcor.fits"))
# pb_cube = SpectralCube.read(seventeenB_HI_data_02kms_path("M33_14B_17B_HI_contsub_width_02kms.pb.fits"))
pb_cube = SpectralCube.read(seventeenB_HI_data_1kms_path("M33_14B_17B_HI_contsub_width_1kms.pb.fits"))
# PB minimally changes over the frequency range. So just grab one plane
pb_plane = pb_cube[0]
# We need to define a tapered weighting function to ignore emission outside
# of the VLA mosaic
def taper_weights(mask, sigma, nsig_cut=3):
dist = nd.distance_transform_edt(mask)
gauss_dists = np.where(np.logical_and(dist < nsig_cut * sigma, dist > 0.))
flat_dists = np.where(dist >= nsig_cut * sigma)
weight_arr = np.zeros_like(mask, dtype=float)
weight_arr[gauss_dists] = \
np.exp(- (dist[gauss_dists] - nsig_cut * sigma)**2 / (2 * sigma**2))
weight_arr[flat_dists] = 1.
return weight_arr
weight = taper_weights(np.isfinite(pb_plane), 30, nsig_cut=5)
gbt_path = os.path.join(data_path, "GBT")
# gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_02kms.fits"))
gbt_cube = SpectralCube.read(os.path.join(gbt_path, "17B-162_items/m33_gbt_vlsr_highres_Tmb_17B162_1kms.fits"))
beam_fwhm = lambda diam: ((1.18 * hi_freq.to(u.cm, u.spectral())) / diam.to(u.cm)) * u.rad
# Already determined from the 14B HI analysis. Lowered spatial resolution
# due to lack of overlap in the GBT fields centered at M33. So the data were
# gridded with a Gaussian kernel, rather than a jinc function
gbt_eff_beam = beam_fwhm(87.5 * u.m)
# The shortest baseline in the 14B-088 data is ~44 m.
las = (hi_freq.to(u.cm, u.spectral()) / (44 * u.m)).to(u.arcsec, u.dimensionless_angles())
radii, ratios, high_pts, low_pts, chan_out = \
feather_compare_cube(vla_cube, gbt_cube, las,
num_cores=1,
lowresfwhm=gbt_eff_beam,
chunk=50,
verbose=False,
weights=weight,
relax_spectral_check=False,
# NOTE: there is an offset of ~0.4 km/s between the cubes
# The big GBT beam means this really doesn't matter (I
# manually checked). The difference is 0.36 times the
# channel size. I have no idea where this shift is coming
# from since the freq axis used in `gbt_regrid.py` matches
# the frequency in the individual channel MSs used in
# imaging. It's not even a half-channel offset like I
# would expect if the MS frequency was the channel edge...
spec_check_kwargs={'rtol': 0.4})
onecolumn_figure()
sc_factor, sc_err = find_scale_factor(np.hstack(low_pts), np.hstack(high_pts),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor, sc_err))
# Factor: 1.125046+/-0.00394768
# This isn't a fantastic fit, so this error was significantly underestimated
plt.close()
# Compare properties per-channel
sc_factor_chans = []
sc_err_chans = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='distrib',
verbose=False)
sc_factor_chans.append(sc_f)
sc_err_chans.append(sc_e)
sc_factor_chans_linfit = []
sc_err_chans_linfit = []
for low, high in zip(low_pts, high_pts):
sc_f, sc_e = \
find_scale_factor(low, high,
method='linfit',
verbose=False)
sc_factor_chans_linfit.append(sc_f)
sc_err_chans_linfit.append(sc_e)
sc_factor_chans_linfit = np.array(sc_factor_chans_linfit)
sc_err_chans_linfit = np.array(sc_err_chans_linfit)
chans = np.arange(len(low_pts))
onecolumn_figure()
plt.errorbar(chans, sc_factor_chans,
yerr=sc_err_chans,
alpha=0.5, label='Distrib Fit')
plt.errorbar(chans, sc_factor_chans_linfit,
yerr=[sc_factor_chans_linfit - sc_err_chans_linfit[:, 0],
sc_err_chans_linfit[:, 1] - sc_factor_chans_linfit],
alpha=0.5, label='Linear fit')
# plt.plot(chans, slope_lowess_85)
plt.axhline(1, linestyle='--')
plt.legend(frameon=True)
plt.ylabel(r"Scale Factor")
plt.xlabel("Channels")
plt.grid(True)
plt.tight_layout()
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.png"))
plt.savefig(allfigs_path("Imaging/ratio_hist_perchan_17B_vla_gbt_9.8arcmin_v3_w_weights.pdf"))
plt.close()
# Now refit with the channels near the systemic velocity, where most of the HI
# structure falls within the mosaic PB
chan_range = slice(80, 160)
onecolumn_figure()
sc_factor_chrange, sc_err_chrange = \
find_scale_factor(np.hstack(low_pts[chan_range]),
np.hstack(high_pts[chan_range]),
method='distrib',
verbose=True)
plt.grid(True)
plt.xlabel(r"ln I$_{\rm int}$ / I$_{\rm SD}$")
plt.tight_layout()
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.png"))
plt.savefig(allfigs_path(f"Imaging/ratio_hist_17B_vla_gbt_9.8arcmin_chan_{chan_range.start}_{chan_range.stop}_v3_w_weights.pdf"))
print("Factor: {0}+/-{1}".format(sc_factor_chrange, sc_err_chrange))
# Factor: 1.105133+/-0.00463
# Error still underestimated
# The >1 factor is due to some emission in the GBT data being cut-off by the
# PB limit of the VLA mosaic. The factor increases far from the systemic
# velocity, where bright HI gets cut-off (compared to the larger 14B data).
# So, despite the != 1 factor, no factor will be applied to the SD data.
# Besides, the 14B mosaic comparison gives a 1.0 factor with the GBT data.
# The tests here were for consistency and that's what we find.
plt.close()
| 2.21875 | 2 |
Shellcodes/Encoder-Scripts/xor_encoder.py | noamts/Malware | 6 | 12790187 | <filename>Shellcodes/Encoder-Scripts/xor_encoder.py
#! /bin/python
original_shellcode = ""  # paste the shellcode bytes to encode here, e.g. "\x31\xc0..."
encoder_byte = 0xAA      # XOR key: must be an integer in 0x00-0xff, not a string
encoded_shellcode_format1=[]
encoded_shellcode_format2=[]
for byt in bytearray(original_shellcode):
xor=byt^encoder_byte
xor="%02x" %xor
xor1="\\x" + xor
xor2="0x" + xor +","
encoded_shellcode_format1.append(xor1)
encoded_shellcode_format2.append(xor2)
print("Format 1:\n")
print ''.join(encoded_shellcode_format1)
print("\n\n\n")
print("Format 2:\n")
print ''.join(encoded_shellcode_format2)
print("\n")
print("Length:" +str(len(bytearray(original_shellcode))))
| 3.15625 | 3 |
a.py | Pzzzzz5142/My-Music | 2 | 12790188 | <filename>a.py
from tqdm import tqdm
from random import randint
class A(object):
def __init__(self):
self.a=self.aa()
def aa(self):
return 'aaa'
class B(A):
def __init__(self):
super().__init__()
def aa(self):
return 'aaaaaa'
a=A()
print(a.a) | 2.984375 | 3 |
exams/migrations/0020_auto_20210805_1401.py | ankanb240/otis-web | 15 | 12790189 | # Generated by Django 3.2.5 on 2021-08-05 18:01
import django.core.validators
from django.db import migrations, models
import re
class Migration(migrations.Migration):
dependencies = [
('exams', '0019_auto_20210805_1334'),
]
operations = [
migrations.AlterField(
model_name='examattempt',
name='guess1',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 1 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess2',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 2 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess3',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 3 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess4',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 4 response'),
),
migrations.AlterField(
model_name='examattempt',
name='guess5',
field=models.IntegerField(blank=True, null=True, validators=[django.core.validators.MinValueValidator(-1000000000), django.core.validators.MaxValueValidator(1000000000)], verbose_name='Problem 5 response'),
),
migrations.AlterField(
model_name='practiceexam',
name='answer1',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer2',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer3',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer4',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
migrations.AlterField(
model_name='practiceexam',
name='answer5',
field=models.CharField(default='', max_length=64, validators=[django.core.validators.RegexValidator(re.compile('^\\d+(?:,\\d+)*\\Z'), code='invalid', message='Enter only digits separated by commas.')]),
preserve_default=False,
),
]
| 1.585938 | 2 |
utils/dirty.py | chua-n/particle | 1 | 12790190 | <gh_stars>1-10
import time
from functools import wraps
import numpy as np
import scipy.linalg  # imported explicitly so that scipy.linalg.expm (used below) is available
from scipy.spatial import ConvexHull
from scipy.spatial.distance import jensenshannon
from skimage import io, filters
from skimage.color import rgb2gray
from skimage.util import img_as_ubyte
from skimage.filters import threshold_otsu
from skimage import measure, transform
from scipy import ndimage as ndi
import torch
def loadNnData(sourcePath, keyword: str = None) -> torch.Tensor:
    if sourcePath.endswith('.npy'):
        data = torch.from_numpy(np.load(sourcePath))
    elif sourcePath.endswith('.npz'):
        data = torch.from_numpy(np.load(sourcePath)[keyword])
    else:
        raise ValueError("Expected a .npy or .npz file, got: %s" % sourcePath)
    # binary volumes are sometimes stored with 255 as the foreground value; normalise to {0, 1}
    if data.max() == 255:
        data[data == 255] = 1
    return data
def project(tensor: torch.tensor, dim: int):
return torch.max(tensor, dim=dim).values
class TVSHelper:
@staticmethod
def getOutline(imgFile="img.jpg"):
img = io.imread(imgFile, as_gray=True)
# 检测到边缘并进行滤波
edge = filters.sobel(img)
edge = img_as_ubyte(rgb2gray(edge))
edge = ndi.median_filter(edge, 3)
# 二值化
thrd = threshold_otsu(edge)
maskOfWhite = edge > thrd
edge[maskOfWhite] = 255
edge[~maskOfWhite] = 0
# 提取
labeledImg = measure.label(edge, connectivity=2)
regionProps = measure.regionprops(labeledImg)
regionProps.sort(key=lambda prop: prop.area, reverse=True)
targetImage = regionProps[0].filled_image
return targetImage
@staticmethod
def putIntoCube(image, size=48, **kwargs):
longToWidthRatio = max(image.shape) / min(image.shape)
image = transform.resize(
image, (size, size // longToWidthRatio), **kwargs)
cube = np.zeros((64, 64), dtype=np.uint8)
initRowInd, initColInd = (
np.array(cube.shape) - np.array(image.shape))//2
cube[initRowInd:initRowInd+image.shape[0],
initColInd:initColInd+image.shape[1]] = image
return cube
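# Illustrative sketch (added, not part of the original module): the intended
# pipeline is to extract the largest outline from a photo and embed it into a
# 64x64 cube. The image path below is just an assumed example input.
def _tvs_helper_example(img_file="img.jpg"):
    outline = TVSHelper.getOutline(img_file)   # largest filled region of the edge image
    return TVSHelper.putIntoCube(outline, size=48)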
class Circumsphere:
"""Copied from GitHub at https://github.com/shrx/mbsc.git, not my original.
"""
@classmethod
def fit(cls, array):
"""Compute exact minimum bounding sphere of a 3D point cloud (or a
triangular surface mesh) using Welzl's algorithm.
- X : M-by-3 list of point co-ordinates or a triangular surface
mesh specified as a TriRep object.
- R : radius of the sphere.
- C : 1-by-3 vector specifying the centroid of the sphere.
- Xb : subset of X, listing K-by-3 list of point coordinates from
which R and C were computed. See function titled
'FitSphere2Points' for more info.
        REFERENCES:
[1] Welzl, E. (1991), 'Smallest enclosing disks (balls and ellipsoids)',
Lecture Notes in Computer Science, Vol. 555, pp. 359-370
Matlab code author: <NAME> (<EMAIL>)
Date: Dec.2014"""
# Get the convex hull of the point set
hull = ConvexHull(array)
hull_array = array[hull.vertices]
hull_array = np.unique(hull_array, axis=0)
# print(len(hull_array))
# Randomly permute the point set
hull_array = np.random.permutation(hull_array)
if len(hull_array) <= 4:
R, C = cls.fit_base(hull_array)
return R, C, hull_array
elif len(hull_array) < 1000:
# try:
R, C, _ = cls.B_min_sphere(hull_array, [])
# Coordiantes of the points used to compute parameters of the
# minimum bounding sphere
D = np.sum(np.square(hull_array - C), axis=1)
idx = np.argsort(D - R**2)
D = D[idx]
Xb = hull_array[idx[:5]]
D = D[:5]
Xb = Xb[D < 1E-6]
idx = np.argsort(Xb[:, 0])
Xb = Xb[idx]
return R, C, Xb
# except:
#raise Exception
else:
M = len(hull_array)
dM = min([M // 4, 300])
# unnecessary ?
# res = M % dM
# n = np.ceil(M/dM)
# idx = dM * np.ones((1, n))
# if res > 0:
# idx[-1] = res
#
# if res <= 0.25 * dM:
# idx[n-2] = idx[n-2] + idx[n-1]
# idx = idx[:-1]
# n -= 1
hull_array = np.array_split(hull_array, dM)
Xb = np.empty([0, 3])
for i in range(len(hull_array)):
R, C, Xi = cls.B_min_sphere(
np.vstack([Xb, hull_array[i]]), [])
# 40 points closest to the sphere
D = np.abs(np.sqrt(np.sum((Xi - C)**2, axis=1)) - R)
idx = np.argsort(D, axis=0)
Xb = Xi[idx[:40]]
D = np.sort(D, axis=0)[:4]
# print(Xb)
# print(D)
#print(np.where(D/R < 1e-3)[0])
Xb = np.take(Xb, np.where(D/R < 1e-3)[0], axis=0)
Xb = np.sort(Xb, axis=0)
# print(Xb)
return R, C, Xb
@classmethod
def fit_base(cls, array):
"""Fit a sphere to a set of 2, 3, or at most 4 points in 3D space. Note that
point configurations with 3 collinear or 4 coplanar points do not have
well-defined solutions (i.e., they lie on spheres with inf radius).
- X : M-by-3 array of point coordinates, where M<=4.
- R : radius of the sphere. R=Inf when the sphere is undefined, as
specified above.
- C : 1-by-3 vector specifying the centroid of the sphere.
C=nan(1,3) when the sphere is undefined, as specified above.
Matlab code author: <NAME> (<EMAIL>)
Date: Dec.2014"""
N = len(array)
if N > 4:
            print('Input must be an N-by-3 array of point coordinates, with N<=4')
return
# Empty set
elif N == 0:
R = np.nan
C = np.full(3, np.nan)
return R, C
# A single point
elif N == 1:
R = 0.
C = array[0]
return R, C
# Line segment
elif N == 2:
R = np.linalg.norm(array[1] - array[0]) / 2
C = np.mean(array, axis=0)
return R, C
else: # 3 or 4 points
# Remove duplicate vertices, if there are any
uniq, index = np.unique(array, axis=0, return_index=True)
array_nd = uniq[index.argsort()]
if not np.array_equal(array, array_nd):
print("found duplicate")
print(array_nd)
R, C = cls.fit_base(array_nd)
return R, C
tol = 0.01 # collinearity/co-planarity threshold (in degrees)
if N == 3:
# Check for collinearity
D12 = array[1] - array[0]
D12 = D12 / np.linalg.norm(D12)
D13 = array[2] - array[0]
D13 = D13 / np.linalg.norm(D13)
chk = np.clip(np.abs(np.dot(D12, D13)), 0., 1.)
if np.arccos(chk)/np.pi*180 < tol:
R = np.inf
C = np.full(3, np.nan)
return R, C
# Make plane formed by the points parallel with the xy-plane
n = np.cross(D13, D12)
n = n / np.linalg.norm(n)
##print("n", n)
r = np.cross(n, np.array([0, 0, 1]))
if np.linalg.norm(r) != 0:
# Euler rotation vector
r = np.arccos(n[2]) * r / np.linalg.norm(r)
##print("r", r)
Rmat = scipy.linalg.expm(np.array([
[0., -r[2], r[1]],
[r[2], 0., -r[0]],
[-r[1], r[0], 0.]
]))
##print("Rmat", Rmat)
#Xr = np.transpose(Rmat*np.transpose(array))
Xr = np.transpose(np.dot(Rmat, np.transpose(array)))
##print("Xr", Xr)
# Circle centroid
x = Xr[:, :2]
A = 2 * (x[1:] - np.full(2, x[0]))
b = np.sum(
(np.square(x[1:]) - np.square(np.full(2, x[0]))), axis=1)
C = np.transpose(np.linalg.solve(A, b))
# Circle radius
R = np.sqrt(np.sum(np.square(x[0] - C)))
# Rotate centroid back into the original frame of reference
C = np.append(C, [np.mean(Xr[:, 2])], axis=0)
C = np.transpose(np.dot(np.transpose(Rmat), C))
return R, C
# If we got to this point then we have 4 unique, though possibly co-linear
# or co-planar points.
else:
                # Check if the points are collinear
D12 = array[1] - array[0]
D12 = D12 / np.linalg.norm(D12)
D13 = array[2] - array[0]
D13 = D13 / np.linalg.norm(D13)
D14 = array[3] - array[0]
D14 = D14 / np.linalg.norm(D14)
chk1 = np.clip(np.abs(np.dot(D12, D13)), 0., 1.)
chk2 = np.clip(np.abs(np.dot(D12, D14)), 0., 1.)
if np.arccos(chk1)/np.pi*180 < tol or np.arccos(chk2)/np.pi*180 < tol:
R = np.inf
C = np.full(3, np.nan)
return R, C
                # Check if the points are coplanar
n1 = np.linalg.norm(np.cross(D12, D13))
n2 = np.linalg.norm(np.cross(D12, D14))
chk = np.clip(np.abs(np.dot(n1, n2)), 0., 1.)
if np.arccos(chk)/np.pi*180 < tol:
R = np.inf
C = np.full(3, np.nan)
return R, C
# Centroid of the sphere
A = 2 * (array[1:] - np.full(len(array)-1, array[0]))
b = np.sum(
(np.square(array[1:]) - np.square(np.full(len(array)-1, array[0]))), axis=1)
C = np.transpose(np.linalg.solve(A, b))
# Radius of the sphere
R = np.sqrt(np.sum(np.square(array[0] - C), axis=0))
return R, C
@classmethod
def B_min_sphere(cls, P, B):
eps = 1E-6
if len(B) == 4 or len(P) == 0:
R, C = cls.fit_base(B) # fit sphere to boundary points
return R, C, P
# Remove the last (i.e., end) point, p, from the list
P_new = P[:-1].copy()
p = P[-1].copy()
# Check if p is on or inside the bounding sphere. If not, it must be
# part of the new boundary.
R, C, P_new = cls.B_min_sphere(P_new, B)
if np.isnan(R) or np.isinf(R) or R < eps:
chk = True
else:
chk = np.linalg.norm(p - C) > (R + eps)
if chk:
if len(B) == 0:
B = np.array([p])
else:
B = np.array(np.insert(B, 0, p, axis=0))
R, C, _ = cls.B_min_sphere(P_new, B)
P = np.insert(P_new.copy(), 0, p, axis=0)
return R, C, P
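# Illustrative sketch (added, not part of the original module): fitting the
# minimum bounding sphere of a random point cloud; shapes and values are
# arbitrary example inputs.
def _circumsphere_example():
    points = np.random.rand(200, 3)      # 200 random points in the unit cube
    R, C, Xb = Circumsphere.fit(points)  # radius, centre and support points
    # every point should lie inside (or on) the fitted sphere, up to tolerance
    all_inside = np.all(np.linalg.norm(points - C, axis=1) <= R + 1e-6)
    return R, C, Xb, all_inside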
def timer(func):
"""Decorator that reports the function execution time.
"""
@wraps(func)
def wrapper(*args, **kwargs):
start = time.time()
res = func(*args, **kwargs)
end = time.time()
timeCost = end - start
hour, timeCost = divmod(timeCost, 3600)
minute, second = divmod(timeCost, 60)
hour, minute, second = int(hour), int(minute), round(second, 1)
print(
f"Function `{func.__name__}` runs for {hour}h {minute}min {second}s")
return res
return wrapper
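# Illustrative sketch (added, not part of the original module): any callable
# can be wrapped with @timer to report its wall-clock run time; the workload
# below is a toy example.
@timer
def _timer_example(n=100000):
    """Toy workload used only to demonstrate the @timer decorator."""
    return sum(i * i for i in range(n))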
class Entropy:
def __init__(self) -> None:
pass
@staticmethod
def JSDivergence(p, q, tolerance=0.98, base=2):
assert tolerance < np.sum(p) <= 1 and tolerance < np.sum(q) <= 1
return jensenshannon(p, q, base=base)
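# Illustrative sketch (added, not part of the original module): JS divergence
# of two normalised histograms; identical distributions give 0 (up to floating
# point). The probability vectors are made-up example values.
def _js_divergence_example():
    p = np.array([0.25, 0.25, 0.25, 0.25])
    q = np.array([0.40, 0.30, 0.20, 0.10])
    return Entropy.JSDivergence(p, q), Entropy.JSDivergence(p, p)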
if __name__ == '__main__':
pass
| 1.84375 | 2 |
script/compile_fit_result.py | Wangyiquan95/NA_EPI | 1 | 12790191 | <filename>script/compile_fit_result.py<gh_stars>1-10
#!/usr/bin/python
import os
import sys
import glob
import numpy as np
from Bio import SeqIO
from collections import defaultdict
from Bio.SeqUtils.ProtParam import ProteinAnalysis as PA
def Mut2ID(Mut, WTseq, residues):
ID = ''
for residue, aa in zip(residues, WTseq):
if str(residue) in Mut: ID += Mut.rsplit(str(residue))[1][0]
else: ID += aa
return ID
def check_mut_in_lib(mut, lib_variants):
if mut == 'WT': return 'yes'
for m in mut.rsplit('-'):
pos = m[1:-1]
aa = m[-1]
if pos not in lib_variants.keys():
return 'no'
elif aa not in lib_variants[pos]:
return 'no'
return 'yes'
def read_lib_variants(filename):
lib_variants = {}
infile = open(filename, 'r')
for line in infile.readlines():
if 'pos' in line: continue
pos, muts = line.rstrip().rsplit("\t")
lib_variants[pos] = muts.rsplit(',')
return lib_variants
def read_WT_seqs(filename):
WT_seqs = {}
infile = open(filename, 'r')
for line in infile.readlines():
if 'strain' in line: continue
strain, seq = line.rstrip().rsplit("\t")
WT_seqs[strain] = seq
return WT_seqs
def read_fasta(filename):
refseq_dict = {}
records = SeqIO.parse(filename,"fasta")
for record in records:
ID = str(record.id)
seq = str(record.seq)
refseq_dict[ID] = seq
return refseq_dict
def read_fitness_data(filename, fit_dict, WTseq, lib_variants, strain):
infile = open(filename, 'r')
for line in infile.readlines():
if 'Mut' in line: continue
mut, sample, input_count, rep1_count, rep2_count, rep1_fit, rep2_fit, fit = line.rstrip().rsplit("\t")
if check_mut_in_lib(mut, lib_variants) == 'no': continue
if int(input_count) < 10: continue
ID = WTseq if mut == 'WT' else Mut2ID(mut, WTseq, sorted(lib_variants.keys(), key= lambda x:int(x)))
fit_dict[strain][ID] = {'input_count': input_count, 'fit':fit, 'fit_R1':rep1_fit, 'fit_R2':rep2_fit}
return fit_dict
def mutate_full_protein(WT_seq, mut, positions):
mut_seq = WT_seq
for pos, mutaa in zip(positions, mut):
mut_seq = mut_seq[0:pos-1]+mutaa+mut_seq[pos::]
return mut_seq
def calculate_charge(mut):
charge = 0
charge -= mut.count('D')
charge -= mut.count('E')
charge += mut.count('K')
charge += mut.count('R')
return charge
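# Worked example (added for clarity): calculate_charge("DKER") == 0 because the
# negative residues D and E cancel the positive K and R, while
# calculate_charge("KRD") == +1.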
def write_output_file(outfile, fit_dict, N2_seqs, lib_variants):
print ("writing: %s" % outfile)
outfile = open(outfile, 'w')
mut_list = fit_dict['HK68'].keys()
header = "\t".join(['ID', 'strain', 'pI', 'charge', 'rep1_fit', 'rep2_fit', 'fit'])
outfile.write(header+"\n")
positions = (sorted(map(int,list(lib_variants.keys()))))
for strain in ['HK68','Bk79','Bei89','Mos99','Vic11','HK19']:
for mut in mut_list:
WT_seq = N2_seqs[strain]
full_seq = mutate_full_protein(WT_seq, mut, positions)
#pI = str(np.mean([PA('A'+aa+'A').isoelectric_point() for aa in mut]))
#pI = str(PA('A'+mut+'A').isoelectric_point())
#pI = str(PA(mut).isoelectric_point())
pI = str(PA(full_seq).isoelectric_point())
chg = str(calculate_charge(mut))
fit = fit_dict[strain][mut]['fit']
R1fit = fit_dict[strain][mut]['fit_R1']
R2fit = fit_dict[strain][mut]['fit_R2']
outfile.write("\t".join([mut, strain, pI, chg, R1fit, R2fit, fit])+"\n")
outfile.close()
def main():
filenames = glob.glob('result/NA_Epi_*.tsv')
outfile = 'result/NA_compile_results.tsv'
lib_variants = read_lib_variants('data/lib_variants.tsv')
N2_seqs = read_fasta('Fasta/N2.fa')
WT_seqs = read_WT_seqs('data/WT_seq.tsv')
fit_dict = defaultdict(dict)
for filename in filenames:
strain = filename.rsplit('_')[-1].rsplit('.')[0]
WTseq = WT_seqs[strain]
fit_dict = read_fitness_data(filename, fit_dict, WTseq, lib_variants, strain)
write_output_file(outfile, fit_dict, N2_seqs, lib_variants)
if __name__ == "__main__":
main()
| 2.359375 | 2 |
test/tests/api/redfish_1_0/schema_tests.py | smiller171/RackHD | 0 | 12790192 | <filename>test/tests/api/redfish_1_0/schema_tests.py
from config.redfish1_0_config import *
from modules.logger import Log
from on_http_redfish_1_0 import RedfishvApi as redfish
from on_http_redfish_1_0 import rest
from datetime import datetime
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_false
from proboscis.asserts import assert_raises
from proboscis.asserts import assert_not_equal
from proboscis.asserts import assert_true
from proboscis.asserts import fail
from proboscis import SkipTest
from proboscis import test
from json import loads,dumps
LOG = Log(__name__)
@test(groups=['redfish.schema.tests'], depends_on_groups=['obm.tests'])
class SchemaTests(object):
def __init__(self):
self.__client = config.api_client
self.__schemaList = None
self.__membersList = None
self.__locationUri = []
def __get_data(self):
return loads(self.__client.last_response.data)
@test(groups=['redfish.list_schemas'])
def test_list_schemas(self):
""" Testing GET /Schemas """
redfish().list_schemas()
schemas = self.__get_data()
LOG.debug(schemas,json=True)
assert_not_equal(0, len(schemas), message='Schema list was empty!')
self.__schemaList = schemas
@test(groups=['redfish.get_schema'], depends_on_groups=['redfish.list_schemas'])
def test_get_schema(self):
""" Testing GET /Schemas/{identifier} """
self.__membersList = self.__schemaList.get('Members')
assert_not_equal(None, self.__membersList)
for member in self.__membersList:
dataId = member.get('@odata.id')
assert_not_equal(None,dataId)
dataId = dataId.split('/redfish/v1/Schemas/')[1]
redfish().get_schema(dataId)
schema_ref = self.__get_data()
LOG.debug(schema_ref,json=True)
id = schema_ref.get('Id')
assert_equal(dataId, id, message='unexpected id {0}, expected {1}'.format(id,dataId))
assert_equal(type(schema_ref.get('Location')), list, message='expected list not found')
location = schema_ref.get('Location')[0]
assert_equal(type(location.get('Uri')), unicode, message='expected uri string not found')
self.__locationUri.append(location.get('Uri'))
@test(groups=['redfish.get_schema_invalid'], depends_on_groups=['redfish.list_schemas'])
def test_get_schema_invalid(self):
""" Testing GET /Schemas/{identifier} 404s properly """
self.__membersList = self.__schemaList.get('Members')
assert_not_equal(None, self.__membersList)
for member in self.__membersList:
dataId = member.get('@odata.id')
assert_not_equal(None,dataId)
dataId = dataId.split('/redfish/v1/Schemas/')[1]
try:
redfish().get_schema(dataId + '-invalid')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
break
@test(groups=['redfish.get_schema_content'], depends_on_groups=['redfish.get_schema'])
def test_get_schema_content(self):
""" Testing GET /SchemaStore/en/{identifier} """
assert_not_equal([], self.__locationUri)
for member in self.__locationUri:
assert_not_equal(None,member)
dataId = member.split('/redfish/v1/SchemaStore/en/')[1]
redfish().get_schema_content(dataId)
schema_file_contents = self.__get_data()
@test(groups=['redfish.get_schema_content_invalid'], depends_on_groups=['redfish.get_schema'])
def test_get_schema_content_invalid(self):
""" Testing GET /Schemas/en/{identifier} 404s properly """
assert_not_equal([], self.__locationUri)
for member in self.__locationUri:
assert_not_equal(None,member)
dataId = member.split('/redfish/v1/SchemaStore/en/')[1]
try:
redfish().get_schema_content(dataId + '-invalid')
fail(message='did not raise exception')
except rest.ApiException as e:
assert_equal(404, e.status, message='unexpected response {0}, expected 404'.format(e.status))
break
| 2.171875 | 2 |
sdk/python/pulumi_okta/app/get_app.py | brinnehlops/pulumi-okta | 0 | 12790193 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'GetAppResult',
'AwaitableGetAppResult',
'get_app',
]
@pulumi.output_type
class GetAppResult:
"""
A collection of values returned by getApp.
"""
def __init__(__self__, active_only=None, description=None, id=None, label=None, label_prefix=None, name=None, status=None):
if active_only and not isinstance(active_only, bool):
raise TypeError("Expected argument 'active_only' to be a bool")
pulumi.set(__self__, "active_only", active_only)
if description and not isinstance(description, str):
raise TypeError("Expected argument 'description' to be a str")
pulumi.set(__self__, "description", description)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if label and not isinstance(label, str):
raise TypeError("Expected argument 'label' to be a str")
pulumi.set(__self__, "label", label)
if label_prefix and not isinstance(label_prefix, str):
raise TypeError("Expected argument 'label_prefix' to be a str")
pulumi.set(__self__, "label_prefix", label_prefix)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if status and not isinstance(status, str):
raise TypeError("Expected argument 'status' to be a str")
pulumi.set(__self__, "status", status)
@property
@pulumi.getter(name="activeOnly")
def active_only(self) -> Optional[bool]:
return pulumi.get(self, "active_only")
@property
@pulumi.getter
def description(self) -> str:
"""
`description` of application.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
`id` of application.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def label(self) -> Optional[str]:
"""
`label` of application.
"""
return pulumi.get(self, "label")
@property
@pulumi.getter(name="labelPrefix")
def label_prefix(self) -> Optional[str]:
return pulumi.get(self, "label_prefix")
@property
@pulumi.getter
def name(self) -> str:
"""
`name` of application.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> str:
"""
`status` of application.
"""
return pulumi.get(self, "status")
class AwaitableGetAppResult(GetAppResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAppResult(
active_only=self.active_only,
description=self.description,
id=self.id,
label=self.label,
label_prefix=self.label_prefix,
name=self.name,
status=self.status)
def get_app(active_only: Optional[bool] = None,
id: Optional[str] = None,
label: Optional[str] = None,
label_prefix: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAppResult:
"""
    Use this data source to retrieve an application from Okta.
## Example Usage
```python
import pulumi
import pulumi_okta as okta
example = okta.app.get_app(label="Example App")
```
:param bool active_only: tells the provider to query for only `ACTIVE` applications.
:param str id: `id` of application to retrieve, conflicts with `label` and `label_prefix`.
:param str label: The label of the app to retrieve, conflicts with `label_prefix` and `id`.
:param str label_prefix: Label prefix of the app to retrieve, conflicts with `label` and `id`. This will tell the provider to do a `starts with` query as opposed to an `equals` query.
"""
__args__ = dict()
__args__['activeOnly'] = active_only
__args__['id'] = id
__args__['label'] = label
__args__['labelPrefix'] = label_prefix
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('okta:app/getApp:getApp', __args__, opts=opts, typ=GetAppResult).value
return AwaitableGetAppResult(
active_only=__ret__.active_only,
description=__ret__.description,
id=__ret__.id,
label=__ret__.label,
label_prefix=__ret__.label_prefix,
name=__ret__.name,
status=__ret__.status)
| 1.851563 | 2 |
tutorials/W0D1_PythonWorkshop1/solutions/W0D1_Tutorial1_Solution_01b86875.py | amita-kapoor/course-content | 1 | 12790194 | <gh_stars>1-10
# set random number generator
np.random.seed(2020)
# initialize step_end, t_range, v and syn
step_end = int(t_max/dt)
t_range = np.linspace(0, t_max, num=step_end)
v = el * np.ones(step_end)
syn = i_mean * (1 + 0.1*(t_max/dt)**(0.5)*(2*np.random.random(step_end)-1))
# loop for step_end values of syn
for step, i in enumerate(syn):
# skip first iteration
if step==0:
continue
v[step] = v[step-1] + dt/tau * (el - v[step-1] + r*i)
with plt.xkcd():
# initialize the figure
plt.figure()
plt.title('$V_m$ with random I(t)')
plt.xlabel('time (s)')
plt.ylabel(r'$V_m$ (V)')
plt.plot(t_range, v, 'k')
plt.show() | 2.5 | 2 |
modifications/2020-03-11-ASCDB/add_dataset.py | PierMorgante/MQCAS | 4 | 12790195 | #!/usr/bin/env python3
# The shebang line above is only needed on unix-based systems.
# Written by <NAME>, <NAME>, <NAME>.
# March 2020.
#
import qcportal as ptl
from qcfractal import FractalSnowflake
import pandas as pd
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--dry-run", action="store_true")
args = parser.parse_args()
SNOWFLAKE = args.dry_run
if SNOWFLAKE:
snowflake = FractalSnowflake()
client = snowflake.client()
else:
client = ptl.FractalClient.from_file()
print(client)
# The new subset you want to add.
dataset_name = "ASCDB"
ds = ptl.collections.ReactionDataset(dataset_name, client=client)
# Add the paper
ds.data.metadata["citations"] = [
ptl.models.Citation(
bibtex="""
@article{morgante2019statistically,
title={Statistically representative databases for density functional theory via data science},
author={<NAME> and <NAME>},
journal={Physical Chemistry Chemical Physics},
volume={21},
number={35},
pages={19092--19103},
year={2019},
publisher={Royal Society of Chemistry}
}
""",
acs_citation="<NAME>. & <NAME>. Statistically representative databases for density functional theory via data science. <em>Phys. Chem. Chem. Phys., </em><b>2019</b><i>, 21</i>, 19092-19103.",
url="https://pubs.rsc.org/en/content/articlehtml/2019/cp/c9cp03211h",
doi="10.1039/C9CP03211H",
)
]
# The .csv file needed to build everything.
filename = "ASCDB.csv"
# We read the ASCDB.csv file. The encoding flag is optional,
# but necessary if the csv is generated (for example) with Microsoft Excel.
#
with open(filename, "r", encoding="utf-8-sig") as handle:
rxns = [x.split(",") for x in handle.read().splitlines()]
# Where to find the geometry files (in .xyz)
gpath = "ACCDB/Geometries"
# We put names and reactions in the following lists:
contrib_name = []
contrib_value = []
for row in rxns:
# Datapoint's name.
name = row[0]
# Datapoint's reference energy.
energy = row[1]
# Datapoint's reaction: from 2 to the end of the rxns list.
rxn = row[2:]
# This is used to handle the list.
half = len(rxn) // 2
molecules = rxn[:half]
coefs = rxn[half:]
rxn_data = []
# This loop handles the definition of a reaction, putting together molecules
# and stoichiometric coefficients.
#
for mol_name, coef in zip(molecules, coefs):
mol = ptl.Molecule.from_file(gpath + "/" + mol_name + ".xyz")
coef = float(coef)
rxn_data.append((mol, coef))
rxn = {"default": rxn_data}
# We add the reaction to the dataset.
ds.add_rxn(name, rxn)
# We store the values to add in the "Contributed value" dictionary (see below).
contrib_name.append(name)
contrib_value.append(float(energy))
# Save the new subset.
ds.save()
#
# Adding a contributed value based on the ASCDB csv file and the molecules
# handled above.
#
contrib = {
"name": "Benchmark",
"theory_level": "CCSD(T), CASPT2, Experiment (see ref)",
"values": contrib_value,
"index": contrib_name,
"theory_level_details": {"driver": "energy"},
"units": "kcal / mol",
}
ds.units = "kcal/mol"
ds.set_default_benchmark("Benchmark")
ds.add_contributed_values(contrib)
ds.save()
# Test
ds = client.get_collection("ReactionDataset", dataset_name)
print(ds.list_values())
ds._ensure_contributed_values()
print(ds.get_values(native=False))
print(ds.data.metadata['citations']) | 1.953125 | 2 |
resource_daily_scheduler/booking_req_views.py | weijia/resource-daily-scheduler | 0 | 12790196 | <filename>resource_daily_scheduler/booking_req_views.py
import json
from compat import View
import datetime
from django import forms
from django.db.models import Q
from django.http import HttpResponse
from django.views.generic.edit import CreateView, UpdateView
import pytz
from djangoautoconf.class_based_views.ajax_views import AjaxableResponseMixin
from djangoautoconf.class_based_views.create_view_factory import AjaxableFormContextUpdateMixin
from djangoautoconf.django_utils import retrieve_param
from guardian.shortcuts import assign_perm
from resource_daily_scheduler.models import BookingRequest, get_timezone_aware_datetime_from_date_str
class BookingRequestForm(forms.ModelForm):
start = forms.DateField(
required=False, widget=forms.TextInput(attrs={
'class': 'datepicker'
}))
end = forms.DateField(
required=False, widget=forms.TextInput(attrs={
'class': 'datepicker'
}))
class Meta:
model = BookingRequest
exclude = ["is_approved", "requester", "approver", "is_ongoing", "is_completed", "is_canceled"]
class BookingRequestUpdateForm(BookingRequestForm):
class Meta:
model = BookingRequest
exclude = ["requester", "approver"]
class AjaxableBookingRequestCreateView(AjaxableResponseMixin, AjaxableFormContextUpdateMixin,
CreateView):
form_class = BookingRequestForm
template_name = "form_view_base_template.html"
submit_button_text = "Create"
def get_form_kwargs(self):
"""
Used to update end date in UI, as the end date in UI is included in the reservation
:return:
"""
kwargs = super(AjaxableBookingRequestCreateView, self).get_form_kwargs()
# kwargs.update({"prefix": "update"})
# tz = pytz.timezone("Asia/Shanghai")
# kwargs["instance"].start = kwargs["instance"].start.astimezone(tz).strftime("%m/%d/%Y")
# kwargs["instance"].end = (kwargs["instance"].end-datetime.timedelta(
# days=1)).astimezone(tz).strftime("%m/%d/%Y")
data = retrieve_param(self.request)
if ("start" in data) and ("resourceId" in data):
kwargs["initial"] = {"start": data["start"], "resource": int(data["resourceId"])}
return kwargs
def form_valid(self, form):
candidate = form.save(commit=False)
candidate.requester = self.request.user # use your own profile here
candidate.save()
response = super(AjaxableBookingRequestCreateView, self).form_valid(form)
return response
class RequestApprovalMixin(object):
def is_request_can_be_approved(self, approval_request):
conflicts = self.model.objects.filter(
start__lt=approval_request.end,
end__gte=approval_request.start,
resource=approval_request.resource,
)
conflicts_filter = conflicts.filter(~Q(id=approval_request.pk) & (Q(is_approved=True) | Q(is_ongoing=True)))
# if any(conflicts_filter):
# return False
for i in conflicts_filter:
return False
return True
def is_valid_approval(self, approval_request):
return (approval_request.approver is None) and approval_request.is_approved and \
self.is_request_can_be_approved(approval_request)
class AjaxableBookingRequestUpdateView(AjaxableResponseMixin, AjaxableFormContextUpdateMixin, RequestApprovalMixin,
UpdateView):
form_class = BookingRequestUpdateForm
model = BookingRequest
ajax_form_id = "bookingReqEditForm"
template_name = "form_view_base_template.html"
submit_button_text = "Update"
success_url = "../"
def get_form_kwargs(self):
"""
Used to update end date in UI, as the end date in UI is included in the reservation
:return:
"""
kwargs = super(AjaxableBookingRequestUpdateView, self).get_form_kwargs()
kwargs.update({"prefix": "update"})
tz = pytz.timezone("Asia/Shanghai")
kwargs["instance"].start = kwargs["instance"].start.astimezone(tz).strftime("%m/%d/%Y")
kwargs["instance"].end = (kwargs["instance"].end - datetime.timedelta(
days=1)).astimezone(tz).strftime("%m/%d/%Y")
return kwargs
def form_valid(self, form):
candidate = form.save(commit=False)
if self.is_valid_approval(candidate):
candidate.approver = self.request.user
candidate.save()
else:
candidate.is_approved = False
# In ModelFormMixin.form_valid, form.save() and its parent's form_valid will be called
# And in FormMixin (ModelFormMixin's parent) HttpResponseRedirect(self.get_success_url()) will be called
response = super(AjaxableBookingRequestUpdateView, self).form_valid(form)
return response
class ResourceApproverUpdater(object):
create_resource_permission = 'change_bookableresource'
def form_valid(self, form):
candidate = form.save(commit=False)
candidate.approver = self.request.user
candidate.save()
assign_perm(self.create_resource_permission, self.request.user, candidate)
# In ModelFormMixin.form_valid, form.save() and its parent's form_valid will be called
# And in FormMixin (ModelFormMixin's parent) HttpResponseRedirect(self.get_success_url()) will be called
response = super(ResourceApproverUpdater, self).form_valid(form)
return response
class ColorSchema(object):
COLOR_0_YOUR_REQUEST = "tomato"
COLOR_1_WAITING_FOR_YOUR_APPROVAL = "yellow"
COLOR_2_WAITING_FOR_APPROVAL_FROM_OTHERS = "limegreen"
COLOR_3_APPROVED_COMMA_YOU_CAN_CHANGE = "DeepPink"
COLOR_4_APPROVED_COMMA_YOU_CANNOT_CHANGE = "blue"
COLOR_5_ONGOING = "green"
# COLOR_CONFLICT = "DarkGray" # "black"
COLOR_6_COMPLETED = "aqua"
COLOR_7_CANCELED = "grey"
def get_colors(self):
colors = {}
for attr in dir(ColorSchema):
if attr != attr.upper():
continue
if attr[:6] != "COLOR_":
continue
value = getattr(ColorSchema, attr)
index = int(attr[6])
attr_name = attr[8:]
attr_name = attr_name.replace("_COMMA", ",")
attr_name = attr_name.lower()
attr_name = attr_name.replace("_", " ")
colors[attr_name.capitalize()] = (index, value)
return colors
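# Example of what get_colors() returns (added for clarity): the COLOR_* attribute
# names are parsed into display labels mapped to (legend index, colour), e.g.
#   {"Waiting for your approval": (1, "yellow"),
#    "Approved, you can change": (3, "DeepPink"), ...}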
class GetScheduleView(View, ColorSchema, RequestApprovalMixin):
model = BookingRequest
resource_approval_permission = "change_bookableresource"
def get(self, request, *args, **kwargs):
data = retrieve_param(request)
tz = pytz.timezone("Asia/Shanghai")
start = get_timezone_aware_datetime_from_date_str(data["start"])
end = get_timezone_aware_datetime_from_date_str(data["end"])
start_query = Q(end__lt=start)
end_query = Q(start__gt=end)
res_query = self.model.objects.filter(~(end_query | start_query))
res = []
for event in res_query:
color = self.get_color(event)
event = {"id": "%d" % event.pk, "resourceId": "%d" % event.resource.pk, "start": str(event.start),
"end": str(event.end), "title": event.project, "color": color}
if color in [self.COLOR_1_WAITING_FOR_YOUR_APPROVAL, self.COLOR_5_ONGOING,
self.COLOR_3_APPROVED_COMMA_YOU_CAN_CHANGE, self.COLOR_0_YOUR_REQUEST]:
event["className"] = "todo"
res.append(event)
return HttpResponse(json.dumps(res), content_type="application/json")
def get_color(self, event):
color = self.COLOR_2_WAITING_FOR_APPROVAL_FROM_OTHERS
has_perm = self.has_permission_to_manage_resource(event)
if event.is_canceled:
color = self.COLOR_7_CANCELED
elif event.is_completed:
color = self.COLOR_6_COMPLETED
elif event.is_ongoing:
# tz = pytz.timezone("Asia/Shanghai")
# end_datetime = event.end
# if event.end < end_datetime.astimezone(tz):
# color = self.COLOR_5_ONGOING
color = self.COLOR_5_ONGOING
elif event.is_approved:
if has_perm:
color = self.COLOR_3_APPROVED_COMMA_YOU_CAN_CHANGE
else:
color = self.COLOR_4_APPROVED_COMMA_YOU_CANNOT_CHANGE
elif has_perm:
if self.is_request_can_be_approved(event):
color = self.COLOR_1_WAITING_FOR_YOUR_APPROVAL
# else:
# color = self.COLOR_CONFLICT
elif event.requester == self.request.user:
color = self.COLOR_0_YOUR_REQUEST
return color
def has_permission_to_manage_resource(self, event):
return self.request.user.has_perm(self.resource_approval_permission, event.resource)
class ApproveRequestView(View, RequestApprovalMixin):
model = BookingRequest
def get(self, request, *args, **kwargs):
data = retrieve_param(request)
req_id = data["requestId"]
r = self.model.objects.get(pk=int(req_id))
if r.is_approved:
r.is_approved = False
result = "false"
else:
r.is_approved = True
result = "true"
r.save()
return HttpResponse(json.dumps({"result": result}), content_type="application/json")
| 2.015625 | 2 |
transcrypt/development/automated_tests/transcrypt/classes/__init__.py | JMCanning78/Transcrypt | 1 | 12790197 | def run (autoTester):
autoTester.check ('<br>General<br>')
class A:
p = 123
def __init__ (self, x):
self.x = x
autoTester.check (self.p)
def show (self, label):
autoTester.check ('A.show', label, self.x)
def show2 (self, label):
autoTester.check ('A.show2', label, self.x)
class B:
p, q = 456, 789
def __init__ (self, y):
autoTester.check ('In B constructor')
self.y = y
autoTester.check (self.p)
def show (self, label):
autoTester.check ('B.show', label, self.y)
class C (A, B):
def __init__ (self, x, y):
autoTester.check ('In C constructor')
A.__init__ (self, x)
B.__init__ (self, y)
def show (self, label):
A.show (self, label)
B.show (self, label)
autoTester.check ('C.show', label, self.x, self.y)
a = A (1001)
a.show ('america')
autoTester.check (A.p)
autoTester.check (a.p)
b = B (2002)
b.show ('russia')
autoTester.check (B.p)
autoTester.check (b.p)
autoTester.check (b.q)
autoTester.check (A.p)
autoTester.check (a.p)
c = C (3003, 4004)
c.show ('netherlands')
autoTester.check (C.p)
autoTester.check (c.p)
autoTester.check (c.q)
c.show2 ('amsterdam')
A.show2 (c, 'rotterdam')
show3 = c.show
show3 ('copy')
autoTester.check (hasattr (a, 'x'))
autoTester.check (hasattr (a, 'y'))
autoTester.check (hasattr (a, 'p'))
autoTester.check (hasattr (a, 'q'))
autoTester.check ('<br><br>Augmented isinstance and issubclass<br>')
# Augmented meaning: compatible with native JavaScript types
simpleTypes = (dict, list, A, B, C, bool, str, float, int, object)
tupleTypes = ((dict, list), (bool, int), (bool, A), (C, B), (B, object))
for i, types in enumerate ((simpleTypes, tupleTypes)):
for j, aType in enumerate (types):
for k, anObject in enumerate (({'a': 1}, [], a, C, c, C, b, True, 'a', 1, 1.2)):
autoTester.check (i, j, k, isinstance (anObject, aType))
if types == simpleTypes:
autoTester.check (i, j, k, isinstance (anObject, simpleTypes))
for i, types in enumerate ((simpleTypes, tupleTypes)):
for j, aType in enumerate (types):
for k, aClass in enumerate ((dict, list, A, C, B, bool, str, int, float)):
autoTester.check (i + 2, j, k, issubclass (aClass, aType))
if types == simpleTypes:
autoTester.check (i + 2, j, k, issubclass (aClass, simpleTypes))
autoTester.check ('<br><br>Method resolution order<br>')
def mro (aClass, result = None):
''' Recursively assemble method resolution order from all base classes'''
last = 0
if result is None:
result = [aClass]
last = 1
for aBase in aClass.__bases__:
if not aBase in result and aBase != object:
result.append (aBase)
mro (aBase, result)
if last and object in aClass.__bases__:
            result.append (object)
return result
autoTester.check ([aClass.__name__ for aClass in mro (C)])
| 2.796875 | 3 |
polybius/utils/font.py | TStalnaker44/flappy_bird_game | 0 | 12790198 |
import pygame
class Font():
def __init__(self, name, size):
self._name = name
self._size = size
self._font = pygame.font.SysFont(name, size)
#TODO Add support for font files
def getFontName(self):
return self._name
def getFontSize(self):
return self._size
def render(self, text, antialias, color, background=None):
return self._font.render(text, antialias, color, background)
def size(self, text):
return self._font.size(text)
def get_height(self):
return self._font.get_height()
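# Illustrative sketch (added, not part of the original module): typical use of
# the Font wrapper; the font name, size and colour are arbitrary example values.
def _font_usage_example():
    pygame.font.init()  # SysFont requires pygame's font module to be initialised
    font = Font("arial", 24)
    surface = font.render("Hello", True, (255, 255, 255))
    return font.size("Hello"), surface.get_height()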
| 3.03125 | 3 |
test/mocks/mock_direct_receiver.py | markgreene74/mite | 17 | 12790199 | from mite.utils import pack_msg
class DirectReceiverMock:
def __init__(self):
self._listeners = []
self._raw_listeners = []
def __call__(self, msg):
return
def add_listener(self, listener):
self._listeners.append(listener)
def add_raw_listener(self, raw_listener):
self._raw_listeners.append(raw_listener)
def recieve(self, msg):
for listener in self._listeners:
listener(msg)
packed_msg = pack_msg(msg)
for raw_listener in self._raw_listeners:
raw_listener(packed_msg)
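# Illustrative sketch (added, not part of the original module): plain listeners
# receive the message dict, raw listeners receive the packed bytes. The message
# contents are made-up example values.
def _direct_receiver_mock_example():
    received, raw_received = [], []
    receiver = DirectReceiverMock()
    receiver.add_listener(received.append)
    receiver.add_raw_listener(raw_received.append)
    receiver.recieve({'type': 'txn', 'value': 1})  # note: the mock spells the method 'recieve'
    return received, raw_received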
| 2.3125 | 2 |
statusbar.py | rmayherr/python | 0 | 12790200 | <gh_stars>0
#!/usr/bin/env python3
import time, subprocess,os
class Progressbar():
def __init__(self):
self.line = ""
self.arrow = ""
self.initbar()
def initbar(self):
print(("{0:143}{1}").format("[","]"))
def terminal_length(self):
cols,rows = os.get_terminal_size()
return cols
def update(self):
self.line += "="
self.arrow = ">"
print("{0}{1}{2:{l}}{3}".format("[", self.line, self.arrow, "]",l = 140 - len(self.line)))
bar = Progressbar()
for i in range(1,140):
subprocess.call(["clear"])
# print(i)
bar.update()
time.sleep(0.02)
| 2.8125 | 3 |
inheritance/Multilevel.py | Anilkumar95/python-75-hackathon | 0 | 12790201 | class My(object):
y = 5988
class Mt(My):
z = 598
class Mat(Mt):
x = 54
p1=Mat()
print(p1.x)
print(p1.y)
print(p1.z)
| 3.265625 | 3 |
airmozilla/main/tests/test_views.py | peterbe/airmozilla | 0 | 12790202 | <filename>airmozilla/main/tests/test_views.py<gh_stars>0
import datetime
import uuid
from django.contrib.auth.models import Group
from django.test import TestCase
from django.utils.timezone import utc
from funfactory.urlresolvers import reverse
from nose.tools import eq_, ok_
from airmozilla.main.models import (
Approval,
Event,
EventOldSlug,
Participant,
Tag
)
class TestPages(TestCase):
fixtures = ['airmozilla/manage/tests/main_testdata.json']
def setUp(self):
# Make the fixture event live as of the test.
event = Event.objects.get(title='Test event')
event.start_time = datetime.datetime.utcnow().replace(tzinfo=utc)
event.archive_time = None
event.save()
def test_home(self):
"""Index page loads and paginates correctly."""
response = self.client.get(reverse('main:home'))
eq_(response.status_code, 200)
response_empty_page = self.client.get(reverse('main:home',
kwargs={'page': 10000}))
eq_(response_empty_page.status_code, 200)
def test_event(self):
"""Event view page loads correctly if the event is public and
scheduled and approved; request a login otherwise."""
event = Event.objects.get(title='Test event')
group = Group.objects.get()
approval = Approval(event=event, group=group)
approval.save()
event_page = reverse('main:event', kwargs={'slug': event.slug})
response_fail_approval = self.client.get(event_page)
eq_(response_fail_approval.status_code, 200)
ok_('not approved' in response_fail_approval.content)
approval.approved = True
approval.processed = True
approval.save()
response_ok = self.client.get(event_page)
eq_(response_ok.status_code, 200)
event.public = False
event.save()
response_fail = self.client.get(event_page)
self.assertRedirects(response_fail, reverse('main:login'))
event.public = True
event.status = Event.STATUS_INITIATED
event.save()
response_fail = self.client.get(event_page)
eq_(response_fail.status_code, 200)
ok_('not scheduled' in response_fail.content)
def test_old_slug(self):
"""An old slug will redirect properly to the current event page."""
old_event_slug = EventOldSlug.objects.get(slug='test-old-slug')
response = self.client.get(
reverse('main:event', kwargs={'slug': old_event_slug.slug})
)
self.assertRedirects(
response,
reverse('main:event', kwargs={'slug': old_event_slug.event.slug})
)
def test_participant(self):
"""Participant pages always respond successfully."""
participant = Participant.objects.get(name='<NAME>')
participant_page = reverse('main:participant',
kwargs={'slug': participant.slug})
response_ok = self.client.get(participant_page)
eq_(response_ok.status_code, 200)
participant.cleared = Participant.CLEARED_NO
participant.save()
response_ok = self.client.get(participant_page)
eq_(response_ok.status_code, 200)
def test_participant_clear(self):
"""Visiting a participant clear token page changes the Participant
status as expected."""
participant = Participant.objects.get(name='<NAME>')
participant.cleared = Participant.CLEARED_NO
token = str(uuid.uuid4())
participant.clear_token = token
participant.save()
url = reverse('main:participant_clear', kwargs={'clear_token': token})
response_ok = self.client.get(url)
eq_(response_ok.status_code, 200)
response_changed = self.client.post(url)
eq_(response_changed.status_code, 200)
participant = Participant.objects.get(name='<NAME>')
eq_(participant.clear_token, '')
eq_(participant.cleared, Participant.CLEARED_YES)
def test_calendars(self):
"""Calendars respond successfully."""
response_public = self.client.get(reverse('main:calendar'))
eq_(response_public.status_code, 200)
ok_('LOCATION:Mountain View' in response_public.content)
response_private = self.client.get(reverse('main:private_calendar'))
eq_(response_private.status_code, 200)
# Cache tests
event_change = Event.objects.get(id=22)
event_change.title = 'Hello cache clear!'
event_change.save()
response_changed = self.client.get(reverse('main:calendar'))
ok_(response_changed.content != response_public.content)
ok_('cache clear' in response_changed.content)
def test_calendars_description(self):
event = Event.objects.get(title='Test event')
event.description = """
Check out the <a href="http://example.com">Example</a> page
and <strong>THIS PAGE</strong> here.
Lorem Ipsum is simply dummy text of the printing and typesetting
industry. Lorem Ipsum has been the industry's standard dummy text
ever since the 1500s, when an unknown printer took a galley of type
and scrambled it to make a type specimen book.
If the text is getting really long it will be truncated.
""".strip()
event.save()
response_public = self.client.get(reverse('main:calendar'))
eq_(response_public.status_code, 200)
ok_('Check out the Example page' in response_public.content)
ok_('and THIS PAGE here' in response_public.content)
ok_('will be truncated' not in response_public.content)
event.short_description = 'One-liner'
event.save()
response_public = self.client.get(reverse('main:calendar'))
eq_(response_public.status_code, 200)
ok_('Check out the' not in response_public.content)
ok_('One-liner' in response_public.content)
def test_filter_by_tags(self):
url = reverse('main:home')
delay = datetime.timedelta(days=1)
event1 = Event.objects.get(title='Test event')
event1.status = Event.STATUS_SCHEDULED
event1.start_time -= delay
event1.archive_time = event1.start_time
event1.save()
eq_(Event.objects.approved().count(), 1)
eq_(Event.objects.archived().count(), 1)
event2 = Event.objects.create(
title='Second test event',
description='Anything',
start_time=event1.start_time,
archive_time=event1.archive_time,
public=True,
status=event1.status,
placeholder_img=event1.placeholder_img,
)
eq_(Event.objects.approved().count(), 2)
eq_(Event.objects.archived().count(), 2)
tag1 = Tag.objects.create(name='tag1')
tag2 = Tag.objects.create(name='tag2')
tag3 = Tag.objects.create(name='tag3')
event1.tags.add(tag1)
event1.tags.add(tag2)
event2.tags.add(tag2)
event2.tags.add(tag3)
# check that both events appear
response = self.client.get(url)
ok_('Test event' in response.content)
ok_('Second test event' in response.content)
response = self.client.get(url, {'tag': 'tag2'})
ok_('Test event' in response.content)
ok_('Second test event' in response.content)
response = self.client.get(url, {'tag': 'tag1'})
ok_('Test event' in response.content)
ok_('Second test event' not in response.content)
response = self.client.get(url, {'tag': 'tag3'})
ok_('Test event' not in response.content)
ok_('Second test event' in response.content)
response = self.client.get(url, {'tag': ['tag1', 'tag3']})
ok_('Test event' in response.content)
ok_('Second test event' in response.content)
response = self.client.get(url, {'tag': 'Bogus'})
eq_(response.status_code, 301)
response = self.client.get(url, {'tag': ['tag1', 'Bogus']})
eq_(response.status_code, 301)
# the good tag stays
ok_('?tag=tag1' in response['Location'])
| 2.0625 | 2 |
neurolab/data/caltech101.py | udday2014/HebbianLearning | 6 | 12790203 | <gh_stars>1-10
from torchvision.datasets.utils import extract_archive
from torchvision.datasets import ImageFolder
from torch.utils.data import Subset, WeightedRandomSampler
import random
import os
from ..utils import download_large_file_from_drive
from .data import DataManager
from .. import params as P
# Caltech101 Dataset
class Caltech101(ImageFolder):
def __init__(self, root, download=False, **kwargs):
self.root = root
self.download = download
self.CALTECH101_DRIVE_ID = '137RyRjvTBkBiIfeYBNZBtViDHQ6_Ewsp'
self.CALTECH101_ZIP_FILE = '101_ObjectCategories.tar.gz'
self.CALTECH101_ZIP_PATH = os.path.join(self.root, self.CALTECH101_ZIP_FILE)
self.CALTECH101_FOLDER = os.path.join(self.root, '101_ObjectCategories')
if not os.path.exists(self.CALTECH101_FOLDER):
# Extract Caltech101 zip file
if not os.path.exists(self.CALTECH101_ZIP_PATH):
if not self.download: raise(FileNotFoundError("Dataset files not found"))
print("Downloading {} file...".format(self.CALTECH101_ZIP_FILE))
download_large_file_from_drive(self.CALTECH101_DRIVE_ID, self.CALTECH101_ZIP_PATH)
print("Done!")
print("Extracting {} file...".format(self.CALTECH101_ZIP_FILE))
extract_archive(self.CALTECH101_ZIP_PATH, self.root)
print("Done!")
super(Caltech101, self).__init__(self.CALTECH101_FOLDER, **kwargs)
self.samples_per_class = {k: self.targets.count(k) for k in self.class_to_idx.values()}
# Data manager class for Caltech101
class Caltech101DataManager(DataManager):
def __init__(self, config):
self.CALTECH101_SIZE = 9144
self.indices = list(range(self.CALTECH101_SIZE))
super().__init__(config)
def get_dataset_metadata(self):
return {
P.KEY_DATASET: 'caltech101',
P.KEY_DS_TRN_SET_SIZE: 8144,
P.KEY_DS_VAL_SET_SIZE: 1000,
P.KEY_DS_TST_SET_SIZE: 1000,
P.KEY_DS_INPUT_SHAPE: (3, 224, 224),
P.KEY_DS_NUM_CLASSES: 102
}
def prepare_rnd_indices(self):
random.shuffle(self.indices)
def get_sampler(self, split):
# split is a Subset of Caltech101. split.dataset is Caltech101. From this we can access the number of samples
# per class, to be used for weighted random sampling, together with the length of the split, to determine how
# many samples to fetch.
return WeightedRandomSampler([w for w in split.dataset.samples_per_class.values()], len(split))
def get_train_split(self, transform=None, num_samples=None):
if num_samples is None: num_samples = self.TOT_TRN_SAMPLES
return Subset(Caltech101(root=self.DATASET_FOLDER, download=True, transform=transform), self.indices[:num_samples])
def get_val_split(self, transform=None, num_samples=None):
if num_samples is None: num_samples = self.NUM_VAL_SAMPLES
if self.TRN_SET_SIZE - self.TOT_TRN_SAMPLES >= num_samples:
# If there are enough samples left in the training set, use them for validation
return Subset(Caltech101(root=self.DATASET_FOLDER, download=True, transform=transform), self.indices[self.TRN_SET_SIZE - num_samples:self.TRN_SET_SIZE])
# Validate directly on test set otherwise
return Subset(Caltech101(root=self.DATASET_FOLDER, download=True, transform=transform), self.indices[self.TRN_SET_SIZE:self.TRN_SET_SIZE + num_samples])
def get_test_split(self, transform=None, num_samples=None):
if num_samples is None: num_samples = self.NUM_TST_SAMPLES
return Subset(Caltech101(root=self.DATASET_FOLDER, download=True, transform=transform), self.indices[self.TRN_SET_SIZE:self.TRN_SET_SIZE + num_samples])
| 2.359375 | 2 |
users/schema.py | theNegativeEntropy/digitalmenu | 0 | 12790204 | from django.contrib.auth import authenticate, login
import graphene
import django_filters
from graphene_django import DjangoObjectType, DjangoListField
from graphene_django.filter import DjangoFilterConnectionField
from graphql_jwt.decorators import login_required
from .models import User
class UserType(DjangoObjectType):
class Meta:
model = User
fields = '__all__'
filter_fields = ['email']
interfaces = (graphene.relay.Node, )
class UserInput(graphene.InputObjectType):
email = graphene.String(required=True)
password = graphene.String(required=True)
first_name = graphene.String()
last_name = graphene.String()
class Query:
me = graphene.Field(UserType)
all_users = DjangoFilterConnectionField(UserType)
@login_required
def resolve_me(self, info):
user = info.context.user
return user
class RegisterUserMutation(graphene.Mutation):
class Arguments:
user_data = UserInput(required=True)
user = graphene.Field(UserType)
def mutate(self, info, user_data=None):
user = User.objects.create(
email=user_data.email
)
user.set_password(user_data.password)
user.save()
return RegisterUserMutation(user=user)
class CreateUserMutation(graphene.Mutation):
class Arguments:
user_data = UserInput(required=True)
user = graphene.Field(UserType)
def mutate(self, info, user_data=None):
user = User.objects.create(
email=user_data.email,
first_name=user_data.first_name,
last_name=user_data.last_name
)
user.set_password(user_data.password)
user.save()
return CreateUserMutation(user=user)
class UpdateUserMutation(graphene.Mutation):
class Arguments:
user_data = UserInput(required=True)
user = graphene.Field(UserType)
def mutate(self, info, user_data):
user = User.objects.get(email=user_data.email)
user.first_name = user_data.first_name
user.last_name = user_data.last_name
user.save()
return UpdateUserMutation(user=user)
class DeleteUserMutation(graphene.Mutation):
class Arguments:
email = graphene.String(required=True)
user = graphene.Field(UserType)
def mutate(self, info, email):
user = User.objects.get(email=email)
user.is_active = False
#shop.slug = slug+'_deleted'
user.save()
return DeleteUserMutation(user=user)
class UndeleteUserMutation(graphene.Mutation):
class Arguments:
email = graphene.String(required=True)
user = graphene.Field(UserType)
def mutate(self, info, email):
user = User.objects.get(email=email)
user.is_active = True
#shop.slug = slug.replace('_deleted', '')
user.save()
return UndeleteUserMutation(user=user)
class Mutation(graphene.ObjectType):
register_user = RegisterUserMutation.Field()
create_user = CreateUserMutation.Field()
update_user = UpdateUserMutation.Field()
delete_user = DeleteUserMutation.Field()
undelete_user = UndeleteUserMutation.Field()
| 2.265625 | 2 |
src/scheduler/cli.py | PyconUK/ConferenceScheduler-cli | 0 | 12790205 | """Procedures to define the Command Line Interface (cli)"""
from pathlib import Path
import click
from conference_scheduler.scheduler import event_schedule_difference
from conference_scheduler.converter import solution_to_schedule
from conference_scheduler.validator import (
is_valid_solution, solution_violations)
import daiquiri
import scheduler.calculate as calc
from scheduler.decorators import timed
import scheduler.define as defn
from scheduler import convert, io, logging, session
logger = daiquiri.getLogger(__name__)
def events_and_slots(resources):
slots = defn.slots(resources)
events = defn.events(resources)
unavailability = defn.unavailability(resources, slots)
clashes = defn.clashes(resources)
unsuitability = defn.unsuitability(resources, slots)
defn.add_unavailability_to_events(events, slots, unavailability)
defn.add_clashes_to_events(events, clashes)
defn.add_unsuitability_to_events(events, slots, unsuitability)
return events, slots
@click.version_option(message='%(prog)s %(version)s :: UK Python Association')
@click.group()
@click.option(
'--verbosity', '-v', default='info',
type=click.Choice(['critical', 'error', 'warning', 'info', 'debug']),
help='Logging verbosity')
def scheduler(verbosity):
pass
@scheduler.command()
@click.option(
'--verbosity', '-v', default='info',
type=click.Choice(['critical', 'error', 'warning', 'info', 'debug']),
help='Logging verbosity')
@click.option(
'--algorithm', '-a', default='pulp_cbc_cmd',
type=click.Choice(
['pulp_cbc_cmd', 'glpk', 'hill_climber', 'simulated_annealing']),
help='Solver algorithm')
@click.option(
'--objective', '-o', default=None,
type=click.Choice(['efficiency', 'equity', 'consistency']),
help='Objective Function')
@click.option('--diff/--no-diff', default=False, help='Show schedule diff')
@click.option(
'--input_dir', '-i', default=None, help='Directory for input files')
@click.option(
'--solution_dir', '-s', default=None, help='Directory for solution files')
@click.option(
'--build_dir', '-b', default=None, help='Directory for output yaml files')
@timed
def build(
verbosity, algorithm, objective, diff, input_dir, solution_dir, build_dir
):
logging.setup(verbosity)
if input_dir:
session.folders['input'] = Path(input_dir)
if solution_dir:
session.folders['solution'] = Path(solution_dir)
if build_dir:
session.folders['build'] = Path(build_dir)
resources = defn.resources()
events, slots = events_and_slots(resources)
slots_by_index = {
idx: f'{slot.starts_at} {slot.venue}'
for idx, slot in enumerate(slots)}
logger.debug(f'\nSlots List:\n{slots_by_index}')
kwargs = {}
if objective == 'consistency' or algorithm == 'simulated_annealing' or diff:
original_solution = io.import_solution(session.folders['solution'])
revised_solution = [
item for item in original_solution
if item[0] < len(events)]
original_schedule = solution_to_schedule(
revised_solution, events, slots)
diff = True
kwargs['original_schedule'] = original_schedule
solution = calc.solution(events, slots, algorithm, objective, **kwargs)
if diff:
schedule = solution_to_schedule(solution, events, slots)
event_diff = event_schedule_difference(original_schedule, schedule)
logger.debug(f'\nevent_diff:')
for item in event_diff:
logger.debug(f'{item.event.name} has moved from {item.old_slot.venue} at {item.old_slot.starts_at} to {item.new_slot.venue} at {item.new_slot.starts_at}')
if solution is not None:
allocations = defn.allocations(resources)
unbounded = defn.unbounded(resources)
defn.add_allocations(events, slots, solution, allocations, unbounded)
logger.debug(convert.schedule_to_text(solution, events, slots))
io.export_solution_and_definition(
resources, events, slots, allocations, solution,
session.folders['solution'])
@scheduler.command()
@click.option(
'--verbosity', '-v', default='info',
type=click.Choice(['critical', 'error', 'warning', 'info', 'debug']),
help='Logging verbosity')
@click.option(
'--input_dir', '-i', default=None, help='Directory for input files')
@click.option(
'--solution_dir', '-s', default=None, help='Directory for solution files')
@click.option(
'--reload/--no-reload', default=False, help='Reload YAML definition')
@timed
def validate(verbosity, input_dir, solution_dir, reload):
logging.setup(verbosity)
if solution_dir:
session.folders['solution'] = Path(solution_dir)
solution = io.import_solution(session.folders['solution'])
if reload:
resources = defn.resources()
events, slots = events_and_slots(resources)
original_solution = io.import_solution(session.folders['solution'])
solution = [
item for item in original_solution
if item[0] < len(events)]
else:
solution = io.import_solution(session.folders['solution'])
definition = io.import_schedule_definition(session.folders['solution'])
events = definition['events']
slots = definition['slots']
logger.info('Validating schedule...')
if is_valid_solution(solution, events, slots):
logger.info('Imported solution is valid')
else:
for v in solution_violations(
solution, definition['events'], definition['slots']):
logger.error(v)
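# Example invocations (illustrative, not part of the original module; the
# console-script name `scheduler` is assumed from the click group above and
# the option values are placeholders):
#
#   scheduler build --algorithm pulp_cbc_cmd --objective efficiency --diff
#   scheduler validate --solution_dir ./solution --reload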
| 2.4375 | 2 |
api/save_containers_stats_job.py | NathanReis/BaleiaAzul | 1 | 12790206 | <reponame>NathanReis/BaleiaAzul
from datetime import datetime
from src.database.connection import DBConnection
from src.helpers.docker_helper import extract_stats_container_data
from src.services import container_service
import schedule
import time
def save_containers_stats():
db_connection = DBConnection()
db = db_connection.get_database()
containers = container_service.get_all()
for container in containers:
stats = container_service.get_stats(container, True, False)
stats_data = extract_stats_container_data(container, stats)
db['container_stats'].insert_one(stats_data)
print(datetime.utcnow())
time_to_wait = 10
schedule.every(time_to_wait).seconds.do(save_containers_stats)
while True:
schedule.run_pending()
time.sleep(time_to_wait)
| 2.40625 | 2 |
src/questrade/api/streamer/JSONStreamObserver.py | stvhwrd/QuestradeAPI_PythonWrapper | 31 | 12790207 | <filename>src/questrade/api/streamer/JSONStreamObserver.py
'''JSON Stream Observer
@summary: An Observer in the Publish/Subscriber design pattern. This
observer prints the JSON object returned from the stream.
@see: http://www.questrade.com/api/documentation/streaming
@copyright: 2016
@author: <NAME>
@license: Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
from Observer import Observer
import json
class JSONStreamObserver(Observer):
def __init__(self):
self.dict_symbs = {}
def update(self, payload, isBinary):
if not isBinary and payload is not None:
s = payload.decode('utf8')
j = json.loads(s)
if 'quotes' in j:
quotes = j.get('quotes')
for q in quotes:
symbol = q.get('symbol', '_na_')
if symbol != '_na_':
self.dict_symbs[symbol] = q
def get_symbols(self):
return self.dict_symbs.keys()
def get_latest_quote(self, symbol):
return self.dict_symbs[symbol]
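if __name__ == '__main__':
    # Minimal usage sketch (illustrative, not part of the original module): feed a
    # hand-built quote payload straight into the observer; in real use the payload
    # would arrive from the Questrade streaming connection. The symbol and price
    # values below are made-up examples.
    obs = JSONStreamObserver()
    obs.update(b'{"quotes": [{"symbol": "AAPL", "lastTradePrice": 190.1}]}', False)
    print(obs.get_symbols())
    print(obs.get_latest_quote('AAPL'))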
| 2.3125 | 2 |
documents/views.py | livcarman/spekit | 0 | 12790208 | <reponame>livcarman/spekit<gh_stars>0
from rest_framework import viewsets
from rest_framework import permissions
from documents.models import Document, Folder, Topic
from documents.serializers import DocumentSerializer, FolderSerializer, TopicSerializer
from documents.filters import DocumentFilter, FolderFilter, TopicFilter
class FolderViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows CRUD operations on Folder objects
"""
queryset = Folder.objects.all().order_by('-created_at')
serializer_class = FolderSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
filterset_class = FolderFilter
class DocumentViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows CRUD operations on Document objects
"""
queryset = Document.objects.all().order_by('-created_at')
serializer_class = DocumentSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
filterset_class = DocumentFilter
class TopicViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows CRUD operations on Topic objects
"""
queryset = Topic.objects.all().order_by('-created_at')
serializer_class = TopicSerializer
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
filterset_class = TopicFilter
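# Typical wiring in a project urls.py (illustrative sketch, not part of this module;
# the route prefixes are assumptions):
#
#   from django.urls import include, path
#   from rest_framework import routers
#
#   router = routers.DefaultRouter()
#   router.register(r'folders', FolderViewSet)
#   router.register(r'documents', DocumentViewSet)
#   router.register(r'topics', TopicViewSet)
#
#   urlpatterns = [path('', include(router.urls))]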
| 2.3125 | 2 |
benchmarks/ligra-partition/graphtools/cilkpub_v105/perf_scripts/perf_summarize_file.py | jordanjohnston/manseglib | 4 | 12790209 | # Copyright (C) 2013 Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
##
# \file perf_summarize_file.py
#
# \brief [<i>Performance test script</i>]: Reads in a file of performance
# data, and merges all data points with the same key together.
#
# \author <NAME>
# \version 1.02
import os
import sys
from PerfDataPoint import *;
from PerfDataSet import *;
import PerfDataParser;
##
# Script to read in input file, and merge all data points with the
# same key together.
#
# Usage: <this script> <infile> [outfile] [merge_type]
# <infile> is a required argument."
# <outfile> is \"tmp_summary_output.dat\" by default"
# [merge_type] is \"avg\" by default (can also be \"min\")"
#
def main():
infile = None
outfile = "tmp_summary_output.dat"
merge_type = "avg"
if (len(sys.argv) > 1):
infile = sys.argv[1]
else:
print "Usage: <this script> <infile> <outfile> <merge_type>"
print " <infile> is a required argument."
print " <outfile> is \"tmp_summary_output.dat\" by default"
print " <merge_type> is \"avg\" by default (can be \"min\")"
exit(0)
if (len(sys.argv) > 2):
outfile = sys.argv[2]
if (len(sys.argv) > 3):
merge_type = sys.argv[3]
if (merge_type not in { "avg", "min" }):
merge_type = "avg"
print "# Input file: %s" % infile
print "# Output file: %s" % outfile
print "# Merge type: %s" % merge_type
# Compute a merged input/output file names.
    (infile_head, infile_tail) = os.path.split(infile)
    merged_in_fname = os.path.join(infile_head, merge_type + "_" + infile_tail)
    (outfile_head, outfile_tail) = os.path.split(outfile)
    merged_out_fname = os.path.join(outfile_head, merge_type + "_" + outfile_tail)
print "Generating output file %s" % merged_out_fname
if (merge_type == "min"):
merge_val = PerfDataPoint.MERGE_MIN
elif (merge_type == "max"):
merge_val = PerfDataPoint.MERGE_MAX
else:
merge_val = PerfDataPoint.MERGE_SUM
input_data = PerfDataParser.parse_data_file(infile,
default_desc=merged_in_fname,
merge_type=merge_val,
compute_average=True)
input_data.output_to_file(merged_out_fname)
# Run if we aren't importing.
# Right now, we can't really import this file...
if __name__ == "__main__":
main()
| 1.164063 | 1 |
pybiotools4p/utils.py | btrspg/pybiotools4p | 0 | 12790210 | # AUTOGENERATED! DO NOT EDIT! File to edit: utils.ipynb (unless otherwise specified).
__all__ = ['load_config', 'config', 'load_yaml', 'default_yaml', 'dict_to_paras']
# Cell
import pkg_resources
import configparser
import yaml
# Cell
def load_config(*configs):
config = configparser.ConfigParser()
config.read(configs)
return config
def config(new_config=None):
default_config=pkg_resources.resource_filename('pybiotools4p','default.ini')
if None is new_config:
print('loading default_config['+default_config+']')
return load_config(default_config)
else:
print('loading default_config and '+ new_config)
return load_config(pkg_resources.resource_filename('pybiotools4p','default.ini'),new_config)
def load_yaml(*yamls):
my_dict={}
for y in yamls:
with open(y,'r') as yf:
            my_dict.update(yaml.safe_load(yf))
return my_dict
def default_yaml(new_yaml=None):
default_config=pkg_resources.resource_filename('pybiotools4p','default.yaml')
if None is new_yaml:
print('loading default_config['+default_config+']')
return load_yaml(default_config)
else:
print('loading default_config and '+ new_yaml)
return load_yaml(pkg_resources.resource_filename('pybiotools4p','default.yaml'),new_yaml)
def dict_to_paras(mydict):
'''
    Render a dict of extension parameters as a command-line argument string.
'''
return ' '.join([f'{i} {mydict[i]}' for i in mydict.keys()])
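if __name__ == '__main__':
    # Illustrative usage (not part of the original module); the option name is a
    # made-up example, any mapping of flag -> value is rendered the same way.
    print(dict_to_paras({'--threads': 4}))  # -> "--threads 4"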
| 2.3125 | 2 |
oembed/tests/tests/base.py | ericholscher/djangoembed | 1 | 12790211 | <reponame>ericholscher/djangoembed<filename>oembed/tests/tests/base.py
import simplejson
from django.conf import settings
from django.core.urlresolvers import reverse, NoReverseMatch
from django.test import TestCase
import oembed
from oembed.providers import BaseProvider
from oembed.resources import OEmbedResource
from oembed.tests.settings import MEDIA_ROOT, MEDIA_URL
class BaseOEmbedTestCase(TestCase):
fixtures = ['oembed_testdata.json']
urls = 'oembed.tests.urls'
# third party providers (StoredProvider)
flickr_url = 'http://www.flickr.com/photos/neilkrug/2554073003/'
youtube_url = 'http://www.youtube.com/watch?v=nda_OSWeyn8'
# django providers (DjangoProvider and DjangoDatebasedProvider)
category_url = 'http://example.com/testapp/category/1/'
blog_url = 'http://example.com/testapp/blog/2010/may/01/entry-1/'
rich_url = 'http://example.com/testapp/rich/rich-one/'
category_embed = '<img src="http://example.com/media/images/breugel_babel2_800x661.jpg" alt="Category 1" ></img>'
def setUp(self):
"Set up test environment"
# load up all the providers and register the test-only provider
oembed.autodiscover()
# refresh the attribute-cached time the db providers were last updated
oembed.site._db_updated = None
self.media_root, self.media_url = settings.MEDIA_ROOT, settings.MEDIA_URL
settings.MEDIA_ROOT = MEDIA_ROOT
settings.MEDIA_URL = MEDIA_URL
def tearDown(self):
settings.MEDIA_ROOT = self.media_root
settings.MEDIA_URL = self.media_url
def _sort_by_pk(self, list_or_qs):
# decorate, sort, undecorate using the pk of the items
# in the list or queryset
annotated = [(item.pk, item) for item in list_or_qs]
annotated.sort()
return map(lambda item_tuple: item_tuple[1], annotated)
def assertQuerysetEqual(self, a, b):
# assert list or queryset a is the same as list or queryset b
return self.assertEqual(self._sort_by_pk(a), self._sort_by_pk(b))
| 2.125 | 2 |
tests/validation.py | GeoscienceAustralia/sira | 1 | 12790212 | <filename>tests/validation.py
"""
Parameter lists and helper functions for validation testing of model and config files.
Model and config files are validated against the rules defined below
(required worksheet names, column names and headers).
"""
from pathlib import Path
# -----------------------------------------------------------------------------
# Paths
SIRA_ROOT_DIR = Path(__file__).resolve().parent
path_to_test_models = Path(SIRA_ROOT_DIR, "tests", "models")
# -----------------------------------------------------------------------------
# Worksheet names / primary JSON keys
# XL_WORKSHEET_NAMES
required_model_worksheets = [
'system_meta',
'component_list',
'component_connections',
'supply_setup',
'output_setup',
'comp_type_dmg_algo',
'damage_state_def'
]
# XL_COMPONENT_LIST_HEADERS
required_col_names_clist = [
'component_id',
'component_type',
'component_class',
'cost_fraction',
'node_type',
'node_cluster',
'operating_capacity',
'pos_x',
'pos_y'
]
# MODEL_COMPONENT_HEADERS
required_component_headers = [
"component_type",
"component_class",
"cost_fraction",
"node_type",
"node_cluster",
"operating_capacity",
"pos_x",
"pos_y",
"damages_states_constructor"
]
# MODEL_CONNECTION_HEADERS
required_col_names_conn = [
'origin',
'destination',
'link_capacity',
'weight'
]
# ALGORITHM_DEFINITION_PARAMS
required_col_names = [
'is_piecewise',
'damage_function',
'damage_ratio',
'functionality',
'recovery_function',
'recovery_param1',
'recovery_param2'
]
# MODEL_SECTIONS
required_headers = [
"component_list",
"node_conn_df",
"sysinp_setup",
"sysout_setup"
]
# -----------------------------------------------------------------------------
| 2.5625 | 3 |
server/blogsley/security.py | blogsley/blogsley-next | 2 | 12790213 | import os
import hashlib
def generate_password_hash(password):
salt = os.urandom(32)
key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
return (salt, key)
def check_password_hash(password, salt, key):
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt,
100000
)
success = False
if new_key == key:
success = True
print('Password is correct')
else:
print('Password is incorrect')
return success
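if __name__ == '__main__':
    # Illustrative round trip (not part of the original module): hash a password,
    # then verify a correct and an incorrect guess against the stored salt/key.
    # The password strings are made-up examples.
    salt, key = generate_password_hash('s3cret-example')
    assert check_password_hash('s3cret-example', salt, key) is True
    assert check_password_hash('wrong-guess', salt, key) is False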
| 3.46875 | 3 |
archives/histograma01.py | LBarros77/Python | 0 | 12790214 | '''
Steps:
Ask the user for the name of the input file.
Read the file (if possible) and count all Latin letters (upper-case and lower-case letters are treated as equal).
Print a simple histogram in alphabetical order (only non-zero counts should be shown).
Question:
Create a test file for your code and check whether your histogram contains valid results.
'''
from sys import path
try:
fl = f"{path[0]}/" + input("Escribe el nobre del archvo: ")
with open(fl, "w") as f:
f.write("aBca")
except Exception as e:
print(e)
dic = {}
try:
f = open(fl, "r")
data = f.read()
f.close()
    # Count only Latin letters, treating upper- and lower-case as equal,
    # then print the histogram in alphabetical order (non-zero counts only).
    for k in data.lower():
        if "a" <= k <= "z":
            dic[k] = dic.get(k, 0) + 1
    for k in sorted(dic):
        print(f"{k} -> {dic[k]}")
except Exception as e:
print(e) | 3.453125 | 3 |
node/blockchain/inner_models/signed_change_request/base.py | thenewboston-developers/Node | 18 | 12790215 | <filename>node/blockchain/inner_models/signed_change_request/base.py
from typing import Type as TypingType
from typing import TypeVar, cast
from pydantic import root_validator
from node.blockchain.constants import BLOCK_LOCK
from node.blockchain.mixins.crypto import HashableMixin, validate_signature_helper
from node.blockchain.mixins.validatable import ValidatableMixin
from node.blockchain.utils.lock import lock
from node.core.exceptions import ValidationError
from node.core.utils.cryptography import derive_public_key
from ...types import AccountNumber, Signature, SigningKey
from ..base import BaseModel
from ..signed_change_request_message import SignedChangeRequestMessage
T = TypeVar('T', bound='SignedChangeRequest')
class SignedChangeRequest(ValidatableMixin, BaseModel, HashableMixin):
signer: AccountNumber
signature: Signature
message: SignedChangeRequestMessage
@classmethod
def create_from_signed_change_request_message(
cls: TypingType[T], message: SignedChangeRequestMessage, signing_key: SigningKey
) -> T:
from node.blockchain.inner_models.type_map import get_signed_change_request_subclass
class_ = get_signed_change_request_subclass(message.type)
assert class_ # because message.type should be validated by now
class_ = cast(TypingType[T], class_)
return class_(
signer=derive_public_key(signing_key),
signature=message.make_signature(signing_key),
message=message,
)
@classmethod
def parse_obj(cls, *args, **kwargs):
obj = super().parse_obj(*args, **kwargs)
type_ = obj.message.type
from node.blockchain.inner_models.type_map import get_signed_change_request_subclass
class_ = get_signed_change_request_subclass(type_)
assert class_ # because message.type should be validated by now
if cls == class_: # avoid recursion
return obj
return class_.parse_obj(*args, **kwargs)
@root_validator
def validate_signature(cls, values):
if cls == SignedChangeRequest: # only child classes signature validation makes sense
return values
return validate_signature_helper(values)
def validate_business_logic(self):
self.message.validate_business_logic()
def validate_account_lock(self, blockchain_facade):
if blockchain_facade.get_account_lock(self.signer) != self.message.account_lock:
raise ValidationError('Invalid account lock')
@lock(BLOCK_LOCK, expect_locked=True)
def validate_blockchain_state_dependent(self, blockchain_facade):
self.message.validate_blockchain_state_dependent(blockchain_facade, bypass_lock_validation=True)
self.validate_account_lock(blockchain_facade)
def get_type(self):
return self.message.type
| 2.28125 | 2 |
src/straitjacket/__main__.py | pombredanne/gitlab.com-mbarkhau-straitjacket | 0 | 12790216 | #!/usr/bin/env python
# This file is part of the straitjacket project
# https://gitlab.com/mbarkhau/straitjacket
#
# Copyright (c) 2018 <NAME> (<EMAIL>) - MIT License
# SPDX-License-Identifier: MIT
try:
import backtrace
# To enable pretty tracebacks:
# echo "export ENABLE_BACKTRACE=1;" >> ~/.bashrc
backtrace.hook(align=True, strip_path=True, enable_on_envvar_only=True)
except ImportError:
pass
import sys
from . import sjfmt
if __name__ == '__main__':
sjfmt.main()
sys.exit(0)
| 1.375 | 1 |
terraform_to_ansible/parser.py | mrlesmithjr/terraform-to-ansible | 16 | 12790217 | """terraform_to_ansible/parser.py"""
import json
import logging
import os
import subprocess
import sys
class Parser:
"""Main Terraform tfstate parser."""
def __init__(self, args):
"""Init a thing."""
# Define dictionary to hold all parsed resources
self.all_resources = {}
# Define Terraform tfstate file to load
self.tfstate = args.tfstate
# Define Terraform tfstate directory to load
self.tfstatedir = args.tfstatedir
# Setup logging
self.logger = logging.getLogger(__name__)
def load(self):
"""Load Terraform tfstate file."""
# Attempt to load tfstate file directly
if self.tfstate is not None:
# Log tfstate file path
self.logger.info("Loading --tfstate %s", self.tfstate)
try:
# Open tfstate file
with open(self.tfstate, "r") as stream:
# Load JSON data
try:
data = json.load(stream)
# Log and exit if JSON data not found
except json.JSONDecodeError as error:
self.logger.error(error)
sys.exit(1)
# Log and exit if file not found
except FileNotFoundError as error:
self.logger.error(error)
sys.exit(1)
# Attempt to load tfstate from directory using terraform state pull
else:
# Log tfstate directory
self.logger.info("Loading --tfstatedir %s", self.tfstatedir)
try:
# Capture current working directory prior to changing to the
# tfstate directory. So, we can changing back.
current_dir = os.getcwd()
# Change to the tfstate directory
os.chdir(self.tfstatedir)
try:
# Try to load JSON output from terraform state pull command
data = json.loads(
subprocess.getoutput("terraform state pull")
)
# Log and exit if JSON data not found
except json.decoder.JSONDecodeError as error:
self.logger.error(error)
sys.exit(1)
# Change back to the original current working directory
os.chdir(current_dir)
# Log and exit if file/directory not found
except FileNotFoundError as error:
self.logger.error(error)
sys.exit(1)
# Capture Terraform version from tfstate
terraform_version = data.get("terraform_version")
# Log Terraform version for additional logic if needed. Not used at
# this time.
self.logger.info("terraform_version: %s", terraform_version)
# Capture resources to parse
resources = data.get("resources")
if resources is None:
resources = []
modules = data.get("modules")
if modules is not None:
resources = modules[0].get("resources")
return resources
def parse(self):
"""Parse Terraform tfstate file."""
# Load resources up
resources = self.load()
# Check if resources are a list - newer Terraform versions
if isinstance(resources, list):
for resource in resources:
self.resource_types(resource)
instances = resource.get("instances")
if instances is not None:
for instance in instances:
self.all_resources[resource["type"]].append(
instance["attributes"]
)
# Check if resources are a dict - older Terraform versions
elif isinstance(resources, dict):
for resource, resource_config in resources.items():
self.resource_types(resource_config)
self.all_resources[resource_config["type"]].append(
resource_config["primary"]["attributes"]
)
return self.all_resources
def resource_types(self, resource):
"""Populate resource types."""
# Check to see if all_resources is already populated with resource type
resource_type_lookup = self.all_resources.get(resource["type"])
# Add resource type to all resources if not found in lookup
if resource_type_lookup is None:
self.all_resources[resource["type"]] = []
| 2.65625 | 3 |
setup.py | sam-bailey/tyme | 0 | 12790218 | #!/usr/bin/env python3
# Set this to True to enable building extensions using Cython.
# Set it to False to build extensions from the C file (that
# was previously created using Cython).
# Set it to 'auto' to build with Cython if available, otherwise
# from the C file.
import sys
from setuptools import setup, find_packages, Extension
from distutils.command.sdist import sdist as _sdist
import numpy
USE_CYTHON = "auto"
if USE_CYTHON:
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
except ImportError:
if USE_CYTHON == "auto":
USE_CYTHON = False
else:
raise
class CythonModule(object):
def __init__(self, name: str, path: str):
self.name = name
self.path = path
@property
def name(self) -> str:
return self._name
@name.setter
def name(self, name: str) -> None:
self._name = name
@property
def path(self) -> str:
return self._path
@path.setter
def path(self, path: str) -> None:
self._path = path
@property
def pyx(self) -> str:
return self.path + ".pyx"
@property
def c(self) -> str:
return self.path + ".c"
cython_modules = [
CythonModule(
name="tyme.base_forecasters.exponential_smoothing_cy",
path="src/cython/exponential_smoothing_cy",
),
CythonModule(
name="tyme.base_forecasters.robust_exponential_smoothing_cy",
path="src/cython/robust_exponential_smoothing_cy",
),
]
if sys.version_info[0] == 2:
raise Exception("Python 2.x is no longer supported")
if USE_CYTHON:
class sdist(_sdist):
def run(self):
# Make sure the compiled Cython files in the distribution are up-to-date
cythonize([module.pyx for module in cython_modules])
_sdist.run(self)
ext_modules = [
Extension(module.name, [module.pyx]) for module in cython_modules
]
cmdclass = dict(build_ext=build_ext, sdist=sdist)
else:
ext_modules = [
Extension(module.name, [module.c]) for module in cython_modules
]
cmdclass = {}
requirements = [
"Bottleneck",
"cycler",
"kiwisolver",
"numpy",
"pandas",
"Pillow",
"pyparsing",
"python-dateutil",
"pytz",
"six",
"scipy",
"Cython",
]
requirements_dev = ["pytest", "pytest-cov", "Cython", "pre-commit", "tox"]
setup(
name="tyme",
# version="0.1.0",
description="A timeseries forecasting package, specialised in forecasting grouped timeseries",
author="<NAME>",
author_email="<EMAIL>",
url="http://github.com/sam-bailey/tyme",
packages=find_packages(where="src"),
package_dir={"": "src"},
cmdclass=cmdclass,
ext_modules=ext_modules,
include_dirs=[numpy.get_include()],
long_description=open("README.md").read(),
install_requires=requirements,
extras_require={"dev": requirements_dev},
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.1",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Cython",
"Topic :: Scientific/Engineering :: Mathematics",
],
keywords="timeseries forecast forecasting time",
)
| 2.015625 | 2 |
peek_plugin_base/agent/PluginAgentEntryHookABC.py | Synerty/peek_plugin_base | 0 | 12790219 | from typing import Optional
from peek_plugin_base.PluginCommonEntryHookABC import PluginCommonEntryHookABC
from peek_plugin_base.agent.PeekAgentPlatformHookABC import PeekAgentPlatformHookABC
class PluginAgentEntryHookABC(PluginCommonEntryHookABC):
def __init__(self, pluginName: str, pluginRootDir: str, platform: PeekAgentPlatformHookABC):
PluginCommonEntryHookABC.__init__(self, pluginName=pluginName, pluginRootDir=pluginRootDir)
self._platform = platform
@property
def platform(self) -> PeekAgentPlatformHookABC:
return self._platform
@property
def publishedAgentApi(self) -> Optional[object]:
return None
| 2.203125 | 2 |
test/testers/winforms/panel-regression.py | ABEMBARKA/monoUI | 1 | 12790220 | <filename>test/testers/winforms/panel-regression.py
#!/usr/bin/env python
# vim: set tabstop=4 shiftwidth=4 expandtab
##############################################################################
# Written by: <NAME> <<EMAIL>>
# Date: 10/21/2008
# Description: main test script of panel
# ../samples/winforms/panel.py is the test sample script
# panel/* is the wrapper of panel test sample script
##############################################################################
# The docstring below is used in the generated log file
"""
Test accessibility of Panel widget
"""
# imports
from panel import *
from helpers import *
from states import *
from actions import *
from sys import argv
app_path = None
try:
app_path = argv[1]
except IndexError:
pass #expected
# open the panel sample application
try:
app = launchPanel(app_path)
except IOError, msg:
print "ERROR: %s" % msg
exit(2)
# make sure we got the app back
if app is None:
exit(4)
# just an alias to make things shorter
pFrame = app.panelFrame
###########################
# check children's AtkAction
###########################
# check if checkbox and radiobutton in panel still have correct actions
actionsCheck(pFrame.check1, "CheckBox")
actionsCheck(pFrame.check2, "CheckBox")
actionsCheck(pFrame.check3, "CheckBox")
actionsCheck(pFrame.check4, "CheckBox")
actionsCheck(pFrame.radio1, "RadioButton")
actionsCheck(pFrame.radio2, "RadioButton")
actionsCheck(pFrame.radio3, "RadioButton")
###########################
# check panel's AtkAccessible
###########################
# check panel states
statesCheck(pFrame.panel1, "Panel")
statesCheck(pFrame.panel2, "Panel")
###########################
# check children's AtkAccessible
###########################
# panel should be focused, but focus on its child
# check if checkbox and radiobutton in panel still have correct states
statesCheck(pFrame.check1, "CheckBox", add_states=["focused"])
statesCheck(pFrame.check2, "CheckBox")
statesCheck(pFrame.check3, "CheckBox", add_states=["checked"])
statesCheck(pFrame.check4, "CheckBox",
invalid_states=["sensitive", "enabled","focusable"])
statesCheck(pFrame.radio1, "RadioButton", add_states=["checked"])
statesCheck(pFrame.radio2, "RadioButton")
statesCheck(pFrame.radio3, "RadioButton",
invalid_states=["focusable", "sensitive", "enabled"])
# check if label in panel still have correct states
statesCheck(pFrame.label1, "Label")
statesCheck(pFrame.label2, "Label")
statesCheck(pFrame.label3, "Label")
# click on checkbox which is in panel1 to assert checked state rising
pFrame.check2.click(log=True)
sleep(config.SHORT_DELAY)
# BUG525428 - doing click action doesn't move focus on that control
#statesCheck(pFrame.check2, "CheckBox", add_states=["checked", "focused"])
statesCheck(pFrame.check3, "CheckBox", add_states=["checked"])
# doesn't change panel1's state
statesCheck(pFrame.panel1, "Panel")
# click on radiobutton which is in panel2 to update label
pFrame.radio2.click(log=True)
sleep(config.SHORT_DELAY)
pFrame.assertText(pFrame.label3, 'You are Female')
# BUG525428 - doing click action doesn't move focus on that control
#statesCheck(pFrame.radio2, "RadioButton", add_states=["checked", "focused"])
# doesn't change panel2's state
statesCheck(pFrame.panel2, "Panel")
# close application frame window
pFrame.quit()
print "INFO: Log written to: %s" % config.OUTPUT_DIR
| 1.820313 | 2 |
tests/generated_test_extensions/extend_test_compute.py | rosalexander/oci-cli | 0 | 12790221 | # coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
# These mappings are used by generated tests to look up operations that have been moved in code in the CLI.
MOVED_COMMANDS = {
('compute', 'app_catalog_listing', 'get'): ['compute', 'pic', 'listing', 'get'],
('compute', 'app_catalog_listing', 'list'): ['compute', 'pic', 'listing', 'list'],
('compute', 'app_catalog_listing_resource_version', 'get'): ['compute', 'pic', 'version', 'get'],
('compute', 'app_catalog_listing_resource_version', 'list'): ['compute', 'pic', 'version', 'list'],
('compute', 'app_catalog_listing_resource_version_agreements', 'get-app-catalog-listing-agreements'): ['compute', 'pic', 'agreements', 'get'],
('compute', 'app_catalog_subscription', 'create'): ['compute', 'pic', 'subscription', 'create'],
('compute', 'app_catalog_subscription', 'delete'): ['compute', 'pic', 'subscription', 'delete'],
('compute', 'app_catalog_subscription', 'list'): ['compute', 'pic', 'subscription', 'list']
}
| 1.625 | 2 |
contrib/cookiecutter/ckan_extension/{{cookiecutter.project}}/ckanext/{{cookiecutter.project_shortname}}/logic/schema.py | gg2/ckan | 2,805 | 12790222 | <reponame>gg2/ckan
import ckan.plugins.toolkit as tk
def {{cookiecutter.project_shortname}}_get_sum():
not_empty = tk.get_validator("not_empty")
convert_int = tk.get_validator("convert_int")
return {
"left": [not_empty, convert_int],
"right": [not_empty, convert_int]
}
| 1.859375 | 2 |
src/hots/count_words.py | Brokenwind/hots | 0 | 12790223 | <reponame>Brokenwind/hots<filename>src/hots/count_words.py<gh_stars>0
# -*- coding: utf-8 -*-
# @Time : 2020/9/1 18:00
# @Author : WangKun
# @Email : <EMAIL>
import os
import pickle
import sys
from collections import Counter
import numpy as np
from joblib import Parallel, delayed
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
from hots import common
from nlpyutil import preprocess as preprocore
from hots import corpus
from nlpyutil import Logger
from .ltp import Ltp
_mds = set()
DEFAULT_CSV_COLS = 5
_logger = Logger()
_ltp = Ltp(exword_path=common.SELF_USER_WV_DICT)
_HOT_FILTER_WORDS = set([word.strip() for word in open(common.FILTER_HOT_WORDS_PATH, encoding='UTF-8').readlines()])
_STOPWORDS = preprocore.get_stopwords()
def word_count_and_idf(corpus, idf=False):
"""
    Word frequency statistics and IDF computation.
    :param corpus: list of documents, each a whitespace-separated string,
        e.g. ["article 1 tokens ...", "article 2 tokens ..."]
    :return:
        word_list: vocabulary list
        word_count_list: per-word count list
        idf_list: per-word IDF list
"""
if not corpus:
return None, None, None
cv = CountVectorizer(stop_words=_STOPWORDS)
cv_fit = cv.fit_transform(corpus)
    # vocabulary built from the corpus, returned as a list, e.g. ['bird', 'cat', 'dog', 'fish']
word_list = cv.get_feature_names()
    # word frequency counts
word_count_list = cv_fit.sum(axis=0).tolist()
word_count_list = word_count_list[0]
assert len(word_list) == len(word_count_list)
# analyzer='word',token_pattern=u"(?u)\\b\\w+\\b"
idf_list = []
if idf:
transformer = TfidfTransformer()
tfidf = transformer.fit(cv_fit)
        # compute the global tf-idf values
idf_list = tfidf.idf_
assert len(idf_list) == len(word_count_list)
return word_list, word_count_list, idf_list
def count_and_filter_single(partial_corpus):
word_list, word_count_list, idf_list = word_count_and_idf(partial_corpus, idf=False)
if not word_list or not word_count_list:
return Counter()
# word_count_list = np.array(word_count_list)
# idf_list = np.array(idf_list)
# tfidf_list = word_count_list * idf_list
# word_statistics_list = list(zip(word_list, word_count_list, idf_list, tfidf_list))
word_counter = Counter(dict(zip(word_list, word_count_list)))
return word_counter
def count_words_with_corpus(corpus_data, name_prefix, data_mark, allow_tags=['n', 'j', 'i'], deny_tags=['nt', 'nd']):
"""
    Count word frequencies over the preprocessed corpus.
    :param corpus_data: preprocessed corpus data
    :param name_prefix: prefix used when naming result files
    :param data_mark: tag identifying the data batch
    :param allow_tags: only count words whose POS tag starts with one of these; empty means all tags
    :param deny_tags: skip words whose POS tag starts with one of these
    :return: Counter mapping each retained word to its frequency
"""
corpus_data, _ = corpus.process_origin_corpus_data(corpus_data, split=False,
level=corpus.CorpusItemLevel.Article)
_logger.info("start counting with multi thread")
num_task = max(1, common.NUM_CPU)
partial_len = int(np.ceil(len(corpus_data) / num_task))
if partial_len == 0:
partial_len = len(corpus_data)
num_task = 1
partial_results = Parallel(n_jobs=num_task, backend="multiprocessing")(
delayed(count_and_filter_single)(corpus_data[idx:idx + partial_len])
for idx in range(0, len(corpus_data), partial_len))
total_counter = Counter()
for counter in partial_results:
total_counter.update(counter)
    # filter words by the configured conditions
    _logger.info("start filtering words by POS tag and count")
    # filter by part-of-speech tag
for word in list(total_counter.keys()):
if total_counter[word] < 10:
del total_counter[word]
continue
if word in _HOT_FILTER_WORDS:
del total_counter[word]
continue
word_tags = _ltp.ltp_postagger(word)
if len(word_tags) == 1:
word_tag = word_tags[0]
            # skip the word outright if its POS tag is in deny_tags
deny = any(word_tag[1].startswith(tag) for tag in deny_tags)
if deny:
del total_counter[word]
continue
allow = any(word_tag[1].startswith(tag) for tag in allow_tags)
if not allow:
del total_counter[word]
"""
word_statistics_list_path = os.path.join(common.HOT_WORDS_PATH,
"{}_word_statistics_{}.pkl".format(data_mark, name_prefix))
_logger.info("dump to pickle file")
with open(word_statistics_list_path, 'wb') as f:
pickle.dump(total_counter, f)
_logger.info("finished the dump")
"""
return total_counter
def load_word_statistics(name_prefix) -> Counter:
"""
:param name_prefix:
:return: 返回词统计结果列表,每个元素:('词', 个数, idf值, 全局的tf-idf值)
"""
word_counter_path = os.path.join(common.HOT_WORDS_PATH, "word_statistics_{}.pkl".format(name_prefix))
with open(word_counter_path, 'rb') as f:
word_counter = pickle.load(f)
return word_counter
if __name__ == '__main__':
if len(sys.argv) != 2:
raise ValueError("parameter error")
name_prefix = sys.argv[1]
filepath = os.path.join(common.DATA_PROCESSED_PATH, name_prefix + ".corpus")
count_words_with_corpus(filepath)
"""
corpus = ["我 来到 北京 清华大学",
"他 来到 了 网易 杭研 大厦",
"小明 硕士 毕业 与 中国 科学院",
"我 爱 北京 天安门"]
word_list, word_count_list, idf_list = word_count_and_idf(corpus)
print(word_list)
print(word_count_list)
print(idf_list)
"""
| 2.1875 | 2 |
udacity.py | davidvartanian/dand-ml-identify-fraud-enron-email | 0 | 12790224 | """
Code provided by Udacity
"""
import numpy as np
def featureFormat( dictionary, features, remove_NaN=True, remove_all_zeroes=True, remove_any_zeroes=False, sort_keys = False):
""" convert dictionary to numpy array of features
remove_NaN = True will convert "NaN" string to 0.0
remove_all_zeroes = True will omit any data points for which
all the features you seek are 0.0
remove_any_zeroes = True will omit any data points for which
any of the features you seek are 0.0
sort_keys = True sorts keys by alphabetical order. Setting the value as
a string opens the corresponding pickle file with a preset key
order (this is used for Python 3 compatibility, and sort_keys
should be left as False for the course mini-projects).
NOTE: first feature is assumed to be 'poi' and is not checked for
removal for zero or missing values.
"""
return_list = []
# Key order - first branch is for Python 3 compatibility on mini-projects,
# second branch is for compatibility on final project.
if isinstance(sort_keys, str):
import pickle
keys = pickle.load(open(sort_keys, "rb"))
elif sort_keys:
keys = sorted(dictionary.keys())
else:
keys = dictionary.keys()
for key in keys:
tmp_list = []
for feature in features:
try:
dictionary[key][feature]
except KeyError:
print "error: key ", feature, " not present"
return
value = dictionary[key][feature]
if value=="NaN" and remove_NaN:
value = 0
tmp_list.append( float(value) )
# Logic for deciding whether or not to add the data point.
append = True
# exclude 'poi' class as criteria.
if features[0] == 'poi':
test_list = tmp_list[1:]
else:
test_list = tmp_list
### if all features are zero and you want to remove
### data points that are all zero, do that here
if remove_all_zeroes:
append = False
for item in test_list:
if item != 0 and item != "NaN":
append = True
break
### if any features for a given data point are zero
### and you want to remove data points with any zeroes,
### handle that here
if remove_any_zeroes:
if 0 in test_list or "NaN" in test_list:
append = False
### Append the data point if flagged for addition.
if append:
return_list.append( np.array(tmp_list) )
return np.array(return_list)
def targetFeatureSplit( data ):
"""
given a numpy array like the one returned from
featureFormat, separate out the first feature
and put it into its own list (this should be the
quantity you want to predict)
return targets and features as separate lists
(sklearn can generally handle both lists and numpy arrays as
input formats when training/predicting)
"""
target = []
features = []
for item in data:
target.append( item[0] )
features.append( item[1:] )
return target, features | 3.40625 | 3 |
emonitor/modules/settings/settings.py | Durburz/eMonitor | 21 | 12790225 | import os, math, yaml
from emonitor.extensions import db
class Struct(dict):
def __init__(self, **entries):
self.__dict__.update(entries)
class Settings(db.Model):
"""Settings class"""
__tablename__ = 'settings'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(64))
_value = db.Column('value', db.Text)
def __init__(self, name, value=""):
self.name = name
self._value = value
@property
def value(self):
return yaml.load(self._value)
@value.setter
def value(self, val):
self._value = yaml.safe_dump(val, encoding='utf-8')
@staticmethod
def num2deg(xtile, ytile, zoom=17 or db.config.get('DEFAULTZOOM')):
"""
Translate tile into coordinate (lat, lon)
:param xtile: x-coordinate of tile
:param ytile: y-coordinate of tile
:param zoom: zoom level
:return: lat, lon tuple
"""
n = 2.0 ** zoom
lon_deg = xtile / n * 360.0 - 180.0
lat_deg = math.degrees(math.atan(math.sinh(math.pi * (1 - 2 * ytile / n))))
return lat_deg, lon_deg
def getCarTypeNames(self):
return self.value
@staticmethod
def getCarTypes():
ctypes = Settings.query.filter_by(name='cartypes')
if ctypes.count():
return ctypes.one().value
return ""
@staticmethod
def get_byType(type):
return Settings.query.filter_by(name=type).first() or ""
@staticmethod
def getMapTiles(mid=0, zoom=17 or db.app.config.get('DEFAULTZOOM')):
from emonitor.modules.maps.map import Map
_map = Map.getMaps(mid)
tiles = []
try:
for ts in [f for f in os.listdir(_map.path + str(zoom) + '/') if f.endswith('png')]:
tiles.append(ts.replace('-', '/'))
except:
pass
return tiles
@staticmethod
def getFrontendSettings(area=""):
s = Settings.query.filter_by(name='frontend.default')
if s.count() == 1:
if area == "":
return s.first().value
elif area in s.first().value.keys():
return s.first().value[area]
return {'module': 'default', 'width': '.2', 'visible': '0', 'center': {'module': 'default'}, 'west': {'module': 'default', 'width': '.2'}, 'east': {'module': 'default', 'width': '.2'}}
@staticmethod
def get(option, default=''):
"""
Getter for option values
:param option: name as string
:param optional default: default value if not found in database
:return: value of option
"""
s = Settings.query.filter_by(name=option)
if s.count() == 1: # update
return s.first().value
return default # deliver default value
@staticmethod
def set(option, val):
"""
Setter for option
:param option: name as string
:param val: value of option
:return: value of option
"""
s = Settings.query.filter_by(name=option).first()
if s: # update settings
s.value = val
else: # add value
s = Settings(option, yaml.safe_dump(val, encoding='utf-8'))
db.session.add(s)
db.session.commit()
return s
@staticmethod
def getIntList(option, default=[]):
try:
return map(int, Settings.get(option, '').split(','))
except ValueError:
return default
@staticmethod
def getYaml(option):
try:
return Struct(**(Settings.get(option)))
except TypeError:
return Struct()
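# Usage sketch (illustrative, not part of the original module):
#   Settings.get('cartypes', default=[])      # read an option with a fallback value
#   Settings.num2deg(xtile, ytile, zoom=17)   # convert a map tile index to a (lat, lon) tuple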
| 2.453125 | 2 |
tests/test_ark.py | DerekRies/arkpy | 19 | 12790226 | <gh_stars>10-100
import pytest
import pep8
import random
import os
from context import arktypes, ark, binary, utils
data_dir = 'data/'
output_dir = 'tests/output/'
class TestArkProfile:
def test_write_read(self):
profile = ark.ArkProfile()
profile.map_name = ark.GameMapMap.the_center
profile.map_path = ark.GameMapMap.the_center_path
profile.unique_id.set('11111111111111111')
profile.player_name.set('<NAME>')
profile.player_id.set(utils._gen_player_id())
profile.first_spawned.set(True)
profile.character.name.set('GeneratedCharacter')
profile.character.level_ups.set(9)
profile.character.experience.set(450)
profile.character.stat_points[ark.StatMap.Health].set(9)
profile.save_to_file(output_dir + 'generatedprofile.arkprofile')
# TODO: Create some functionality to compare the data dicts directly
# to see if they are the same
profile2 = ark.ArkProfile(output_dir + 'generatedprofile.arkprofile')
assert profile2.map_name == profile.map_name
assert profile2.map_path == profile.map_path
assert profile2.unique_id.value == profile.unique_id.value
assert profile2.player_name.value == profile.player_name.value
assert profile2.player_id.value == profile.player_id.value
profile2_female = profile2.character.isFemale.value
profile_female = profile.character.isFemale.value
assert profile2_female == profile_female
assert profile2.character.stat_points[ark.StatMap.Health].value == 9
assert profile2.header_size != 0
assert profile2.header_size == profile.header_size
class TestArkTribe:
def test_write_read(self):
owner_id = utils._gen_player_id()
owner_name = arktypes.StrProperty(value='Generated Owner')
tribe = ark.ArkTribe()
tribe.name.set('Generated Tribe')
tribe.tribe_id.set(utils._gen_tribe_id())
tribe.owner_id.set(owner_id)
tribe.members_names.value.append(owner_name)
member_id = arktypes.UInt32Property(value=owner_id)
tribe.members_ids.value.append(member_id)
tribe.save_to_file(output_dir + 'generatedtribe.arktribe')
tribe2 = ark.ArkTribe(output_dir + 'generatedtribe.arktribe')
assert tribe2.name.value == tribe.name.value
assert tribe2.tribe_id.value == tribe.tribe_id.value
assert tribe2.members_names.value[
0].value == tribe.members_names.value[0].value
assert tribe2.owner_id.value == tribe.owner_id.value
@pytest.mark.slow
class TestBulkReads:
def test_read_profiles(self):
path = 'data/Servers/Server01/'
files = os.listdir(path)
for file in files:
if '.arkprofile' in file:
try:
file_path = path + file
profile = ark.ArkProfile(file_path)
except:
print 'FAILED: %s' % file
assert False
def test_read_tribes(self):
path = 'data/Servers/Server01/'
files = os.listdir(path)
for file in files:
if '.arktribe' in file:
try:
file_path = path + file
tribe = ark.ArkTribe(file_path)
except:
print 'FAILED: %s' % file
assert False
assert True
@pytest.mark.style
class TestStyle:
def test_pep8(self):
pep8style = pep8.StyleGuide()
result = pep8style.check_files([
'tests/test_ark.py',
'tests/test_arktypes.py',
'arkpy/binary.py',
'arkpy/utils.py',
'arkpy/ark.py',
'arkpy/arktypes.py',
])
assert result.total_errors == 0
| 2.328125 | 2 |
onadata/kobocat/__init__.py | BuildAMovement/whistler-kobocat | 38 | 12790227 | <reponame>BuildAMovement/whistler-kobocat<filename>onadata/kobocat/__init__.py
from onadata.koboform import context_processors
# remove this file when all servers are using new settings
print """
context_processors setting must be changed from
kobocat.context_processors to koboform.context_processors
""" | 1.3125 | 1 |
scripts/sources/s_checklist_scenariobased_step04.py | dpopadic/arpmRes | 6 | 12790228 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.5
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_checklist_scenariobased_step04 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step04&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-4-copmarg).
# +
import numpy as np
import pandas as pd
from scipy.stats import t as tstu
import matplotlib.pyplot as plt
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from arpym.statistics import quantile_sp, simulate_markov_chain_multiv, \
simulate_t, project_trans_matrix
from arpym.tools import histogram_sp, add_logo
# -
# ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-parameters)
# t_now is 31-Aug-2012. Set t_hor>t_now
t_hor = np.datetime64('2012-10-26') # the future investment horizon
j_ = 5000 # number of scenarios
d_plot = 97 # projected risk driver to plot
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step00): Load data
# +
path = '../../../databases/temporary-databases/'
# Risk drivers identification
# realizations of risk drivers up to and including time t_now
db_riskdrivers_series = pd.read_csv(path + 'db_riskdrivers_series.csv',
index_col=0, parse_dates=True)
x = db_riskdrivers_series.values
risk_drivers_names = db_riskdrivers_series.columns
# additional information
db_riskdrivers_tools = pd.read_csv(path + 'db_riskdrivers_tools.csv')
d_ = int(db_riskdrivers_tools.d_.dropna())
d_credit = int(db_riskdrivers_tools.d_credit.dropna())
n_stocks = int(db_riskdrivers_tools.n_stocks.dropna())
d_implvol = int(db_riskdrivers_tools.d_implvol.dropna())
n_bonds = int(db_riskdrivers_tools.n_bonds.dropna())
i_bonds = n_bonds * 4 # 4 NS parameters x n_bonds
c_ = int(db_riskdrivers_tools.c_.dropna())
ratings_tnow = np.array(db_riskdrivers_tools.ratings_tnow.dropna())
t_now = np.datetime64(db_riskdrivers_tools.t_now[0], 'D')
# Quest for invariance
# values of invariants
db_invariants_series = pd.read_csv(path + 'db_invariants_series.csv',
index_col=0, parse_dates=True)
epsi = db_invariants_series.values
t_, i_ = np.shape(epsi)
# next step models
db_invariants_nextstep = pd.read_csv(path + 'db_invariants_nextstep.csv')
# parameters for next step models
db_invariants_param = pd.read_csv(path + 'db_invariants_param.csv', index_col=0)
# parameters for GARCH(1,1) next step models
db_garch_sig2 = pd.read_csv(path + 'db_garch_sig2.csv', index_col=0,
parse_dates=True)
# estimated annual credit transition matrix
p_credit = pd.read_csv(path +
'db_invariants_p_credit.csv').values.reshape(c_ + 1, c_ + 1)
# Estimation
# parameters for invariants modeled using Student t distribution
db_estimation_parametric = pd.read_csv(path + 'db_estimation_parametric.csv',
index_col=0)
# estimated probabilities for nonparametric distributions
db_estimation_nonparametric = pd.read_csv(path + 'db_estimation_nonparametric.csv',
index_col=False)
p_marginal = db_estimation_nonparametric.values
# parameters for estimated Student t copula
db_estimation_copula = pd.read_csv(path + 'db_estimation_copula.csv')
nu_copula = int(db_estimation_copula['nu'].iloc[0])
rho2_copula = np.array(db_estimation_copula['rho2']).reshape(i_, i_)
# parameters for the credit copula
db_estimation_credit_copula = pd.read_csv(path + 'db_estimation_credit_copula.csv')
rho2_credit = db_estimation_credit_copula.rho2_credit.values.reshape(2, 2)
nu_credit = db_estimation_credit_copula.nu_credit[0]
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step01): Determine number of projection steps and scenario probabilities
# number of monitoring times
m_ = np.busday_count(t_now, t_hor)
# projection scenario probabilities
p = np.ones(j_) / j_
# invariants modeled parametrically
ind_parametric = np.arange(n_stocks + 1 + d_implvol,
n_stocks + 1 + d_implvol + i_bonds)
# invariants modeled nonparametrically
ind_nonparametric = list(set(range(i_)) - set(ind_parametric))
# ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step02): Projection of invariants
# +
epsi_proj = np.zeros((j_, m_, i_))
for m in range(m_):
# copula scenarios
# simulate standardized invariants scenarios for copula
epsi_tilde_proj = simulate_t(np.zeros(i_), rho2_copula, nu_copula, j_)
# generate invariants scenarios
# invariants modeled nonparametrically
for i in ind_nonparametric:
# project t-copula standardized invariants scenarios
u_proj = tstu.cdf(epsi_tilde_proj[:, i], nu_copula)
epsi_proj[:, m, i] = quantile_sp(u_proj, epsi[:, i], p_marginal[:, i])
# invariants modeled parametrically (estimated as Student t distributed)
for i in ind_parametric:
# project t-copula standardized invariants scenarios
u_proj = tstu.cdf(epsi_tilde_proj[:, i], nu_copula)
mu_marg = db_estimation_parametric.loc['mu', str(i)]
sig2_marg = db_estimation_parametric.loc['sig2', str(i)]
nu_marg = db_estimation_parametric.loc['nu', str(i)]
epsi_proj[:, m, i] = mu_marg + np.sqrt(sig2_marg) * tstu.ppf(u_proj, nu_marg)
# -
# ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step03): Projection of risk drivers
# +
x_proj = np.zeros((j_, m_ + 1, d_))
dx_proj = np.zeros((j_, m_ + 1, d_))
sig2_garch = np.zeros((j_, m_ + 1, d_))
a_garch = db_invariants_param.loc['a'].values
b_garch = db_invariants_param.loc['b'].values
c_garch = db_invariants_param.loc['c'].values
mu_garch = db_invariants_param.loc['mu'].values
# risk drivers at time t_now are the starting values for all scenarios
x_proj[:, 0, :] = db_riskdrivers_series.iloc[-1, :]
# initialize parameters for GARCH(1,1) projection
d_garch = [d for d in range(d_)
if db_invariants_nextstep.iloc[0, d] == 'GARCH(1,1)']
for d in d_garch:
sig2_garch[:, 0, d] = db_garch_sig2.iloc[-1, d]
dx_proj[:, 0, d] = x[-1, d] - x[-2, d]
# project daily scenarios
for m in range(1, m_ + 1):
for d in range(d_):
# risk drivers modeled as random walk
if db_invariants_nextstep.iloc[0, d] == 'Random walk':
x_proj[:, m, d] = x_proj[:, m - 1, d] + epsi_proj[:, m - 1, d]
# risk drivers modeled as GARCH(1,1)
elif db_invariants_nextstep.iloc[0, d] == 'GARCH(1,1)':
sig2_garch[:, m, d] = c_garch[d] + \
b_garch[d] * sig2_garch[:, m - 1, d] + \
a_garch[d] * (dx_proj[:, m - 1, d] - mu_garch[d]) ** 2
dx_proj[:, m, d] = mu_garch[d] + \
np.sqrt(sig2_garch[:, m, d]) * epsi_proj[:, m - 1, d]
x_proj[:, m, d] = x_proj[:, m - 1, d] + dx_proj[:, m, d]
# risk drivers modeled as AR(1)
elif db_invariants_nextstep.iloc[0, d] == 'AR(1)':
b_ar1 = db_invariants_param.loc['b'][d]
x_proj[:, m, d] = b_ar1 * x_proj[:, m - 1, d] + epsi_proj[:, m - 1, d]
# -
# ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step04): Projection of credit ratings
# +
# compute the daily credit transition matrix
p_credit_daily = project_trans_matrix(p_credit, 1 / 252)
# project ratings
ratings_proj = simulate_markov_chain_multiv(ratings_tnow, p_credit_daily,
m_, rho2=rho2_credit,
nu=nu_credit, j_=j_)
# -
# ## [Step 5](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step04-implementation-step05): Save databases
# +
# delete big files
del dx_proj, sig2_garch
# projected risk drivers
out = pd.DataFrame({risk_drivers_names[d]:
x_proj[:, :, d].reshape((j_ * (m_ + 1),))
for d in range(d_)})
out = out[list(risk_drivers_names[:d_].values)]
out.to_csv(path + 'db_projection_riskdrivers.csv', index=None)
del out
# projected credit ratings
out = pd.DataFrame({'GE': ratings_proj[:, :, 0].reshape((j_ * (m_ + 1),)),
'JPM': ratings_proj[:, :, 1].reshape((j_ * (m_ + 1),))})
out.to_csv(path + 'db_projection_ratings.csv', index=None)
del out
# number of scenarios and future investment horizon
out = pd.DataFrame({'j_': pd.Series(j_),
't_hor': pd.Series(t_hor)})
out.to_csv(path + 'db_projection_tools.csv', index=None)
del out
# projected scenario probabilities
out = pd.DataFrame({'p': pd.Series(p)})
out.to_csv(path + 'db_scenario_probs.csv', index=None)
del out
# -
# ## Plots
# +
plt.style.use('arpm')
# number of paths to plot
num_plot = min(j_, 20)
# market risk driver path
fig1 = plt.figure(figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
# plot historical series
f1 = plt.plot(np.arange(t_ + 1), db_riskdrivers_series.iloc[:, d_plot - 1], lw=1)
# plot projected series
for j in range(num_plot):
f1 = plt.plot(np.arange(t_ + 1, t_ + 1 + m_ + 1), x_proj[j, :, d_plot - 1], lw=1)
f, xp = histogram_sp(x_proj[:, -1, d_plot - 1], k_=10 * np.log(j_))
f1 = plt.barh(xp, f / 10, height=xp[1] - xp[0], left=t_ + 1 + m_,
facecolor=[.3, .3, .3], edgecolor='k')
plt.title('Projected path: ' + risk_drivers_names[d_plot - 1],
fontweight='bold', fontsize=20)
plt.xlabel('t (days)', fontsize=17)
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
add_logo(fig1, set_fig_size=False)
fig1.tight_layout()
# plot projected ratings
# select paths with rating changes
ind_j_plot_GE = np.zeros(1)
ind_j_plot_GE[0] = 0
k = 0
while k < num_plot:
k = k + 1
for j in range(j_):
if (j not in ind_j_plot_GE and
ratings_proj[j, -1, 0] != ratings_proj[k, -1, 0]):
ind_j_plot_GE = np.append(ind_j_plot_GE, j)
break
ind_j_plot_JPM = np.zeros(1)
ind_j_plot_JPM[0] = 0
k = 0
while k < num_plot:
k = k + 1
for j in range(j_):
if (j not in ind_j_plot_JPM and
ratings_proj[j, -1, 1] != ratings_proj[k, -1, 1]):
ind_j_plot_JPM = np.append(ind_j_plot_JPM, j)
break
fig2, ax = plt.subplots(2, 1, figsize=(1280.0 / 72.0, 720.0 / 72.0), dpi=72.0)
plt.sca(ax[0])
for j in ind_j_plot_GE:
f5 = plt.plot(np.arange(m_ + 1), ratings_proj[int(j), :, 0] + 1)
plt.title('Projected rating GE', fontweight='bold', fontsize=20)
plt.yticks(np.arange(10), fontsize=14)
ax[0].set_yticklabels(['', 'AAA', 'AA', 'A', 'BBB', 'BB', 'B', 'CCC', 'D', ''])
plt.gca().invert_yaxis()
plt.sca(ax[1])
for j in ind_j_plot_JPM:
plt.plot(np.arange(m_ + 1), ratings_proj[int(j), :, 1] + 1)
plt.title('Projected rating JPM', fontweight='bold', fontsize=20)
plt.yticks(np.arange(10), fontsize=14)
ax[1].set_yticklabels(['', 'AAA', 'AA', 'A', 'BBB', 'BB', 'B', 'CCC', 'D', ''])
plt.gca().invert_yaxis()
add_logo(fig2, set_fig_size=False)
fig2.tight_layout()
| 1.945313 | 2 |
functions/proto_optim.py | jwen307/diamondintherough | 4 | 12790229 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 4 08:24:48 2021
proto_optim.py
- Find the protoimage vectors
"""
import torch
import torch.optim as optim
import torch.nn as nn
import math
from tqdm import tqdm
from . import utils
#Optimization to get the protoimages
def proto_optim(og_latents,recovered_latents, model, model_type, device, lr = 0.1, Lambda = 3.0, epsilon = 0.0001, minibatch_size = 16):
'''
Lambda: weight for the cosine similarity loss (3.0 for celeba_pgan, 5.0 for church_pgan, 10.0 for celebaHQ_pgan)
'''
print('Searching for the protoimage latent vector...')
#Get the number of samples and the dimensionality
num_samples, num_dims = og_latents.size()
#Number of batches needed
num_batches = int(math.ceil(num_samples/minibatch_size))
#Vector for the found protoimage latent vectors
protoLatents = torch.zeros(num_samples,num_dims)
for batch_num in tqdm(range(num_batches)):
#Check if there are fewer than minibatch_size images left
if (batch_num + 1) * minibatch_size > num_samples:
end = num_samples + 1
else:
end = (batch_num + 1) * minibatch_size
#Original latent vectors
x = og_latents[batch_num * minibatch_size: end].to(device)
batch_size,_ = x.size()
#Recovered latent vectors
y = recovered_latents[batch_num * minibatch_size: end].to(device)
#Put both on the device
x = x.detach().requires_grad_(False).to(device)
y = y.detach().requires_grad_(False).to(device)
og_x = x * 1.0
alpha = torch.ones(batch_size,512,device=device)
alpha = alpha.requires_grad_(True)
#Initial direction
diff = y - x
opt = optim.Adam([alpha], lr = lr)
cosSim = nn.CosineSimilarity()
#Learning rate scheduler
sched = optim.lr_scheduler.StepLR(optimizer = opt, step_size = 200, gamma = 0.1)
oldLoss = 0
for i in range(501):
#Zero the gradients
opt.zero_grad()
#Move the direction of the difference vector
ynew = y + (torch.mm(alpha,diff.t()).diagonal().unsqueeze(1) * ((diff / (diff.norm(dim=1).unsqueeze(1)**2))))
ynew = utils.normalize(ynew)
#Get the images of the current latent vectors
currImgs = model.netG(ynew)
#Get the discriminator score
discrimScore = model.netD(currImgs,getFeature = False)
#Calculate the loss
if model_type == 'wgangp':
loss = discrimScore.mean() + 0.2*cosSim(ynew,og_x).mean() + 1.0*discrimScore.std() + 3.0*cosSim(ynew,og_x).std()
else:
loss = discrimScore.mean() + Lambda*cosSim(ynew,og_x).mean()
#Backpropagate the error
loss.backward()
#Take a step with the optimizer
opt.step()
sched.step()
            #Early stopping condition (compare scalar loss values, not tensors)
            if abs(loss.item() - oldLoss) < epsilon:
                break
            else:
                oldLoss = loss.item()
x = y * 1.0
y = ynew.detach()
diff = y - x
#Show the progress
# if i % 1 == 0:
# print('Iterations: ' + str(i))
# print('Loss: ' + str(loss))
protoLatents[batch_num * minibatch_size: end] = ynew.detach().cpu()
return protoLatents
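
# Example usage (sketch -- the model object, latent sizes and hyperparameters below are
# illustrative; any PGAN-style wrapper exposing .netG and .netD as used above would work):
#
#   device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#   model = ...                                   # pretrained GAN wrapper with .netG / .netD
#   og_latents = torch.randn(64, 512)             # original latent draws
#   recovered_latents = torch.randn(64, 512)      # latents recovered by GAN inversion
#   proto_latents = proto_optim(og_latents, recovered_latents, model, 'pgan', device,
#                               lr=0.1, Lambda=3.0)
#   proto_imgs = model.netG(proto_latents.to(device))   # render the protoimages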
| 2.640625 | 3 |
tfc/tfc.py | emregeldegul/tfc | 29 | 12790230 | <reponame>emregeldegul/tfc
from .notify import Notify
from googletrans import Translator
from time import sleep
from xerox import paste
class Translation:
def __init__(self, dest, src):
self.translate = Translator()
self.notify = Notify()
self.dest = dest
self.src = src
def start(self):
temp_value = ''
while True:
recent_value = paste()
if recent_value != temp_value:
try:
result = self.translate.translate(recent_value, dest=self.dest, src=self.src)
self.notify.send(recent_value, result.text)
except Exception as e:
self.notify.send('A Problem Occurred', str(e))
temp_value = recent_value
sleep(2)
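
# Example usage (sketch; the packaged CLI entry point lives elsewhere, and because of the
# relative import above this module is meant to be imported rather than run directly --
# the language codes are illustrative):
#
#   from tfc.tfc import Translation
#   Translation(dest='en', src='auto').start()   # watches the clipboard until interrupted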
| 2.484375 | 2 |
scripts/gen_key_label.py | hua1024/OpenOCR | 3 | 12790231 | # coding=utf-8
# @Time : 2021/5/11 11:20
# @Auto : zzf-jeff
train_list_file = '../test/train_rec_05.txt'
test_list_file = '../test/test_rec_05.txt'
keys_file = './key.txt'
fid_key = open(keys_file, 'a+', encoding='utf-8')
keys = ''
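# accumulate every character that appears in the train/test label files; the de-duplicated
# character set is written to key.txt, one character per line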
def read_txt(txt_path, split_type):
'''
    Read annotation info from a txt file, formatted as
    xxx/a/1.png,a
    xxx/a/2.png,a
    Args:
        txt_path: train/valid/test data txt or json
        split_type: delimiter used to split each line
    Returns:
        infos: list, all data info
'''
with open(txt_path, 'r', encoding='utf-8') as f:
infos = list(map(lambda line: line.strip().split(split_type), f))
return infos
infos = read_txt(train_list_file, split_type=' ')
for info in infos:
txt_file, use_flag = info
if int(use_flag) == 1:
with open(txt_file, 'r', encoding='utf-8') as fid_train:
lines = fid_train.readlines()
for line in lines:
line = line.strip().split('\t')
keys += line[-1]
infos = read_txt(test_list_file, split_type=' ')
for info in infos:
txt_file, use_flag = info
if int(use_flag) == 1:
with open(txt_file, 'r', encoding='utf-8') as fid_train:
lines = fid_train.readlines()
for line in lines:
line = line.strip().split('\t')
keys += line[-1]
key = ''.join(list(set(list(keys))))
for _key in key:
    fid_key.write(_key + '\n')
fid_key.close()
| 2.78125 | 3 |
bblfsh_sonar_checks/checks/java/RSPEC-1215.py | stg-tud/sonar-checks | 3 | 12790232 | import bblfsh_sonar_checks.utils as utils
import bblfsh
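# Sonar rule RSPEC-1215: garbage collector calls should not be made explicitly. The two
# Babelfish UAST queries below flag System.gc() and Runtime.getRuntime().gc() invocations.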
def check(uast):
findings = []
fin_calls = bblfsh.filter(uast, "//MethodInvocation//"
"Identifier[@roleCall and @roleReceiver and @Name='System']/parent::MethodInvocation/"
"Identifier[@roleCall and @roleCallee and @Name='gc']/parent::MethodInvocation")
if len(list(fin_calls)):
findings.append({"msg": "Don't use System.gc()", "pos": None})
fin_calls = bblfsh.filter(uast, "//MethodInvocation//"
"Identifier[@roleCall and @roleReceiver and @Name='Runtime']/parent::MethodInvocation//"
"Identifier[@roleCall and @roleCallee and @Name='getRuntime']/parent::MethodInvocation/parent::MethodInvocation//"
"Identifier[@roleCall and @roleCallee and @Name='gc']/parent::MethodInvocation")
if len(list(fin_calls)):
        findings.append({"msg": "Don't use Runtime.getRuntime().gc()", "pos": None})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
| 2.15625 | 2 |
Exercicios/tempCodeRunnerFile.py | eduardodarocha/Introducao_Ciencia_da_Computacao_com_Python_Parte_2_Coursera | 1 | 12790233 | print(elefantes(4)) | 1.1875 | 1 |
polymorphism.py | Sp-X/PCAP | 0 | 12790234 | <reponame>Sp-X/PCAP
class One:
def do_it(self):
print("do_it from One")
def doanything(self):
self.do_it()
class Two(One):
def do_it(self):
print("do_it from Two")
one = One()
two = Two()
one.doanything()
two.doanything()
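# Expected output -- doanything() resolves do_it() on the runtime type of self:
#   do_it from One
#   do_it from Two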
| 2.53125 | 3 |
boundary/plugin_manifest.py | jdgwartney/boundary-api-cli | 0 | 12790235 | <filename>boundary/plugin_manifest.py<gh_stars>0
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
"""
Reads and provides access to a plugin.json file the manifest of plugins.
"""
class PluginManifest(object):
def __init__(self, path="plugin.json"):
"""
Initialize the PluginManifest instance
"""
self.path = path
self.manifest_json = None
self.manifest = None
def get_metric_names(self):
"""
Returns the list of metrics associated with the plugin manifest
"""
return self.manifest['metrics']
def read(self):
"""
        Load the manifest file from the given path
        """
        with open(self.path, "r") as f:
            self.manifest_json = f.read()
def parse(self):
"""
Parses the manifest JSON into a dictionary
"""
self.manifest = json.loads(self.manifest_json)
def load(self):
"""
Read the JSON file and parse into a dictionary
"""
self.read()
self.parse()
@property
def command(self):
return self.manifest['command']
@property
def command_lua(self):
return self.manifest['command_lua']
@property
def description(self):
return self.manifest['description']
@property
def icon(self):
return self.manifest['icon']
@property
def ignore(self):
return self.manifest['ignore']
@property
def metrics(self):
return self.manifest['metrics']
@property
def name(self):
logging.debug(self.manifest)
return self.manifest['name']
@property
def param_array(self):
return self.manifest['paramArray']
@property
def post_extract(self):
return self.manifest['postExtract']
@property
def post_extract_lua(self):
return self.manifest['postExtract_lua']
@property
def tags(self):
return self.manifest['tags']
@property
def version(self):
return self.manifest['version']
def get_manifest(self):
"""
Returns the dictionary from the parse JSON plugin manifest
"""
return self.manifest
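
# Example usage (sketch; assumes a plugin.json manifest exists in the working directory):
if __name__ == '__main__':
    plugin_manifest = PluginManifest('plugin.json')
    plugin_manifest.load()
    print(plugin_manifest.name, plugin_manifest.version)
    for metric_name in plugin_manifest.get_metric_names():
        print(metric_name)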
| 2.34375 | 2 |
setup.py | btcfy/cloudify-utilities-plugin | 0 | 12790236 | <filename>setup.py
# Copyright (c) 2017 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import setuptools
setuptools.setup(
name='cloudify-utilities-plugin',
version='1.9.6',
author='<EMAIL>',
author_email='<EMAIL>',
description='Utilities for extending Cloudify',
packages=['cloudify_deployment_proxy',
'cloudify_ssh_key',
'cloudify_files',
'cloudify_terminal',
'cloudify_configuration',
'cloudify_custom_workflow',
'cloudify_suspend',
'cloudify_cloudinit',
'cloudify_rest', 'cloudify_rest/rest_sdk',
'cloudify_scalelist'],
license='LICENSE',
install_requires=[
'cloudify-plugins-common>=3.4.2',
'cloudify-rest-client>=4.0', # deployment_proxy
'paramiko', # terminal
"Jinja2>=2.7.2", # terminal
'pycrypto', # ssh_key
'pyyaml', # cloudinit and rest
'xmltodict'] # rest
)
| 1.164063 | 1 |
math/iteration_power.py | dimascapella/Python | 1 | 12790237 | <filename>math/iteration_power.py
def iteration_power(number: int, exp: int) -> int:
"""
    Exponentiation using an iterative (loop-based) method
    mathematical formula: number^exp
>>> iteration_power(2, 5)
32
>>> iteration_power(100, 0)
1
>>> iteration_power(0, 100)
0
>>> iteration_power(1, 100)
1
"""
result = 1
for _ in range(exp):
result *= number
return result
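# Note: this takes O(exp) multiplications; exponentiation by squaring would reduce the cost
# to O(log exp), but the simple loop matches the iterative definition shown above.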
if __name__ == "__main__":
import doctest
doctest.testmod()
    # Exponent greater than 1
    print(iteration_power(number=2, exp=5))
    # Exponent equal to 0
    print(iteration_power(number=100, exp=0))
    # Zero with an exponent greater than 1
    print(iteration_power(number=0, exp=100))
    # One with an exponent greater than 1
    print(iteration_power(number=1, exp=100))
| 4.375 | 4 |
webtul/img.py | zagfai/webtul | 1 | 12790238 | <filename>webtul/img.py
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Image tools
"""
__author__ = 'Zagfai'
__date__ = '2018-02'
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
class BoxWrapper(object):
"""BoxWrapper for Chinese and Alphas text wrapping"""
def __init__(self, font_path, font_size):
super(BoxWrapper, self).__init__()
self.font_path = font_path
self.font = ImageFont.truetype(font_path, font_size)
def line_size(self, virtual_line):
return self.font.getsize(virtual_line)
def split(self, text):
words = []
word_tmp = ''
for chx in text:
if ord(chx) > 255:
if word_tmp:
words.append(word_tmp)
word_tmp = ''
words.append(chx)
else:
word_tmp += chx
if word_tmp:
words.append(word_tmp)
word_tmp = ''
# split Alpha words
split_words = []
for word in words:
if len(word) == 1:
split_words.append(word)
else:
split_words.extend(word.split())
for wordpos, word in enumerate(split_words):
if len(word) > 1 and wordpos > 0 and \
len(split_words[wordpos-1]) > 1:
split_words[wordpos] = ' ' + split_words[wordpos]
# print(split_words)
return split_words
def wrap_as_box(self, text, box_width, color, place='left'):
words = self.split(text)
lines = []
line = ''
for word in words:
line_tmp = line + word
size = self.line_size(line_tmp)
text_h = size[1]
if size[0] <= box_width:
line = line_tmp
else:
lines.append(line)
line = word.strip()
if line:
lines.append(line)
return lines, (box_width, text_h*len(lines)), text_h
def mkimg(self, text, box_width, color='#000', place='left',
mode='RGBA', back_color=(255, 255, 255, 0), line_padding=0):
lines, (w, h), line_h = self.wrap_as_box(text, box_width, color)
image = Image.new(
mode, (box_width, h+line_padding*len(lines)), color=back_color)
draw = ImageDraw.Draw(image)
x = 0
y = 0
for index, line in enumerate(lines):
draw.text((x, y), line, font=self.font, fill=color)
y += line_h + line_padding
return image
if __name__ == "__main__":
charpath = '/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc'
text = "【以色列钻石交易所:将发行两种数字货币】世界大型交易所之一的以色列钻石交易所宣布,将发行两种数字货币:一种为Carat,定位于广大投资券;另一种为Cut,预计将用于钻石市场专业参与者间的结算。25%的Carat价值将基于交易平台所拥有的钻石。" # NOQA
x = BoxWrapper(charpath, 24)
x.mkimg(text, 390, '#000')
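    # mkimg returns a PIL.Image; in a real run you would keep and save the result, e.g.:
    #   img = x.mkimg(text, 390, '#000')
    #   img.save('wrapped.png')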
| 3.046875 | 3 |
tools/dox.py | RyanSchuster/vos64 | 1 | 12790239 | <gh_stars>1-10
#!/usr/bin/env python3
import os
import sys
import scanner
import codemap
# ------------------------------------------------------------------------------
# Parameterized output filenames
def func_index_name():
return 'Function-Index.md'
def func_index_graph_name():
return 'func_index_graph.dot'
def mod_index_name():
return 'Module-Index.md'
def mod_index_graph_name():
return 'mod_index_graph.dot'
def func_page_name(func_name):
return os.path.join('funcs', '{}.md'.format(func_name))
def func_graph_name(func_name):
return os.path.join('funcs', '{}.dot'.format(func_name))
def mod_page_name(mod_name):
return os.path.join('mods', '{}.md'.format(mod_name))
def mod_graph_name(mod_name):
return os.path.join('mods', '{}.dot'.format(mod_name))
# ------------------------------------------------------------------------------
# Helpful builder functions
def link(text, filename):
return '[{}]({})'.format(text, text)
def table_list(f, line_list):
has_table = False
has_lines = False
table = '|location||description|\n'
table += '|:---|:---:|:---|\n'
lines = ''
for line in line_list:
findPtr = line.find('->')
findEq = line.find('=')
if findPtr > 0:
has_table = True
name = line[:findPtr].lstrip().rstrip()
desc = line[findPtr + 2:].lstrip().rstrip()
table += '|`{}`|->|{}|\n'.format(name, desc)
elif findEq > 0:
has_table = True
name = line[:findEq].lstrip().rstrip()
            desc = line[findEq + 1:].lstrip().rstrip()
table += '|`{}`|=|{}|\n'.format(name, desc)
elif line != '':
has_lines = True
lines += '* {}\n'.format(line)
if (not has_table) and (not has_lines):
f.write('None\n\n')
elif has_table:
f.write('{}\n'.format(table))
elif has_lines:
f.write('{}\n'.format(lines))
def callgraph(f, code_map, mod_filter, func_filter, mark_mod=None, mark_func=None):
f.write('digraph Callgraph {\n')
for mod_name, mod in code_map.mods_sorted(mod_filter):
f.write('\tsubgraph cluster_{} {{\n'.format(mod_name))
f.write('\t\tnode [style=filled, color=white];\n')
f.write('\t\tstyle = filled;\n')
if mod_name == mark_mod:
f.write('\t\tcolor = lightblue;\n')
else:
f.write('\t\tcolor = grey;\n')
f.write('\t\tlabel = "Module {}";\n'.format(mod_name))
for func_name in mod.functions:
func = code_map.funcs[func_name]
if func_filter((func_name, func)):
f.write('\t\t{};\n'.format(func_name))
f.write('\t}\n')
for func_name, func in code_map.funcs_sorted(func_filter):
if func_name == mark_func:
f.write('{} [color=lightblue];\n'.format(func_name))
elif func.private:
f.write('{} [color=lightgrey];\n'.format(func_name))
for callee_name in func.calls:
callee = code_map.funcs[callee_name]
if not func_filter((callee_name, callee)):
continue
f.write('\t{} -> {};\n'.format(func_name, callee_name))
f.write('}\n')
# ------------------------------------------------------------------------------
# Output file builders
def graph_mod_index(f, code_map):
f.write('digraph Callgraph {\n')
for mod_name, mod in code_map.mods_sorted():
for callee_name in mod.calls:
f.write('\t{} -> {};\n'.format(mod_name, callee_name))
f.write('}\n')
def page_mod_index(f, code_map):
f.write('# Module Index\n\n')
f.write('\n\n'.format(mod_index_graph_name()))
for mod_name, mod in code_map.mods_sorted():
mod_link = link(mod_name, mod_page_name(mod_name))
f.write('* {} - {}\n'.format(mod_link, mod.brief))
def graph_func_index(f, code_map):
def all_mods(kv):
return True
def public_funcs(kv):
return not kv[1].private
callgraph(f, code_map, all_mods, public_funcs)
def page_func_index(f, code_map):
f.write('# Function Index\n\n')
f.write('\n\n'.format(func_index_graph_name()))
for mod_name, mod in code_map.mods_sorted():
mod_link = link(mod_name, mod_page_name(mod_name))
f.write('* Module {} - {}:\n'.format(mod_link, mod.brief))
inmod = codemap.inmodule(mod_name)
ispub = codemap.ispublic()
filt = lambda f : inmod(f) and ispub(f)
for func_name, func in code_map.funcs_sorted(filt):
func_link = link(func_name, func_page_name(func_name))
f.write(' * {} - {}\n'.format(func_link, func.brief))
def graph_module(f, code_map, mod_name, mod):
# Filter for functions that are in, are called by, or call a module
def func_touches_mod(kv):
test_name = kv[0]
test = kv[1]
if test_name in mod.functions:
return True
for func_name in mod.functions:
func = code_map.funcs[func_name]
if func_name in test.calls:
return True
if test_name in func.calls:
return True
return False
# Filter for modules that are, are called by, or call a module
def mod_touches_mod(kv):
test_name = kv[0]
test = kv[1]
if test_name == mod_name:
return True
if test_name in mod.calls:
return True
if mod_name in code_map.mods[test_name].calls:
return True
return False
callgraph(f, code_map, mod_touches_mod, func_touches_mod, mark_mod=mod_name)
def page_module(f, code_map, mod_name, mod):
f.write('# Module {}\n\n'.format(mod_name))
f.write('{}\n\n'.format(mod.brief))
f.write('## Detail:\n\n')
f.write('{}\n\n'.format(mod.detail))
f.write('## Callgraph:\n\n')
f.write('\n\n'.format(mod_graph_name(mod_name)))
f.write('## Public Functions:\n\n')
inmod = codemap.inmodule(mod_name)
ispub = codemap.ispublic()
filt = lambda f : inmod(f) and ispub(f)
for func_name, func in code_map.funcs_sorted(filt):
func_link = link(func_name, func_page_name(func_name))
f.write('* {} - {}\n'.format(func_link, func.brief))
f.write('\n')
f.write('## Modules called by {}\n\n'.format(mod_name))
filt = codemap.iscalledby(mod)
for callee_name, callee in code_map.mods_sorted(filt):
callee_link = link(callee_name, mod_page_name(callee_name))
f.write('* {} - {}\n'.format(callee_link, callee.brief))
f.write('\n')
f.write('## Modules that call {}\n\n'.format(mod_name))
filt = codemap.calls(mod_name)
for caller_name, caller in code_map.mods_sorted(filt):
caller_link = link(caller_name, mod_page_name(caller_name))
        f.write('* {} - {}\n'.format(caller_link, caller.brief))
f.write('\n')
def graph_function(f, code_map, func_name, func):
# Filter for modules that contain or contain callers/callees of func
def mod_touches_func(kv):
test_name = kv[0]
test = kv[1]
if test_name == func.module:
return True
for f_name in test.functions:
f = code_map.funcs[f_name]
if func_name in f.calls:
return True
if f_name in func.calls:
return True
return False
# Filter for functions that are or are callers/callees of func
def func_touches_func(kv):
test_name = kv[0]
test = kv[1]
if test_name == func_name:
return True
if test_name in func.calls:
return True
if func_name in test.calls:
return True
return False
callgraph(f, code_map, mod_touches_func, func_touches_func, mark_func=func_name)
def page_function(f, code_map, func_name, func):
mod_link = link(func.module, mod_page_name(func.module))
f.write('# Function {}.{}\n\n'.format(mod_link, func_name))
f.write('{}\n\n'.format(func.brief))
f.write('## Detail:\n\n')
f.write('{}\n\n'.format(func.detail))
f.write('## Pass:\n\n')
table_list(f, func.inputs)
f.write('## Return:\n\n')
table_list(f, func.outputs)
f.write('## Side Effects:\n\n')
table_list(f, func.sideeffects)
f.write('## Callgraph:\n\n')
f.write('\n\n'.format(func_graph_name(func_name)))
f.write('## Functions called by {}\n\n'.format(func_name))
filt = codemap.iscalledby(func)
for callee_name, callee in code_map.funcs_sorted(filt):
if callee.private:
f.write('* {} - {}\n'.format(callee_name, callee.brief))
else:
callee_link = link(callee_name, func_page_name(callee_name))
f.write('* {} - {}\n'.format(callee_link, callee.brief))
f.write('\n')
f.write('## Functions that call {}\n\n'.format(func_name))
filt = codemap.calls(func_name)
for caller_name, caller in code_map.funcs_sorted(filt):
if caller.private:
f.write('* {} - {}\n'.format(caller_name, caller.brief))
else:
caller_link = link(caller_name, func_page_name(caller_name))
f.write('* {} - {}\n'.format(caller_link, caller.brief))
f.write('\n')
# ------------------------------------------------------------------------------
# Top-level documentation builder
def build_dox(code_map, output_dir):
f = sys.stdout
# Module index page
filename = os.path.join(output_dir, mod_index_name())
with open(filename, 'w') as f:
page_mod_index(f, code_map)
filename = os.path.join(output_dir, mod_index_graph_name())
with open(filename, 'w') as f:
graph_mod_index(f, code_map)
os.system('dot -Tpng -O {}'.format(filename))
# Function index page
filename = os.path.join(output_dir, func_index_name())
with open(filename, 'w') as f:
page_func_index(f, code_map)
filename = os.path.join(output_dir, func_index_graph_name())
with open(filename, 'w') as f:
graph_func_index(f, code_map)
os.system('dot -Tpng -O {}'.format(filename))
# Module pages
for mod_name, mod in code_map.mods_sorted():
filename = os.path.join(output_dir, mod_page_name(mod_name))
with open(filename, 'w') as f:
page_module(f, code_map, mod_name, mod)
filename = os.path.join(output_dir, mod_graph_name(mod_name))
with open(filename, 'w') as f:
graph_module(f, code_map, mod_name, mod)
os.system('dot -Tpng -O {}'.format(filename))
# Function pages
def ispub(kv):
return not kv[1].private
for func_name, func in code_map.funcs_sorted(ispub):
filename = os.path.join(output_dir, func_page_name(func_name))
with open(filename, 'w') as f:
page_function(f, code_map, func_name, func)
filename = os.path.join(output_dir, func_graph_name(func_name))
with open(filename, 'w') as f:
graph_function(f, code_map, func_name, func)
os.system('dot -Tpng -O {}'.format(filename))
# ------------------------------------------------------------------------------
def test():
pass
def main():
code_map = codemap.CodeMap()
builder = codemap.MapBuilder(code_map)
for token in scanner.scan_project('./src'):
builder.absorb_token(token)
build_dox(code_map, './doc/vos64.wiki')
if __name__ == '__main__':
main()
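
# Note: main() is intended to be run from the repository root -- it scans the sources under
# './src' and writes the generated wiki pages and Graphviz callgraphs into './doc/vos64.wiki',
# and the 'dot' executable must be on PATH for the PNG rendering done via os.system.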
| 2.25 | 2 |
setup.py | ndraeger/rt1 | 0 | 12790240 | # -*- coding: UTF-8 -*-
"""
This file is part of RT1.
(c) 2016- <NAME>
For COPYING and LICENSE details, please refer to the LICENSE file
"""
from setuptools import setup
#from setuptools import find_packages
from rt1 import __version__
setup(name='rt1',
version=__version__,
description='RT1 - bistatic single scattering radiative transfer model',
packages=['rt1'],
package_dir={'rt1': 'rt1'},
include_package_data=False,
author="<NAME>",
author_email='<EMAIL>',
maintainer='<NAME>',
maintainer_email='<EMAIL>',
#~ license='APACHE 2',
url='https://github.com/TUW-GEO/rt1',
long_description=('A module to perform forward-simulation and ' +
'parameter-inversion of incidence-angle dependent ' +
'backscatter observations based on a first-order ' +
'radiative-transfer model describing a rough surface' +
'covered by a homogeneous layer of scattering' +
'media.'),
install_requires=["numpy>=1.16", "sympy>=1.4", "scipy>=1.2",
"pandas>=0.24", "matplotlib>=3.0"],
extras_require={'symengine' : ["symengine>=0.4"]},
keywords=["physics", "radiative transfer"],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Atmospheric Science',
# Pick your license as you wish (should match "license" above)
#~ 'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7'
],
)
| 1.523438 | 2 |
sdk/python/pulumi_ucloud/vpc/vpc.py | AaronFriel/pulumi-ucloud | 4 | 12790241 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VPCArgs', 'VPC']
@pulumi.input_type
class VPCArgs:
def __init__(__self__, *,
cidr_blocks: pulumi.Input[Sequence[pulumi.Input[str]]],
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VPC resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
:param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only support Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or a empty string is filled in, then default tag will be assigned. (Default: `Default`).
"""
pulumi.set(__self__, "cidr_blocks", cidr_blocks)
if name is not None:
pulumi.set(__self__, "name", name)
if remark is not None:
pulumi.set(__self__, "remark", remark)
if tag is not None:
pulumi.set(__self__, "tag", tag)
@property
@pulumi.getter(name="cidrBlocks")
def cidr_blocks(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
The CIDR blocks of VPC.
"""
return pulumi.get(self, "cidr_blocks")
@cidr_blocks.setter
def cidr_blocks(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "cidr_blocks", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def remark(self) -> Optional[pulumi.Input[str]]:
"""
The remarks of the VPC. (Default: `""`).
"""
return pulumi.get(self, "remark")
@remark.setter
def remark(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "remark", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
A tag assigned to VPC, which contains at most 63 characters and only support Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or a empty string is filled in, then default tag will be assigned. (Default: `Default`).
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@pulumi.input_type
class _VPCState:
def __init__(__self__, *,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
create_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_infos: Optional[pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering VPC resources.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] create_time: The time of creation for VPC, formatted in RFC3339 time string.
:param pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]] network_infos: It is a nested type which documented below.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
:param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only support Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or a empty string is filled in, then default tag will be assigned. (Default: `Default`).
:param pulumi.Input[str] update_time: The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
if cidr_blocks is not None:
pulumi.set(__self__, "cidr_blocks", cidr_blocks)
if create_time is not None:
pulumi.set(__self__, "create_time", create_time)
if name is not None:
pulumi.set(__self__, "name", name)
if network_infos is not None:
pulumi.set(__self__, "network_infos", network_infos)
if remark is not None:
pulumi.set(__self__, "remark", remark)
if tag is not None:
pulumi.set(__self__, "tag", tag)
if update_time is not None:
pulumi.set(__self__, "update_time", update_time)
@property
@pulumi.getter(name="cidrBlocks")
def cidr_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The CIDR blocks of VPC.
"""
return pulumi.get(self, "cidr_blocks")
@cidr_blocks.setter
def cidr_blocks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "cidr_blocks", value)
@property
@pulumi.getter(name="createTime")
def create_time(self) -> Optional[pulumi.Input[str]]:
"""
The time of creation for VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "create_time")
@create_time.setter
def create_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "create_time", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="networkInfos")
def network_infos(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]]]:
"""
It is a nested type which documented below.
"""
return pulumi.get(self, "network_infos")
@network_infos.setter
def network_infos(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VPCNetworkInfoArgs']]]]):
pulumi.set(self, "network_infos", value)
@property
@pulumi.getter
def remark(self) -> Optional[pulumi.Input[str]]:
"""
The remarks of the VPC. (Default: `""`).
"""
return pulumi.get(self, "remark")
@remark.setter
def remark(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "remark", value)
@property
@pulumi.getter
def tag(self) -> Optional[pulumi.Input[str]]:
"""
A tag assigned to VPC, which contains at most 63 characters and only support Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or a empty string is filled in, then default tag will be assigned. (Default: `Default`).
"""
return pulumi.get(self, "tag")
@tag.setter
def tag(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "tag", value)
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> Optional[pulumi.Input[str]]:
"""
The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "update_time")
@update_time.setter
def update_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "update_time", value)
class VPC(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Provides a VPC resource.
> **Note** The network segment can only be created or deleted, can not perform both of them at the same time.
## Example Usage
```python
import pulumi
import pulumi_ucloud as ucloud
example = ucloud.vpc.VPC("example",
cidr_blocks=["192.168.0.0/16"],
tag="tf-example")
```
## Import
VPC can be imported using the `id`, e.g.
```sh
$ pulumi import ucloud:vpc/vPC:VPC example uvnet-abc123456
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
:param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only support Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or a empty string is filled in, then default tag will be assigned. (Default: `Default`).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VPCArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a VPC resource.
> **Note** The network segment can only be created or deleted, can not perform both of them at the same time.
## Example Usage
```python
import pulumi
import pulumi_ucloud as ucloud
example = ucloud.vpc.VPC("example",
cidr_blocks=["192.168.0.0/16"],
tag="tf-example")
```
## Import
VPC can be imported using the `id`, e.g.
```sh
$ pulumi import ucloud:vpc/vPC:VPC example uvnet-abc123456
```
:param str resource_name: The name of the resource.
:param VPCArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VPCArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VPCArgs.__new__(VPCArgs)
if cidr_blocks is None and not opts.urn:
raise TypeError("Missing required property 'cidr_blocks'")
__props__.__dict__["cidr_blocks"] = cidr_blocks
__props__.__dict__["name"] = name
__props__.__dict__["remark"] = remark
__props__.__dict__["tag"] = tag
__props__.__dict__["create_time"] = None
__props__.__dict__["network_infos"] = None
__props__.__dict__["update_time"] = None
super(VPC, __self__).__init__(
'ucloud:vpc/vPC:VPC',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cidr_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
create_time: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
network_infos: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VPCNetworkInfoArgs']]]]] = None,
remark: Optional[pulumi.Input[str]] = None,
tag: Optional[pulumi.Input[str]] = None,
update_time: Optional[pulumi.Input[str]] = None) -> 'VPC':
"""
Get an existing VPC resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] cidr_blocks: The CIDR blocks of VPC.
:param pulumi.Input[str] create_time: The time of creation for VPC, formatted in RFC3339 time string.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VPCNetworkInfoArgs']]]] network_infos: It is a nested type which documented below.
:param pulumi.Input[str] remark: The remarks of the VPC. (Default: `""`).
:param pulumi.Input[str] tag: A tag assigned to VPC, which contains at most 63 characters and only support Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or a empty string is filled in, then default tag will be assigned. (Default: `Default`).
:param pulumi.Input[str] update_time: The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _VPCState.__new__(_VPCState)
__props__.__dict__["cidr_blocks"] = cidr_blocks
__props__.__dict__["create_time"] = create_time
__props__.__dict__["name"] = name
__props__.__dict__["network_infos"] = network_infos
__props__.__dict__["remark"] = remark
__props__.__dict__["tag"] = tag
__props__.__dict__["update_time"] = update_time
return VPC(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="cidrBlocks")
def cidr_blocks(self) -> pulumi.Output[Sequence[str]]:
"""
The CIDR blocks of VPC.
"""
return pulumi.get(self, "cidr_blocks")
@property
@pulumi.getter(name="createTime")
def create_time(self) -> pulumi.Output[str]:
"""
The time of creation for VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "create_time")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkInfos")
def network_infos(self) -> pulumi.Output[Sequence['outputs.VPCNetworkInfo']]:
"""
It is a nested type which documented below.
"""
return pulumi.get(self, "network_infos")
@property
@pulumi.getter
def remark(self) -> pulumi.Output[str]:
"""
The remarks of the VPC. (Default: `""`).
"""
return pulumi.get(self, "remark")
@property
@pulumi.getter
def tag(self) -> pulumi.Output[Optional[str]]:
"""
A tag assigned to VPC, which contains at most 63 characters and only support Chinese, English, numbers, '-', '_', and '.'. If it is not filled in or a empty string is filled in, then default tag will be assigned. (Default: `Default`).
"""
return pulumi.get(self, "tag")
@property
@pulumi.getter(name="updateTime")
def update_time(self) -> pulumi.Output[str]:
"""
The time whenever there is a change made to VPC, formatted in RFC3339 time string.
"""
return pulumi.get(self, "update_time")
| 2.125 | 2 |
Sparse Representation/compute_PSNR_SSIM.py | ashishpatel26/SuperResolution-Medical-Imaging-using-SRGAN | 1 | 12790242 | <reponame>ashishpatel26/SuperResolution-Medical-Imaging-using-SRGAN
import scipy
import numpy as np
import skimage
import scipy.misc
import skimage.measure
image_list = ['27', '78', '403', '414', '480', '579', '587', '664', '711', '715', '756', '771', '788', '793', '826', '947', '994', '1076', '1097', '1099', '1141', '1197', '1263', '1320', '1389', '1463', '1563']
#image_list = ['27', '78', '403', '414', '480', '579']
gnd_truth_hr_image_path = 'Data/MRI/PaperTestData/HR_gnd/'
generated_hr_image_path = 'Data/MRI/PaperTestData/HR_gen/'
avg_psnr = 0
avg_ssim = 0
for im in image_list:
gnd_truth_hr_img = scipy.misc.imread(gnd_truth_hr_image_path+'valid_hr-id-'+im+'.png', mode='L')
generated_hr_img = scipy.misc.imread(generated_hr_image_path+'valid_hr_gen-id-'+im+'.png', mode='L')
# print out PSNR and SSIM
psnr_i = skimage.measure.compare_psnr(gnd_truth_hr_img, generated_hr_img)
ssim_i = skimage.measure.compare_ssim(gnd_truth_hr_img, generated_hr_img, data_range=generated_hr_img.max() - generated_hr_img.min())
print('PSNR = ' + str(psnr_i) + ', SSIM = ' + str(ssim_i))
avg_psnr += psnr_i
avg_ssim += ssim_i
avg_psnr /= len(image_list)
avg_ssim /= len(image_list)
print('Average PSNR = ' + str(avg_psnr))
print('Average SSIM = ' + str(avg_ssim))
# resize ground truth to (384x384) image
#gnd_truth_hr_img = scipy.misc.imread(gnd_truth_hr_image_path, mode='L')
#gnd_truth_hr_img_resized = scipy.misc.imresize(gnd_truth_hr_img, [384, 384], interp='bicubic', mode='L')
# read generated (384x384) image
#generated_hr_img = scipy.misc.imread(generated_hr_image_path, mode='L')
# print out PSNR
#print(skimage.measure.compare_psnr(gnd_truth_hr_img_resized, generated_hr_img))
# print out SSIM
#print(skimage.measure.compare_ssim(gnd_truth_hr_img_resized, generated_hr_img, data_range=generated_hr_img.max() - generated_hr_img.min()))
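
# Note: scipy.misc.imread/imresize were removed from recent SciPy releases, and
# skimage.measure.compare_psnr / compare_ssim are deprecated in favour of
# skimage.metrics.peak_signal_noise_ratio and skimage.metrics.structural_similarity,
# so this script needs an older scipy/scikit-image stack (or a port to imageio + skimage.metrics).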
| 2.640625 | 3 |
nlutestframework/__main__.py | emundo/nlutestframework | 0 | 12790243 | <gh_stars>0
import argparse
import asyncio
import logging
from signal import SIGINT, SIGTERM
import sys
from typing import Any
import yaml
from .nlu_benchmarker import NLUBenchmarker
def eprint(*args: Any, **kwargs: Any) -> None:
print(*args, file=sys.stderr, **kwargs)
def main() -> None:
parser = argparse.ArgumentParser(description="Benchmark NLU frameworks.")
parser.add_argument(
"-c", "--config-file",
dest = "CONFIG",
type = str,
default = "config.yml",
help = (
"Path to the configuration file."
" Defaults to \"config.yml\" in the current working directory."
"\nWARNING: Always verify externally supplied configurations before using them."
)
)
parser.add_argument(
"-v", "--verbose",
dest = "VERBOSE",
action = "store_const",
const = True,
default = False,
help = "Enable verbose/debug output."
)
parser.add_argument(
"-p", "--python",
dest = "python",
type = str,
help = (
"Path to the python executable to use instead of the current one."
" Overrides the corresponding setting in the configuration file."
"\nWARNING: Always verify externally supplied configurations before using them."
)
)
parser.add_argument(
"-i", "--iterations",
dest = "iterations",
type = int,
help = (
"The number of iterations to benchmark."
" Overrides the corresponding setting in the configuration file."
)
)
parser.add_argument(
"--ignore-cache",
dest = "ignore_cache",
action = "store_const",
const = True,
help = (
"If set, ignore all sorts of cached data."
" Overrides the corresponding setting in the configuration file."
)
)
args = parser.parse_args()
# Set the general log level to DEBUG or INFO
logging.basicConfig(level = logging.DEBUG if args.VERBOSE else logging.INFO)
# Prevent various modules from spamming the log
logging.getLogger("asyncio").setLevel(logging.INFO)
logging.getLogger("docker").setLevel(logging.INFO)
logging.getLogger("matplotlib").setLevel(logging.INFO)
logging.getLogger("snips_nlu").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.INFO)
logging.getLogger("msrest").setLevel(logging.INFO)
global_config_override = {}
for label, value in vars(args).items():
if label.islower() and value is not None:
global_config_override[label] = value
async def main_runner() -> None:
def cancel(_sig: int) -> None:
print(
"Aborting execution in the next possible situation (this may take a while, as the"
" current operation has to finish first)."
)
NLUBenchmarker.getInstance().cancel()
loop = asyncio.get_event_loop()
for sig in (SIGINT, SIGTERM):
loop.add_signal_handler(sig, cancel, sig)
try:
await NLUBenchmarker.getInstance().runFromConfigFile(
args.CONFIG,
**global_config_override
)
except OSError as e:
eprint("Error reading the config file: {}".format(e))
except yaml.YAMLError as e:
eprint("Malformed YAML in the config file: {}".format(e))
asyncio.run(main_runner())
if __name__ == "__main__":
main()
| 2.375 | 2 |
lpot/utils/logger.py | intelkevinputnam/lpot-docs | 172 | 12790244 | <reponame>intelkevinputnam/lpot-docs
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
class Logger(object):
__instance = None
def __new__(cls):
if Logger.__instance is None:
Logger.__instance = object.__new__(cls)
Logger.__instance._log()
return Logger.__instance
def _log(self):
LOGLEVEL = os.environ.get('LOGLEVEL', 'INFO').upper()
self._logger = logging.getLogger()
self._logger.handlers.clear()
self._logger.setLevel(LOGLEVEL)
formatter = logging.Formatter(
'%(asctime)s [%(levelname)s] %(message)s',
"%Y-%m-%d %H:%M:%S")
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
self._logger.addHandler(streamHandler)
self._logger.propagate = False
def get_logger(self):
return self._logger
def _pretty_dict(value, indent=0):
prefix = '\n' + ' ' * (indent + 4)
if isinstance(value, dict):
items = [
prefix + repr(key) + ': ' + _pretty_dict(value[key], indent + 4)
for key in value
]
return '{%s}' % (','.join(items) + '\n' + ' ' * indent)
elif isinstance(value, list):
items = [
prefix + _pretty_dict(item, indent + 4)
for item in value
]
return '[%s]' % (','.join(items) + '\n' + ' ' * indent)
elif isinstance(value, tuple):
items = [
prefix + _pretty_dict(item, indent + 4)
for item in value
]
return '(%s)' % (','.join(items) + '\n' + ' ' * indent)
else:
return repr(value)
level = Logger().get_logger().level
DEBUG = logging.DEBUG
def log(level, msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().log(level, line, *args, **kwargs)
else:
Logger().get_logger().log(level, msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().debug(line, *args, **kwargs)
else:
Logger().get_logger().debug(msg, *args, **kwargs)
def error(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().error(line, *args, **kwargs)
else:
Logger().get_logger().error(msg, *args, **kwargs)
def fatal(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().fatal(line, *args, **kwargs)
else:
Logger().get_logger().fatal(msg, *args, **kwargs)
def info(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().info(line, *args, **kwargs)
else:
Logger().get_logger().info(msg, *args, **kwargs)
def warn(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().warning(line, *args, **kwargs)
else:
Logger().get_logger().warning(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
if isinstance(msg, dict):
for _, line in enumerate(_pretty_dict(msg).split('\n')):
Logger().get_logger().warning(line, *args, **kwargs)
else:
Logger().get_logger().warning(msg, *args, **kwargs)
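
# Minimal usage sketch (module-level helpers only; dicts are pretty-printed line by line
# by _pretty_dict, and the log level can be raised via the LOGLEVEL environment variable):
if __name__ == '__main__':
    info("quantization started")
    debug({"tuning": {"strategy": "basic", "accuracy_criterion": 0.01}})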
| 2.328125 | 2 |
models/tl/ex1.py | bhneo/SparsePooling | 0 | 12790245 | import os
import sys
sys.path.append(os.getcwd())
import tensorflow as tf
tf.get_logger().setLevel('ERROR')
from tensorflow import keras
from common.inputs.voc2010 import voc_parts
from common import layers, losses, utils, train, attacks
from common.ops.routing import activated_entropy, coupling_entropy
import numpy as np
import config
WEIGHT_DECAY = 1e-4
kernel_regularizer = keras.regularizers.l2(WEIGHT_DECAY)
kernel_initializer = keras.initializers.he_normal()
BASE_NAME = 'ex4_3'
def build_model_name(params):
model_name = BASE_NAME
model_name += '_{}'.format(params.model.backbone)
model_name += '_fine{}'.format(params.model.fine)
model_name += '_part{}'.format(params.caps.parts)
model_name += '_{}'.format(params.routing.type)
if params.routing.type == 'DR' or params.routing.type == 'EM':
model_name += '_iter{}'.format(params.routing.iter_num)
model_name += '_temper{}'.format(params.routing.temper)
model_name += '_atoms{}'.format(params.caps.atoms)
model_name += '_trial{}'.format(str(params.training.idx))
model_name += '_bs{}'.format(str(params.training.batch_size))
if params.dataset.flip:
model_name += '_flip'
if params.dataset.crop:
model_name += '_crop'
return model_name
def get_loss_opt(type):
optimizer = keras.optimizers.Adam(0.0001)
if type == 'DR' or type == 'EM':
loss = losses.MarginLoss(sparse=False, upper_margin=0.9, bottom_margin=0.1, down_weight=0.5)
else:
loss = keras.losses.CategoricalCrossentropy(from_logits=True)
return loss, optimizer
def build_model(num_out, params):
model_name = build_model_name(params)
inputs, probs, tensor_log = build(num_out,
params.model.backbone,
params.model.fine,
params.routing.type,
params.routing.iter_num,
params.routing.temper,
params.caps.parts,
params.caps.atoms
)
model = keras.Model(inputs=inputs, outputs=probs, name=model_name)
log_model = keras.Model(inputs=inputs, outputs=tensor_log.get_outputs(), name=model_name + '_log')
tensor_log.set_model(log_model)
loss, optimizer = get_loss_opt(params.routing.type)
model.compile(optimizer=optimizer,
loss=loss,
metrics=[])
model.summary()
model.callbacks = []
return model, tensor_log
def build(num_out, backbone, fine, routing, iter_num, temper, parts, atoms):
log = utils.TensorLog()
if backbone == 'VGG16':
in_shape = (224, 224, 3)
base = keras.applications.VGG16(include_top=False, input_shape=in_shape)
elif backbone == 'VGG19':
in_shape = (224, 224, 3)
base = keras.applications.VGG19(include_top=False, input_shape=in_shape)
elif backbone == 'InceptionV3':
in_shape = (299, 299, 3)
base = keras.applications.InceptionV3(include_top=False, input_shape=in_shape)
elif backbone == 'ResNet50':
in_shape = (224, 224, 3)
base = keras.applications.ResNet50(include_top=False, input_shape=in_shape)
else:
in_shape = (299, 299, 3)
base = keras.applications.InceptionV3(include_top=False, input_shape=in_shape)
layer_num = len(base.layers)
for i, layer in enumerate(base.layers):
if i < layer_num-fine:
layer.trainable = False
else:
for w in layer.weights:
if 'kernel' in w.name:
r = kernel_regularizer(w)
layer.add_loss(lambda: r)
inputs = keras.Input(in_shape)
features = base(inputs)
interpretable = keras.layers.Conv2D(filters=parts,
kernel_size=1,
activation='relu',
kernel_initializer=kernel_initializer,
kernel_regularizer=kernel_regularizer)(features)
shape = interpretable.get_shape().as_list()
if routing == 'avg':
pool = keras.layers.GlobalAveragePooling2D()(interpretable)
output = keras.layers.Dense(num_out)(pool)
elif routing == 'max':
pool = keras.layers.GlobalMaxPooling2D()(interpretable)
output = keras.layers.Dense(num_out)(pool)
elif routing == 'DR':
child_pose, child_prob = layers.CapsuleGroups(height=shape[1], width=shape[2], channel=shape[3],
atoms=16,
method='channel',
activation='squash')(interpretable)
log.add_hist('child_activation', child_prob)
transformed_caps = layers.CapsuleTransformDense(num_out=num_out,
out_atom=atoms,
share_weights=False,
initializer=keras.initializers.glorot_normal(),
regularizer=kernel_regularizer)(child_pose)
parent_poses, parent_probs, cs = layers.DynamicRouting(num_routing=iter_num,
softmax_in=False,
temper=temper,
activation='squash',
pooling=False,
log=log)((transformed_caps, child_prob))
log.add_hist('parent_activation', parent_probs[-1])
output = parent_probs[-1]
return inputs, output, log
def main():
args, params = config.parse_args()
if params.task == 'train':
params.dataset.name = 'voc2010'
if params.model.backbone == 'InceptionV3':
data_shape = (299, 299, 3)
else:
data_shape = (224, 224, 3)
train_set, test_set, info = voc_parts.build_dataset3(batch_size=params.training.batch_size,
shape=data_shape,
arch=params.model.backbone)
model, tensor_log = build_model(num_out=info.features['label'].num_classes,
params=params)
trainer = train.Trainer(model, params, info, tensor_log, finetune=True, inference_label=False, max_save=1)
trainer.metrics['accuracy'] = tf.keras.metrics.CategoricalAccuracy(name='accuracy')
if args.train:
trainer.fit(train_set, test_set)
else:
trainer.evaluate(test_set)
elif params.task == 'attack':
do_adv(os.getcwd())
elif params.task == 'score':
compute_entropies(os.getcwd())
def load_ckpt(model, model_dir):
model.compile(optimizer=keras.optimizers.Adam(0.0001),
loss=keras.losses.CategoricalCrossentropy(from_logits=False),
metrics=[])
ckpt = tf.train.Checkpoint(optimizer=model.optimizer, net=model)
manager = tf.train.CheckpointManager(ckpt, model_dir, max_to_keep=3)
ckpt.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
def get_model_dir(backbone, log='log', routing='avg', dataset='voc2010',
iter_num=None, temper=None, atoms=None,
finetune=0, parts=128, bs=32, idx=1):
model_dir = '{}/{}/{}_{}_fine{}_part{}_{}'.format(log, dataset, BASE_NAME, backbone, finetune, parts, routing)
if routing == 'DR' or routing == 'EM':
model_dir += '_iter{}'.format(iter_num)
model_dir += '_temper{}'.format(temper)
model_dir += '_atoms{}'.format(atoms)
model_dir += '_trial{}_bs{}_flip_crop'.format(idx, bs)
if not os.path.exists(model_dir):
raise Exception('model not exist:{}'.format(model_dir))
return model_dir
def load_model(backbone, iter_num, temper, atoms=16,
log='log', routing='DR',
finetune=0, parts=128, bs=128, idx=1):
data_shape = utils.get_shape(backbone)
model_dir = get_model_dir(backbone=backbone,
log=log,
routing=routing,
finetune=finetune,
parts=parts,
bs=bs,
iter_num=iter_num,
temper=temper,
atoms=atoms,
idx=idx)
inputs, probs, log = build(6, backbone, finetune, routing, iter_num, temper, parts, atoms)
model = keras.Model(inputs=inputs, outputs=probs, name='x')
load_ckpt(model, model_dir)
return model, data_shape, model_dir
def evaluate_attack(epsilons, root='', log='log', backbone='InceptionV3', metric='acc', all_target=False,
method='FGSM', steps=10,
finetune=0, routing='DR', black_box=False, iter_num=10, temper=1.0, atoms=16, parts=128, bs=64, idx=1):
model, data_shape, model_dir = load_model(log=root + log,
backbone=backbone,
routing=routing,
iter_num=iter_num,
temper=temper,
atoms=atoms,
parts=parts,
bs=bs,
finetune=finetune,
idx=idx)
if black_box:
print('load black box source model')
model_src, data_shape, model_dir = load_model(log=root + log,
backbone=backbone,
routing=routing,
iter_num=iter_num,
temper=temper,
atoms=atoms,
parts=parts,
bs=bs,
finetune=finetune,
idx=2)
else:
model_src = model
loss, _ = get_loss_opt(routing)
_, test_set, info = voc_parts.build_dataset3(root + 'data', batch_size=32, shape=data_shape)
acc_adv = keras.metrics.CategoricalAccuracy(name='acc_adv')
if metric == 'acc':
results = attacks.evaluate_model_after_attacks(epsilons, acc_adv, test_set, model, loss, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
elif metric == 'success':
if all_target:
categories = [i for i in range(6)]
results = attacks.evaluate_attacks_success_rate_all_target(epsilons, test_set, model, loss, categories, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
else:
results = attacks.evaluate_attacks_success_rate(epsilons, test_set, model, loss, method=method, steps=steps, label_sparse=False, cost=True, model_src=model_src)
return results
def do_adv(root):
epsilons = [0.1, 0.2, 0.3]
tempers = [0.0, 20.0, 40.0, 60.0, 80.0]
parts_list = [128]
all_target = False
black_box = False
methods = ['PGD', 'BIM', 'FGSM']
backbones = ['InceptionV3']
routing = 'DR'
for backbone in backbones:
print('backbone:', backbone)
for parts in parts_list:
print('parts:', parts)
for method in methods:
print('method:', method)
if routing == 'avg' or routing == 'max':
tempers = [-1]
for temper in tempers:
print('temper:', temper)
if all_target:
epsilons = [0.1]
evaluate_attack(epsilons,
root=root,
backbone=backbone,
metric='success',
all_target=all_target,
method=method,
steps=5,
routing=routing,
black_box=black_box,
parts=parts,
iter_num=2,
temper=temper,
atoms=16,
bs=64,
idx=1)
def compute_entropy(root,
backbone='InceptionV3',
iter_num=2,
activated=True,
temper=10.0,
atoms=16,
routing='DR',
finetune=0,
parts=128,
bs=32):
model, data_shape, model_dir = load_model(log=root + 'log',
backbone=backbone,
iter_num=iter_num,
temper=temper,
atoms=atoms,
routing=routing,
finetune=finetune,
parts=parts,
bs=bs)
train_set, test_set, info = voc_parts.build_dataset3(root + 'data', batch_size=32, shape=data_shape)
test_model = keras.Model(model.layers[0].input, [model.layers[3].output, model.layers[5].output])
results = []
for images, labels in test_set:
(child_poses, child_probs), (parent_poses, parent_probs, cs) = test_model(images)
c = cs[-1]
if activated:
entropy = activated_entropy(c, child_probs)
else:
entropy = coupling_entropy(c)
results.append(entropy)
results = np.concatenate(results, 0)
mean = np.mean(results)
std = np.std(results)
print('{:.4}/{:.3}'.format(mean, std))
def compute_entropies(root):
tempers = [0.0, 20.0, 40.0, 60.0, 80.0]
for temper in tempers:
print('temper:{}'.format(temper))
compute_entropy(root,
backbone='InceptionV3',
iter_num=2,
temper=temper,
atoms=16,
routing='DR',
finetune=0,
parts=128,
bs=64)
if __name__ == "__main__":
main()
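
# Note: params.task selects the entry point -- 'train' fits/evaluates the capsule model,
# 'attack' runs the adversarial evaluations in do_adv(), and 'score' computes routing
# entropies via compute_entropies(); the concrete CLI flags come from config.parse_args(),
# which is defined outside this file.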
| 1.890625 | 2 |
code/ml/preprocessing/show_augmented_images.py | lbechberger/LearningPsychologicalSpaces | 6 | 12790246 | # -*- coding: utf-8 -*-
"""
Displays some of the augmented images. Can be used to visually check that augmentation is working fine.
Created on Wed May 8 12:12:26 2019
@author: lbechberger
"""
import argparse, pickle
import matplotlib.pyplot as plt
import tensorflow as tf
parser = argparse.ArgumentParser(description='Visualizing augmented images')
parser.add_argument('input_file', help = 'pickle file containing the augmented images to display')
parser.add_argument('-r', '--rows', type = int, help = 'number of rows', default = 3)
parser.add_argument('-c', '--columns', type = int, help = 'number of columns', default = 4)
args = parser.parse_args()
with open(args.input_file, "rb") as f:
images = pickle.load(f)
# need to convert tensorflow string representation into numbers
tf_image_string = tf.placeholder(tf.string)
decoder = tf.image.decode_jpeg(tf_image_string)
fig = plt.figure(figsize=(16,10))
with tf.Session() as session:
session.run(tf.global_variables_initializer())
for i in range(args.rows * args.columns):
ax = fig.add_subplot(args.rows, args.columns, i+1)
img = session.run(decoder, feed_dict = {tf_image_string : images[i]})
# deal with greyscale images
if img.shape[2] == 1:
img = img.reshape((img.shape[0], img.shape[1]))
ax.imshow(img, cmap = "gray")
else:
ax.imshow(img)
plt.show() | 3.328125 | 3 |
main.py | HelParadox/Nagito-Bot | 0 | 12790247 | <filename>main.py
import discord, keep_alive, os, random, time, re
from discord.ext import commands, tasks
from asyncio import sleep
import urllib.parse
import urllib.request
bot = commands.Bot(command_prefix="n!", case_insensitive=True)
client = discord.Client()
@bot.event
async def on_ready():
print("bot ready!")
@bot.command()
async def hello(ctx):
await ctx.send(f"Hello")
@bot.command()
async def CommandHelp(ctx):
embed = discord.Embed(
title = 'Help',
description = 'Help For Commands!',
colour = 0x33F2FF
)
embed.set_footer(text="")
embed.set_author(name=ctx.message.author.name)
embed.add_field(name='Misc', value='`n!misc` for misc commands')
await ctx.send(embed=embed)
@bot.command()
async def misc(ctx):
embed = discord.Embed(
title = 'Misc',
description = 'Misc Commands',
colour = 0x33F2FF
)
embed.set_footer(text="Misc")
embed.set_author(name=ctx.message.author.name)
    embed.add_field(name='Youtube', value='`n!youtube (video)` this will search youtube for videos')
    await ctx.send(embed=embed)
@bot.command()
async def ping(ctx):
await ctx.send(f"Pong! - {round(bot.latency * 1000)}ms!")
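# The youtube command below scrapes YouTube's public results page with a regex
# and posts the first match; this can break whenever the page markup changes.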
@bot.command()
async def youtube(ctx, *, search):
query_string = urllib.parse.urlencode({
'search_query': search
})
htm_content = urllib.request.urlopen(
'http://www.youtube.com/results?' + query_string
)
search_results = re.findall("href=\"\\/watch\\?v=(.{11})", htm_content.read().decode())
await ctx.send('http://www.youtube.com/watch?v=' + search_results[0])
@bot.command()
async def purge(ctx, amount: int):
await ctx.channel.purge(limit=amount)
@bot.command()
async def kick(ctx, member : discord.Member, *, reason=None):
await member.kick(reason=reason)
@bot.command()
async def ban(ctx, member : discord.Member, *, reason=None):
await member.ban(reason=reason)
@bot.command()
async def neon(ctx):
await ctx.send(f"Join NeonGDPS today! https://discord.gg/YYfyYjJuH6")
@bot.command()
async def embed(ctx):
embed = discord.Embed(
title = 'UselessEmbed',
colour = 0x33F2FF
)
embed.set_footer(text="")
embed.set_author(name=ctx.message.author.name)
embed.add_field(name='Why', value='Did u use this command?')
embed.set_image(url='https://cdn.discordapp.com/avatars/582670290950291476/5723169a306c581dd6d0d9ae41fa6a3c.png?size=1024')
await ctx.send(embed=embed)
@bot.command()
async def dm(ctx,member : discord.Member,*,message= ""):
await member.send(f"{message}")
await ctx.channel.purge(limit=1)
person = await bot.fetch_user(member.id)
await ctx.channel.send(f"The DM to **{person}** was sent!")
# Event handlers must be registered on the running bot before bot.run(), which blocks.
@bot.event
async def on_member_join(member):
    await member.send("Welcome To The discord!")

keep_alive.keep_alive()
token = os.environ.get("token")
bot.run(token, bot=True, reconnect=True)
| 2.8125 | 3 |
p100-109/p105.py | kbrose/project_euler | 1 | 12790248 | from itertools import combinations
f = open("p105_text.txt")
fr = f.readline()
sets = []
while fr:
fs = (fr.strip()).split()
for i in range(len(fs)):
fs[i] = int(fs[i])
fs.sort()
sets.append(fs)
fr = f.readline()
def test_rest(set, com):
ret_set = list(set)
for num in com:
del(ret_set[ret_set.index(num)])
for m in range(1,len(ret_set)+1):
for s in list(combinations(ret_set, m)):
if sum(s) == sum(com):
return False
return True
# sum(B) != sum(C) for non-empty, disjoint subsets B and C
def test_1(s):
for size in range(1,(len(s) / 2) + 1):
for c in combinations(s, size):
if not test_rest(s,c):
return False
return True
# If B contains more elements than C then S(B) > S(C).
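# Since each set is sorted, it suffices to check the extremes: the (k+1) smallest
# elements must outweigh the k largest, for every k. E.g. for [2, 3, 4] the only
# check is sum([2, 3]) = 5 > sum([4]) = 4.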
def test_2(s):
l = len(s)
left = 2
right = l - 1
while right >= left:
sum_1 = sum(s[:left])
sum_2 = sum(s[right:])
if not sum_1 > sum_2:
return False
left += 1
right -= 1
return True
spec_sum_sets_sum = 0
for i in range(len(sets)):
if test_2(sets[i]) and test_1(sets[i]): # reverse order for speed
# print i, '\t', sets[i]
spec_sum_sets_sum += sum(sets[i])
# for set in sets:
# if test_2(set) and test_1(set):
# spec_sum_sets_sum += sum(set)
print spec_sum_sets_sum
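# Illustrative sanity check: [2, 3, 4] is a special sum set, while [1, 2, 3]
# is not, since sum([1, 2]) == sum([3]).
assert test_1([2, 3, 4]) and test_2([2, 3, 4])
assert not test_1([1, 2, 3])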
| 3 | 3 |
humanlikehearing/library/__init__.py | neural-reckoning/HumanlikeHearing | 6 | 12790249 | <reponame>neural-reckoning/HumanlikeHearing
from . import a_weighting
from . import audio_format
from . import speech_voltmeter_svp56
from . import voice_activity_detection | 1.015625 | 1 |
wk_camera_pylon.py | WAKU-TAKE-A/pypylon_sample001 | 1 | 12790250 | <filename>wk_camera_pylon.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import time
from pypylon import pylon
import cv2
class CameraPylon:
def __init__(self, id=0, exposure_us=30000, gain=0.0):
"""
Init
* When exposure_us is zero, auto exposure is enabled.
        * When gain is -1, auto gain is enabled.
"""
self.factory = pylon.TlFactory.GetInstance()
        # The DeviceInfo list gives the number of connected cameras, camera names, etc.
self.devices = self.factory.EnumerateDevices()
if len(self.devices) == 0:
raise Exception('no camera present.')
        # Create an InstantCamera instance to control the camera.
self.camera = pylon.InstantCamera(self.factory.CreateDevice(self.devices[id]))
# The camera settings cannot be changed without opening.
self.camera.Open()
if exposure_us == 0:
self.camera.ExposureAuto.SetValue('Continuous')
else:
self.camera.ExposureAuto.SetValue('Off')
self.camera.ExposureTime.SetValue(exposure_us)
if gain == -1.0:
self.camera.GainAuto.SetValue('Continuous')
else:
self.camera.GainAuto.SetValue('Off')
self.camera.Gain.SetValue(gain)
print('--------------------')
print('CameraPylon.__init__')
print('--------------------')
print('Name = {0}'.format(self.devices[id].GetFriendlyName()))
print('Width = {0}'.format(self.camera.Width.GetValue()))
print('Height = {0}'.format(self.camera.Height.GetValue()))
print('ExposureAuto = {0}'.format(self.camera.ExposureAuto.GetValue()))
print('ExposureTime = {0}[us]'.format(self.camera.ExposureTime.GetValue()))
print('GainAuto = {0}'.format(self.camera.GainAuto.GetValue()))
print('Gain = {0}'.format(self.camera.Gain.GetValue()))
self.camera.Close()
# Set ImageFormatConverter.
self.converter_bgr = pylon.ImageFormatConverter()
self.converter_bgr.OutputPixelFormat = pylon.PixelType_BGR8packed
self.converter_bgr.OutputBitAlignment = 'MsbAligned'
# Set display magnification.
self.disp_mag = 50
def open(self):
"""
Open.
"""
self.camera.Open()
def close(self):
"""
Close.
"""
self.camera.Close()
def setExposureTime(self, exposure_us=10000, en_print=True):
"""
Set exposure time.
* When exposure_us is zero, auto exposure is enabled.
* When en_print is True, display the set value.
"""
if not self.camera.IsOpen():
raise Exception('camera is not open.')
if exposure_us == 0:
self.camera.ExposureAuto.SetValue('Continuous')
else:
self.camera.ExposureAuto.SetValue('Off')
self.camera.ExposureTime.SetValue(exposure_us)
if en_print:
print('ExposureAuto = {0}'.format(self.camera.ExposureAuto.GetValue()))
print('ExposureTime = {0}[us]'.format(self.camera.ExposureTime.GetValue()))
def setGain(self, gain=0.0, en_print=True):
"""
Set gain.
* When gain is -1, auto gain is enabled.
* When en_print is True, display the set value.
"""
if not self.camera.IsOpen():
raise Exception('camera is not open.')
if gain == -1.0:
self.camera.GainAuto.SetValue('Continuous')
else:
self.camera.GainAuto.SetValue('Off')
self.camera.Gain.SetValue(gain)
if en_print:
print('GainAuto = {0}'.format(self.camera.GainAuto.GetValue()))
print('Gain = {0}[dB]'.format(self.camera.Gain.GetValue()))
def grab(self, timeout=1000, en_print=True):
"""
Grab.
* Run StartGrabbing and StopGrabbing each time.
        * All frames are converted to 24-bit BGR.
        * When en_print is True, print the grab time.
"""
if not self.camera.IsOpen():
raise Exception('camera is not open.')
t_start = time.time()
self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
grabResult = self.camera.RetrieveResult(timeout, pylon.TimeoutHandling_ThrowException)
rslt_conv = self.converter_bgr.Convert(grabResult)
grabResult.Release()
self.camera.StopGrabbing()
proc_time = time.time() - t_start
if en_print:
print('grab time : {0} ms'.format(proc_time))
return rslt_conv.GetArray()
def view(self, delay=1):
"""
View.
* Close with ESC.
"""
if not self.camera.IsOpen():
raise Exception('camera is not open.')
k = 0
while k != 27:
img = self.grab(en_print=False)
w = int(self.camera.Width.GetValue() * self.disp_mag / 100)
h = int(self.camera.Height.GetValue() * self.disp_mag / 100)
img_resize = cv2.resize(img, (w, h))
cv2.imshow("img", img_resize)
exp_cur = int(self.camera.ExposureTime.GetValue() / 1000)
exp_max = int(self.camera.AutoExposureTimeUpperLimit.GetValue() / 1000)
gain_cur = int(self.camera.Gain.GetValue())
gain_max = int(self.camera.AutoGainUpperLimit.GetValue())
mag_cur = int(self.disp_mag)
mag_max = int(200)
cv2.createTrackbar("Exp[ms]", "img", exp_cur, exp_max, self._changeExposure)
cv2.createTrackbar("Gain[dB]", "img", gain_cur, gain_max, self._changeGain)
cv2.createTrackbar("Mag[%]", "img", mag_cur, mag_max, self._changeMag)
k = cv2.waitKey(delay)
cv2.destroyAllWindows()
def _changeExposure(self, val):
exp = cv2.getTrackbarPos("Exp[ms]", "img")
self.setExposureTime(exp * 1000, False)
def _changeGain(self, val):
gain = cv2.getTrackbarPos("Gain[dB]", "img")
self.setGain(gain, False)
def _changeMag(self, val):
mag = cv2.getTrackbarPos("Mag[%]", "img")
self.disp_mag = int(mag)
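
# Minimal usage sketch (assumes a single Basler camera at index 0; the exposure
# and gain values below are placeholders, adjust to your setup).
if __name__ == '__main__':
    cam = CameraPylon(id=0, exposure_us=20000, gain=0.0)
    cam.open()
    frame = cam.grab()               # one BGR frame as a numpy array
    cv2.imwrite('capture.png', frame)
    cam.view()                       # live view with trackbars; press ESC to quit
    cam.close()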
| 2.859375 | 3 |