id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses 1 value)
---|---|---|
8180100
|
<filename>examples/example_1_bike_without_classes.py<gh_stars>0
def update_sale_price(bike, sale_price):
if bike['sold'] is True:
raise Exception("Action not allowed. Bike has already been sold")
bike['sale_price'] = sale_price
def create_bike(description, cost, sale_price, condition):
return {
'description': description,
'cost': cost,
'sale_price': sale_price,
'condition': condition,
'sold': False,
}
def sell(bike, sold_for=None):
if sold_for:
update_sale_price(bike, sold_for)
bike['sold'] = True
profit = bike['sale_price'] - bike['cost']
return profit
bike1 = create_bike('Univega Alpina, orange', cost=100, sale_price=500, condition=0.5)
# bike1 = {
# 'cost': 100,
# 'condition': 0.5,
# 'description': 'Univega Alpina, orange',
# 'sale_price': 500,
# 'sold': False,
# }
update_sale_price(bike1, 350)
# bike1['sale_price'] = 350.00
print(sell(bike1))
# bike1['sold'] = True
|
StarcoderdataPython
|
5174168
|
import i18n
import nmap
import os
import yaml
class HelperNmap:
def __init__(self,args):
self.args = args
self.net = ""
self.arguments = ""  # nmap arguments loaded from the YAML template
def process(self):
if self.__validateParams():
print i18n.t("help.running_scan")
nm = nmap.PortScanner()
nm.scan(hosts=str(self.net), arguments=str(self.arguments))
for host in nm.all_hosts():
print('----------------------------------------------------')
print('Host : %s (%s)' % (host, nm[host].hostname()))
print('State : %s' % nm[host].state())
for proto in nm[host].all_protocols():
print('----------')
print('Protocol : %s' % proto)
lport = sorted(nm[host][proto].keys())
for port in lport:
if nm[host][proto][port]['state'] == 'open':
print ('port : %s\tstate : %s %s %s ' % (port, nm[host][proto][port]['state'], nm[host][proto][port]['product'], nm[host][proto][port]['version']))
if 'script' in nm[host][proto][port].keys():
for k,v in nm[host][proto][port]['script'].items():
print(k + " => " + v)
else:
pass
else:
pass
def __validateParams(self):
argsdic = {}
if self.args.find('net:') != -1 or self.args.find('template:') != -1:
if len(self.args.split(":")) == 3:
argsdic.update({
self.args.split(":")[0]:self.args.split(":")[1].split(" ")[0],
self.args.split(":")[1].split(" ")[1]:self.args.split(":")[2].split(" ")[0]
})
elif len(self.args.split(":")) == 2:
argsdic.update({
self.args.split(":")[0]:self.args.split(":")[1].split(" ")[0]
})
else:
pass
else:
print i18n.t("help.help_run_error")
return self.__setParams(**argsdic)
#private function to set params
def __setParams(self,**kwargs):
if kwargs is not None:
if "net" in kwargs and "template" in kwargs:
net = kwargs["net"]
arguments = kwargs["template"]
self.net = net
stream = open(os.getcwd()+"/templates/"+arguments+".yaml","r")
item = yaml.safe_load(stream)
values =item[arguments]["arguments"]
self.arguments = values
return True
else:
return False
else:
return False
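# A hedged usage sketch (not part of the original module): the host range and
# template name below are hypothetical, and running this requires nmap plus a
# matching templates/<name>.yaml file in the working directory.
# scanner = HelperNmap("net:192.168.1.0/24 template:default")
# scanner.process()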
|
StarcoderdataPython
|
277995
|
import sqlalchemy
from sqlalchemy.orm import declarative_base
engine = sqlalchemy.create_engine('sqlite:///:memory:', echo=True)
Base = declarative_base()
class Specialization(Base):
__tablename__ = "specialization"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
name = sqlalchemy.Column(sqlalchemy.String)
class Psychotherapist(Base):
__tablename__ = "psychotherapists"
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
first_name = sqlalchemy.Column(sqlalchemy.String)
last_name = sqlalchemy.Column(sqlalchemy.String)
address = sqlalchemy.Column(sqlalchemy.String, nullable=True)
postal_code = sqlalchemy.Column(sqlalchemy.String, nullable=True)
town = sqlalchemy.Column(sqlalchemy.String, nullable=True)
fee_in_eur = sqlalchemy.Column(sqlalchemy.Integer, nullable=True)
specialization = sqlalchemy.Column(sqlalchemy.Integer,
sqlalchemy.ForeignKey('specialization.id'),
nullable=True)
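# A minimal usage sketch (an editorial assumption, valid for SQLAlchemy 1.4+, not
# part of the original sample): create the schema on the in-memory engine and add one row.
from sqlalchemy.orm import Session
Base.metadata.create_all(engine)
session = Session(engine)
session.add(Specialization(name="behavioural therapy"))
session.commit()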
|
StarcoderdataPython
|
3255254
|
<gh_stars>0
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/user', methods=['GET'])
def signin_form():
return 'user: csh'
def registe():
print('user registe')
app.run()
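# registe() is defined but never routed, so only GET /user is served by this app.
# A hypothetical registration route (route and names are assumptions) might look like:
# @app.route('/register', methods=['POST'])
# def register():
#     return 'user registered'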
|
StarcoderdataPython
|
4817085
|
<filename>izi_grpc/servicer.py
from types import FunctionType
from functools import wraps
from izi_grpc import current_app
def wrap_handler(handler):
h = handler
for m in current_app.middlewares:
h = m(current_app, h, handler)
@wraps(handler)
def wrapped(self, request, context):
return h(self, request, context)
return wrapped
class ServicerMeta(type):
def __new__(cls, name, bases, kws):
_kws = {}
for k, v in kws.items():
if isinstance(v, FunctionType):
v = wrap_handler(v)
_kws[k] = v
return super().__new__(cls, name, bases, _kws)
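# A hedged usage sketch (the gRPC stub and method names below are assumptions):
# any servicer class built with ServicerMeta gets every handler wrapped by the
# application's middleware chain.
# class GreeterServicer(greeter_pb2_grpc.GreeterServicer, metaclass=ServicerMeta):
#     def SayHello(self, request, context):
#         return greeter_pb2.HelloReply(message="hello")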
|
StarcoderdataPython
|
305772
|
<reponame>LaudateCorpus1/streetlearn
# Copyright 2018 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The StreetLearn RL environment.
Episodes take place either in a mini-map created by performing a breadth-first
traversal of the StreetView graph starting from a starting location, or in
the entire fully-connected graph.
Observations:
{
view_image: numpy array of dimension [3, height, width] containing the
street imagery.
graph_image: numpy array of dimension [3, graph_height, graph_width]
containing the map graph image.
view_image_hwc: numpy array of dimension [height, width, 3] containing
the street imagery.
graph_image_hwc: numpy array of dimension [graph_height, graph_width, 3]
containing the map graph images.
metadata: learning_deepmind.datasets.street_learn.Pano proto
without compressed_image.
target_metadata: learning_deepmind.datasets.street_learn.Pano proto
without compressed_image.
}
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
import inflection
import numpy as np
import six
import time
from streetlearn.engine.python import color
from streetlearn.engine.python import streetlearn_engine
from streetlearn.python.environment import default_config
from streetlearn.python.environment import observations
_MIN_ZOOM = 1
_MAX_ZOOM = 32
def _action(*entries):
return np.array(entries, dtype=float)
ACTIONS = {
'move_forward': _action(1, 0.0, 0.0, 0.0),
'horizontal_rotation': _action(0, 1.0, 0.0, 0.0),
'vertical_rotation': _action(0, 0.0, 1.0, 0.0),
'map_zoom': _action(0, 0.0, 0.0, 1.0),
}
NUM_ACTIONS = 4
ACTION_SETS = {
"streetlearn_default": lambda rotation_speed: (
ACTIONS["move_forward"],
ACTIONS["horizontal_rotation"] * (-rotation_speed),
ACTIONS["horizontal_rotation"] * rotation_speed),
"streetlearn_fast_rotate": lambda rotation_speed: (
ACTIONS["move_forward"],
ACTIONS["horizontal_rotation"] * (-rotation_speed),
ACTIONS["horizontal_rotation"] * (-rotation_speed * 3),
ACTIONS["horizontal_rotation"] * rotation_speed,
ACTIONS["horizontal_rotation"] * rotation_speed * 3),
"streetlearn_tilt": lambda rotation_speed: (
ACTIONS["move_forward"],
ACTIONS["horizontal_rotation"] * (-rotation_speed),
ACTIONS["horizontal_rotation"] * rotation_speed,
ACTIONS["vertical_rotation"] * rotation_speed,
ACTIONS["vertical_rotation"] * (-rotation_speed)),
}
def get_action_set(action_spec, rotation_speed):
"""Returns the set of StreetLearn actions for the given action_spec."""
# If action_spec is a string, it should be the name of a standard action set.
if isinstance(action_spec, six.string_types):
if action_spec not in ACTION_SETS:
raise ValueError("Unrecognized action specification %s." % action_spec)
else:
return np.array(ACTION_SETS[action_spec](rotation_speed), dtype=float)
raise ValueError("Action specification %s not a string." % action_spec)
def _log_dictionary(dictionary):
for k, v in dictionary.items():
v = dictionary[k]
if isinstance(v, (int, float, bool, list, str)):
logging.info(k + ': ' + str(v))
class StreetLearn(object):
"""The Streetlearn environment."""
def __init__(self, dataset_path, config, game, engine=None):
"""Construct the StreetLearn environment.
Args:
dataset_path: filesystem path where the dataset resides.
config: dictionary containing various config settings. Will be extended
with defaults from default_config.DEFAULT_CONFIG.
game: an instance of Game.
engine: an instance of the StreetLearn engine (used when cloning an
environment).
"""
assert game, "Did not provide game."
logging.info('dataset_path:')
logging.info(dataset_path)
logging.info('config:')
_log_dictionary(config)
logging.info('game:')
logging.info(game)
self._config = default_config.ApplyDefaults(config)
self._seed = self._config["seed"]
self._start_pano_id = self._config["start_pano"]
self._zoom = self._config["graph_zoom"]
self._black_on_white = self._config["graph_black_on_white"]
self._frame_cap = self._config["frame_cap"]
self._field_of_view = self._config["field_of_view"]
self._neighbor_resolution = self._config["neighbor_resolution"]
self._sample_graph_depth = self._config["sample_graph_depth"]
self._min_graph_depth = self._config["min_graph_depth"]
self._max_graph_depth = self._config["max_graph_depth"]
self._full_graph = self._config["full_graph"]
self._color_for_observer = color.Color(*self._config["color_for_observer"])
self._action_spec = self._config["action_spec"]
self._rotation_speed = self._config["rotation_speed"]
self._auto_reset = self._config["auto_reset"]
self._action_set = get_action_set(self._action_spec, self._rotation_speed)
logging.info('Action set:')
logging.info(self._action_set)
self._bbox_lat_min = self._config["bbox_lat_min"]
self._bbox_lat_max = self._config["bbox_lat_max"]
self._bbox_lng_min = self._config["bbox_lng_min"]
self._bbox_lng_max = self._config["bbox_lng_max"]
self._game = game
self._current_pano_id = None
self._episode_id = -1
self._frame_count = 0
self._prev_reset = time.time()
if engine:
logging.info("Cloning an existing StreetLearnEngine.")
self._engine = engine.Clone(
width=self._config["width"],
height=self._config["height"],
graph_width=self._config["graph_width"],
graph_height=self._config["graph_height"],
status_height=self._config["status_height"],
field_of_view=self._field_of_view,
min_graph_depth=self._min_graph_depth,
max_graph_depth=self._max_graph_depth)
else:
logging.info("Creating an new StreetLearnEngine.")
self._engine = streetlearn_engine.StreetLearnEngine.Create(
dataset_path,
width=self._config["width"],
height=self._config["height"],
graph_width=self._config["graph_width"],
graph_height=self._config["graph_height"],
status_height=self._config["status_height"],
field_of_view=self._field_of_view,
min_graph_depth=self._min_graph_depth,
max_graph_depth=self._max_graph_depth,
max_cache_size=self._config["max_cache_size"])
assert self._engine, "Could not initialise engine from %r." % dataset_path
self._observations = []
for name in self._config["observations"]:
try:
self._observations.append(observations.Observation.create(name, self))
except ValueError as e:
logging.warning(str(e))
self._reward = 0
self._prev_reward = 0
self._prev_action = self._action_set[0]
self._done = False
self._info = {}
@property
def config(self):
return self._config
@property
def seed(self):
return self._seed
@property
def game(self):
return self._game
@property
def field_of_view(self):
return self._field_of_view
@property
def current_pano_id(self):
return self._current_pano_id
@property
def frame_cap(self):
return self._frame_cap
@frame_cap.setter
def frame_cap(self, value):
self._frame_cap = value
@property
def frame_count(self):
return self._frame_count
@property
def graph(self):
return self._graph
@property
def engine(self):
return self._engine
@property
def neighbor_resolution(self):
return self._neighbor_resolution
@property
def bbox_lat_min(self):
return self._bbox_lat_min
@property
def bbox_lat_max(self):
return self._bbox_lat_max
@property
def bbox_lng_min(self):
return self._bbox_lng_min
@property
def bbox_lng_max(self):
return self._bbox_lng_max
@property
def cache_size(self):
return self._engine.GetNodeCacheSize()
def observation_spec(self):
"""Returns the observation spec, dependent on the observation format."""
return {observation.name: observation.observation_spec
for observation in self._observations}
def action_set(self):
"""Returns the set of actions, mapping integer actions to 1D arrays."""
return self._action_set
def action_spec(self):
"""Returns the action spec."""
return ACTIONS
def reset(self):
"""Start a new episode."""
reset_time = time.time()
logging.info('reset: seed %d, previous episode (%d frames) lasted %f sec',
self._seed, self._frame_count, reset_time - self._prev_reset)
self._prev_reset = reset_time
self._prev_reward = 0
self._prev_action = self._action_set[0]
self._frame_count = 0
self._episode_id += 1
if self._sample_graph_depth:
max_depth = np.random.randint(self._min_graph_depth,
self._max_graph_depth + 1)
self._engine.SetGraphDepth(self._min_graph_depth, max_depth)
self._engine.InitEpisode(self._episode_id, self._seed)
# Build a new graph if we don't have one yet.
if not self._current_pano_id:
if self._full_graph:
self._current_pano_id = self._engine.BuildEntireGraph()
elif self._start_pano_id:
self._current_pano_id = self._engine.BuildGraphWithRoot(
self._start_pano_id)
else:
self._current_pano_id = self._engine.BuildRandomGraph()
logging.info('seed %d: built new graph with root %s',
self._seed, self._current_pano_id)
# else respawn in current graph.
elif not self._start_pano_id:
self._current_pano_id = np.random.choice(
list(self._engine.GetGraph().keys()))
self._engine.SetPosition(self._current_pano_id)
logging.info('seed %d: reusing existing graph and respawning at %s',
self._seed, self._current_pano_id)
self._graph = self._engine.GetGraph()
highlighted_panos = self._game.on_reset(self)
self._engine.InitGraphRenderer(self._color_for_observer, highlighted_panos,
self._black_on_white)
self._engine.SetZoom(_MAX_ZOOM)
def goto(self, pano_id, yaw):
"""Go to a specific pano and yaw in the environment.
Args:
pano_id: a string containing the ID of a pano.
yaw: a float with relative yaw w.r.t. north.
Returns:
observation: tuple with observations.
"""
current_pano_id = self._engine.SetPosition(pano_id)
assert pano_id == current_pano_id
yaw = (yaw + 180) % 360 - 180
self._engine.RotateObserver(yaw, 0.0)
assert yaw == self._engine.GetYaw()
return self.observation()
def step(self, action):
"""Takes a step in the environment.
Args:
action: a 1d array containing a combination of actions.
Returns:
observation: tuple with observations for the last time step.
reward: scalar reward at the last time step.
done: boolean indicating the end of an episode.
info: dictionary with additional debug information.
"""
self._frame_count += 1
if not isinstance(action, np.ndarray):
action = np.array(action, dtype=float)
self._prev_action = action
assert action.size == NUM_ACTIONS, "Wrong number of actions."
move_forward = np.dot(action, ACTIONS['move_forward'])
horizontal_rotation = np.dot(action, ACTIONS['horizontal_rotation'])
vertical_rotation = np.dot(action, ACTIONS['vertical_rotation'])
map_zoom = np.dot(action, ACTIONS['map_zoom'])
if move_forward:
self._current_pano_id = self._engine.MoveToNextPano()
if map_zoom > 0:
self._zoom = min(self._zoom * 2, _MAX_ZOOM)
elif map_zoom < 0:
self._zoom = max(self._zoom / 2, _MIN_ZOOM)
if horizontal_rotation or vertical_rotation:
self._engine.RotateObserver(horizontal_rotation, vertical_rotation)
self._engine.SetZoom(self._zoom)
self._game.on_step(self)
# Update the reward and done flag. Because we do not know the code logic
# inside each game, it is safer to obtain these immediately after step(),
# and store them for subsequent calls to reward(), done() and info().
self._prev_reward = self._reward
self._reward = self._game.get_reward(self)
self._done = (self._frame_count > self._frame_cap) or self._game.done()
self._info = self._game.get_info(self)
if self._auto_reset and self._done:
self.reset()
# Return
return self.observation(), self.reward(), self.done(), self.info()
def observation(self):
"""Returns the observations for the last time step."""
return {item.name: item.observation for item in self._observations}
def reward(self):
"""Returns the reward for the last time step."""
return self._reward
def done(self):
"""Return a flag indicating the end of the current episode."""
return self._done
def info(self):
"""Return a dictionary with environment information at the current step."""
return self._info
def prev_reward(self):
"""Returns the reward for the previous time step."""
return self._prev_reward
def prev_action(self):
"""Returns the action for the previous time step."""
return self._prev_action
def get_metadata(self, pano_id):
"""Return the metadata corresponding to the selected pano.
Args:
pano_id: a string containing the ID of a pano.
Returns:
metadata: a protocol buffer with the pano metadata.
"""
if hasattr(self, '_graph') and pano_id in self.graph:
return self._engine.GetMetadata(pano_id)
else:
return None
def render(self):
"""Empty function, for compatibility with OpenAI Gym."""
pass
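# A hedged usage sketch (dataset path, config values, and the game object are
# assumptions; see default_config.DEFAULT_CONFIG for the full set of options):
# config = {"width": 84, "height": 84, "observations": ["view_image"]}
# env = StreetLearn("/path/to/dataset", config, game)
# env.reset()
# observation, reward, done, info = env.step(env.action_set()[0])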
|
StarcoderdataPython
|
6445698
|
import pelib
print(sum(filter(lambda x: (x%2 == 0), pelib.fib(1,2,4000000))))
|
StarcoderdataPython
|
12816185
|
<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Create template graphs png of single graphs.
Usage:
stampagraficisingle.py [<template>]... [options]
Options:
-a DIR Print the entire DIR directory.
-h --help Show this screen.
-l LIST Create graphs from a list.
-r Remove old graphs.
"""
import csv, re, pdb, ast, time, os
from docopt import docopt
import datetime
import matplotlib.pyplot as plot
import numpy as np
import matplotlib.dates as mdates
import tarfile
init = datetime.date(2001, 1, 1)
def singlegraph(inputtemplate):
coord = csv.reader(open(inputtemplate, "r"))
directory = inputtemplate.rsplit("/", 1)[0]
filename = inputtemplate.split("/")[-1]
filename = filename.replace(".csv", "")
value = 0
x = []
y = []
for c in coord:
date = init + datetime.timedelta(days=int(c[0]))
x.append(date)
y.append(c[1])
fig = plot.figure()
ax = fig.add_subplot(111)
years = mdates.YearLocator()
months = mdates.MonthLocator()
yearsFmt = mdates.DateFormatter('%Y')
monthsFmt = mdates.DateFormatter('%m')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_minor_formatter(monthsFmt)
#second_axes = plot.twinx()
#second_axes.set_yticks([int(y[-1])])
ax.grid(which='minor', alpha=0.2)
ax.grid(which='major', alpha=0.5)
plot.xlabel('Time')
plot.ylabel(filename + " Occurrences")
plot.plot(x, y)
if not os.path.exists("png"):
os.makedirs("png")
print("Creating " + inputtemplate + ".png...")
plot.savefig("png/" + filename + ".png")
plot.close()
def categorygraph(inputtemplate, category):
coord = csv.reader(open(inputtemplate, "r"))
directory = inputtemplate.rsplit("/", 1)[0]
filename = inputtemplate.split("/")[-1]
filename = filename.replace(".csv", "")
value = 0
x = []
y = []
for c in coord:
date = init + datetime.timedelta(days=int(c[0]))
x.append(date)
y.append(c[1])
fig = plot.figure()
ax = fig.add_subplot(111)
years = mdates.YearLocator()
months = mdates.MonthLocator()
yearsFmt = mdates.DateFormatter('%Y')
monthsFmt = mdates.DateFormatter('%m')
ax.xaxis.set_major_locator(years)
ax.xaxis.set_major_formatter(yearsFmt)
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_minor_formatter(monthsFmt)
#second_axes = plot.twinx()
#second_axes.set_yticks([int(y[-1])])
ax.grid(which='minor', alpha=0.2)
ax.grid(which='major', alpha=0.5)
plot.xlabel('Time')
plot.ylabel(filename + " Occurrences")
plot.plot(x, y)
if not os.path.exists("png"):
os.makedirs("png")
if not os.path.exists("png/" + category):
os.makedirs("png/" + category)
print("Creating " + inputtemplate + ".png...")
plot.savefig("png/" + category + "/" + filename + ".png")
plot.close()
if __name__ == "__main__":
arguments = docopt(__doc__)
i = []
if arguments['-r']:
print("Removing old .png...")
if os.path.exists("png"):
for f in os.listdir("png/"):
if f.endswith(".png"):
os.remove("png/" + f)
if arguments['-l']:
r = open(arguments['-l'])
l = r.read().splitlines()
for line in l:
i.append(line)
if arguments['-a']:
for f in os.listdir(arguments['-a']):
if f.endswith(".csv"):
i.append(os.path.join(arguments['-a'], f))
else:
graphs = arguments['<template>']
for g in graphs:
i.append(g)
for graph in i:
singlegraph(graph)
|
StarcoderdataPython
|
9742022
|
#!/bin/python3
# Complete the matrixRotation function below.
def matrixRotation(matrix, r):
height = len(matrix)
width = len(matrix[0])
for i in range(min(height // 2, width // 2)):
state = []
# top-left to top-right
for j in range(i, width - i):
state.append(matrix[i][j])
# top-right to bottom-right
for j in range(i + 1, height - 1 - i):
# in Python, a[len(a) - 1 - i] = a[-1 - i]
state.append(matrix[j][-1 - i])
# bottom-right to bottom-left
for j in range(width - 1 - i, i - 1, -1):
state.append(matrix[-1 - i][j])
# left-bottom to left-top
for j in range(height - 2 - i, i, -1):
state.append(matrix[j][i])
# rotate by R
# no. of nodes
no = 2 * (height - 2 * i) + 2 * (width - (2 * i + 2))
k = r % no
state = state[k:] + state[:k]
# populate A with rotated matrix same as above
flag = 0
for j in range(i, width - i):
matrix[i][j] = state[flag]
flag += 1
for j in range(i + 1, height - 1 - i):
matrix[j][-1 - i] = state[flag]
flag += 1
for j in range(width - 1 - i, i - 1, -1):
matrix[-1 - i][j] = state[flag]
flag += 1
for j in range(height - 2 - i, i, -1):
matrix[j][i] = state[flag]
flag += 1
for row in matrix:
print(*row, end=' ')
print('')
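# A worked example (input values are an assumption): rotating a 4x4 matrix once with
# matrixRotation([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 1)
# prints each ring shifted anticlockwise by one position:
# 2 3 4 8
# 1 7 11 12
# 5 6 10 16
# 9 13 14 15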
if __name__ == '__main__':
mnr = input().rstrip().split()
m = int(mnr[0])
n = int(mnr[1])
r = int(mnr[2])
matrix = []
for _ in range(m):
matrix.append(list(map(int, input().rstrip().split())))
matrixRotation(matrix, r)
|
StarcoderdataPython
|
11234768
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 13 00:48:39 2021
@author: yoonseok
1. Adding a user dictionary to Python Komoran: https://lovit.github.io/nlp/2018/04/06/komoran/
"""
import pandas as pd
import numpy as np
import re
import random
import os
from ckonlpy.tag import Twitter, Postprocessor
from konlpy.tag import Komoran
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, recall_score, f1_score, confusion_matrix, precision_score
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
def tokenizeDocuments(documentColumn, model, stopWords, desc = None):
container = []
for string in tqdm(documentColumn, desc=desc):
string = re.sub('[-=+,#/\?:^$.@*\"“”※~&%ⅰⅱⅲ○●ㆍ°’『!』」\\‘|\(\)\[\]\<\>`\'…》]', '', string) # remove special characters
# string = re.sub('\w*[0-9]\w*', '', string) # remove digits
string = re.sub('\w*[a-zA-Z]\w*', '', string) # remove alphabetic tokens
string = string.strip() # strip leading/trailing whitespace
string = ' '.join(string.split()) # Replace Multiple whitespace into one
tokenList = model(string) # extract morphemes with the morphological analyzer
tokenList = [token for token in tokenList if token not in stopWords] # drop stopwords from the extracted morphemes
container.append(tokenList)
return container
def vectorizeCorpus(corpusArray, model):
"""
Parameters
----------
corpusArray : TYPE
DESCRIPTION.
model : TYPE
DESCRIPTION.
Returns
-------
container : TYPE
DESCRIPTION.
"""
container = []
for i in corpusArray:
vec = model.infer_vector(i)
container.append(vec)
return container
def transformDF2Corpus(df):
df["document"] = df.document.apply(lambda x: x.split())
container = []
for i in df["docID"]:
li = []
li.append(i)
container.append(li)
df["docID"] = container
doc_df = df[['docID','document']].values.tolist()
train_corpus = [TaggedDocument(words=document2, tags=docID) for docID, document2 in doc_df]
return train_corpus
def returnScore(y_test, y_predict, average):
"""
Parameters
----------
y_test : TYPE
DESCRIPTION.
y_predict : TYPE
DESCRIPTION.
average : TYPE
DESCRIPTION.
Returns
-------
total_score : TYPE
DESCRIPTION.
"""
accuracy = accuracy_score(y_test, y_predict)
precision = precision_score(y_test, y_predict, average = average)
recall = recall_score(y_test, y_predict, average = average)
f1 = f1_score(y_test, y_predict, average = average)
print("Accuracy:{:.4f}".format(accuracy))
print("Precision: {:.4f}".format(precision))
print("Recall: {:.4f}".format(recall))
print("F1: {:.4f}".format(f1))
print("Confusion Matrix")
print(confusion_matrix(y_test, y_predict))
total_score = [accuracy, precision, recall, f1]
return total_score
def vectorizeDoc2Vec(df, model="doc2Vec"):
corpusTrain = transformDF2Corpus(df)
vecList = []
for i in corpusTrain:
vec = model.infer_vector(i.words)
vecList.append(vec)
return np.array(vecList)
# Set working directory
os.chdir(r"C:\analytics")
# Tune NLP Tool
tokenizer_tuned = Komoran(userdic='Ngramdictionary.txt')
tokenizer = Komoran()
dictionary = pd.read_csv("dataset6.dictionary.csv", names=["noun"])
# nounList = dictionary.noun.to_list()
# twitter.add_dictionary(nounList, 'Noun')
stopWord = pd.read_csv("dataset7.stopwords.csv", names=["stopword"])
stopwords = stopWord.stopword.to_list()
# postprocessor = Postprocessor(tokenizer.nouns, stopwords = stopwords)
# postprocessor_tuned = Postprocessor(tokenizer_tuned.nouns, stopwords = stopwords)
# import preprocessed dataset
df = pd.read_excel("dataset3.preprocessed(2017-2019).xlsx", sheet_name="data")
# preprocessing
df["token"] = tokenizeDocuments(df["documents"], tokenizer.nouns, stopwords, desc="사용자 사전 미반영")
df["token_tuned"] = tokenizeDocuments(df["documents"], tokenizer_tuned.nouns, stopwords, desc="사용자 사전 반영")
# drop blank cells
# drop_index = df[df['document'] == ''].index
# df = df.drop(drop_index)
# # sample and export training data
# dfLabel = df['document'].sample(n=1000, random_state=1)
# dfLabel.to_excel("dataset4.trainingData.xlsx")
# # Word Embedding - Counter
# countVec = CountVectorizer()
# countVecMatrix = countVec.fit_transform(df["document"])
# # Word Embedding - TF-IDF
# tfidfVec = TfidfVectorizer()
# tfidfVecMatrix = tfidfVec.fit_transform(df["document"])
# # Word Embedding - LDA
# ldaVec = LatentDirichletAllocation(n_components=10, random_state=1)
# ldaVecMatrix = ldaVec.fit_transform(countVecMatrix)
# # Word Embedding - Doc2Vec
# doc2Vec = Doc2Vec()
# dfDoc2Vec = df
# train_corpus = transformDF2Corpus(dfDoc2Vec)
# doc2Vec.build_vocab(train_corpus)
# doc2Vec.train(train_corpus, total_examples=doc2Vec.corpus_count, epochs=doc2Vec.epochs)
# doc2VecMatrix = vectorizeDoc2Vec(df)
# # Set Pipeline - NB
# NB = MultinomialNB()
# Pipeline_NB_TfIdf = Pipeline([
# ('vect', tfidfVec),
# ('clf', NB)
# ])
# Pipeline_NB_LDA = Pipeline([
# ('vect', ldaVec),
# ('clf', NB)
# ])
# Pipeline_NB_Doc2Vec = Pipeline([
# ('vect', doc2Vec),
# ('clf', NB)
# ])
|
StarcoderdataPython
|
6692755
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
'''Dihedral'''
import math
class Dihedral:
'''Dihedral'''
def __init__(self, controller):
self.CONSTANT_Pi = 3.1415926535897932
self.module_name = "dihedral"
self.h_atom_a = []
self.h_atom_b = []
self.h_atom_c = []
self.h_atom_d = []
self.h_ipn = []
self.h_pn = []
self.h_pk = []
self.h_gamc = []
self.h_gams = []
self.dihedral_numbers = 0
if controller.amber_parm is not None:
file_path = controller.amber_parm
self.read_information_from_amberfile(file_path)
self.is_initialized = 1
else:
self.read_in_file(controller)
def read_in_file(self, controller):
"""read_in_file"""
print("START INITIALIZING DIHEDRAL:")
name = self.module_name + "_in_file"
if name in controller.Command_Set:
path = controller.Command_Set[name]
file = open(path, 'r')
context = file.readlines()
self.dihedral_numbers = int(context[0].strip())
print(" dihedral_numbers is ", self.dihedral_numbers)
for i in range(self.dihedral_numbers):
val = list(map(float, context[i + 1].strip().split()))
self.h_atom_a.append(int(val[0]))
self.h_atom_b.append(int(val[1]))
self.h_atom_c.append(int(val[2]))
self.h_atom_d.append(int(val[3]))
self.h_ipn.append(val[4])
self.h_pn.append(val[4])
self.h_pk.append(val[5])
self.h_gamc.append(math.cos(val[6]) * val[5])
self.h_gams.append(math.sin(val[6]) * val[5])
self.is_initialized = 1
file.close()
print("END INITIALIZING DIHEDRAL")
def read_information_from_amberfile(self, file_path):
'''read amber file'''
file = open(file_path, 'r')
context = file.readlines()
file.close()
for idx, val in enumerate(context):
if idx < len(context) - 1:
if "%FLAG POINTERS" in val + context[idx + 1] and "%FORMAT(10I8)" in val + context[idx + 1]:
start_idx = idx + 2
count = 0
value = list(map(int, context[start_idx].strip().split()))
self.dihedral_with_hydrogen = value[6]
self.dihedral_numbers = value[7]
self.dihedral_numbers += self.dihedral_with_hydrogen
information = []
information.extend(value)
while count < 15:
start_idx += 1
value = list(map(int, context[start_idx].strip().split()))
information.extend(value)
count += len(value)
self.dihedral_type_numbers = information[17]
print("dihedral type numbers ", self.dihedral_type_numbers)
break
self.phase_type = [0] * self.dihedral_type_numbers
self.pk_type = [0] * self.dihedral_type_numbers
self.pn_type = [0] * self.dihedral_type_numbers
for idx, val in enumerate(context):
if "%FLAG DIHEDRAL_FORCE_CONSTANT" in val:
count = 0
start_idx = idx
information = []
while count < self.dihedral_type_numbers:
start_idx += 1
if "%FORMAT" in context[start_idx]:
continue
else:
value = list(map(float, context[start_idx].strip().split()))
information.extend(value)
count += len(value)
self.pk_type = information[:self.dihedral_type_numbers]
break
for idx, val in enumerate(context):
if "%FLAG DIHEDRAL_PHASE" in val:
count = 0
start_idx = idx
information = []
while count < self.dihedral_type_numbers:
start_idx += 1
if "%FORMAT" in context[start_idx]:
continue
else:
value = list(map(float, context[start_idx].strip().split()))
information.extend(value)
count += len(value)
self.phase_type = information[:self.dihedral_type_numbers]
break
for idx, val in enumerate(context):
if "%FLAG DIHEDRAL_PERIODICITY" in val:
count = 0
start_idx = idx
information = []
while count < self.dihedral_type_numbers:
start_idx += 1
if "%FORMAT" in context[start_idx]:
continue
else:
value = list(map(float, context[start_idx].strip().split()))
information.extend(value)
count += len(value)
self.pn_type = information[:self.dihedral_type_numbers]
break
self.processor(context)
def processor(self, context):
'''processor'''
self.h_atom_a = [0] * self.dihedral_numbers
self.h_atom_b = [0] * self.dihedral_numbers
self.h_atom_c = [0] * self.dihedral_numbers
self.h_atom_d = [0] * self.dihedral_numbers
self.h_pk = []
self.h_gamc = []
self.h_gams = []
self.h_pn = []
self.h_ipn = []
for idx, val in enumerate(context):
if "%FLAG DIHEDRALS_INC_HYDROGEN" in val:
count = 0
start_idx = idx
information = []
while count < 5 * self.dihedral_with_hydrogen:
start_idx += 1
if "%FORMAT" in context[start_idx]:
continue
else:
value = list(map(int, context[start_idx].strip().split()))
information.extend(value)
count += len(value)
for i in range(self.dihedral_with_hydrogen):
self.h_atom_a[i] = information[i * 5 + 0] / 3
self.h_atom_b[i] = information[i * 5 + 1] / 3
self.h_atom_c[i] = information[i * 5 + 2] / 3
self.h_atom_d[i] = abs(information[i * 5 + 3] / 3)
tmpi = information[i * 5 + 4] - 1
self.h_pk.append(self.pk_type[tmpi])
tmpf = self.phase_type[tmpi]
if abs(tmpf - self.CONSTANT_Pi) <= 0.001:
tmpf = self.CONSTANT_Pi
tmpf2 = math.cos(tmpf)
if abs(tmpf2) < 1e-6:
tmpf2 = 0
self.h_gamc.append(tmpf2 * self.h_pk[i])
tmpf2 = math.sin(tmpf)
if abs(tmpf2) < 1e-6:
tmpf2 = 0
self.h_gams.append(tmpf2 * self.h_pk[i])
self.h_pn.append(abs(self.pn_type[tmpi]))
self.h_ipn.append(int(self.h_pn[i] + 0.001))
break
for idx, val in enumerate(context):
if "%FLAG DIHEDRALS_WITHOUT_HYDROGEN" in val:
count = 0
start_idx = idx
information = []
while count < 5 * (self.dihedral_numbers - self.dihedral_with_hydrogen):
start_idx += 1
if "%FORMAT" in context[start_idx]:
continue
else:
value = list(map(int, context[start_idx].strip().split()))
information.extend(value)
count += len(value)
for i in range(self.dihedral_with_hydrogen, self.dihedral_numbers):
self.h_atom_a[i] = information[(i - self.dihedral_with_hydrogen) * 5 + 0] / 3
self.h_atom_b[i] = information[(i - self.dihedral_with_hydrogen) * 5 + 1] / 3
self.h_atom_c[i] = information[(i - self.dihedral_with_hydrogen) * 5 + 2] / 3
self.h_atom_d[i] = abs(information[(i - self.dihedral_with_hydrogen) * 5 + 3] / 3)
tmpi = information[(i - self.dihedral_with_hydrogen) * 5 + 4] - 1
self.h_pk.append(self.pk_type[tmpi])
tmpf = self.phase_type[tmpi]
if abs(tmpf - self.CONSTANT_Pi) <= 0.001:
tmpf = self.CONSTANT_Pi
tmpf2 = math.cos(tmpf)
if abs(tmpf2) < 1e-6:
tmpf2 = 0
self.h_gamc.append(tmpf2 * self.h_pk[i])
tmpf2 = math.sin(tmpf)
if abs(tmpf2) < 1e-6:
tmpf2 = 0
self.h_gams.append(tmpf2 * self.h_pk[i])
self.h_pn.append(abs(self.pn_type[tmpi]))
self.h_ipn.append(int(self.h_pn[i] + 0.001))
break
for i in range(self.dihedral_numbers):
if self.h_atom_c[i] < 0:
self.h_atom_c[i] *= -1
|
StarcoderdataPython
|
6538977
|
<filename>datastruct/testsuite/test_nested.py
from datastruct import DataStruct, exceptions
class ExampleSingle(DataStruct):
a: int
class ExampleNested(DataStruct):
b: int
n1: ExampleSingle
class ExampleNestedNested(DataStruct):
c: int
n2: ExampleNested
def test_nested():
arg = dict(b=1, n1=dict(a=2))
o = ExampleNested(arg)
assert o.b == 1
assert o.n1.a == 2
arg = dict(b=1, n1=dict(z=3))
errs = (
exceptions.UnexpectedKeyError("z", ExampleSingle, path=("n1",)),
exceptions.MissingValueError("a", ExampleSingle, path=("n1",)),
)
o = ExampleNested(arg)
assert o.get_errors() == errs
def test_nested_nested():
arg = dict(c=3, n2=dict(b=2, n1=dict(a=1)))
o = ExampleNestedNested(arg)
assert o.c == 3
assert o.n2.b == 2
assert o.n2.n1.a == 1
arg = dict(c=3, n2=dict(b=2, n1=dict(z=3)))
errs = (
exceptions.UnexpectedKeyError(
"z",
ExampleSingle,
path=(
"n2",
"n1",
),
),
exceptions.MissingValueError(
"a",
ExampleSingle,
path=(
"n2",
"n1",
),
),
)
o = ExampleNestedNested(arg)
assert o.get_errors() == errs
def test_round_trip():
arg = dict(b=1, n1=dict(a=2))
o = ExampleNested(arg)
assert o.to_dict() == arg
arg = dict(c=3, n2=dict(b=2, n1=dict(a=1)))
o = ExampleNestedNested(arg)
assert o.to_dict() == arg
|
StarcoderdataPython
|
4869865
|
<reponame>IDilettant/training-mini-projects
from transliterate import translit
from num2words import num2words
def preparing_speech():
numbers_from_speech = [78, 15, 3, 40, 8]
print(translit('''Ladies and gentlemen, I'm 78 years old and I finally got 15 minutes of fame once in a lifetime and I guess that this is mine. People have also told me to make these next few minutes excruciatingly embarrassing and to take vengeance of my enemies. Neither will happen.
More than 3 years ago I moved to Novo-Novsk, but worked on new Magnetic Storage for last 40. When I was 8...''', 'ru'))
print()
for number in numbers_from_speech:
print(number, translit(num2words(number, lang='en'), 'ru'))
if __name__ == '__main__':
preparing_speech()
|
StarcoderdataPython
|
9737062
|
from django.contrib import admin
from intro.models import *
# Register your models here.
admin.site.register(IntroReg)
|
StarcoderdataPython
|
1830659
|
#!python
# ###################################################################
#
# Disclaimer and Notice of Copyright
# ==================================
#
# Copyright (c) 2015, Los Alamos National Security, LLC
# All rights reserved.
#
# Copyright 2015. Los Alamos National Security, LLC.
# This software was produced under U.S. Government contract
# DE-AC52-06NA25396 for Los Alamos National Laboratory (LANL),
# which is operated by Los Alamos National Security, LLC for
# the U.S. Department of Energy. The U.S. Government has rights
# to use, reproduce, and distribute this software. NEITHER
# THE GOVERNMENT NOR LOS ALAMOS NATIONAL SECURITY, LLC MAKES
# ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY
# FOR THE USE OF THIS SOFTWARE. If software is modified to
# produce derivative works, such modified software should be
# clearly marked, so as not to confuse it with the version
# available from LANL.
#
# Additionally, redistribution and use in source and binary
# forms, with or without modification, are permitted provided
# that the following conditions are met:
# - Redistributions of source code must retain the
# above copyright notice, this list of conditions
# and the following disclaimer.
# - Redistributions in binary form must reproduce the
# above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of Los Alamos National Security, LLC,
# Los Alamos National Laboratory, LANL, the U.S. Government,
# nor the names of its contributors may be used to endorse
# or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY LOS ALAMOS NATIONAL SECURITY, LLC
# AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL LOS ALAMOS NATIONAL SECURITY, LLC OR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
#
# ###################################################################
""" plug-in that supports using the LDMS tool
"""
import os
import subprocess
import datetime
import logging
class LDMS():
def __init__(self, my_te):
my_name = self.__class__.__name__
self.logger = logging.getLogger('pav.' + my_name)
self.logger.info('initialize %s to run: ' % my_name)
self.pid = str(os.getpid())
self.lh = my_name
params = my_te.get_values()
self.name = my_te.get_name()
self.install_dir = str(params['ldms']['install_dir'])
self.start_cmd = str(params['ldms']['start_cmd'])
self.output_dir_root = str(params['ldms']['output_dir_root'])
self.freq = params['ldms']['freq']
self.metric_list = str(params['ldms']['metric_list'])
self.output_dir = self.create_output_dir()
self.build_start_cmd()
def create_output_dir(self):
# This dir must be created before LDMS can start and should
# be unique so that each new test run does not stomp on
# existing data from a prior one.
sub_dir = self.name + "-" + datetime.datetime.now().strftime('%m-%d-%YT%H:%M:%S:%f')
if "HOME" in self.output_dir_root:
root = os.environ['HOME']
else:
root = self.output_dir_root
output_dir = root + "/ldmsData/" + sub_dir
self.logger.info(self.lh + " Make metrics directory: " + output_dir)
try:
os.umask(0o002)
os.makedirs(output_dir, 0o755)
except OSError:
print " Error creating metrics directory : \n\t" + output_dir
self.logger.info(self.lh + " Error creating metrics directory : \n\t" + output_dir)
output_dir = ''
pass
print "Created ldms metrics dir: " + output_dir
os.environ['LDMS_OUTPUT_DIR'] = output_dir
return output_dir
def build_start_cmd(self):
full_cmd = self.install_dir + "/" + self.start_cmd
full_cmd += " -f " + str(self.freq)
full_cmd += " -m " + self.metric_list
full_cmd += " -s " + self.output_dir
os.environ['LDMS_START_CMD'] = full_cmd
# define some static methods for LDMS job control
@staticmethod
def start():
# start and don't wait. Report success or fail in the log(s).
outfile = os.environ['LDMS_OUTPUT_DIR'] + "/ldms.out"
print " starting LDMS with: \n " + os.environ['LDMS_START_CMD']
text_file = open(outfile, "w")
try:
subprocess.Popen(os.environ['LDMS_START_CMD'], stdout=text_file, stdin=open(os.devnull), shell=True)
except subprocess.CalledProcessError as e:
ret = e.returncode
if ret in (1, 2):
print("the command failed")
elif ret in (3, 4, 5):
print("the command failed very much")
pass
@staticmethod
def status(jid):
pass
@staticmethod
def stop(jid):
pass
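# A hedged usage sketch (`my_te` is assumed to be a test-entry object exposing
# get_values() and get_name() as used in __init__ above):
# ldms = LDMS(my_te)
# LDMS.start()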
if __name__ == "__main__":
print(LDMS.__doc__)
|
StarcoderdataPython
|
3349642
|
import json
import time
import sys
import logging
#TODO Change this to cryptography.io
from ecdsa import VerifyingKey, BadSignatureError, NIST256p
import hashlib
import traceback
import requests
import jwkest
from jwkest.jwk import load_jwks_from_url, load_jwks
from jwkest.jws import JWS
jws = JWS()
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
AWS_REGION = "us-east-2"
USER_POOL_ID = "us-east-2_fiNazAdBU"
def verify_meta_data_text(raw_line):
# The data from the serial port comes in as raw bytes, but they are ascii encoded
parts = raw_line.split(b',TXT-SHA:')
logger.debug("Parts after split on TXT-SHA:")
logger.debug(parts)
try:
meta_data_bytes = parts[0]
sha_and_signature = parts[1].split(b',SIG:')
text_sha = sha_and_signature[0]
sha_signature = sha_and_signature[1]
logger.debug("Bytes to Verify: {}".format(meta_data_bytes))
logger.debug("Claimed SHA-256: {}".format(text_sha))
logger.debug("Claimed Signature: {}".format(sha_signature))
m = hashlib.sha256()
m.update(meta_data_bytes)
calculated_sha = m.hexdigest().upper()
sha_hex = m.digest()
logger.debug("Calculated SHA_256: {}".format(calculated_sha))
if calculated_sha != text_sha.decode('ascii'):
logger.debug("SHA 256 Digests in text file doesn't match the calculated value.")
return False
public_key_bytes = bytearray.fromhex(meta_data_bytes.split(b'PUB:')[1][:128].decode('ascii'))
signature_hex = bytearray.fromhex(sha_signature[:128].decode('ascii'))
try:
vk = VerifyingKey.from_string(bytes(public_key_bytes), curve=NIST256p)
except:
logger.debug(traceback.format_exc())
return False
try:
vk.verify_digest(signature_hex, sha_hex)
logger.debug("good signature")
return True
except BadSignatureError:
logger.debug("BAD SIGNATURE")
return False
except IndexError:
logger.debug(traceback.format_exc())
return False
def decode_jwt(token):
"""
Validate and decode the web token from Amazon Cognito.
Fetches the public key set needed to verify the token's signature.
Returns a dict with user_id and user_email if the token is valid, otherwise False.
"""
url="https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json".format(AWS_REGION,USER_POOL_ID)
try:
r = requests.get(url)
logger.debug(r.status_code)
key_set = load_jwks(r.text)
except:
logger.debug(traceback.format_exc())
return False
try:
token_dict = jws.verify_compact(token, keys=key_set)
logger.info(token_dict)
if token_dict['exp'] < time.time():
logger.debug("Token Expired")
return False
if token_dict['email_verified']:
return {"user_id":token_dict['sub'],
"user_email":token_dict['email']}
else:
logger.debug("E-mail not verfied.")
return False
except:
logger.debug(traceback.format_exc())
return False
def get_timestamp(seconds):
try:
return time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime(seconds))
except ValueError:
return "Not Available"
def lambdaResponse(statusCode,
body,
headers={},
isBase64Encoded=False):
"""
A utility to wrap the lambda function call returns with the right status code,
body, and switches.
"""
# Make sure the body is a json object
if not isinstance(body, str):
body = json.dumps(body)
# Make sure the content type is json
header = headers
header["Content-Type"] = "application/json"
header['Access-Control-Allow-Headers'] = 'Content-Type'
header['Access-Control-Allow-Origin'] = '*'
header['Access-Control-Allow-Methods'] = 'OPTIONS,POST,GET'
response = {
"isBase64Encoded": isBase64Encoded,
"statusCode": statusCode,
"headers": header,
"body": body
}
# These print statement create entries in cloudwatch
print("Response")
for k, v in response.items():
print("{}: {}".format(k, repr(v)[:100]))
return response
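# A hedged usage sketch (the byte layout is inferred from the parsing in
# verify_meta_data_text; the literal values below are placeholders, not real data):
# raw_line = b"...,PUB:<128 hex chars>...,TXT-SHA:<sha-256 hex digest>,SIG:<128 hex chars>"
# is_valid = verify_meta_data_text(raw_line)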
|
StarcoderdataPython
|
6441800
|
<filename>Leetcode/medium/reverse-words-in-a-string.py
"""
# REVERSE WORDS IN A STRING
Given an input string s, reverse the order of the words.
A word is defined as a sequence of non-space characters. The words in s will be separated by at least one space.
Return a string of the words in reverse order concatenated by a single space.
Note that s may contain leading or trailing spaces or multiple spaces between two words. The returned string should only have a single space separating the words. Do not include any extra spaces.
Example 1:
Input: s = "the sky is blue"
Output: "blue is sky the"
Example 2:
Input: s = " hello world "
Output: "world hello"
Explanation: Your reversed string should not contain leading or trailing spaces.
Example 3:
Input: s = "a good example"
Output: "example good a"
Explanation: You need to reduce multiple spaces between two words to a single space in the reversed string.
Example 4:
Input: s = " Bob Loves Alice "
Output: "<NAME> Bob"
Example 5:
Input: s = "Alice does not even like bob"
Output: "bob like even not does Alice"
Constraints:
1 <= s.length <= 104
s contains English letters (upper-case and lower-case), digits, and spaces ' '.
There is at least one word in s.
Follow up:
Could you solve it in-place with O(1) extra space?
"""
class Solution:
def reverseWords(self, s: str) -> str:
temp = ""
res = ""
s += " "
for x in s:
if x != " ":
temp += x
else:
if temp != "":
res = temp + " " + res
temp = ""
return res[:-1]
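# For reference, an equivalent one-liner (not part of the original solution):
# return " ".join(reversed(s.split()))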
|
StarcoderdataPython
|
1980923
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 15 09:26:01 2019
Apply the Frank-Wolfe algorithm and projected gradient descent to solve the optimization problem
min|X - Y|_F^2 s.t. \sum max(|X_i|) < k
@author: wx100
"""
import matplotlib.pyplot as plt
import numpy as np
import cvxpy as cp
import time
# The Frank-Wolfe algorithm
"""
Input: original m*n matrix Y of real values and the constraint level epsilon
Output: X
"""
def Frank_Wolfe(Y, epsilon):
nrow = len(Y)
ncol = len(Y[0])
X = np.zeros((nrow, ncol)) # X store the final output value.
record_X = np.ones((nrow,ncol)) # record_X store the X value in the last loop to judge if X converges.
k = 0; # k is the loop index
# compute S = min S * |X - Y| s.t. \sum max(|S_i|) < k
while np.linalg.norm(X - record_X, 'fro') > 0.01:
k = k + 1;
temp = X - Y
abs_temp = np.abs(temp)
# find the column vector in matrix X-Y with biggest norm-1
temp_sum = np.sum(abs_temp,axis = 0)
S = np.zeros((nrow,ncol))
S_value = np.amax(temp_sum)
S_index = np.where(temp_sum == S_value)
# split the budget epsilon evenly across all tied maximal columns
for i in range(0, len(S_index[0])):
S[:, S_index[0][i]] = - epsilon / len(S_index[0]) * np.sign(temp[:, S_index[0][i]])
#update X
mylambda = 2 / (k+1)
record_X = X
X = (1 - mylambda) * X + mylambda * S
return X
def cvx(Y,epsilon):
nrow = len(Y)
ncol = len(Y[0])
X = cp.Variable((nrow, ncol))
constraints = 0
for i in range(0,ncol):
constraints += cp.norm(X[:,i], 'inf')
prob = cp.Problem(cp.Minimize(cp.norm(X-Y,"fro")), [constraints <= epsilon])
prob.solve()
return X.value
start = 10
end = 250
step = 10
epsilon = 1
loop_time = 100
length = int((end - start -1) / step) + 1
run_time1 = np.zeros(length) # store the computation time of <NAME> algorithm
run_time2 = np.empty(length) # store the computation time of convex package
matrix_size = np.arange(start,end,step)
for loop_index in range(0,loop_time):
index = 0
for i in matrix_size:
# create the n*n random matrix Y and then compute the corresponded projected matrix X
Y = 5 * np.random.rand(i,i)
begin_time = time.perf_counter()
X1 = Frank_Wolfe(Y,epsilon)
run_time1[index] += time.perf_counter() - begin_time
# X2 = cvx(Y,epsilon)
# run_time2[index] = time.perf_counter() - run_time1[index] - begin_time
index = index + 1
ave_run_time1 = run_time1 / loop_time
plt.plot(matrix_size, run_time1, label='Frank_Wolfe')
#plt.plot(matrix_size, run_time2, label='cvx')
#plt.plot(matrix_size, run_time3, label='cvx')
plt.xlabel('matrix size n')
plt.ylabel('run time/s')
plt.title("Run time comparision")
plt.legend()
plt.show()
#epsilon = 1
#Y = 0.1 * np.ones((5,5))
#X = Frank_Wolfe(Y,epsilon)
|
StarcoderdataPython
|
9606665
|
<reponame>DoNnMyTh/pixabay
from unittest import TestCase
from pixabay import Image, Video
import os
api_key = os.getenv('PIXABAY_API_KEY')
image = Image(api_key)
video = Video(api_key)
class TestPythonPixabay(TestCase):
def test_custom_image_search(self):
self.assertIn(
"hits",
image.search(q="water",
page=1,
safesearch="true",
editors_choice="true"))
self.assertEqual(
image.search(q="apple", page=1)["hits"][0]["pageURL"],
"https://pixabay.com/photos/apples-fruit-red-juicy-ripe-634572/"
)
self.assertEqual(
image.search(q="apple",
page=1,
safesearch="false",
editors_choice="true")["totalHits"], 155)
def test_custom_video_search(self):
self.assertEqual(
video.search(q="apple",
page=1,
safesearch="false",
editors_choice="true")["hits"][0]["pageURL"],
"https://pixabay.com/videos/id-1019/")
self.assertEqual(
video.search(q="apple",
page=1,
safesearch="true",
editors_choice="true")["totalHits"], 1)
|
StarcoderdataPython
|
6654222
|
<gh_stars>1-10
import asyncio
import io
import re
import urllib.parse
from typing import ClassVar, Optional
import aiohttp
import telethon as tg
from .. import command, module, util
LOGIN_CODE_REGEX = r"[Ll]ogin code: (\d+)"
class NetworkModule(module.Module):
name: ClassVar[str] = "Network"
@command.desc("Pong")
async def cmd_ping(self, ctx: command.Context):
before = util.time.msec()
await ctx.respond("Calculating response time...")
after = util.time.msec()
return f"Request response time: {after - before:.0f} ms"
async def on_message(self, msg: tg.events.NewMessage.Event) -> None:
# Only check Telegram service messages
if msg.sender_id != 777000:
return
# Print login code if present
match = re.search(LOGIN_CODE_REGEX, msg.raw_text)
if match is not None:
self.log.info(f"Received Telegram login code: {match.group(1)}")
@command.desc("Upload given file to file.io")
@command.usage("[expiry time?]", optional=True)
async def cmd_fileio(self, ctx: command.Context) -> str:
expires = ctx.input
if not ctx.msg.is_reply:
return "__Reply to a file to upload it.__"
if expires == "help":
return "__Expiry format: 1y/12m/52w/365d__"
if expires:
if expires[-1] not in ["y", "m", "w", "d"]:
return "__Unknown unit. Expiry format: 1y/12m/52w/365d__"
try:
int(expires[:-1])
except ValueError:
return "__Invalid number. Expiry format: 1y/12m/52w/365d__"
else:
expires = "2d"
reply_msg = await ctx.msg.get_reply_message()
if not reply_msg.file:
return "__That message doesn't contain a file.__"
data = await util.tg.download_file(ctx, reply_msg)
buf = io.BytesIO(data)
buf.name = reply_msg.file.name
await ctx.respond("Uploading file to [file.io](https://file.io/)...")
async with self.bot.http.post(
f"https://file.io/?expires={expires}", data={"file": buf}
) as resp:
resp_data = await resp.json()
if not resp_data["success"]:
return f"__Error uploading file — status code {resp.status}__"
return str(resp_data["link"])
@command.desc("Update the embed for a link")
@command.usage("[link?, or reply]", optional=True)
@command.alias("upde", "updl", "updatelink", "ul", "ulink")
async def cmd_update_link(self, ctx: command.Context) -> Optional[str]:
link = ctx.input
if not (link or ctx.msg.is_reply):
return "__Provide or reply to a link to update it.__"
if not link:
reply_msg = await ctx.msg.get_reply_message()
for entity, text in reply_msg.get_entities_text():
if isinstance(
entity,
(tg.tl.types.MessageEntityUrl, tg.tl.types.MessageEntityTextUrl),
):
link = text
if not link:
return "__That message doesn't contain any links."
await ctx.respond(f"Updating embed for [link]({link})...")
async with self.bot.client.conversation("WebpageBot") as conv:
await conv.send_message(link)
response = await conv.get_response()
await conv.mark_read()
if "Link previews was updated successfully" in response.raw_text:
# Provide a status update
await ctx.respond("Waiting for embed update to propagate...")
# Give Telegram some time to propagate the update
await asyncio.sleep(1)
# Send the new preview
await ctx.respond(f"Updated embed for link: {link}", link_preview=True)
else:
# Failed for some reason, send the error
await ctx.respond(
f"Error updating embed for [link]({link}): `{response.raw_text}`"
)
return None
@command.desc("Generate a LMGTFY link (Let Me Google That For You)")
@command.usage("[search query]")
async def cmd_lmgtfy(self, ctx: command.Context) -> str:
query = ctx.input
params = urllib.parse.urlencode({"q": query})
return f"https://lmgtfy.com/?{params}"
|
StarcoderdataPython
|
1765777
|
<filename>GenomicData/utils/regression_hops_sampler.py
import pandas as pd
import utils.hops_sampler as hops_sampler
import numpy as np
from sklearn.preprocessing import LabelEncoder
from torch_geometric.data import Data
from .hops_sampler import hops_sampler
import torch
class regression_hops_sampler(hops_sampler):
"""
This class is designed for regression on the pathway dataset.
Its general behaviour is almost the same as `hops_sampler`.
Instead of considering the pathway structure, the data are still sampled based on `num_hops`, but the non-protein genomic nodes are discarded.
The `batch_size` here cannot be set manually and is always $1$.
Inputs: `pathway`: a pathway object
`num_hops`: the number of hops to sample
`least_size (optional)`: filters out nodes that do not have enough links
Return: `regression_samples`: the loader of sub-graphs, without consideration of the graph relation
`regression_samples.activ_free/cancer`: the training data
`regression_samples.activ_free/cancer_target`: the target values
"""
def __init__(self, pathway, num_hops, least_size=5):
self.data = pathway
self.num_hops = num_hops
self.batch_size = 1
self.least_size = least_size
self._setup()
self._splits_protein()
# From here, extract the remained graph based on batached_node_id-list
le = LabelEncoder()
le.fit(self.data.remained_protein)
self.regression_samples = []
for test_id, batch in zip(self.batch_splits, self.batched_node_list):
self.regression_samples.append(self.regression_sampler(test_id.numpy(), batch, le))
def _splits_protein(self):
# From here on, sub-sample the linkage based on the number of hops.
# First, split the starting nodes into batches of the designed batch_size
self.batch_splits = torch.split(torch.tensor(self.save_ids), self.batch_size)
# function that find all parent hops
self.batched_node_list = []
for batch in self.batch_splits:
# the ids i want to keep in further sub-sampling part
this_batch_ids = batch.numpy()
for num in range(self.num_hops):
this_batch_ids = np.hstack([this_batch_ids, self.node_i[np.in1d(self.node_j, this_batch_ids)]])
self.batched_node_list.append(np.intersect1d(self.data.remained_protein_id, np.unique(this_batch_ids)))
def regression_sampler(self, test_id, batch, le):
pthway = Data()
pthway.test_id = test_id
pthway.operate_id = np.setdiff1d(batch, test_id)
pthway.pthway_NameList = self.data.pthway_NameList.iloc[pthway.operate_id,:]
if (self.data.pthway_NameList.iloc[pthway.test_id,:]['GenomeType'].values != 'protein'):
raise IndexError('The test element is not the protein type. \t')
if not np.all(pthway.pthway_NameList['GenomeType'].values == 'protein'):
raise IndexError('Some of the related genomes are not protein type.')
# The pathway name list that excludes the target protein
pthway.Genome_NameList = pthway.pthway_NameList['GenomeName'].values
# Only the name of target protein
pthway.Test_NameList = self.data.pthway_NameList.iloc[pthway.test_id,:]['GenomeName'].values
# The overall trained dataset
activ_id = le.transform(pthway.Genome_NameList)
pthway.activ_free = self.data.activ_free[activ_id]
pthway.activ_cancer = self.data.activ_cancer[activ_id]
# the target value
activ_id_test = le.transform(pthway.Test_NameList)
pthway.activ_free_target = self.data.activ_free[activ_id_test]
pthway.activ_cancer_target = self.data.activ_cancer[activ_id_test]
return pthway
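# A hedged usage sketch (`pathway` is assumed to be a preprocessed pathway object
# with the attributes accessed above, e.g. remained_protein and activ_free):
# sampler = regression_hops_sampler(pathway, num_hops=2)
# first = sampler.regression_samples[0]
# X_train, y_target = first.activ_free, first.activ_free_target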
|
StarcoderdataPython
|
1622266
|
<reponame>cmput401-fall2018/web-app-ci-cd-with-travis-ci-SethBergen
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
def test_home():
driver = webdriver.Chrome()
driver.get("http://127.0.0.1:8000")
elem = driver.find_element_by_id("name")
assert elem != None
elem = driver.find_element_by_id("about")
assert elem != None
elem = driver.find_element_by_id("skills")
assert elem != None
elem = driver.find_element_by_id("education")
assert elem != None
elem = driver.find_element_by_id("work")
assert elem != None
elem = driver.find_element_by_id("contact")
assert elem != None
|
StarcoderdataPython
|
4927557
|
from __future__ import print_function, unicode_literals
from collections import OrderedDict
from unittest import TestCase
import mock
import six
from subui.step import StatefulUrlParamsTestStep, TestStep
from .utils import patch_class_method_with_original
TESTING_MODULE = 'subui.step'
class TestTestStep(TestCase):
def test__init__(self):
"""
Test that __init__ overwrites attributes with
given parameters
"""
step = TestStep(hello='world')
self.assertTrue(hasattr(step, 'hello'))
self.assertEqual(getattr(step, 'hello'), 'world')
def test_init(self):
"""
Test that init stores given parameters as attributes
"""
step = TestStep()
step.init(mock.sentinel.client,
mock.sentinel.steps,
mock.sentinel.step_index,
mock.sentinel.step_key,
mock.sentinel.state)
self.assertEqual(step.client, mock.sentinel.client)
self.assertEqual(step.steps, mock.sentinel.steps)
self.assertEqual(step.step_index, mock.sentinel.step_index)
self.assertEqual(step.step_key, mock.sentinel.step_key)
self.assertEqual(step.state, mock.sentinel.state)
def test_steps(self):
"""
Test prev_steps, next_steps, prev_step and next_step
properties
"""
step = TestStep()
step.steps = OrderedDict((
('one', 1),
('two', 2),
('three', 3),
('four', 4),
('five', 5),
))
step.step_index = 2
self.assertEqual(
step.prev_steps,
OrderedDict((
('two', 2),
('one', 1),
))
)
self.assertEqual(
step.prev_step,
2,
)
self.assertEqual(
step.next_step,
4,
)
def test_get_content_type(self):
"""
Test that get_content_type returns content_type if defined
"""
step = TestStep()
self.assertEqual(step.get_content_type(), '')
step.content_type = 'application/json'
self.assertEqual(step.get_content_type(), 'application/json')
def test_get_override_settings(self):
"""
Test that get_override_settings returns overriden_settings if defined
"""
step = TestStep()
self.assertDictEqual(step.get_override_settings(), {})
step.overriden_settings = {
'ROOT_URLCONF': 'services.urls'
}
self.assertEqual(step.get_override_settings(), {'ROOT_URLCONF': 'services.urls'})
def test_get_urlconf(self):
"""
Test that get_urlconf returns urlconf if defined
"""
step = TestStep()
self.assertIsNone(step.get_urlconf())
step.urlconf = 'services.urls'
self.assertEqual(step.get_urlconf(), 'services.urls')
def test_get_url_args(self):
"""
Test that get_url_args returns url args if defined
"""
step = TestStep()
self.assertTupleEqual(step.get_url_args(), tuple())
step.url_args = ('foo',)
self.assertTupleEqual(step.get_url_args(), ('foo',))
def test_get_url_kwargs(self):
"""
Test that get_url_kwargs returns url kwargs if defined
"""
step = TestStep()
self.assertDictEqual(step.get_url_kwargs(), {})
step.url_kwargs = {'foo': 'bar'}
self.assertDictEqual(step.get_url_kwargs(), {'foo': 'bar'})
@patch_class_method_with_original(TestStep, 'get_url_kwargs')
@patch_class_method_with_original(TestStep, 'get_url_args')
@mock.patch(TESTING_MODULE + '.reverse')
def test_get_url(self,
mock_get_url_args,
mock_get_url_kwargs,
mock_reverse):
mock_reverse.return_value = mock.sentinel.reverse
step = TestStep()
step.url_name = 'url-name'
actual = step.get_url()
self.assertEqual(actual, mock.sentinel.reverse)
mock_get_url_args.assert_called_once_with(step)
mock_get_url_kwargs.assert_called_once_with(step)
mock_reverse.assert_called_once_with(
'url-name', args=tuple(), urlconf=None, kwargs={})
def test_get_request_data(self):
"""
Test that get_request_data returns data if defined
"""
step = TestStep()
actual = step.get_request_data()
self.assertEqual(actual, {})
step.data = {'hello': 'world'}
actual = step.get_request_data()
self.assertEqual(actual, {'hello': 'world'})
actual = step.get_request_data({'foo': 'bar'})
self.assertEqual(actual, {'foo': 'bar'})
def test_get_validators(self):
"""
Test that get_validators returns validators
"""
step = TestStep()
step.validators = mock.sentinel.validators
self.assertEqual(step.get_validators(), mock.sentinel.validators)
@patch_class_method_with_original(TestStep, 'post_request_hook')
@patch_class_method_with_original(TestStep, 'pre_request_hook')
@mock.patch.object(TestStep, 'get_url')
def test_request(self,
mock_pre_request_hook,
mock_post_response_hook,
mock_get_url):
"""
Test that request correctly calls the client
"""
mock_client = mock.MagicMock()
mock_client.methodname.return_value = mock.sentinel.response
mock_get_url.return_value = mock.sentinel.url
step = TestStep(client=mock_client, data={'hello': 'world'})
step.request_method = 'methodname'
actual = step.request()
self.assertEqual(actual, mock.sentinel.response)
mock_pre_request_hook.assert_called_once_with(step)
mock_post_response_hook.assert_called_once_with(step)
@mock.patch.object(TestStep, 'get_url')
def test_request_with_error(self, mock_get_url):
"""
Test that request exceptions are handled correctly
"""
mock_client = mock.MagicMock()
mock_client.methodname.side_effect = ValueError
mock_get_url.return_value = mock.sentinel.url
step = TestStep(client=mock_client, step_key='foo', data={'hello': 'world'})
step.request_method = 'methodname'
with self.assertRaises(Exception) as e:
step.request()
self.assertEqual(e.exception.__class__.__name__, 'ValueError')
self.assertEqual(
six.text_type(e.exception).splitlines()[0],
'Response for {foo:TestStep} requesting "sentinel.url" failed:'
)
@patch_class_method_with_original(TestStep, 'post_test_response')
@patch_class_method_with_original(TestStep, 'pre_test_response')
@mock.patch('inspect.isclass')
def test_test_response(self,
mock_pre_test_response,
mock_post_test_response,
mock_isclass):
"""
Test that test_response loops over all validators
and correctly calls them
"""
mock_isclass.side_effect = [True, False]
validator = mock.MagicMock()
class_validator = mock.MagicMock()
step = TestStep(validators=[class_validator, validator])
step.test_response()
validator.test.assert_called_once_with(step)
class_validator.assert_called_once_with(step)
class_validator().test.assert_called_once_with(step)
mock_pre_test_response.assert_called_once_with(step)
mock_post_test_response.assert_called_once_with(step)
class TestStatefulUrlParamsTestStep(TestCase):
@mock.patch.object(TestStep, 'get_url_args')
def test_get_url_args(self, mock_get_url_args):
"""
Test that get_url_args returns args from the state if present
"""
mock_get_url_args.return_value = mock.sentinel.super
mock_state = mock.MagicMock()
mock_state.get.return_value = mock.sentinel.args
step = StatefulUrlParamsTestStep(state=mock_state)
actual = step.get_url_args()
self.assertEqual(actual, mock.sentinel.args)
mock_state.get.assert_called_once_with('url_args', mock.sentinel.super)
@mock.patch.object(TestStep, 'get_url_kwargs')
def test_get_url_kwargs(self, mock_get_url_kwargs):
"""
Test that get_url_args returns kwargs from the state if present
"""
mock_get_url_kwargs.return_value = mock.sentinel.super
mock_state = mock.MagicMock()
mock_state.get.return_value = mock.sentinel.kwargs
step = StatefulUrlParamsTestStep(state=mock_state)
actual = step.get_url_kwargs()
self.assertEqual(actual, mock.sentinel.kwargs)
mock_state.get.assert_called_once_with('url_kwargs', mock.sentinel.super)
@mock.patch.object(TestStep, 'get_url')
def test_get_request_kwargs(self, mock_get_url):
"""
Test that get_request_kwargs returns correct kwargs
"""
step = TestStep(
data={'foo': 'bar'},
content_type='application/json'
)
self.assertDictEqual(
step.get_request_kwargs(), {
'path': mock_get_url.return_value,
'data': {'foo': 'bar'},
'content_type': 'application/json'
}
)
|
StarcoderdataPython
|
3530362
|
<filename>cooper_pair/pair.py
# pylint: disable=C0103, E0401, R0201
"""cooper_pair is a small library for programmatic access to the DQM
GraphQL API."""
import json
import os
import tempfile
import time
try: # pragma: nocover
from urllib.parse import parse_qs
except ImportError: # pragma: nocover
from urlparse import parse_qs
import warnings
import requests
import great_expectations as ge
from gql import gql, Client
from gql.client import RetryError
from gql.transport.requests import RequestsHTTPTransport
TIMEOUT = 60
MAX_RETRIES = 10
DQM_GRAPHQL_URL = os.environ.get('DQM_GRAPHQL_URL')
def make_gql_client(transport=None, schema=None, retries=MAX_RETRIES,
timeout=TIMEOUT):
client = None
counter = 0
while client is None and counter < retries:
start_time = time.time()
while ((time.time() - start_time) <= timeout) and client is None:
try:
client = Client(
transport=transport,
fetch_schema_from_transport=(schema is None),
schema=schema,
retries=retries)
except (requests.ConnectionError, RetryError):
warnings.warn('CooperPair failed to connect to allotrope...')
counter += 1
if client is None:
raise Exception(
'CooperPair failed to connect to '
'allotrope {} times.'.format(retries))
return client
def generate_slug(name):
"""Utility function to generate snake-case-slugs.
Args:
name (str) -- the name to convert to a slug
Returns:
A string slug.
"""
# TODO: this functionality should move to the server
return name.lower().replace(' ', '-')
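# Example (follows directly from the implementation above):
#     generate_slug('Claims by Race and Gender') == 'claims-by-race-and-gender'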
class CooperPair(object):
"""Entrypoint to the API."""
_client = None
def __init__(
self,
email=None,
password=<PASSWORD>,
graphql_endpoint=DQM_GRAPHQL_URL,
timeout=TIMEOUT,
max_retries=MAX_RETRIES):
"""Create a new instance of CooperPair.
Kwargs:
graphql_endpoint (str) -- The GraphQL endpoint to hit (default:
the value of the DQM_GRAPHQL_URL environment variable).
timeout (int) -- The number of seconds to wait for API responses
            before timing out (default: 60).
max_retries (int) -- The number of times to retry API requests
before failing (default: 10). The worst-case time an API call
may take is (max_retries x timeout) seconds.
Raises:
AssertionError, if graphql_endpoint is not set and the
DQM_GRAPHQL_URL environment variable is not set.
Returns:
A new instance of CooperPair
"""
assert graphql_endpoint, \
'CooperPair.init: graphql_endpoint was None and ' \
'DQM_GRAPHQL_URL not set.'
if not(email and password):
warnings.warn(
'CooperPair must be initialized with email and password '
'in order to authenticate against the GraphQL api.')
self.email = email
self.max_retries = max_retries
self.password = password
self.timeout = timeout
self.token = None
self.transport = RequestsHTTPTransport(
url=graphql_endpoint, use_json=True, timeout=timeout)
@property
def client(self):
if self._client is None:
self._client = make_gql_client(
transport=self.transport,
retries=self.max_retries,
timeout=self.timeout)
# FIXME(mattgiles): login needs to be thought through
self.login()
return self._client
def login(self, email=None, password=<PASSWORD>):
if self.email is None or self.password is None:
warnings.warn(
'Instance credentials are not set. You must '
'set instance credentials (self.email and self.password) '
'in order to automatically authenticate against '
'the GraphQL api.')
email = email or self.email
password = password or <PASSWORD>
if email is None or password is None:
warnings.warn('Must provide email and password to login.')
return False
login_result = self.client.execute(
gql("""
mutation loginMutation($input: LoginInput!) {
login(input: $input) {
token
}
}
"""),
variable_values={
'input': {
'email': email,
'password': password
}
})
token = login_result['login']['token']
if token:
self.token = token
self.transport.headers = dict(
self.transport.headers or {}, **{'X-Fullerene-Token': token})
return True
else:
warnings.warn(
"Couldn't log in with email and password provided. "
"Please try again")
return False
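    # A minimal connection sketch (assumes a reachable DQM GraphQL endpoint; the email,
    # password and URL below are placeholders, not real credentials):
    #
    #     pair = CooperPair(email='analyst@example.com', password='...',
    #                       graphql_endpoint='http://localhost:3010/graphql')
    #     pair.login()  # also triggered lazily on first access of `pair.client`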
def query(self, query, variables=None, unauthenticated=False):
"""Workhorse to execute queries.
Args:
query (string) -- A valid GraphQL query. query will apply
gql.gql on the string to generate a graphql.language.ast.Document.
Kwargs:
variables (dict) -- A Python dict containing variables to be
passed along with the GraphQL query (default: None, no
variables will be passed).
Returns:
A dict containing the parsed results of the query.
"""
if not unauthenticated:
if not self.token:
warnings.warn(
'Client not authenticated. Attempting to authenticate '
'using stored credentials...')
query_gql = gql(query)
try:
return self.client.execute(query_gql, variable_values=variables)
except (requests.exceptions.HTTPError, RetryError):
self.transport.headers = dict(
self.transport.headers or {}, **{'X-Fullerene-Token': None})
self._client = None
return self.client.execute(query_gql, variable_values=variables)
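    # Query usage sketch (any valid GraphQL document string works; the id is a placeholder):
    #
    #     result = pair.query(
    #         """
    #         query datasetQuery($id: ID!) {
    #           dataset(id: $id) { id label }
    #         }
    #         """,
    #         variables={'id': 1})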
def munge_ge_evaluation_results(self, ge_results):
'''
Unpack the Great Expectations result object to match the semi-flattened
structure used by Allotrope.
:param ge_results: a list of result dicts returned by Great Expectations
:return: a list of result dicts that can be consumed by Allotrope
'''
return [
{
'success': result['success'],
'expectationId': result['expectation_id'],
'expectationType': result['expectation_config']['expectation_type'],
'expectationKwargs': json.dumps(result['expectation_config']['kwargs']),
'raisedException': result['exception_info']['raised_exception'],
'exceptionTraceback': result['exception_info']['exception_traceback'],
# 'exceptionMessage': result['exception_info']['exception_message'], #FIXME: Allotrope needs a new DB field to store this in
'summaryObj': (
json.dumps(result['result'])
if 'result' in result else json.dumps({})
)
}
for result in ge_results]
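    # Shape sketch: a Great Expectations result such as
    #     {'success': True, 'expectation_id': 7,
    #      'expectation_config': {'expectation_type': 'expect_column_to_exist',
    #                             'kwargs': {'column': 'id'}},
    #      'exception_info': {'raised_exception': False, 'exception_traceback': None},
    #      'result': {}}
    # is flattened by the method above into
    #     {'success': True, 'expectationId': 7, 'expectationType': 'expect_column_to_exist',
    #      'expectationKwargs': '{"column": "id"}', 'raisedException': False,
    #      'exceptionTraceback': None, 'summaryObj': '{}'}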
def list_question_templates(self):
"""
Return all valid question templates
:return: Graphql query result containing all question templates
"""
return self.query("""{
allQuestionTemplates {
edges {
node {
id
scId
questionType
text
expectationType
answerTemplate
answerValidation
storyTemplate
compatibleSpecTypes
}
}
}
}""")
def get_evaluation(self, evaluation_id):
"""
Query an evaluation by id
:param evaluation_id: Evaluation id
:return: Graphql query result containing Evaluation dict
"""
return self.query("""
query evaluationQuery($id: ID!) {
evaluation(id: $id) {
id
statusOrdinal
checkpointId
checkpoint {
name
}
dataset {
id
label
}
createdBy {
id
}
organization {
id
}
updatedAt
results {
edges {
node {
id
success
summaryObj
expectationType
expectationKwargs
raisedException
exceptionTraceback
evaluationId
expectationId
statusOrdinal
}
}
}
}
}
""",
variables={'id': evaluation_id}
)
def add_evaluation(
self,
dataset_id=None,
checkpoint_id=None,
checkpoint_name=None,
delay_evaluation=False,
results=None,
status_ordinal=None
):
"""Add a new evaluation.
Args:
dataset_id (int or str Relay id) -- The id of the dataset on which
to run the evaluation.
checkpoint_id (int or str Relay id) -- The id of the checkpoint to
evaluate.
checkpoint_name (str) -- The name of the checkpoint to evaluate
delay_evaluation (bool) -- If True, evaluation of dataset will be delayed
results (list) -- List of ge evaluation results
status_ordinal (int) -- Status ordinal of evaluation
Returns:
A dict containing the parsed results of the mutation.
"""
if not checkpoint_id and not checkpoint_name:
raise ValueError('must provide checkpoint_id or checkpoint_name')
return self.query("""
mutation addEvaluationMutation($evaluation: AddEvaluationInput!) {
addEvaluation(input: $evaluation) {
evaluation {
id
datasetId
dataset {
id
label
locatorDict
}
checkpointId
checkpoint {
id
name
}
createdById
createdBy {
id
}
organizationId
organization {
id
}
results {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
success
summaryObj
expectationType
expectationKwargs
raisedException
exceptionTraceback
evaluationId
statusOrdinal
}
}
}
statusOrdinal
updatedAt
}
}
}
""",
variables={
'evaluation': {
'datasetId': dataset_id,
'checkpointId': checkpoint_id,
'checkpointName': checkpoint_name,
'delayEvaluation': delay_evaluation,
'results': results,
'statusOrdinal': status_ordinal
}
})
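    # Usage sketch (ids and names are placeholders; `munged_results` would come from
    # munge_ge_evaluation_results above):
    #
    #     pair.add_evaluation(dataset_id=12, checkpoint_id=3)
    #     pair.add_evaluation(dataset_id=12, checkpoint_name='claims-upload-checkpoint',
    #                         delay_evaluation=True, results=munged_results)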
def evaluate_checkpoint_on_pandas_df(
self,
checkpoint_id,
pandas_df,
filename=None,
project_id=None):
"""Evaluate a expectation_suite on a pandas.DataFrame.
Args:
checkpoint_id (int or str Relay id) -- The id of the checkpoint to
evaluate.
pandas_df (pandas.DataFrame) -- The data frame on which to
evaluate the expectation_suite.
Kwargs:
filename (str) -- The filename to associate with the dataset
(default: None, the name attribute of the pandas_df argument
will be used).
project_id (int or str Relay id) -- The id of the project to associate
with the evaluation
Returns:
A dict representation of the evaluation.
"""
dataset = self.add_dataset_from_pandas_df(
pandas_df,
project_id,
filename=filename)
return self.add_evaluation(
dataset['dataset']['id'],
checkpoint_id
)
def evaluate_checkpoint_on_file(
self,
checkpoint_id,
fd,
filename=None,
project_id=None):
"""Evaluate a expectation_suite on a file.
Args:
checkpoint_id (int or str Relay id) -- The id of the checkpoint to
evaluate.
fd (file-like) -- A file descriptor or file-like object to
evaluate, opened as 'rb'.
Kwargs:
filename (str) -- The filename to associate with the dataset
            (default: None, the name attribute of the fd argument
            will be used).
project_id (int or str Relay id) -- The id of the project to associate
with the evaluation
Returns:
A dict representation of the evaluation.
"""
dataset = self.add_dataset_from_file(
fd,
project_id,
filename=filename)
return self.add_evaluation(
dataset['dataset']['id'],
checkpoint_id
)
def evaluate_pandas_df_against_checkpoint(
self,
pandas_df,
dataset_label,
checkpoint_id=None,
checkpoint_name=None):
"""
Evaluate a Pandas DataFrame against a checkpoint
:param pandas_df: (pandas.DataFrame) The data frame on which to
evaluate the checkpoint.
:param dataset_label: (str) a human-readable name to associate with
the evaluated dataset
:param checkpoint_id: (int or str Relay id) the id of the checkpoint
to evaluate against
:param checkpoint_name: (str) the name of the checkpoint to evaluate
against
:return: a Great Expectations result object, as returned by .validate method
"""
if not checkpoint_id and not checkpoint_name:
raise ValueError('must provide checkpoint_id or checkpoint_name')
if not checkpoint_id:
checkpoint_id = self.get_checkpoint_by_name(checkpoint_name)['checkpoint']['id']
expectations_config = self.get_checkpoint_as_expectations_config(
checkpoint_id=checkpoint_id, checkpoint_name=checkpoint_name)
expectation_ids = expectations_config.pop('expectation_ids', [])
ge_results = pandas_df.validate(
expectations_config=expectations_config,
result_format="SUMMARY",
catch_exceptions=True)
results = ge_results['results']
for idx, expectation_id in enumerate(expectation_ids):
results[idx]['expectation_id'] = expectation_id
munged_results = self.munge_ge_evaluation_results(ge_results=results)
new_dataset = self.add_dataset(project_id=1, label=dataset_label)
new_dataset_id = new_dataset['addDataset']['dataset']['id']
self.add_evaluation(
dataset_id=new_dataset_id,
checkpoint_id=checkpoint_id,
delay_evaluation=True,
results=munged_results
)
return ge_results
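    # Usage sketch. The frame passed in must expose a `.validate(...)` method with the
    # signature used above (e.g. a great_expectations dataset object); `ge_df` and the
    # label/checkpoint names below are placeholders:
    #
    #     ge_results = pair.evaluate_pandas_df_against_checkpoint(
    #         ge_df,
    #         dataset_label='2018-06 claims',
    #         checkpoint_name='claims-upload-checkpoint')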
def update_evaluation(self, evaluation_id, status_ordinal=None, results=None):
"""Update an evaluation.
Args:
evaluation_id (int or str Relay id) -- The id of the evaluation
to update
status_ordinal (int) -- The status ordinal of the evaluation, if any
(default: None)
results (list of dicts) -- The results, if any (default: None)
Returns:
A dict containing the parsed results of the mutation.
"""
variables = {
'updateEvaluation': {
'id': evaluation_id
}
}
if results is not None:
variables['updateEvaluation']['results'] = results
if status_ordinal is not None:
variables['updateEvaluation']['statusOrdinal'] = status_ordinal
return self.query("""
mutation($updateEvaluation: UpdateEvaluationInput!) {
updateEvaluation(input: $updateEvaluation) {
evaluation {
id
datasetId
checkpointId
createdById
createdBy {
id
}
dataset {
id
label
locatorDict
}
organizationId
organization {
id
}
checkpoint {
id
name
}
results {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
success
summaryObj
expectationType
expectationKwargs
raisedException
exceptionTraceback
evaluationId
statusOrdinal
}
}
}
statusOrdinal
updatedAt
}
}
}
""", variables=variables)
def delete_evaluation(self, evaluation_id):
"""Delete an evaluation (soft delete).
Args:
evaluation_id (int or str Relay id) -- The id of the evaluation
to delete
Returns:
A dict containing the parsed results of the mutation.
"""
variables = {
'updateEvaluation': {
'id': evaluation_id,
'deleted': True
}
}
return self.query("""
mutation($updateEvaluation: UpdateEvaluationInput!) {
updateEvaluation(input: $updateEvaluation) {
evaluation {
id
deleted
deletedAt
updatedAt
}
}
}
""", variables=variables)
def get_dataset(self, dataset_id):
"""Retrieve a dataset by its id.
Args:
dataset_id (int or str Relay id) -- The id of the dataset
to fetch
Returns:
A dict representation of the dataset.
"""
return self.query("""
query datasetQuery($id: ID!) {
dataset(id: $id) {
id
label
project {
id
}
createdBy {
id
}
locatorDict
organization {
id
}
}
}
""",
variables={'id': dataset_id}
)
def list_datasets(self):
return self.query("""{
allDatasets{
edges {
node{
id
label
locatorDict
}
}
}
}""")
def add_datasource_spec(self, name, description=None, tags=[]):
"""Add a new datasource specification object
Args:
name (str) - the name of the new datasource to be described
description (str) - a description of the datasource
tags (list) - tags to apply to the datasource specification
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addDatasourceSpecMutation($datasourceSpec: AddDatasourceSpecInput!) {
addDatasourceSpec(input: $datasourceSpec) {
datasourceSpec {
id
name
description
tags
}
}
}
""",
variables={
"datasourceSpec": {
"name": name,
"description": description,
"tags": tags
}
})
def add_other_spec(self, datasource_spec_id, name, description=None, tags=[]):
"""Add a new 'other' specification object
Args:
datasource_spec_id (int) - the id of the datasource spec to which this spec applies
name (str) - the name of the new entity to be described
description (str) - a description of the entity
tags (list) - tags to apply to the entity specification
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addOtherSpecMutation($otherSpec: AddOtherSpecInput!) {
addOtherSpec(input: $otherSpec) {
otherSpec {
id
datasourceSpecId
name
description
tags
}
}
}
""",
variables={
"otherSpec": {
"datasourceSpecId": datasource_spec_id,
"name": name,
"description": description,
"tags": tags
}
})
def add_dataset_spec(self, datasource_spec_id, name, description=None, tags=[]):
"""Add a new dataset specification object
Args:
datasource_spec_id (int) - the id of the datasource spec to which this spec applies
name (str) - the name of the new dataset to be described
description (str) - a description of the dataset
tags (list) - tags to apply to the dataset specification
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addDatasetSpecMutation($datasetSpec: AddDatasetSpecInput!) {
addDatasetSpec(input: $datasetSpec) {
datasetSpec {
id
datasourceSpecId
name
description
tags
}
}
}
""",
variables={
"datasetSpec": {
"datasourceSpecId": datasource_spec_id,
"name": name,
"description": description,
"tags": tags
}
})
def add_table_spec(self, datasource_spec_id, name, description=None, tags=[]):
"""Add a new table specification object
Args:
datasource_spec_id (int) - the id of the datasource spec to which this spec applies
name (str) - the name of the new table to be described
description (str) - a description of the table
tags (list) - tags to apply to the table specification
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addTableSpecMutation($tableSpec: AddTableSpecInput!) {
addTableSpec(input: $tableSpec) {
tableSpec {
id
datasourceSpecId
name
description
tags
}
}
}
""",
variables={
"tableSpec": {
"datasourceSpecId": datasource_spec_id,
"name": name,
"description": description,
"tags": tags
}
})
def add_column_spec(self, table_spec_id, name, description=None, tags=[]):
"""Add a new column specification object
Args:
table_spec_id (int) - the id of the TableSpec to which this spec applies
name (str) - the name of the new column to be described
description (str) - a description of the column
tags (list) - tags to apply to the column specification
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addColumnSpecMutation($columnSpec: AddColumnSpecInput!) {
addColumnSpec(input: $columnSpec) {
columnSpec {
id
tableSpecId
name
description
tags
}
}
}
""",
variables={
"columnSpec": {
"tableSpecId": table_spec_id,
"name": name,
"description": description,
"tags": tags
}
})
def add_cross_table_spec(self, datasource_spec_id, name, description=None, tags=[]):
"""Add a new cross table specification object
Args:
datasource_spec_id (int) - the id of the datasource spec to which this spec applies
name (str) - the name of the new table pair to be described
description (str) - a description of the table pair
tags (list) - tags to apply to the cross table specification
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addCrossTableSpecMutation($crossTableSpec: AddCrossTableSpecInput!) {
addCrossTableSpec(input: $crossTableSpec) {
crossTableSpec {
id
datasourceSpecId
name
description
tags
}
}
}
""",
variables={
"crossTableSpec": {
"datasourceSpecId": datasource_spec_id,
"name": name,
"description": description,
"tags": tags
}
})
def add_cross_column_spec(self, table_spec_id, name, description=None, tags=[]):
"""Add a new cross column specification object
Args:
table_spec_id (int) - the id of the table spec to which this spec applies
name (str) - the name of the new column pair to be described
description (str) - a description of the column pair
tags (list) - tags to apply to the cross column specification
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addCrossColumnSpecMutation($crossColumnSpec: AddCrossColumnSpecInput!) {
addCrossColumnSpec(input: $crossColumnSpec) {
crossColumnSpec {
id
tableSpecId
name
description
tags
}
}
}
""",
variables={
"crossColumnSpec": {
"tableSpecId": table_spec_id,
"name": name,
"description": description,
"tags": tags
}
})
def add_question_template(
self,
text,
question_type,
answer_template,
answer_validation,
story_template,
compatible_spec_types,
sc_id,
expectation_type=None,
):
"""Add a new spec question template object
Args:
text - (string) - question text
question_type - (string) - question type (radio, etc.)
expectation_type - (string) - corresponding expectation type, if applicable
answer_template - (json) - answer template
answer_validation - (json) - answer json validation info
story_template - (json) - template of answer summary
compatible_spec_types - (list) - spec types question can be applied to
sc_id - (string) - UUID
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addQuestionTemplateMutation($questionTemplate: AddQuestionTemplateInput!) {
addQuestionTemplate(input: $questionTemplate) {
questionTemplate {
id
text
questionType
answerTemplate
answerValidation
storyTemplate
compatibleSpecTypes
scId
expectationType
}
}
}
""",
variables={
"questionTemplate": {
"text": text,
"questionType": question_type,
"answerTemplate": json.dumps(answer_template),
"answerValidation": json.dumps(answer_validation),
"storyTemplate": json.dumps(story_template),
"compatibleSpecTypes": compatible_spec_types,
"scId": sc_id,
"expectationType": expectation_type
}
})
def add_question_definition(
self,
question_type,
text,
answer_template,
answer_validation,
story_template,
question_template_id=None,
expectation_type=None
):
"""Add a new spec question definition object
Args:
text - (string) - question text
question_type - (string) - question type (radio, etc.)
expectation_type - (string) - corresponding expectation type, if applicable
answer_template - (json) - answer template
answer_validation - (json) - answer json validation info
story_template - (json) - template of answer summary
question_template_id - (int) - id of corresponding question template (optional)
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addQuestionDefinitionMutation($questionDefinition: AddQuestionDefinitionInput!) {
addQuestionDefinition(input: $questionDefinition) {
questionDefinition {
id
text
questionType
answerTemplate
answerValidation
storyTemplate
questionTemplateId
expectationType
}
}
}
""",
variables={
"questionDefinition": {
"text": text,
"questionType": question_type,
"answerTemplate": json.dumps(answer_template),
"answerValidation": json.dumps(answer_validation),
"storyTemplate": json.dumps(story_template),
"questionTemplateId": question_template_id,
"expectationType": expectation_type
}
})
def add_spec_question(self, question_definition_id, spec_id, status, flagged=False):
"""Add a new spec question object
Args:
question_definition_id (int) - the id of the corresponding question definition
spec_id (int) - the id of the corresponding spec
flagged (boolean) - boolean flag
status (string) - describes status of spec question
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addSpecQuestionMutation($specQuestion: AddSpecQuestionInput!) {
addSpecQuestion(input: $specQuestion) {
specQuestion {
id
questionDefinitionId
specId
flagged
status
}
}
}
""",
variables={
"specQuestion": {
"questionDefinitionId": question_definition_id,
"specId": spec_id,
"flagged": flagged,
"status": status
}
})
def add_spec_question_answer(self, spec_question_id, status, answer, supporting_evidence={}):
"""Add a new spec question answer object
Args:
spec_question_id (int) - the id of the corresponding spec question
status (string) - describes status of spec question answer
supporting_evidence (json) - json object describing evidence in support of hypothesized answer
answer (json) - json object describing answer to corresponding spec question
Returns:
A dict containing the parsed results of the mutation"""
return self.query("""
mutation addSpecQuestionAnswerMutation($specQuestionAnswer: AddSpecQuestionAnswerInput!) {
addSpecQuestionAnswer(input: $specQuestionAnswer) {
specQuestionAnswer {
id
specQuestionId
status
supportingEvidence
answer
}
}
}
""",
variables={
"specQuestionAnswer": {
"specQuestionId": spec_question_id,
"status": status,
"supportingEvidence": json.dumps(supporting_evidence),
"answer": json.dumps(answer)
}
})
def add_dataset(self, project_id, filename=None, label=None):
"""Add a new dataset object.
Users should probably not call this function directly. Instead,
consider add_dataset_from_file or add_dataset_from_pandas_df.
Args:
filename (str) -- The filename of the new dataset.
project_id (int or str Relay id) -- The id of the project to which
the dataset belongs.
Returns:
A dict containing the parsed results of the mutation.
"""
return self.query("""
mutation addDatasetMutation($dataset: AddDatasetInput!) {
addDataset(input: $dataset) {
dataset {
id
label
project {
id
}
createdBy {
id
}
locatorDict
organization {
id
}
}
}
}
""",
variables={
'dataset': {
'locatorDict': json.dumps({'filename': filename}) if filename else json.dumps({}),
'projectId': project_id,
'label': label
}
}
)
def add_dataset_simple(self, label, checkpoint_id, locator_dict, project_id=None):
"""
Add a new Dataset object. Bypasses AddDataset mutation logic used for
manually uploaded datasets
:param label: (string) human readable identifier
:param checkpoint_id: (int or string Relay id) id of checkpoint dataset belongs to
:param locator_dict: (dict) dict containing data necessary for retrieving dataset. e.g.:
{
's3_bucket': '',
's3_key': ''
}
:param project_id: (int or string Relay id, optional) id of project dataset belongs to
:return: a dict representing the added Dataset
"""
return self.query("""
mutation addDatasetMutation($dataset: AddDatasetInput!) {
addDataset(input: $dataset) {
dataset {
id
label
project {
id
}
createdBy {
id
}
locatorDict
organization {
id
}
}
}
}
""",
variables={
'dataset': {
'checkpointId': checkpoint_id,
'locatorDict': json.dumps(locator_dict),
'label': label,
'projectId': project_id,
'simple': True
}
}
)
def add_dataset_from_file(
self, fd, project_id, filename=None):
"""Add a new dataset from a file or file-like object.
Args:
fd (file-like) -- A file descriptor or file-like object to add
as a new dataset, opened as 'rb'.
project_id (int or str Relay id) -- The id of the project to which
the dataset belongs.
Kwargs:
filename (str) -- The filename to associate with the dataset
(default: None, the name attribute of the fd argument will be
used). Note that in the case of file-like objects without
names (e.g. py2 StringIO.StringIO), this must be set.
Returns:
A dict representation of the dataset.
Raises:
AttributeError, if filename is not set and fd does not have a
name attribute.
"""
dataset = self.add_dataset(
project_id,
filename or fd.name
)
presigned_post = dataset['addDataset']['dataset']['s3Url']
self.upload_dataset(presigned_post, fd)
return self.get_dataset(dataset['addDataset']['dataset']['id'])
def add_dataset_from_pandas_df(
self, pandas_df, project_id, filename=None):
"""Add a new dataset from a pandas.DataFrame.
Args:
pandas_df (pandas.DataFrame) -- The data frame to add.
project_id (int or str Relay id) -- The id of the project to which
the dataset belongs.
Kwargs:
filename (str) -- The filename to associate with the dataset
(default: None, the name attribute of the pandas_df argument
will be used).
Returns:
A dict representation of the dataset.
Raises:
AttributeError, if filename is not set and pandas_df does not have
a name attribute.
"""
with tempfile.TemporaryFile(mode='w+') as fd:
pandas_df.to_csv(fd, encoding='UTF_8')
fd.seek(0)
return self.add_dataset_from_file(
fd,
project_id,
filename=(filename or pandas_df.name)
)
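    # Usage sketch (assumes `df` is a pandas.DataFrame; pass `filename` explicitly when
    # the frame has no `.name` attribute):
    #
    #     dataset = pair.add_dataset_from_pandas_df(df, project_id=1, filename='claims.csv')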
def upload_dataset(self, presigned_post, fd):
"""Utility function to upload a file to S3.
Users should probably not call this function directly. Instead,
consider add_dataset_from_file or add_dataset_from_pandas_df.
Args:
presigned_post (str) -- A fully qualified presigned (POST) S3
URL, including query string.
fd (filelike) -- A file-like object opened for 'rb'.
Returns:
A requests.models.Response containing the results of the POST.
"""
(s3_url, s3_querystring) = presigned_post.split('?')
form_data = parse_qs(s3_querystring)
return requests.post(s3_url, data=form_data, files={'file': fd})
def delete_dataset(self, dataset_id):
"""Delete a dataset (soft delete).
Args:
dataset_id (int or str Relay id) -- The id of the dataset
to delete
Returns:
A dict containing the parsed results of the mutation.
"""
variables = {
'updateDataset': {
'id': dataset_id,
'deleted': True
}
}
return self.query("""
mutation($updateDataset: UpdateDatasetInput!) {
updateDataset(input: $updateDataset) {
dataset {
id
deleted
deletedAt
updatedAt
}
}
}
""", variables=variables)
def munge_ge_expectations_config(self, expectations_config):
"""
Convert a Great Expectations expectations_config into a list
of expectations that can be consumed by Checkpoints
:param expectations_config: expectations_config dict as returned from
Great Expectations
:return: a list of parsed expectation dicts
"""
expectations = expectations_config['expectations']
munged_expectations = []
for expectation in expectations:
munged_expectations.append({
'expectationType': expectation['expectation_type'],
'expectationKwargs': json.dumps(expectation['kwargs'])
})
return munged_expectations
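    # Shape sketch: an expectations config such as
    #     {'expectations': [{'expectation_type': 'expect_column_to_exist',
    #                        'kwargs': {'column': 'id'}}]}
    # becomes
    #     [{'expectationType': 'expect_column_to_exist',
    #       'expectationKwargs': '{"column": "id"}'}]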
def munge_ge_expectations_list(self, expectations):
"""
Convert a Great Expectations expectation list to a list
of expectations that can be consumed by Checkpoints
:param expectations: a list of expectations as returned from
Great Expectations
:return: a list of parsed expectation dicts
"""
munged_expectations = []
for expectation in expectations:
munged_expectations.append({
'expectationType': expectation['expectation_type'],
'expectationKwargs': json.dumps(expectation['kwargs'])
})
return munged_expectations
def get_expectation_suite(self, expectation_suite_id):
"""Retrieve an existing expectation_suite.
Args:
expectation_suite_id (int or str Relay id) -- The id of the expectation_suite
to retrieve
Returns:
A dict containing the parsed expectation_suite.
"""
return self.query("""
query expectationSuiteQuery($id: ID!) {
expectationSuite(id: $id) {
id
autoinspectionStatus
organization {
id
}
expectations {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
expectationSuite {
id
}
}
}
}
}
}
""",
variables={'id': expectation_suite_id}
)
def get_expectation_suite_as_json_string(
self, expectation_suite_id, include_inactive=False):
"""Retrieve a JSON representation of a expectation_suite.
Args:
expectation_suite_id (int or str Relay id) -- The id of the expectation_suite
to retrieve
include_inactive (bool) -- If true, evaluations whose isActivated
flag is false will be included in the JSON config (default:
False)
Returns:
A JSON representation of the expectation_suite.
"""
expectation_suite = self.get_expectation_suite(expectation_suite_id)['expectationSuite']
if include_inactive:
expectations = [
expectation['node']
for expectation
in expectation_suite['expectations']['edges']]
else:
expectations = [
expectation['node']
for expectation
in expectation_suite['expectations']['edges']
if expectation['node']['isActivated']]
return json.dumps(
{'expectations': [
{
'expectation_type': expectation['expectationType'],
'kwargs': json.loads(expectation['expectationKwargs'])}
for expectation in expectations]},
indent=2,
separators=(',', ': '),
sort_keys=True)
def get_expectation_suite_as_expectations_config(
self, expectation_suite_id, include_inactive=False):
"""Retrieve an expectation suite as a great_expectations expectations config.
Kwargs:
expectation_suite_id (int or str Relay id) -- The id of the expectation suite to
retrieve
include_inactive (bool) -- If true, expectations whose isActivated
flag is false will be included in the JSON config (default:
False).
Returns:
An expectations config dict as returned by
great_expectations.dataset.DataSet.get_expectations_config.
"""
expectation_suite = self.get_expectation_suite(expectation_suite_id)['expectationSuite']
if include_inactive:
expectations = [
expectation['node']
for expectation
in expectation_suite['expectations']['edges']]
else:
expectations = [
expectation['node']
for expectation
in expectation_suite['expectations']['edges']
if expectation['node']['isActivated']]
expectations_config = {
'meta': {'great_expectations.__version__': '0.4.3'},
'dataset_name': None,
'expectations': [
{'expectation_type': expectation['expectationType'],
'kwargs': json.loads(expectation['expectationKwargs'])}
for expectation
in expectations
]}
return expectations_config
def list_expectation_suites(self, complex=False):
"""Retrieve all existing expectation_suites.
Returns:
A dict containing the parsed query.
"""
if not complex:
return self.query("""
query listExpectationSuiteQuery{
allExpectationSuites {
edges {
node {
id
name
}
}
}
}
""")
else:
return self.query("""
query listExpectationSuiteQuery{
allExpectationSuites {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
name
autoinspectionStatus
organization {
id
}
expectations {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
expectationSuite {
id
}
}
}
}
}
}
}
}
"""
)
def add_expectation_suite(self, name, autoinspect=False, dataset_id=None, expectations=None):
"""Add a new expectation_suite.
Users should probably not call this function directly. Instead,
consider add_expectation_suite_from_expectations_config.
Args:
name (str) -- The name of the expectation_suite to create.
Kwargs:
autoinspect (bool) -- Flag to populate the expectation_suite with
single-column expectations generated by autoinspection of a
dataset (default: false).
dataset_id (int or str Relay id) -- The id of the dataset to
autoinspect (default: None).
expectations (list) -- A list of expectations to associate with
the expectation_suite
Raises:
AssertionError if autoinspect is true and dataset_id is not
present, or if dataset_id is present and autoinspect is false.
Returns:
A dict containing the parsed results of the mutation.
"""
# TODO: implement nested object creation for addExpectationSuite
if autoinspect:
assert dataset_id, 'Must pass a dataset_id when autoinspecting.'
else:
assert dataset_id is None, 'Do not pass a dataset_id if not ' \
'autoinspecting.'
return self.query("""
mutation addExpectationSuiteMutation($expectationSuite: AddExpectationSuiteInput!) {
addExpectationSuite(input: $expectationSuite) {
expectationSuite {
id
name
slug
autoinspectionStatus
createdBy {
id
}
expectations {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
}
}
}
organization {
id
}
}
}
}
""",
variables={
'expectationSuite': {
'name': name,
'slug': generate_slug(name),
'autoinspect': autoinspect,
'datasetId': dataset_id,
'expectations': expectations
}
})
def add_expectation_suite_from_expectations_config(
self, expectations_config, name):
"""Create a new expectation_suite from a great_expectations expectations
config.
Args:
expectations_config (dict) - An expectations config as returned by
great_expectations.dataset.DataSet.get_expectations_config.
Note that this is not validated here or on the server side --
failures will occur at evaluation time.
name (str) - The name of the expectation_suite to create.
Returns:
A dict containing the parsed expectation_suite.
"""
expectations = self.munge_ge_expectations_config(expectations_config)
return self.add_expectation_suite(name=name, expectations=expectations)
def add_expectation_suite_from_ge_expectations_list(
self, expectations_list, name):
"""Create a new expectation_suite from a great_expectations expectations
list.
Args:
expectations_list (list) - A list of Great Expectations
formatted expectations
Note that this is not validated here or on the server side --
failures will occur at evaluation time.
name (str) - The name of the expectation_suite to create.
Returns:
A dict containing the parsed expectation_suite.
"""
expectations = self.munge_ge_expectations_list(expectations_list)
return self.add_expectation_suite(name=name, expectations=expectations)
def update_expectation_suite(
self,
expectation_suite_id,
autoinspection_status=None,
expectations=None):
"""Update an existing expectation_suite.
Args:
expectation_suite_id (int or str Relay id) -- The id of the expectation_suite
to update.
Kwargs:
autoinspection_status (str) -- The status of autoinspection, if
that is to be updated (default: None, no change).
expectations (list) -- A list of dicts representing expectations
to be created & added to the expectation_suite (default: None,
no change). Note: semantics are append.
Returns:
A dict representing the parsed results of the mutation.
"""
assert any([
autoinspection_status is not None,
expectations is not None]), \
'Must update one of autoinspection_status or expectations'
variables = {
'expectationSuite': {
'id': expectation_suite_id
}
}
if expectations is not None:
variables['expectationSuite']['expectations'] = expectations
if autoinspection_status is not None:
variables['expectationSuite']['autoinspectionStatus'] = \
autoinspection_status
result = self.query("""
mutation updateExpectationSuiteMutation($expectationSuite: UpdateExpectationSuiteInput!) {
updateExpectationSuite(input: $expectationSuite) {
expectationSuite {
id
expectations {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
expectationSuite {
id
}
}
}
}
}
}
}
""",
variables=variables
)
return result
def get_expectation(self, expectation_id):
"""Retrieve an expectation by its id.
Args:
expectation_id (int or str Relay id) -- The id of the expectation
to fetch
Returns:
A dict representation of the expectation.
"""
return self.query("""
query expectationQuery($id: ID!) {
expectation(id: $id) {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
expectationSuite {
id
}
}
}
""",
variables={'id': expectation_id}
)
def add_expectation(
self,
expectation_suite_id,
expectation_type,
expectation_kwargs,
):
"""Add a new expectation to an expectation_suite.
Args:
expectation_suite_id (int or str Relay id) -- The id of the expectation_suite
to which to add the new expectation.
expectation_type (str) -- A valid great_expectations expectation
type. Note: these are not yet validated by client or
server code, so failures will occur at evaluation time.
expectation_kwargs (JSON dict) -- Valid great_expectations
expectation kwargs, as JSON. Note: these are not yet validated
by client or server code, so failures will occur at evaluation
time.
Returns:
A dict containing the parsed results of the mutation.
Raises:
ValueError, if expectation_kwargs are not parseable as JSON
"""
# TODO: use common code (JSON schema) to validate expectation_type and
# expectation_kwargs
try:
json.loads(expectation_kwargs)
except (TypeError, ValueError):
            raise ValueError(
                'Must provide valid JSON expectation_kwargs (got %s)'
                % expectation_kwargs)
return self.query("""
mutation addExpectationMutation($expectation: AddExpectationInput!) {
addExpectation(input: $expectation) {
expectation {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
expectationSuite {
id
}
}
}
}
""",
variables={
'expectation': {
'expectationSuiteId': expectation_suite_id,
'expectationType': expectation_type,
'expectationKwargs': expectation_kwargs,
}})
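    # Usage sketch (note that expectation_kwargs must be a JSON *string*, per the
    # validation above; the suite id is a placeholder):
    #
    #     pair.add_expectation(
    #         expectation_suite_id=3,
    #         expectation_type='expect_column_to_exist',
    #         expectation_kwargs=json.dumps({'column': 'id'}))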
def update_expectation(
self,
expectation_id,
expectation_type=None,
expectation_kwargs=None,
is_activated=None):
# TODO: use common code (JSON schema) to validate expectation_type and
# expectation_kwargs
"""Update an existing expectation.
Args:
expectation_id (int or str Relay id) -- The id of the expectation
to update.
Kwargs:
expectation_type (str) -- A valid great_expectations expectation
type (default: None, no change). Note: these are not yet
validated by client or server code, so failures will occur at
evaluation time.
expectation_kwargs (str) -- Valid great_expectations
expectation kwargs, as JSON (default: None, no change).
If present, the existing expectation_kwargs will be
overwritten, so updates must include all unchanged keys from
the existing kwargs. Note: these are not yet validated by
client or server code, so failures will occur at evaluation
                time.
is_activated (bool) -- Flag indicating whether an expectation
should be evaluated (default: None, no change).
Returns:
A dict containing the parsed results of the mutation.
Raises:
AssertionError, if none of expectation_type, expectation_kwargs,
or is_activated is provided
ValueError, if expectation_kwargs are provided but not parseable
as JSON
"""
assert any([
expectation_type is not None,
expectation_kwargs is not None,
is_activated is not None]), 'Must provide expectation_type, ' \
'expectation_kwargs, or is_activated flag'
if expectation_kwargs:
try:
json.loads(expectation_kwargs)
except (TypeError, ValueError):
                raise ValueError(
                    'Must provide valid JSON expectation_kwargs (got %s)'
                    % expectation_kwargs)
variables = {
'expectation': {'id': expectation_id}}
if is_activated is not None:
variables['expectation']['isActivated'] = is_activated
if expectation_type is not None:
variables['expectation']['expectationType'] = expectation_type
if expectation_kwargs is not None:
variables['expectation']['expectationKwargs'] = expectation_kwargs
return self.query("""
mutation updateExpectationMutation($expectation: UpdateExpectationInput!) {
updateExpectation(input: $expectation) {
expectation {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
expectationSuite {
id
}
}
}
}
""",
variables=variables
)
def get_checkpoint(self, checkpoint_id):
"""Retrieve an existing checkpoint.
Args:
checkpoint_id (int or str Relay id) -- The id of the checkpoint
to retrieve
Returns:
A dict containing the parsed checkpoint.
"""
return self.query("""
query checkpointQuery($id: ID!) {
checkpoint(id: $id) {
id
name
slug
isActivated
createdBy {
id
firstName
lastName
email
}
expectationSuite {
expectations {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
}
}
}
}
}
}
""",
variables={'id': checkpoint_id}
)
def get_checkpoint_by_name(self, checkpoint_name):
"""Retrieve an existing checkpoint by name.
Args:
name (str) -- The name of the checkpoint
to retrieve
Returns:
A dict containing the parsed checkpoint.
"""
return self.query("""
query checkpointQuery($name: String!) {
checkpoint(name: $name) {
id
name
slug
isActivated
createdBy {
id
firstName
lastName
email
}
expectationSuite {
expectations {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
expectationType
expectationKwargs
isActivated
createdBy {
id
}
organization {
id
}
}
}
}
}
}
}
""",
variables={'name': checkpoint_name}
)
def get_checkpoint_as_expectations_config(
self, checkpoint_id=None, checkpoint_name=None, include_inactive=False):
"""Retrieve a checkpoint as a great_expectations expectations config.
Kwargs:
checkpoint_id (int or str Relay id) -- The id of the checkpoint to
retrieve
checkpoint_name (str) -- The name of the checkpoint to retrieve
include_inactive (bool) -- If true, evaluations whose isActivated
flag is false will be included in the JSON config (default:
False).
Returns:
An expectations config dict as returned by
great_expectations.dataset.DataSet.get_expectations_config.
"""
if not checkpoint_id and not checkpoint_name:
raise ValueError('must provide checkpoint_id or checkpoint_name')
if checkpoint_id:
checkpoint = self.get_checkpoint(checkpoint_id)
else:
checkpoint = self.get_checkpoint_by_name(checkpoint_name)
if include_inactive:
expectations = [
expectation['node']
for expectation
in checkpoint['checkpoint']['expectationSuite']['expectations']['edges']]
else:
expectations = [
expectation['node']
for expectation
in checkpoint['checkpoint']['expectationSuite']['expectations']['edges']
if expectation['node']['isActivated']]
expectation_ids = [expectation['id'] for expectation in expectations]
expectations_config = {
'meta': {'great_expectations.__version__': '0.4.4'},
'expectation_ids': expectation_ids,
'dataset_name': None,
'expectations': [
{'expectation_type': expectation['expectationType'],
'kwargs': json.loads(expectation['expectationKwargs'])}
for expectation
in expectations
]}
return expectations_config
def list_checkpoints(self):
"""
Retrieve all existing checkpoints
:return: A dict containing all checkpoints. Ex.:
{
'allCheckpoints': {
'edges': [
{
'node': {
"id": "Q2hlY2twb2ludDox",
"name": "Claims by Race and Gender-UploadCheckpoint",
"tableName": "Humana",
"slug": "claims-by-race-and-gender-upload-checkpoint",
"isActivated": true,
"createdById": 1,
"organizationId": 1,
"projectId": 1,
"expectationSuiteId": 3,
"sensorId": 1
}
}, ...
]
}
}
"""
return self.query("""
query {
allCheckpoints {
edges {
node {
id
name
tableName
slug
isActivated
createdById
organizationId
projectId
expectationSuiteId
sensorId
sensor {
type
}
}
}
}
}
""")
def add_checkpoint(
self,
name,
table_name=None,
is_activated=True,
slack_webhook=None,
expectation_suite_id=None,
sensor_id=None):
"""
Add a checkpoint.
:param name: Name of checkpoint
:param table_name: Name of associated table
:param is_activated: boolean
        :param slack_webhook: optional Slack webhook address used to create a
            configured notification on checkpoint creation
:param expectation_suite_id: The id of corresponding expectation suite
:param sensor_id: The id of corresponding sensor
:return: A dict with parsed checkpoint (see query for structure)
"""
return self.query("""
mutation addCheckpointMutation($checkpoint: AddCheckpointInput!) {
addCheckpoint(input: $checkpoint) {
checkpoint {
id
name
tableName
slug
isActivated
sensor {
id
}
expectationSuite {
id
}
createdBy {
id
}
organization {
id
}
configuredNotifications {
pageInfo {
hasNextPage
hasPreviousPage
startCursor
endCursor
}
edges {
cursor
node {
id
}
}
}
}
}
}
""",
variables={
'checkpoint': {
'name': name,
'tableName': table_name,
'slug': generate_slug(name),
'isActivated': is_activated,
'expectationSuiteId': expectation_suite_id,
'sensorId': sensor_id,
'slackWebhook': slack_webhook
}
}
)
def setup_checkpoint_from_ge_expectations_config(
self, checkpoint_name, expectations_config, slack_webhook=None):
"""
First creates a new expectation suite, which generates new default checkpoint, sensor,
and datasource for manual file upload. After a new expectation suite is created, the new
checkpoint is created (and optionally, a new Slack notification)
:param checkpoint_name: (str) the name of the checkpoint to be created
        :param expectations_config: (dict) an expectations config as returned by Great Expectations
:param slack_webhook: (str) a Slack webhook to route notifications to
:return: the dict representation of the checkpoint that was created
"""
new_expectation_suite = self.add_expectation_suite_from_expectations_config(
name=checkpoint_name, expectations_config=expectations_config)
new_expectation_suite_id = new_expectation_suite['addExpectationSuite']['expectationSuite']['id']
return self.add_checkpoint(
name=checkpoint_name, expectation_suite_id=new_expectation_suite_id, slack_webhook=slack_webhook
)
def setup_checkpoint_from_ge_expectations_list(
self, checkpoint_name, expectations_list, slack_webhook=None):
"""
First creates a new expectation suite, which generates new default checkpoint, sensor,
and datasource for manual file upload. After a new expectation suite is created, the new
checkpoint is created (and optionally, a new Slack notification)
:param checkpoint_name: (str) the name of the checkpoint to be created
        :param expectations_list: (list) an expectations list as returned by Great Expectations
:param slack_webhook: (str) a Slack webhook to route notifications to
:return: the dict representation of the checkpoint that was created
"""
new_expectation_suite = self.add_expectation_suite_from_ge_expectations_list(
name=checkpoint_name, expectations_list=expectations_list)
new_expectation_suite_id = new_expectation_suite['addExpectationSuite']['expectationSuite']['id']
return self.add_checkpoint(
name=checkpoint_name, expectation_suite_id=new_expectation_suite_id, slack_webhook=slack_webhook
)
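    # End-to-end sketch combining the helpers above (all names are placeholders):
    #
    #     expectations_list = [
    #         {'expectation_type': 'expect_column_to_exist', 'kwargs': {'column': 'id'}},
    #     ]
    #     checkpoint = pair.setup_checkpoint_from_ge_expectations_list(
    #         checkpoint_name='claims-upload-checkpoint',
    #         expectations_list=expectations_list,
    #         slack_webhook=None)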
def list_configured_notifications_on_checkpoint(self, checkpoint_id):
"""Retrieve all existing configured notifications for
a given checkpoint_id.
Returns:
A dict containing the parsed query.
"""
return self.query("""
query checkpointQuery($id: ID!) {
checkpoint(id: $id) {
configuredNotifications {
edges {
node {
id
notificationType
value
notifyOn
}
}
}
}
}
""", variables={'id': checkpoint_id})
def add_sensor(self, name, type, data_source_id=None, excluded_paths=None, sensor_config=None):
"""
Adds a new sensor.
:param name: (str) name to identify sensor
:param type: (str) type of sensor
:param data_source_id: (int or str relay id) id of associated data source
:param excluded_paths: (array of dicts) paths to exclude from evaluation on
sensor execution, of form {'path': ..., 'reason': ...}
:param sensor_config: (dict) configuration dict with info for specifying which
files are evaluated and optionally, an s3 bucket to save file after evaluation,
:return: (dict) a dict representation of added sensor
"""
variables = {
'sensor': {
'name': name,
'type': type
}
}
if data_source_id:
variables['sensor']['dataSourceId'] = data_source_id
if excluded_paths:
variables['sensor']['excludedPaths'] = json.dumps(excluded_paths)
if sensor_config:
variables['sensor']['sensorConfig'] = json.dumps(sensor_config)
return self.query("""
mutation addSensorMutation($sensor: AddSensorInput!) {
addSensor(input: $sensor) {
sensor {
id
name
type
dataSourceId
createdBy {
id
firstName
lastName
}
organization {
id
name
}
excludedPaths
sensorConfig
}
}
}""",
variables=variables
)
def update_sensor(self, sensor_id, name=None, data_source_id=None, excluded_paths=None, sensor_config=None):
"""
Updates an existing sensor.
:param sensor_id: (int or str relay id) id of sensor to update
:param name: (str) name to identify sensor
:param data_source_id: (int or str relay id) id of associated data source
:param excluded_paths: (array of dicts) paths to exclude from evaluation on
sensor execution, of form {'path': ..., 'reason': ...}
:param sensor_config: (dict) configuration dict with info for specifying which
            files are evaluated and, optionally, an s3 bucket to save files to after evaluation
:return: (dict) a dict representation of updated sensor
"""
variables = {
'sensor': {
'id': sensor_id
}
}
if name:
variables['sensor']['name'] = name
if data_source_id:
variables['sensor']['dataSourceId'] = data_source_id
if excluded_paths:
variables['sensor']['excludedPaths'] = json.dumps(excluded_paths)
if sensor_config:
variables['sensor']['sensorConfig'] = json.dumps(sensor_config)
return self.query("""
mutation updateSensorMutation($sensor: UpdateSensorInput!) {
updateSensor(input: $sensor) {
sensor {
id
name
dataSourceId
createdBy {
id
firstName
lastName
}
organization {
id
name
}
excludedPaths
sensorConfig
}
}
}""",
            variables=variables
)
def add_excluded_path_to_sensor(self, sensor_id, new_excluded_path_dict):
return self.query("""
mutation updateSensorMutation($sensor: UpdateSensorInput!) {
updateSensor(input: $sensor) {
sensor {
id
excludedPaths
}
}
}
""", variables={
'sensor': {
'id': sensor_id,
'newExcludedPathDict': json.dumps(new_excluded_path_dict)
}
}
)
def trigger_sensor(self, sensor_id):
return self.query("""
mutation triggerSensorMutation($sensor: TriggerSensorInput!) {
triggerSensor(input: $sensor) {
evaluationIds
}
}
""", variables={
'sensor': {
'id': sensor_id,
}
}
)
def add_data_source(self, name, type, is_activated=True, credentials_reference=None):
"""
Adds a new data source.
:param name: (str) name to identify data source
:param type: (str) type of data source (i.e. 's3', 'database')
:param is_activated: (bool) active status
:param credentials_reference: (dict) dict configuration with info on how to
connect to data source, e.g. {
's3_staging_bucket': ...,
'aws_access_key_id': ...,
'aws_secret_access_key': ...,
's3_bucket': ...,
'prefix': ...
}
:return: (dict) a dict representation of the added data source
"""
variables = {
'dataSource': {
'name': name,
'type': type,
'isActivated': is_activated,
}
}
if credentials_reference:
variables['dataSource']['credentialsReference'] = json.dumps(credentials_reference)
return self.query("""
mutation addDataSourceMutation($dataSource: AddDataSourceInput!) {
addDataSource(input: $dataSource) {
dataSource {
id
name
type
isActivated
createdBy {
id
firstName
lastName
}
organization {
id
name
}
credentialsReference
}
}
}""",
variables=variables
)
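    # Hedged example (hypothetical credentials; the keys mirror the docstring above):
    #
    #     client.add_data_source(
    #         name='raw-files',
    #         type='s3',
    #         credentials_reference={
    #             's3_staging_bucket': 'staging-bucket',
    #             'aws_access_key_id': '...',
    #             'aws_secret_access_key': '...',
    #             's3_bucket': 'raw-bucket',
    #             'prefix': 'incoming/'})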
def update_data_source(
self,
data_source_id,
name=None,
type=None,
is_activated=None,
test_status=None,
test_error_message=None,
credentials_reference=None
):
"""
Updates an existing data source
:param data_source_id: (int or str relay id) id of data source to update
:param name: (str) name to identify data source
:param type: (str) type of data source (i.e. 's3', 'database')
:param is_activated: (bool) active status
:param test_status: (str) test status of data source (None, 'success', 'failed')
:param test_error_message: (str) optional, error message of failed test
:param credentials_reference: (dict) dict configuration with info on how to
connect to data source, e.g. {
's3_staging_bucket': ...,
'aws_access_key_id': ...,
'aws_secret_access_key': ...,
's3_bucket': ...,
'prefix': ...
}
:return: (dict) a dict representation of the added data source
"""
variables = {
'dataSource': {
'id': data_source_id
}
}
if name:
variables['dataSource']['name'] = name
if type:
variables['dataSource']['type'] = type
        if is_activated is not None:
variables['dataSource']['isActivated'] = is_activated
if credentials_reference:
variables['dataSource']['credentialsReference'] = json.dumps(credentials_reference)
if test_status:
variables['dataSource']['testStatus'] = test_status
if test_error_message:
variables['dataSource']['testErrorMessage'] = test_error_message
return self.query("""
mutation updateDataSourceMutation($dataSource: UpdateDataSourceInput!) {
updateDataSource(input: $dataSource) {
dataSource {
id
name
type
isActivated
testStatus
testErrorMessage
createdBy {
id
firstName
lastName
}
organization {
id
name
}
credentialsReference
}
}
}""",
variables=variables
)
def get_config_property_by_name(self, name):
"""Retrieve an existing checkpoint by name.
Args:
name (str) -- The name of the config property
to retrieve
Returns:
The config property value.
"""
config_property = self.query("""
query configPropertyQuery($name: String!) {
configProperty(name: $name) {
value
}
}
""", variables={'name': name})['configProperty']
if config_property:
return config_property['value']
else:
return None
def list_config_properties(self):
return self.query("""{
allConfigProperties{
edges {
node{
id
name
value
}
}
}
}""")
def list_priority_levels(self):
return self.query("""{
allPriorityLevels {
edges {
node {
id
level
ordinal
iconClassName
colorClassName
}
}
}
}""")
def add_operation_run(
self,
operation_name,
workflow_run_id,
status,
message=None
):
"""Add a new operation_run
Args:
operation_name (string) -- name of operation
workflow_run_id (int or string) -- int id or string relay id of workflow_run
status (string) -- status of operation_run
Kwargs:
message (string) -- details about operation_run
Returns:
A dict representation of the added operation_run
"""
variables = {
'operationRun': {
'operationName': operation_name,
'workflowRunId': workflow_run_id,
'status': status
}
}
if message is not None:
variables['operationRun']['message'] = message
return self.query("""
mutation addOperationRunMutation($operationRun: AddOperationRunInput!) {
addOperationRun(input: $operationRun) {
operationRun {
id
operationName
workflowRunId
startDateTime
endDateTime
status
message
createdBy {
id
firstName
lastName
email
}
createdAt
updatedAt
}
}
}
""",
variables=variables
)
def get_operation_run(self, operation_run_id):
"""Retrieve a operation_run given its id
Args:
operation_run_id (int or str Relay id) -- a operation_run id
Returns:
A dict representation of the retrieved operation_run
"""
variables = {
'id': operation_run_id
}
return self.query("""
query operationRunQuery($id: ID!) {
operationRun(id: $id) {
id
operationName
workflowRunId
startDateTime
endDateTime
status
message
createdBy {
id
firstName
lastName
email
}
deleted
deletedAt
updatedAt
createdAt
}
}
""",
variables=variables
)
def execute_operation(self, operation_name, workflow_run_id):
"""Execute an operation
Args:
operation_name (string) -- name of operation
workflow_run_id (int or string Relay id) -- id of associated workflow_run
Returns:
A dict representation of the added operation_run
"""
variables = {
'operationRun': {
'operationName': operation_name,
'workflowRunId': workflow_run_id
}
}
return self.query("""
mutation executeOperationMutation($operationRun: ExecuteOperationInput!) {
executeOperation(input: $operationRun) {
operationRun {
id
operationName
workflowRunId
status
createdBy {
id
firstName
lastName
email
}
createdAt
updatedAt
}
}
}
""",
variables=variables
)
def update_operation_run(
self,
operation_run_id,
start_date_time=None,
end_date_time=None,
status=None,
message=None,
deleted=None
):
"""Update an existing operation_run.
Args:
operation_run_id (int or str Relay id) -- The id of the operation_run
to update.
Kwargs:
start_date_time (datetime) -- the start datetime of operation execution
end_date_time (datetime) -- the end datetime of operation execution
status (string) -- the status of operation run
message (string) -- details about operation run
deleted (boolean) -- soft delete flag
Returns:
A dict representing the parsed results of the mutation.
"""
variables = {
'operationRun': {
'id': operation_run_id
}
}
if start_date_time is not None:
variables['operationRun']['startDateTime'] = start_date_time.isoformat()
if end_date_time is not None:
variables['operationRun']['endDateTime'] = end_date_time.isoformat()
if status is not None:
allowed_statuses = ['init', 'running', 'completed_success', 'completed_failure']
assert status in allowed_statuses, f'Status must be one of: {", ".join(allowed_statuses)}.'
variables['operationRun']['status'] = status
if message is not None:
variables['operationRun']['message'] = message
if deleted is not None:
variables['operationRun']['deleted'] = deleted
result = self.query("""
mutation updateOperationRunMutation($operationRun: UpdateOperationRunInput!) {
updateOperationRun(input: $operationRun) {
operationRun {
id
operationName
workflowRunId
startDateTime
endDateTime
status
message
updatedAt
deletedAt
createdBy {
id
firstName
lastName
email
}
}
}
}
""",
variables=variables
)
return result
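    # Hedged usage sketch: only the kwargs actually passed are included in the mutation, and
    # status is checked against the allowed values before the query is issued.
    #
    #     client.update_operation_run(
    #         operation_run_id=7,
    #         end_date_time=datetime.datetime.utcnow(),  # any datetime object; value is illustrative
    #         status='completed_success')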
def get_workflow_run(self, workflow_run_id):
"""Retrieve a workflow_run given its id
Args:
workflow_run_id (int or str Relay id) -- a workflow_run id
Returns:
A dict representation of the retrieved workflow_run
"""
variables = {
'id': workflow_run_id
}
return self.query("""
query workflowRunQuery($id: ID!) {
workflowRun(id: $id) {
id
name
createdBy {
id
firstName
lastName
email
}
deleted
deletedAt
updatedAt
createdAt
}
}
""",
variables=variables
)
def get_workflow_run_status(self, workflow_run_id):
"""Retrieve status of workflow_run given workflow_run id
Args:
workflow_run_id (int or str Relay id) -- a workflow_run id
Returns:
A dict representation of the retrieved workflow_run status
"""
variables = {
'id': workflow_run_id
}
return self.query("""
query workflowRunStatusQuery($id: ID!) {
workflowRunStatus(id: $id) {
workflowRun {
id
name
createdBy {
id
firstName
lastName
email
}
deleted
deletedAt
updatedAt
createdAt
}
operationRuns {
id
operationName
workflowRunId
startDateTime
endDateTime
createdById
status
message
}
assets {
id
key
isDraft
data
workflowRunId
operationRunId
createdById
}
nextOperations
blockingAssets
}
}
""",
variables=variables
)
def get_workflow_runs_by_name(self, workflow_name):
"""Retrieve workflow_runs with a given workflow name
Args:
workflow_name (str) -- a workflow name
Returns:
A list of dict representations of the retrieved workflow_runs
"""
variables = {
'name': workflow_name
}
return self.query("""
query workflowRunsByNameQuery($name: String!) {
workflowRunsByName(name: $name) {
id
name
createdBy {
id
firstName
lastName
email
}
deleted
deletedAt
updatedAt
createdAt
}
}
""",
variables=variables
)
def get_next_workflow_run_operations(self, workflow_run_id):
"""Retrieve the next possible operations for a workflow_run with given id
Args:
workflow_run_id (int or str) -- id or string Relay id of workflow_run
Returns:
A list of operation names
"""
variables = {
'id': workflow_run_id
}
return self.query("""
query nextWorkflowRunOperationsQuery($id: ID!) {
nextWorkflowRunOperations(id: $id)
}
""",
variables=variables
)
def get_workflow_run_blocking_assets(self, workflow_run_id):
"""Retrieve a workflow run's blocking assets. Blocking assets are assets that their absence
is the only thing blocking an operation (not all of whose outputs are registered as assets) from running
Args:
workflow_run_id (int or str) -- id or string Relay id of workflow_run
Returns:
A list of asset keys
"""
variables = {
'id': workflow_run_id
}
return self.query("""
query workflowRunBlockingAssetsQuery($id: ID!) {
workflowRunBlockingAssets(id: $id)
}
""",
variables=variables
)
def add_workflow_run(self, name):
"""Add a new workflow_run
Args:
name (string) -- name of workflow_run
Returns:
A dict representation of the added workflow_run
"""
variables = {
'workflowRun': {
'name': name,
}
}
return self.query("""
mutation addWorkflowRunMutation($workflowRun: AddWorkflowRunInput!) {
addWorkflowRun(input: $workflowRun) {
workflowRun {
id
name
createdBy {
id
firstName
lastName
email
}
createdAt
updatedAt
}
}
}
""",
variables=variables
)
def get_asset(self, asset_id):
"""Retrieve an asset given its id
Args:
asset_id (int or str Relay id) -- an asset id
Returns:
A dict representation of the retrieved asset
"""
variables = {
'id': asset_id
}
return self.query("""
query assetQuery($id: ID!) {
asset(id: $id) {
id
key
data
isDraft
workflowRunId
operationRunId
createdBy {
id
firstName
lastName
email
}
deleted
deletedAt
updatedAt
createdAt
}
}
""",
variables=variables
)
def add_asset(self, key, data, workflow_run_id, is_draft, operation_run_id=None):
"""Add a new asset
Args:
workflow_run_id (int or str Relay id) -- a WorkflowRun id
key (string) -- key of asset to add
data (string) -- serialization of asset's data
            is_draft (boolean) -- boolean indicating whether asset is a draft
        Kwargs:
            operation_run_id (int or str Relay id) -- optional id of the associated operation_run
        Returns:
A dict representation of the added asset
"""
variables = {
'asset': {
'key': key,
'isDraft': is_draft,
'data': data,
                'workflowRunId': workflow_run_id
}
}
if operation_run_id is not None:
variables['asset']['operationRunId'] = operation_run_id
return self.query("""
mutation addAssetMutation($asset: AddAssetInput!) {
addAsset(input: $asset) {
asset {
id
key
data
isDraft
createdBy {
id
firstName
lastName
email
}
createdAt
updatedAt
workflowRunId
operationRunId
}
}
}
""",
variables=variables
)
def update_asset(
self,
asset_id,
is_draft=None,
data=None,
deleted=None
):
"""Update an existing asset.
Args:
asset_id (int or str Relay id) -- The id of the asset
to update.
Kwargs:
            is_draft (boolean) -- boolean indicating whether the asset is a draft
deleted (boolean) -- soft delete flag
Returns:
A dict representing the parsed results of the mutation.
"""
variables = {
'asset': {
'id': asset_id
}
}
if is_draft is not None:
variables['asset']['isDraft'] = is_draft
if data is not None:
variables['asset']['data'] = data
if deleted is not None:
variables['asset']['deleted'] = deleted
result = self.query("""
mutation updateAssetMutation($asset: UpdateAssetInput!) {
updateAsset(input: $asset) {
asset {
id
key
isDraft
data
workflowRunId
operationRunId
updatedAt
deleted
deletedAt
createdBy {
id
firstName
lastName
email
}
}
}
}
""",
variables=variables
)
return result
def get_assets(self, workflow_run_id, asset_keys, include_drafts=False):
"""Retrieve a list of assets given a workflow_run_id and list of asset_keys
Args:
workflow_run_id (int or str Relay id) -- a WorkflowRun id
asset_keys (list of strings) -- a list of asset keys to fetch
include_drafts (boolean) -- boolean indicating whether to include drafts in results
Returns:
A list of asset dicts representing asset objects
"""
variables = {
'workflowRunId': workflow_run_id,
'assetKeys': asset_keys,
'includeDrafts': include_drafts
}
return self.query("""
query assetsQuery($workflowRunId: ID!, $assetKeys: [String]!, $includeDrafts: Boolean!) {
assets(workflowRunId: $workflowRunId, assetKeys: $assetKeys, includeDrafts: $includeDrafts) {
id
key
isDraft
data
workflowRunId
operationRunId
createdBy {
id
firstName
lastName
email
}
deleted
deletedAt
}
}
""",
variables=variables)
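    # Hedged example: fetch the latest non-draft assets for two keys in one call. The parsed
    # result is keyed by the GraphQL field name (e.g. result['assets']); the asset keys are illustrative.
    #
    #     result = client.get_assets(
    #         workflow_run_id=3,
    #         asset_keys=['raw_frame', 'clean_frame'],
    #         include_drafts=False)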
|
StarcoderdataPython
|
5129800
|
<reponame>Kiraeraser/My_Blog
from django import forms
class ContactForm(forms.Form):
full_name=forms.CharField()
email=forms.EmailField()
content= forms.CharField(widget= forms.Textarea)
#custom form validation
def clean_email(self, *args, **kwargs):
email=self.cleaned_data.get('email')
if email.endswith('edu'):
            raise forms.ValidationError("This is not a valid email. Use another one.")
print(email)
return email
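# Hedged usage sketch (hypothetical view code, not part of the original form):
#
#     form = ContactForm(data={'full_name': 'Jane', 'email': '[email protected]', 'content': 'Hi'})
#     if form.is_valid():          # is_valid() runs clean_email() along with the default field validators
#         email = form.cleaned_data['email']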
|
StarcoderdataPython
|
5006678
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import frida
import time
def on_message(message, data):
if message['type'] == 'error':
print('[!] ' + message['stack'])
elif message['type'] == 'send':
print('[i] ' + message['payload'])
else:
print(message)
def main(target_process, addr, size):
session = frida.attach(target_process)
script = session.create_script("""
var buf = Memory.readByteArray(ptr('0x%x'), %d);
console.log(hexdump(buf, {
offset: 0,
length: %d,
header: true,
ansi: false
}));
""" % (addr, size, size))
script.on('message', on_message)
script.load()
time.sleep(2)
session.detach()
if __name__ == '__main__':
if len(sys.argv) < 4:
print('Usage: {} <process name or PID> <addr> <size>'.format(__file__))
sys.exit(1)
if sys.argv[1].isdigit():
target_process = int(sys.argv[1])
else:
target_process = sys.argv[1]
addr, size = int(sys.argv[2], 16), int(sys.argv[3])
main(target_process, addr, size)
|
StarcoderdataPython
|
3290633
|
<reponame>ProkopHapala/ProbeParticleModel
#!/usr/bin/python3 -u
import os
import numpy as np
import sys
import pyProbeParticle as PPU
import pyProbeParticle.GridUtils as GU
import pyProbeParticle.core as PPC
import pyProbeParticle.HighLevel as PPH
# =============== arguments definition
from optparse import OptionParser
parser = OptionParser()
parser.add_option( "-k", action="store" , type="float", help="tip stiffenss [N/m]" )
parser.add_option( "--krange", action="store" , type="float", help="tip stiffenss range (min,max,n) [N/m]", nargs=3)
parser.add_option( "-q", action="store" , type="float", help="tip charge [e]" )
parser.add_option( "--qrange", action="store" , type="float", help="tip charge range (min,max,n) [e]", nargs=3)
parser.add_option( "-b", "--boltzmann" , action="store_true", default=False, help="calculate forces with boltzmann particle" )
parser.add_option( "--bI" ,action="store_true", default=False, help="calculate current between boltzmann particle and tip" )
parser.add_option( "--tip_base", action="store_true", default=False, help="interpolates F_z field in the position of the tip_base" )
parser.add_option( "--pos", action="store_true", default=False, help="save probe particle positions" )
parser.add_option( "--vib", action="store" , type="int", default=-1, help="map PP vibration eigenmodes; 0-just eigenvals; 1-3 eigenvecs" )
parser.add_option( "--disp", action="store_true", default=False, help="save probe particle displacements")
parser.add_option( "--tipspline", action="store" , type="string", help="file where spline is stored", default=None )
parser.add_option( "--npy" , action="store_true" , help="load and save fields in npy instead of xsf", default=False)
parser.add_option( "--stm" , action="store_true" , help="load and save data for the PPSTM code" , default=False)
(options, args) = parser.parse_args()
opt_dict = vars(options)
data_format ="npy" if options.npy else "xsf"
# =============== Setup
PPU.loadParams( 'params.ini' )
print(opt_dict)
# Ks
Bds = False # Boolean double stiffness - possible double stiffness in params.ini
if opt_dict['krange'] is not None:
Ks = np.linspace( opt_dict['krange'][0], opt_dict['krange'][1], int( opt_dict['krange'][2] ) )
elif opt_dict['k'] is not None:
Ks = [ opt_dict['k'] ]
else:
    Ks = [ PPU.params['stiffness'][0] ] ; Bds = True # Boolean double stiffness - possible double stiffness in params.ini
# Qs
charged_system=False
if opt_dict['qrange'] is not None:
Qs = np.linspace( opt_dict['qrange'][0], opt_dict['qrange'][1], int( opt_dict['qrange'][2] ) )
elif opt_dict['q'] is not None:
Qs = [ opt_dict['q'] ]
else:
Qs = [ PPU.params['charge'] ]
for iq,Q in enumerate(Qs):
if ( abs(Q) > 1e-7):
charged_system=True
if options.tipspline is not None :
try:
S = np.genfromtxt(options.tipspline )
print(" loading tip spline from "+options.tipspline)
xs = S[:,0].copy(); print("xs: ", xs)
ydys = S[:,1:].copy(); print("ydys: ", ydys)
PPC.setTipSpline( xs, ydys )
#Ks = [0.0]
except:
print("cannot load tip spline from "+options.tipspline)
sys.exit()
tip_base=options.tip_base
if not tip_base:
tip_base = True if ((PPU.params["tip_base"][0] != 'None') and (PPU.params["tip_base"][0] != None)) else False
print("Ks =", Ks)
print("Qs =", Qs)
print("tip_base =", tip_base)
print(" ============= RUN ")
if ( charged_system == True):
print(" load Electrostatic Force-field ")
FFel, lvec, nDim = GU.load_vec_field( "FFel" ,data_format=data_format)
if (options.boltzmann or options.bI) :
print(" load Boltzmann Force-field ")
FFboltz, lvec, nDim = GU.load_vec_field( "FFboltz", data_format=data_format)
print(" load Lenard-Jones Force-field ")
FFLJ, lvec, nDim = GU.load_vec_field( "FFLJ" , data_format=data_format)
PPU.lvec2params( lvec )
PPC.setFF( FFLJ )
xTips,yTips,zTips,lvecScan = PPU.prepareScanGrids( )
for iq,Q in enumerate( Qs ):
if ( charged_system == True):
FF = FFLJ + FFel * Q
else:
FF = FFLJ
if options.boltzmann :
FF += FFboltz
PPC.setFF_Fpointer( FF )
for ik,K in enumerate( Ks ):
dirname = "Q%1.2fK%1.2f" %(Q,K)
print(" relaxed_scan for ", dirname)
if not os.path.exists( dirname ):
os.makedirs( dirname )
if Bds:
PPC.setTip( kSpring = np.array((PPU.params['stiffness'][0],PPU.params['stiffness'][1],0.0))/-PPU.eVA_Nm )
else:
PPC.setTip( kSpring = np.array((K,K,0.0))/-PPU.eVA_Nm )
Fs,rPPs,rTips = PPH.relaxedScan3D( xTips, yTips, zTips )
GU.save_scal_field( dirname+'/OutFz', Fs[:,:,:,2], lvecScan, data_format=data_format )
if opt_dict['vib'] >= 0:
which = opt_dict['vib']
print(" === computing eigenvectors of dynamical matix which=%i ddisp=%f" %(which,PPU.params['ddisp']))
evals,evecs = PPC.stiffnessMatrix( rTips.reshape((-1,3)), rPPs.reshape((-1,3)), which=which, ddisp=PPU.params['ddisp'] )
GU.save_vec_field( dirname+'/eigvalKs', evals .reshape( rTips.shape ), lvecScan, data_format=data_format )
if which > 0: GU.save_vec_field( dirname+'/eigvecK1', evecs[0].reshape( rTips.shape ), lvecScan, data_format=data_format )
if which > 1: GU.save_vec_field( dirname+'/eigvecK2', evecs[1].reshape( rTips.shape ), lvecScan, data_format=data_format )
if which > 2: GU.save_vec_field( dirname+'/eigvecK3', evecs[2].reshape( rTips.shape ), lvecScan, data_format=data_format )
#print "SHAPE", PPpos.shape, xTips.shape, yTips.shape, zTips.shape
if opt_dict['disp']:
GU.save_vec_field( dirname+'/PPdisp', rPPs-rTips+PPU.params['r0Probe'][0], lvecScan, data_format=data_format )
if ( opt_dict['pos'] or opt_dict['stm']):
GU.save_vec_field( dirname+'/PPpos', rPPs, lvecScan, data_format=data_format )
# Please do not change this procedure, especialy the lvecScan - it is important for the STM calculations!
if options.bI:
print("Calculating current from tip to the Boltzmann particle:")
I_in, lvec, nDim = GU.load_scal_field('I_boltzmann', data_format=data_format)
I_out = GU.interpolate_cartesian( I_in, rPPs, cell=lvec[1:,:], result=None )
del I_in;
GU.save_scal_field( dirname+'/OutI_boltzmann', I_out, lvecScan, data_format=data_format)
if tip_base:
print("Interpolating FFel_tip_z in position of the tip_base. Beware, this is higher than the PP.")
Ftip_in, lvec, nDim = GU.load_scal_field('FFel_tip', data_format=data_format)
Ftip_out = GU.interpolate_cartesian( Ftip_in, rTips, cell=lvec[1:,:], result=None )
del Ftip_in;
GU.save_scal_field( './OutFzTip_base', Ftip_out, lvecScan, data_format=data_format)
tip_base = False
# the rest is done in plot_results.py; For df, go to plot_results.py
print(" ***** ALL DONE ***** ")
#plt.show()
|
StarcoderdataPython
|
9647961
|
from bs4 import BeautifulSoup
import requests
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
import os
import secrets
import logging
def visualize(handle):
    logger = logging.getLogger(__name__)
try:
url = 'https://codeforces.com/contests/with/'
url += handle
html_page = requests.get(url)
soup = BeautifulSoup(html_page.content, 'html5lib')
logger.info("Successfully parsed soup")
table = soup.find('table', attrs = {'class' : 'user-contests-table'})
ratings_tr = table.findAll('tr')
ratings_tr = ratings_tr[1:]
ratings_list = []
for this_row in ratings_tr:
ratings_list.append(int(this_row.findAll('td')[5].text))
ratings_list.reverse()
ratings_df = pd.DataFrame(ratings_list, columns = ['RATINGS'])
ratings_df.head()
idx_df = pd.DataFrame(np.arange(1, len(ratings_list) + 1, dtype='int64'), columns = ['CONTEST'])
idx_df.head()
df = pd.concat([idx_df, ratings_df], axis = 1)
df.columns = ['CONTEST', 'RATINGS']
df.head()
model = LinearRegression()
model.fit(idx_df, ratings_df)
# Visualization
plt.plot(idx_df, ratings_df)
plt.title('Username : {}'.format(handle))
plt.xlabel('Contest ID')
plt.ylabel('Ratings')
dummy_dataset = pd.DataFrame(np.arange(0, ratings_df.shape[0]))
plt.plot(dummy_dataset, model.predict(dummy_dataset))
output_filename = secrets.token_hex(16) + '.png'
path = os.path.abspath('.') + '/app/static/generated/'
os.makedirs(path, exist_ok = True)
plt.savefig(path + output_filename)
plt.close()
return output_filename
except Exception as e:
msg = 'Couldn\'t generate graph'
logger.warning(msg)
raise Exception(e)
if __name__ == '__main__':
visualize('razdeep')
|
StarcoderdataPython
|
4821999
|
import cyxxc.inputs
#import cyxxc.game
import cyxxc.common
|
StarcoderdataPython
|
388402
|
import csv
input_path="D:/IDENUM/data-to-import/passau/enriched_filtered_data_plus.csv"
output_path="D:/IDENUM/data-to-import/passau/output.csv"
chars_to_remove='"\'\n'
target_props=["user_name", "text_translated_en", "latitude", "longitude", "date"]
input_data=open(input_path, encoding="utf8")
reader=csv.DictReader(input_data)
csv_columns = reader.fieldnames
with open(output_path, 'w', encoding="utf8", newline='') as output_data:
writer = csv.DictWriter(output_data, fieldnames=csv_columns, quoting=csv.QUOTE_ALL) #, quotechar='"', delimiter=',', escapechar='\\')
writer.writeheader()
for line in reader:
try:
for prop in line:
# if prop == "text" and line["id"] == "11":
# print("check")
if prop in target_props:
line[prop] = ''.join(c for c in str(line[prop]) if c not in chars_to_remove)
#line[prop] = line[prop].replace("\n", " ")
#line[prop] = line[prop].replace("\\", "")
#line[prop] = line[prop].replace('"', '\'')
#line[prop] = line[prop].replace("\\", "")
# line[prop] = line[prop].replace("'", "\\'")
else:
line[prop]=""
writer.writerow(line)
except Exception as err:
print("Error: ", err)
input_data.close()
|
StarcoderdataPython
|
222605
|
#!/usr/bin/env python
import numpy as np, pandas as pd, argparse, os, sys
print(sys.version)
from pygor.models.genmodel import GenModel
#####################
## Parse arguments ##
#####################
parser = argparse.ArgumentParser()
parser.add_argument("indir")
parser.add_argument("outdir")
parser.add_argument("id")
args = parser.parse_args()
marginals_path = os.path.join(os.getcwd(), args.indir, "final_marginals.txt")
parms_path = os.path.join(os.getcwd(), args.indir, "final_parms.txt")
print(marginals_path)
print(parms_path)
##################
## Define model ##
##################
g = GenModel(parms_path, marginals_path)
def iterate_event(nickname, attr):
"""Iterate over the realizations in an event for a specific attribute."""
R = g.get_event(nickname, True).realizations
return np.array([getattr(r, attr) for r in R])
def event_order(nickname):
"""Get the sort order for realizations in an event."""
return np.argsort(iterate_event(nickname, "index"))
def event_names(nickname):
"""Get the sorted names of realizations in an event."""
return iterate_event(nickname, "name")[event_order(nickname)]
def event_marginals(nickname):
"""Get the marginal probabilities of realizations in an event."""
return g.marginals[0][nickname]
def make_distr_df(dct):
"""Make a distribution dataframe from an input dictionary."""
dct.update({"id":args.id})
return pd.DataFrame(dct)
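# Note (hedged): event_names() and event_marginals() are both ordered by realization index, so the
# name vectors and marginal probability vectors below line up element-by-element when building the
# distribution dataframes.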
########################
## Get V distribution ##
########################
distributions = {}
v_names = event_names("v_choice")
v_marginals = event_marginals("v_choice")
distributions["V"] = make_distr_df({"v":v_names, "p":v_marginals})
################################
## Get VJ and J distributions ##
################################
j_names = event_names("j_choice")
vj_conditionals = event_marginals("j_choice") # J marginals *given* V choice
# Get marginal J probabilities
vj_marginals = vj_conditionals.T * v_marginals.astype(float)
j_marginals = np.sum(vj_marginals, 1)
distributions["J"] = make_distr_df({"j":j_names, "p":j_marginals})
# Get joint V/J probabilities
distributions["VJ"] = make_distr_df({"v": np.tile(v_names, len(j_names)),
"j": np.repeat(j_names, len(v_names)),
"p": vj_marginals.T.flatten()
})
#################################
## Get VDJ and D distributions ##
#################################
d_names = event_names("d_gene")
vdj_conditionals = event_marginals("d_gene")
# Get marginal D probabilities
vdj_marginals = vdj_conditionals.T * vj_marginals
d_marginals = np.einsum("ijk -> i", vdj_marginals)
distributions["D"] = make_distr_df({"d":d_names, "p":d_marginals})
# Get joint V/D/J probabilities
distributions["VDJ"] = make_distr_df({
"v": np.tile(v_names, len(d_names)*len(j_names)),
"d": np.repeat(d_names, len(v_names)*len(j_names)),
"j": np.tile(np.repeat(j_names, len(v_names)), len(d_names)),
"p": vdj_marginals.flatten()
})
#################################
## Write distributions to file ##
#################################
for k in distributions.keys():
outpath = os.path.join(args.outdir, "{}.tsv".format(k))
distributions[k].to_csv(outpath, index=False, sep="\t")
|
StarcoderdataPython
|
6528290
|
<filename>offline_messages/admin.py
# from django.contrib import admin
#
# from offline_messages.models import OfflineMessage
#
# class OfflineMessageAdmin(admin.ModelAdmin):
# list_display = [f.name for f in OfflineMessage._meta.fields]
# admin.site.register(OfflineMessage, OfflineMessageAdmin)
|
StarcoderdataPython
|
11297347
|
<filename>src/phys_frames/type_analyzer.py
#Copyright 2021 Purdue University, University of Virginia.
#Copyright 2018 Purdue University, University of Nebraska--Lincoln.
#Copyright (c) 2016, University of Nebraska NIMBUS LAB <NAME> <EMAIL>
#All rights reserved.
from type_annotator import TypeAnnotator
from type_checker import TypeChecker
from symbol_helper import SymbolHelper
from type_helper import TypeHelper
from frame_type import FrameT
import cppcheckdata
import testLinter as lint
from file_processor import FileProcessor
import fnmatch
import os
import networkx as nx
from collections import OrderedDict, defaultdict
class TypeAnalyzer:
def __init__(self):
self.current_file_under_analysis = ''
self.source_file = ''
self.function_graph = nx.DiGraph()
self.all_function_graphs = []
self.all_sorted_analysis_dicts = []
self.configurations = []
line2type = lambda:defaultdict(defaultdict)
self.file2type = defaultdict(line2type)
self.should_sort_by_function_graph = True
self.should_add_manual_type_annotations = False
self.launch_static_transforms = []
self.launch_files = []
self.sh = SymbolHelper()
self.th = TypeHelper()
self.should_restart_error_printing = True
def run_file_processor(self, target=''):
fproc = FileProcessor(target)
fproc.process_target()
def run_linter_collect_all(self, target=''):
for launch_file in self.find_files(target, '*.launch'):
file_name = os.path.relpath(launch_file, target)
print "Found launch: %s" % (file_name)
self.launch_files.append(file_name)
self.run_linter_collect(launch_file)
def run_linter_collect(self, launch_file):
transform_list, error_list = lint.process_file(launch_file)
lint.print_errors(error_list)
self.th.clear_frame_orientation()
tya = TypeAnnotator(self.th, self.sh)
transform_list = [t for t in transform_list if not t.skipped]
for transform in transform_list:
frame = tya.find_type_annotation_for_linter_transform(transform)
transform.frame = frame
self.launch_static_transforms.append(transform_list)
def run_collect_all(self, target=''):
# PROCESS C/CPP FILES
for dump_file in self.find_files(target, '*.dump'):
print "Found dump: %s" % (os.path.relpath(dump_file, target))
source_file = dump_file.replace('.dump','')
self.run_collect(dump_file, source_file)
# DEBUG
self.print_debug_output()
# PROCESS LAUNCH FILES
self.run_linter_collect_all(target)
# FIND FILE GROUPS
#self.run_file_processor(target)
def run_collect(self, dump_file, source_file=''):
''' input: a cppcheck 'dump' file containing an Abstract Syntax Tree (AST), symbol table, and token list
input: a cpp source file
returns: None
side-effect: updates cppcheck data structure with information about this analysis
'''
self.current_file_under_analysis = dump_file
self.source_file = source_file
self.th.clear_var_id_dict()
self.th.clear_frame_orientation()
# PARSE FILE
data = cppcheckdata.parsedump(dump_file)
#TODO check for multiple? Now, ONLY TEST THE FIRST CONFIGURATION
for c in data.configurations[:1]:
c = self.init_cppcheck_config_data_structures(c)
self.sh.fill_func_name_dict(c.functions)
self.function_graph = nx.DiGraph()
# GET DICT OF ALL GLOBALLY SCOPED FUNCTIONS
analysis_dict = self.find_functions(c)
# FIND ORDER FOR FUNCTION GRAPH EXPLORATION
sorted_analysis_dict = analysis_dict;
if self.should_sort_by_function_graph:
self.build_function_graph(analysis_dict)
sorted_analysis_dict = self.make_sorted_analysis_dict_from_function_graph(analysis_dict)
self.all_sorted_analysis_dicts.append(sorted_analysis_dict)
self.collect_types_outside_functions(c.tokenlist)
for function_dict in sorted_analysis_dict.values():
self.collect_param_types(function_dict)
# RUN TYPE ANNOTATOR ON ALL TOKEN PARSE TREES FOR EACH FUNCTION
for function_dict in sorted_analysis_dict.values():
self.collect_types(function_dict)
self.sh.clear_func_name_dict()
self.configurations.append(c)
def find_files(self, directory, pattern):
if os.path.isfile(directory):
yield directory
# THIS IS A DIRECTORY
for root, dirs, files in os.walk(directory):
root = os.path.abspath(root)
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def collect_types_outside_functions(self, tokenlist):
tya = TypeAnnotator(self.th, self.sh)
for t in tokenlist:
if self.should_add_manual_type_annotations and (t.variable or t.str in ['.', '[', '(']): #TODO or t.varId
if os.path.basename(t.file) in self.file2type:
line2type = self.file2type[os.path.basename(t.file)]
if t.linenr in line2type:
(var_token, var_name) = (t, t.str)
if t.str in ['.', '[', '(']:
(var_token, var_name) = self.sh.find_compound_variable_token_and_name_for_sym_token(t)
if var_name in line2type[t.linenr]:
frame = eval(line2type[t.linenr][var_name])
frame = map(lambda frame_elem: FrameT(*frame_elem), frame)
self.th.set_frame_type_for_variable_token(var_token, var_name, frame)
if t.scope.type in ['Global', 'Namespace', 'Class']:
if (not t.astParent) and (t.astOperand1 or t.astOperand2):
t.isRoot = True
tya.add_type_annotations_outside_functions(t)
def collect_param_types(self, function_dict):
tya = TypeAnnotator(self.th, self.sh)
for root_token in function_dict['root_tokens']:
tya.add_param_type_annotations(root_token)
def collect_types(self, function_dict):
tya = TypeAnnotator(self.th, self.sh)
for root_token in function_dict['root_tokens']:
tya.apply_and_propagate_values(root_token)
tya.add_type_annotations(root_token)
def find_type_errors(self, target='', errors_file=''):
tyc = TypeChecker(self.th, self.sh, target)
errors_file = 'errors.txt'
print '\n' + '= '*20
print 'INCONSISTENCIES:'
print '= '*20 + '\n'
# C/CPP FILES
for i in range(len(self.configurations)):
tyc.check_types_for_file(self.configurations[i], self.all_sorted_analysis_dicts[i])
# LAUNCH FILES
for i in range(len(self.launch_static_transforms)):
if not self.launch_static_transforms[i]:
continue
error_list = lint.validate_transforms(self.launch_static_transforms[i])
if error_list:
print '- '*40
print "FILE: %s\n" % (self.launch_files[i])
lint.print_errors(error_list)
lint.print_linter_errors(error_list, self.launch_files[i], errors_file, self.should_restart_error_printing)
self.should_restart_error_printing = False
tyc.check_types_for_launch_file(self.launch_static_transforms[i], self.launch_files[i])
# GLOBAL
tyc.check_types()
# FILE_GROUPS
fproc = FileProcessor(target)
fproc.process_target()
file_groups_list = fproc.get_file_groups_list()
for group_id, file_group in file_groups_list:
tyc.check_types_for_file_group(group_id, file_group)
tyc.print_all_errors()
tyc.print_frame_errors(errors_file, self.should_restart_error_printing)
print '= '*20 + '\n'
def load_type_annotations(self, annotation_file):
# LOAD VARIABLE FRAME TYPE DATA
with open(annotation_file) as f:
for item in (line.rstrip('\n') for line in f):
file_name, line_nr, var_name, frame = item.split(':')
file_name, line_nr, var_name, frame = file_name.strip(), line_nr.strip(), var_name.strip(), frame.strip()
self.file2type[file_name][line_nr][var_name] = frame
if self.file2type:
self.should_add_manual_type_annotations = True
def init_cppcheck_config_data_structures(self, cppcheck_configuration):
''' AUGMENT CPPCHECK DATA STRUCTURE WITH ADDITIONAL FEATURES TO SUPPORT THIS ANALYSIS
'''
c = cppcheck_configuration
for t in c.tokenlist:
t.frames = []
t.vals = []
t.isRoot = False
for f in c.functions:
f.return_type = None
f.return_frames = []
f.return_arg_var_nr = 0
f.arg_frames = []
for arg_num in f.argument.keys():
f.arg_frames.append([])
f.isAnnotated = False
for v in c.variables:
v.frames = {}
v.is_frame_set = False
v.vals = {}
v.isParam = False
v.isTransform = True if self.sh.is_ros_transform_type(v) else False
v.isUsed = False
v.checkFrame = True
#v.xyz = {}
return c
def find_functions(self, a_cppcheck_configuration):
''' LINEAR SCAN THROUGH TOKENS TO FIND 'function' TOKENS THAT HAVE GLOBAL SCOPE.
COLLECT AND RETURN DICT CONTAINING FUNCTION START AND END TOKEN POINTERS
output: dict containing function start and end tokens
'''
function_dicts = {}
# FIND FUNCTIONS IN 'SCOPES' REGION OF DUMP FILE, START AND END TOKENs
for s in a_cppcheck_configuration.scopes:
if s.type=='Function':
# SCAN ALL FUNCTIONS UNLESS LIST OF FUNCTIONS SPECIFIED
function_dicts[s.Id] = {'name': s.className,
'linenr': s.classStart.linenr,
'tokenStart': s.classStart,
'tokenEnd': s.classEnd,
'scopeObject': s,
'symbol_table': {},
'function_graph_edges': [],
'function': s.function}
# CONSTRUCT LIST OF ROOT TOKENS
function_dicts[s.Id]['root_tokens'] = self.find_root_tokens(s.classStart, s.classEnd)
#print "Found %d functions..." % len(function_dicts)
return function_dicts
def find_root_tokens(self, tokenStart, tokenEnd):
''' FOR A FUNCTION DEFINED AS ALL TOKENS FROM tokenStart TO tokenEnd, FIND THE ROOTS.
input: tokenStart, a CPPCheckData Token, first token in a function
input: tokenEnd, a CPPCheckData Token, last token in a function
output: a list of root_tokens, in flow order
'''
root_tokens_set = set()
current_token = tokenStart
while(current_token != tokenEnd): #TODO: reverse token set exploration to top-down instead of bottom-up
# HAS A PARENT
if current_token.astParent:
a_parent = current_token.astParent
has_parent = True
while has_parent:
# HAS NO PARENT, THEREFORE IS ROOT
if not a_parent.astParent:
root_tokens_set.add(a_parent)
a_parent.isRoot = True
has_parent = False
else:
a_parent = a_parent.astParent
current_token = current_token.next
root_tokens = list(root_tokens_set)
# SORT NUMERICALLY BY LINE NUMBER
root_tokens = sorted(root_tokens, key=lambda x : int(x.linenr))
return root_tokens
def build_function_graph(self, analysis_dict):
''' BUILDS DIRECTED FUNCTION GRAPH.
input: a dictionary of functions from this dump file
output: none
side effect: creates a graph linked to this object
'''
# BUILD CALL GRAPH
self.function_graph = nx.DiGraph()
G = self.function_graph
for k, function_dict in analysis_dict.iteritems():
if function_dict['function']:
node = function_dict['function'].Id
G.add_node(node)
all_attr = nx.get_node_attributes(G, 'function_id')
all_attr[node] = k
nx.set_node_attributes(G, 'function_id', all_attr)
self.add_edges_to_function_graph(function_dict, G, node)
self.function_graph = G
def add_edges_to_function_graph(self, function_dict, G, current_node):
current_token = function_dict['tokenStart']
end_token = function_dict['tokenEnd']
        # TERMINATION GUARANTEED IF DUMP FILE IS WELL-FORMED
while current_token is not end_token:
# ON FIRST LOOP, SKIP SELF-REFERENCE
current_token = current_token.next
if current_token.function:
if not G.has_edge(current_node, current_token.function.Id):
G.add_edge(current_node, current_token.function.Id)
function_dict['function_graph_edges'].append(current_token.function)
def make_sorted_analysis_dict_from_function_graph(self, analysis_dict):
''' BUILDS A TOPO SORTED FUNCTION GRAPH.
THIS ALLOWS THE ANALYSIS TO START ON FUNCTION LEAFS, SO WE CAN HOPEFULLY DISCOVER TYPES OF THE RETURN VALUE.
THE FUNCTION GRAPH MAY HAVE CYCLES (recursion, for example), THEREFORE WE REMOVE THESE EDGES FROM THE GRAPH
AND ANALYZE THEM LAST (<-- not sure this is best)
input: a dictionary of functions from this dump file
output: OrderedDict of functions
postcondition: returned dict must be the same length as the input dict, and contain all the same elements
'''
return_dict = OrderedDict()
G = self.function_graph
# TRY FINDING A DAG. IF NOT, REMOVE EDGES AND TRY AGAIN.
super_break = 0
while nx.number_of_nodes(G) > 0 and super_break < 1000:
super_break +=1
if not nx.is_directed_acyclic_graph(G):
try:
# SEARCH FOR CYCLE AND REMOVE EDGES
edges = nx.find_cycle(G)
G.remove_edges_from(edges)
print 'Function graph has cycle %s' % edges,
except:
print 'Function graph is not a DAG and does not have a cycle!'
# GIVE UP AND RETURN UNSORTED
return analysis_dict
else:
# WE HAVE A DIGRAPH, CAN PROCEED ( and topo sort )
break
if nx.number_of_nodes(G) == 0:
# RETURN UNCHANGED
return analysis_dict
# WE HAVE A DIRECTED GRAPH WITH NODES, CAN SORT AND ADD NODES TO ORDERED LIST
function_graph_topo_sort = nx.topological_sort(G)
function_graph_topo_sort_reversed = function_graph_topo_sort[::-1]
# CREATE RETURN DICT FROM TOPO SORT
for node in function_graph_topo_sort_reversed:
function_id_attr_dict = nx.get_node_attributes(G, 'function_id')
if node in function_id_attr_dict:
# ADD FUNCTION TO NEW DICTIONARY - THIS IS THE EXPLORE ORDER
return_dict[function_id_attr_dict[node]] = analysis_dict[function_id_attr_dict[node]]
# ADD ANY REMAINING FUNCTIONS NOT IN THE TOPO SORT TO THE ORDERED DICT
for k in analysis_dict.keys():
if k not in return_dict:
return_dict[k] = analysis_dict[k]
assert (len(return_dict) == len(analysis_dict))
return return_dict
def debug_print_function_graph(self, analysis_dict):
if not analysis_dict:
return
for function_dict in analysis_dict.values():
print "%s :" % function_dict['name']
for edge in function_dict['function_graph_edges']:
print ' --> %s' % edge.name
###############
# DEBUG
###############
def print_debug_output(self):
#self.print_variable_frames_keys()
#self.print_transform_variable_frames()
pass
def print_variable_frames_keys(self):
for c in self.configurations:
for v in c.variables:
if v.nameToken and v.frames:
print "%s: %s\n" % (v.nameToken.str, v.frames.keys())
def print_transform_variable_frames(self):
for c in self.configurations:
for v in c.variables:
if v.isTransform:
print "%s: %s\n" % (v.Id, v.frames,)
|
StarcoderdataPython
|
11339431
|
# This program filter raw tweet medadata into a cleaner format
# to reduce storage space required and processing time.
import couchdb
from textblob import TextBlob
import re
# connect to local server
couchserver = couchdb.Server("http://admin:[email protected]:5984/")
# delete database if exist
dbname = "clean_data_kaustub"
if dbname in couchserver:
del couchserver[dbname]
clean = couchserver.create(dbname)
# connect to raw tweet databse
crawler = couchserver['tweettest']
# filter tweets to a cleaner format
def remove_non_letters(text):
# regex patterns
hashtag = r'#\S+'
email = r'[\w\d\._-]+@\w+(\.\w+){1,3}'
website = r'http\S+|www\.\w+(\.\w+){1,3}'
retweet = r'RT\s@\S+'
mention = r'@[\w\d]+'
punctual = r'[_\+-\.,!@\?#$%^&*();\\/|<>"\':]+'
weird = r'�+'
newline = r'\n'
spaces = r'\s{2,}'
digits = r'\d+'
combined_patterns = r'|'.join((hashtag, email, website, retweet, mention, punctual, weird, newline, digits))
stripped = re.sub(combined_patterns, ' ', text)
# remove extra whitespaces
stripped = re.sub(spaces, ' ', stripped)
stripped = stripped.strip()
return stripped
def remove_emojis(text):
emoji_pattern = re.compile(
u"(\ud83d[\ude00-\ude4f])|" # emoticonsa
u"(\ud83c[\udf00-\uffff])|" # symbols & pictographs (1 of 2)
u"(\ud83d[\u0000-\uddff])|" # symbols & pictographs (2 of 2)
u"(\ud83d[\ude80-\udeff])|" # transport & map symbols
u"(\ud83c[\udde0-\uddff])|" # flags (iOS)
"\+", flags=re.UNICODE)
# return ''.join(c for c in str if c not in emoji.UNICODE_EMOJI)
return emoji_pattern.sub (r' ', text)
def filter_tweet(text):
text = remove_emojis(text)
text = remove_non_letters(text)
if not text or len(text) == 0:
return
else:
return text
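# Rough illustration of the cleaning pipeline (hypothetical tweet; output shown approximately):
#     filter_tweet('RT @user Check https://t.co/x #melbourne!!')  ->  'Check'
# The retweet marker, URL and hashtag are stripped, then leftover whitespace is collapsed.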
for i in crawler:
try:
tweet = crawler[i]["doc"]
print("hello", tweet)
if ('en' in tweet['lang']): # if tweet in English
id = tweet["id"]
day = tweet["created_at"][:3]
time = tweet["created_at"][11:13]
location = tweet['user']['location']
hashtags = tweet['entities']['hashtags']
lang = tweet['lang']
place = tweet['place']
favorite_count = tweet['favorite_count']
source = tweet['source'][37:44]
rt = tweet["retweet_count"]
coordinates = tweet["coordinates"]["coordinates"]
text = filter_tweet(tweet['text'])
length = len(text)
bbox = tweet["place"]["bounding_box"]['coordinates']
polarity = TextBlob(text).sentiment.polarity
subjectivity = TextBlob(text).sentiment.subjectivity
doc = {
'id': id,
'day': day,
'hour': time,
'user_location': location,
'hashtags': hashtags,
'lang': lang,
'place':place,
'favorite_count':favorite_count,
'source': source,
'rt': rt,
'text': text,
'length': length,
'bounding_box': bbox,
'polarity': polarity,
'subjectivity': subjectivity,
'coordinates': coordinates
}
        # save the cleaned document to the database
clean.save(doc)
except Exception as e:
print("Hi there")
pass
|
StarcoderdataPython
|
4856500
|
<reponame>stephenwashington/advent-of-code-2021<gh_stars>1-10
import itertools
def process_input(filename):
values = []
with open(filename) as f:
for line in f:
input = []
output = []
l = line.strip().split("|")
for digit in l[0].strip().split(" "):
if len(digit) == 0:
continue
input.append(sorted(digit.strip()))
for output_digit in l[1].split(" "):
if len(output_digit) == 0:
continue
output.append(sorted(output_digit.strip()))
values.append({"input": input, "output": output})
return values
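# Each line of the puzzle input looks roughly like (hypothetical example):
#     acedgfb cdfbe gcdfa fbcad dab cefabd cdfgeb eafb cagedb ab | cdfeb fcadb cdfeb cdbaf
# i.e. ten scrambled signal patterns, a '|' separator, then the four scrambled output digits.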
def part1():
values = process_input("day8-input.txt")
count = 0
for value in values:
for output_digit in value["output"]:
if len(output_digit) in [2, 4, 3, 7]:
count += 1
print(count)
def check_mapping(inputs, mapping):
five_segment = ["".join(x) for x in inputs if len(x) == 5]
six_segment = ["".join(x) for x in inputs if len(x) == 6]
scrambled_one = "".join([x for x in inputs if len(x) == 2][0])
scrambled_four = "".join([x for x in inputs if len(x) == 4][0])
scrambled_seven = "".join([x for x in inputs if len(x) == 3][0])
solution = {
"zero": "",
"one": "",
"two": "",
"three": "",
"four": "",
"five": "",
"six": "",
"seven": "",
"eight": "abcdefg",
"nine": ""
}
    # use the uniquely-sized digits 1, 4 and 7 to validate the candidate mapping
one = mapping["top_right"] + mapping["bottom_right"]
one = "".join(sorted(one))
if sorted(one) != sorted(scrambled_one):
return None
solution["one"] = one
four = mapping["top_left"] + mapping["top_right"] + mapping["middle"] + mapping["bottom_right"]
four = "".join(sorted(four))
if sorted(four) != sorted(scrambled_four):
return None
solution["four"] = four
seven = mapping["top_middle"] + mapping["top_right"] + mapping["bottom_right"]
seven = "".join(sorted(seven))
if sorted(seven) != sorted(scrambled_seven):
return None
solution["seven"] = seven
    # for each candidate in each segment-length list, confirm that it matches one of the possible numbers:
# 0: all except middle
zero = mapping["top_middle"] + mapping["top_left"] + mapping["top_right"] + mapping["bottom_left"] + mapping["bottom_right"] + mapping["bottom_middle"]
zero = "".join(sorted(zero))
if zero not in six_segment:
return None
else:
six_segment.remove(zero)
solution["zero"] = zero
# 6: all except top_right
six = mapping["top_middle"] + mapping["top_left"] + mapping["middle"] + mapping["bottom_left"] + mapping["bottom_right"] + mapping["bottom_middle"]
six = "".join(sorted(six))
if six not in six_segment:
return None
else:
six_segment.remove(six)
solution["six"] = six
# 9: all except bottom left
nine = mapping["top_middle"] + mapping["top_left"] + mapping["top_right"] + mapping["middle"] + mapping["bottom_right"] + mapping["bottom_middle"]
nine = "".join(sorted(nine))
if nine not in six_segment:
return None
else:
six_segment.remove(nine)
solution["nine"] = nine
# 2: all except top left, bottom right
two = mapping["top_middle"] + mapping["top_right"] + mapping["middle"] + mapping["bottom_left"] + mapping["bottom_middle"]
two = "".join(sorted(two))
if two not in five_segment:
return None
else:
five_segment.remove(two)
solution["two"] = two
# 3: all except top left, bottom left
three = mapping["top_middle"] + mapping["top_right"] + mapping["middle"] + mapping["bottom_right"] + mapping["bottom_middle"]
three = "".join(sorted(three))
if three not in five_segment:
return None
else:
five_segment.remove(three)
solution["three"] = three
# 5: all except top right, bottom left
five = mapping["top_middle"] + mapping["top_left"] + mapping["middle"] + mapping["bottom_right"] + mapping["bottom_middle"]
five = "".join(sorted(five))
if five not in five_segment:
return None
else:
five_segment.remove(five)
solution["five"] = five
return solution
def part2():
values = process_input("day8-input.txt")
str_to_int_mappings = {
"zero": "0",
"one": "1",
"two": "2",
"three": "3",
"four": "4",
"five": "5",
"six": "6",
"seven": "7",
"eight": "8",
"nine": "9"
}
position_mapping = {
"top_middle": "",
"top_left": "",
"top_right": "",
"middle": "",
"bottom_left": "",
"bottom_right": "",
"bottom_middle": ""
}
sum = 0
for value in values:
descrambled_solution = None
# set up the dict for easier mapping
for mapping in itertools.permutations("abcdefg"):
position_mapping["top_middle"] = mapping[0]
position_mapping["top_left"] = mapping[1]
position_mapping["top_right"] = mapping[2]
position_mapping["middle"] = mapping[3]
position_mapping["bottom_left"] = mapping[4]
position_mapping["bottom_right"] = mapping[5]
position_mapping["bottom_middle"] = mapping[6]
# check the mapping, break away if it works
solution = check_mapping(value["input"], position_mapping)
if solution is not None:
descrambled_solution = solution
break
# for the four outputs, use the determined wiring to figure out the represented number
unscrambled_outputs = []
for output in value["output"]:
joined_output = "".join(sorted(output))
for number, wiring in descrambled_solution.items():
if joined_output == wiring:
unscrambled_outputs.append(number)
actual_output_value_string = ""
for value in unscrambled_outputs:
actual_output_value_string += str_to_int_mappings[value]
sum += int(actual_output_value_string)
print(sum)
if __name__ == '__main__':
part1()
part2()
|
StarcoderdataPython
|
1805010
|
<reponame>tristen-tooming/netvisor-api-client<gh_stars>0
from marshmallow import ValidationError
from .base import Request, ListRequest
from ..exc import InvalidData
from ..responses.purchase_invoices import (
PurchaseInvoiceListResponse,
GetPurchaseInvoiceResponse
)
class GetPurchaseInvoiceRequest(Request):
method = 'GET'
uri = 'GetPurchaseInvoice.nv'
response_cls = GetPurchaseInvoiceResponse
def _raise_exception(self):
raise InvalidData(
            'Data form incorrect. '
'Purchase invoice not found with Netvisor identifier: {0}'.format(
self.params['NetvisorKey']
)
)
def parse_response(self, response):
try:
result = super(GetPurchaseInvoiceRequest, self).parse_response(response=response)
if not result:
self._raise_exception()
return result
except ValidationError:
self._raise_exception()
class PurchaseInvoiceListRequest(ListRequest):
method = 'GET'
uri = 'PurchaseInvoiceList.nv'
response_cls = PurchaseInvoiceListResponse
|
StarcoderdataPython
|
3595589
|
<reponame>LesterYHZ/Automated-Bridge-Inspection-Robot-Project
import serial
def Initialization():
    global ser  # module-level handle so Send_Signal can reuse the open port
    ser = serial.Serial("/dev/ttyUSB0", 9600)
def Send_Signal(signal):
# signal: [Int]
ser.write(bytes(signal))
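# Hedged usage sketch (assumes the robot's controller is attached on /dev/ttyUSB0):
#
#     if __name__ == "__main__":
#         Initialization()
#         Send_Signal([1])   # one byte; the value here is purely illustrative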
|
StarcoderdataPython
|
11314211
|
<filename>tests/test_url_util.py<gh_stars>10-100
"""
Copyright 2020-present Nike, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and* limitations under the License.*
"""
import unittest
from cerberus.url_util import ensure_single_trailing_slash, ensure_no_trailing_slash
class TestEnsureTrailingSlash(unittest.TestCase):
"""unit tests for url_util.ensure_trailing_slash"""
@staticmethod
def test_no_trailing_slash():
url_with_trailing_slash = ensure_single_trailing_slash("nike.com")
assert url_with_trailing_slash.endswith(".com/")
@staticmethod
def test_single_trailing_slash():
url_with_trailing_slash = ensure_single_trailing_slash("nike.com/")
assert url_with_trailing_slash.endswith(".com/")
@staticmethod
def test_multiple_trailing_slash():
url_with_trailing_slash = ensure_single_trailing_slash("nike.com//")
assert url_with_trailing_slash.endswith(".com/")
class TestEnsureNoTrailingSlash(unittest.TestCase):
"""unit tests for url_util.ensure_no_trailing_slash"""
@staticmethod
def test_no_trailing_slash():
url_without_trailing_slash = ensure_no_trailing_slash("nike.com")
assert url_without_trailing_slash.endswith(".com")
@staticmethod
def test_one_trailing_slash():
url_without_trailing_slash = ensure_no_trailing_slash("nike.com/")
assert url_without_trailing_slash.endswith(".com")
@staticmethod
def test_multiple_trailing_slash():
url_without_trailing_slash = ensure_no_trailing_slash("nike.com//")
assert url_without_trailing_slash.endswith(".com")
|
StarcoderdataPython
|
3233194
|
"""Process LN2_PATCH_FLATTEN outputs for cake plots."""
import numpy as np
import nibabel as nb
FILE1 = "/home/faruk/Documents/temp_flooding_brains/data/ding_flat/ding_flat_test.nii.gz"
FILE2 = "/home/faruk/Documents/temp_flooding_brains/data/ding_flat/ding_flat_L2.nii.gz"
OUTFILE = "/home/faruk/Documents/temp_flooding_brains/data/ding_flat/ding_flat_test_prep.nii.gz"
NR_CAKE_LAYERS = 5
# -----------------------------------------------------------------------------
nii1 = nb.load(FILE1)
dims = nii1.shape
data = nii1.get_fdata()
# Quantize norm
norm = nb.load(FILE2).get_fdata()
norm /= norm.max()
norm *= NR_CAKE_LAYERS
norm = np.ceil(norm)
# Make space on z axis for cake layers using an extra dimension
new = np.zeros((dims[0], dims[1], NR_CAKE_LAYERS, dims[2]))
# Elevate norm bins from the center outwards
nr_layers = dims[2]
for i, j in enumerate(range(NR_CAKE_LAYERS, 0, -1)):
temp = np.zeros(dims)
temp[norm == j] = data[norm == j]
new[:, :, i, :] = temp
# Flatten the extra dimension onto 3rd
new = new.reshape((dims[0], dims[1], NR_CAKE_LAYERS * dims[2]))
img = nb.Nifti1Image(new, affine=nii1.affine, header=nii1.header)
nb.save(img, OUTFILE)
print("Finished.")
|
StarcoderdataPython
|
3262140
|
# Presently unused
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
import os
from service.model import *
from service.server import app, db
app.config.from_object( os.environ.get( 'SETTINGS' ) )
migrate = Migrate( app, db )
manager = Manager( app )
manager.add_command( 'db', MigrateCommand )
if __name__ == '__main__':
manager.run()
|
StarcoderdataPython
|
1994550
|
<gh_stars>0
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import unittest
from opentelemetry.semconv.model.semantic_convention import SemanticConventionSet
from opentelemetry.semconv.templating.markdown.options import MarkdownOptions
from opentelemetry.semconv.templating.markdown import MarkdownRenderer
class TestCorrectMarkdown(unittest.TestCase):
def testRef(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/ref/general.yaml"))
semconv.parse(self.load_file("markdown/ref/rpc.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 5)
md = self.load_file("markdown/ref/input/input_rpc.md")
with open(md, "r") as markdown:
content = markdown.read()
renderer = MarkdownRenderer(self.load_file("markdown/ref/input"), semconv)
output = io.StringIO()
renderer._render_single_file(content, md, output)
with open(self.load_file("markdown/ref/expected.md"), "r") as markdown:
expected = markdown.read()
assert output.getvalue() == expected
def testInclude(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/include/faas.yaml"))
semconv.parse(self.load_file("markdown/include/http.yaml"))
semconv.parse(self.load_file("markdown/include/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
md = self.load_file("markdown/include/input/input_faas.md")
with open(md, "r") as markdown:
content = markdown.read()
renderer = MarkdownRenderer(self.load_file("markdown/include/input"), semconv)
output = io.StringIO()
renderer._render_single_file(content, md, output)
with open(self.load_file("markdown/include/expected.md"), "r") as markdown:
expected = markdown.read()
assert output.getvalue() == expected
def testDeprecated(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/deprecated/http.yaml"))
semconv.parse(self.load_file("markdown/deprecated/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 5)
with open(self.load_file("markdown/deprecated/input.md"), "r") as markdown:
content = markdown.read()
with open(self.load_file("markdown/deprecated/expected.md"), "r") as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/deprecated/",
"markdown/deprecated/input.md",
content,
expected,
)
def testStability(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/stability/stability.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 1)
with open(self.load_file("markdown/stability/input.md"), "r") as markdown:
content = markdown.read()
# Labels
with open(
self.load_file("markdown/stability/labels_expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/stability/",
"markdown/stability/input.md",
content,
expected,
)
# Badges
with open(
self.load_file("markdown/stability/badges_expected.md"), "r"
) as markdown:
expected = markdown.read()
options = MarkdownOptions(enable_stable=True, use_badge=True)
self.check_render(
semconv,
"markdown/stability/",
"markdown/stability/input.md",
content,
expected,
options,
)
def testSingle(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/single/http.yaml"))
semconv.parse(self.load_file("markdown/single/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 5)
with open(self.load_file("markdown/single/input.md"), "r") as markdown:
content = markdown.read()
with open(self.load_file("markdown/single/expected.md"), "r") as markdown:
expected = markdown.read()
self.check_render(
semconv, "markdown/single/", "markdown/single/input.md", content, expected,
)
def testEmpty(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/empty/http.yaml"))
semconv.parse(self.load_file("markdown/empty/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 5)
with open(self.load_file("markdown/empty/input.md"), "r") as markdown:
content = markdown.read()
with open(self.load_file("markdown/empty/expected.md"), "r") as markdown:
expected = markdown.read()
self.check_render(
semconv, "markdown/empty/", "markdown/empty/input.md", content, expected
)
def testExampleArray(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/example_array/http.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 1)
with open(self.load_file("markdown/example_array/input.md"), "r") as markdown:
content = markdown.read()
with open(
self.load_file("markdown/example_array/expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/example_array/",
"markdown/example_array/input.md",
content,
expected,
)
def testMultiple(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/multiple/http.yaml"))
semconv.parse(self.load_file("markdown/multiple/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 5)
with open(self.load_file("markdown/multiple/input.md"), "r") as markdown:
content = markdown.read()
with open(self.load_file("markdown/multiple/expected.md"), "r") as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/multiple/",
"markdown/multiple/input.md",
content,
expected,
)
def testEnumInt(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/enum_int/rpc.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 1)
with open(self.load_file("markdown/enum_int/input.md"), "r") as markdown:
content = markdown.read()
with open(self.load_file("markdown/enum_int/expected.md"), "r") as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/enum_int/",
"markdown/enum_int/input.md",
content,
expected,
)
def testExtendConstraint(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/extend_constraint/database.yaml"))
semconv.parse(self.load_file("markdown/extend_constraint/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
with open(
self.load_file("markdown/extend_constraint/input.md"), "r"
) as markdown:
content = markdown.read()
with open(
self.load_file("markdown/extend_constraint/expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/extend_constraint/",
"markdown/extend_constraint/input.md",
content,
expected,
)
def test_error_missing_end(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/missing_end_tag/http.yaml"))
semconv.parse(self.load_file("markdown/missing_end_tag/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 5)
with open(self.load_file("markdown/missing_end_tag/input.md"), "r") as markdown:
content = markdown.read()
with self.assertRaises(Exception) as ex:
renderer = MarkdownRenderer(
self.load_file("markdown/missing_end_tag/"), semconv
)
renderer._render_single_file(
content, "markdown/missing_end_tag/input.md", io.StringIO()
)
self.assertEqual("Missing ending <!-- endsemconv --> tag", ex.exception.args[0])
def test_error_wrong_id(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/wrong_semconv_id/http.yaml"))
semconv.parse(self.load_file("markdown/wrong_semconv_id/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 5)
with open(
self.load_file("markdown/wrong_semconv_id/input.md"), "r"
) as markdown:
content = markdown.read()
with self.assertRaises(Exception) as ex:
renderer = MarkdownRenderer(
self.load_file("markdown/wrong_semconv_id/"), semconv
)
renderer._render_single_file(
content, "markdown/wrong_semconv_id/input.md", io.StringIO()
)
self.assertEqual("Semantic Convention ID db not found", ex.exception.args[0])
def test_empty_table(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/empty_table/http.yaml"))
semconv.parse(self.load_file("markdown/empty_table/faas.yaml"))
semconv.parse(self.load_file("markdown/empty_table/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
with open(self.load_file("markdown/empty_table/input.md"), "r") as markdown:
content = markdown.read()
with open(self.load_file("markdown/empty_table/expected.md"), "r") as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/empty_table/",
"markdown/empty_table/input.md",
content,
expected,
)
def test_parameter_full(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_full/http.yaml"))
semconv.parse(self.load_file("markdown/parameter_full/faas.yaml"))
semconv.parse(self.load_file("markdown/parameter_full/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
with open(self.load_file("markdown/parameter_full/input.md"), "r") as markdown:
content = markdown.read()
with open(
self.load_file("markdown/parameter_full/expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/parameter_full/",
"markdown/parameter_full/input.md",
content,
expected,
)
def test_parameter_tag(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_tag/database.yaml"))
semconv.parse(self.load_file("markdown/parameter_tag/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 6)
with open(self.load_file("markdown/parameter_tag/input.md"), "r") as markdown:
content = markdown.read()
with open(
self.load_file("markdown/parameter_tag/expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/parameter_tag/",
"markdown/parameter_tag/input.md",
content,
expected,
)
def test_parameter_tag_empty(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_tag_empty/database.yaml"))
semconv.parse(self.load_file("markdown/parameter_tag_empty/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 6)
with open(
self.load_file("markdown/parameter_tag_empty/input.md"), "r"
) as markdown:
content = markdown.read()
with open(
self.load_file("markdown/parameter_tag_empty/expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/parameter_tag_empty/",
"markdown/parameter_tag_empty/input.md",
content,
expected,
)
def test_parameter_tag_no_attr(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_tag_no_attr/database.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 1)
with open(
self.load_file("markdown/parameter_tag_no_attr/input.md"), "r"
) as markdown:
content = markdown.read()
with open(
self.load_file("markdown/parameter_tag_no_attr/expected.md"), "r"
) as markdown:
expected = markdown.read()
with self.assertRaises(Exception) as ex:
self.check_render(
semconv,
"markdown/parameter_tag_no_attr/",
"markdown/parameter_tag_no_attr/input.md",
content,
expected,
)
self.assertEqual(
"No attributes retained for 'db' filtering by 'wrong'", ex.exception.args[0]
)
def test_parameter_remove_constraint(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(
self.load_file("markdown/parameter_remove_constraint/database.yaml")
)
semconv.parse(
self.load_file("markdown/parameter_remove_constraint/general.yaml")
)
semconv.finish()
self.assertEqual(len(semconv.models), 6)
with open(
self.load_file("markdown/parameter_remove_constraint/input.md"), "r"
) as markdown:
content = markdown.read()
with open(
self.load_file("markdown/parameter_remove_constraint/expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/parameter_remove_constraint/",
"markdown/parameter_remove_constraint/input.md",
content,
expected,
)
def test_parameter_empty(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_empty/http.yaml"))
semconv.parse(self.load_file("markdown/parameter_empty/faas.yaml"))
semconv.parse(self.load_file("markdown/parameter_empty/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
with open(self.load_file("markdown/parameter_empty/input.md"), "r") as markdown:
content = markdown.read()
with open(
self.load_file("markdown/parameter_empty/expected.md"), "r"
) as markdown:
expected = markdown.read()
self.check_render(
semconv,
"markdown/parameter_empty/",
"markdown/parameter_empty/input.md",
content,
expected,
)
def test_wrong_parameter(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_wrong/http.yaml"))
semconv.parse(self.load_file("markdown/parameter_wrong/faas.yaml"))
semconv.parse(self.load_file("markdown/parameter_wrong/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
with open(self.load_file("markdown/parameter_wrong/input.md"), "r") as markdown:
content = markdown.read()
expected = ""
with self.assertRaises(ValueError) as ex:
self.check_render(
semconv,
"markdown/parameter_wrong/",
"markdown/parameter_wrong/input.md",
content,
expected,
)
self.fail()
e = ex.exception
msg = e.args[0]
self.assertIn("Unexpected parameter", msg)
self.assertIn("`invalid`", msg)
def test_wrong_syntax(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_wrong_syntax/http.yaml"))
semconv.parse(self.load_file("markdown/parameter_wrong_syntax/faas.yaml"))
semconv.parse(self.load_file("markdown/parameter_wrong_syntax/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
with open(
self.load_file("markdown/parameter_wrong_syntax/input.md"), "r"
) as markdown:
content = markdown.read()
expected = ""
with self.assertRaises(ValueError) as ex:
self.check_render(
semconv,
"markdown/parameter_wrong_syntax/",
"markdown/parameter_wrong_syntax/input.md",
content,
expected,
)
self.fail()
e = ex.exception
msg = e.args[0]
self.assertIn("Wrong syntax", msg)
def test_wrong_duplicate(self):
semconv = SemanticConventionSet(debug=False)
semconv.parse(self.load_file("markdown/parameter_wrong_duplicate/http.yaml"))
semconv.parse(self.load_file("markdown/parameter_wrong_duplicate/faas.yaml"))
semconv.parse(self.load_file("markdown/parameter_wrong_duplicate/general.yaml"))
semconv.finish()
self.assertEqual(len(semconv.models), 7)
with open(
self.load_file("markdown/parameter_wrong_duplicate/input.md"), "r"
) as markdown:
content = markdown.read()
expected = ""
with self.assertRaises(ValueError) as ex:
self.check_render(
semconv,
"markdown/parameter_wrong_duplicate/",
"markdown/parameter_wrong_duplicate/input.md",
content,
expected,
)
self.fail()
e = ex.exception
msg = e.args[0]
self.assertIn("Parameter", msg)
self.assertIn("already defined", msg)
def test_units(self):
semconv = SemanticConventionSet(debug=True)
semconv.parse(self.load_file("yaml/metrics/units.yaml"))
semconv.finish()
assert len(semconv.models) == 1
content = self.read_file("markdown/metrics/units_input.md")
expected = self.read_file("markdown/metrics/units_output.md")
self.check_render(
semconv,
"markdown/metrics/",
"markdown/metrics/units_input.md",
content,
expected,
)
def check_render(
self,
semconv,
folder,
file_name,
content: str,
expected: str,
options=MarkdownOptions(),
):
renderer = MarkdownRenderer(self.load_file(folder), semconv, options)
output = io.StringIO()
renderer._render_single_file(content, self.load_file(file_name), output)
result = output.getvalue()
assert result == expected
_TEST_DIR = os.path.dirname(__file__)
def read_file(self, filename):
with open(self.load_file(filename), "r") as test_file:
return test_file.read()
def load_file(self, filename):
return os.path.join(self._TEST_DIR, "..", "..", "data", filename)
|
StarcoderdataPython
|
3302575
|
<gh_stars>10-100
from collections import OrderedDict
from django.utils.encoding import force_text
from django.utils.http import urlencode
from rest_framework import exceptions
from rest_framework import serializers
from rest_framework.metadata import SimpleMetadata
from rest_framework.request import clone_request
from rest_framework.reverse import reverse
from rest_framework.utils.field_mapping import ClassLookupDict
from waldur_core.core.utils import sort_dict
class ActionSerializer(object):
def __init__(self, func, name, request, view, resource):
self.func = func
self.name = name
self.request = request
self.resource = resource
self.view = view
def serialize(self):
reason = self.get_reason()
return {
'title': self.get_title(),
'method': self.get_method(),
'destructive': self.is_destructive(),
'url': self.get_url(),
'reason': reason,
'enabled': not reason
}
def is_destructive(self):
if self.name == 'destroy':
return True
return getattr(self.func, 'destructive', False)
def get_title(self):
try:
return getattr(self.func, 'title')
except AttributeError:
return self.name.replace('_', ' ').title()
def get_reason(self):
try:
self.view.initial(self.request)
except exceptions.APIException as e:
return force_text(e)
def get_method(self):
if self.name == 'destroy':
return 'DELETE'
elif self.name == 'update':
return 'PUT'
return getattr(self.func, 'method', 'POST')
def get_url(self):
base_url = self.request.build_absolute_uri()
method = self.get_method()
if method in ('DELETE', 'PUT'):
return base_url
return base_url + self.name + '/'
def merge_dictionaries(a, b):
new = a.copy()
new.update(b)
return new
class ActionsMetadata(SimpleMetadata):
"""
    Differences from the SimpleMetadata class:
    1) Skip read-only fields, because options are used only for provisioning new resources.
    2) Don't expose choices for fields with a queryset in order to reduce the size of the response.
    3) Attach actions metadata.
"""
label_lookup = ClassLookupDict(
mapping=merge_dictionaries({
serializers.JSONField: 'text'
}, SimpleMetadata.label_lookup.mapping)
)
def determine_metadata(self, request, view):
self.request = request
metadata = OrderedDict()
if view.lookup_field in view.kwargs:
metadata['actions'] = self.get_actions(request, view)
else:
metadata['actions'] = self.determine_actions(request, view)
return metadata
def get_actions(self, request, view):
"""
Return metadata for resource-specific actions,
such as start, stop, unlink
"""
metadata = OrderedDict()
actions = self.get_resource_actions(view)
resource = view.get_object()
for action_name, action in actions.items():
if action_name == 'update':
view.request = clone_request(request, 'PUT')
else:
view.action = action_name
data = ActionSerializer(action, action_name, request, view, resource)
metadata[action_name] = data.serialize()
if not metadata[action_name]['enabled']:
continue
fields = self.get_action_fields(view, action_name, resource)
if not fields:
metadata[action_name]['type'] = 'button'
else:
metadata[action_name]['type'] = 'form'
metadata[action_name]['fields'] = fields
view.action = None
view.request = request
return metadata
@classmethod
def get_resource_actions(cls, view):
actions = {}
disabled_actions = getattr(view.__class__, 'disabled_actions', [])
for key in dir(view.__class__):
callback = getattr(view.__class__, key)
if getattr(callback, 'deprecated', False):
continue
if 'post' not in getattr(callback, 'bind_to_methods', []):
continue
if key in disabled_actions:
continue
actions[key] = callback
if 'DELETE' in view.allowed_methods and 'destroy' not in disabled_actions:
actions['destroy'] = view.destroy
if 'PUT' in view.allowed_methods and 'update' not in disabled_actions:
actions['update'] = view.update
return sort_dict(actions)
def get_action_fields(self, view, action_name, resource):
"""
Get fields exposed by action's serializer
"""
serializer = view.get_serializer(resource)
fields = OrderedDict()
if not isinstance(serializer, view.serializer_class) or action_name == 'update':
fields = self.get_fields(serializer.fields)
return fields
def get_serializer_info(self, serializer):
"""
Given an instance of a serializer, return a dictionary of metadata
about its fields.
"""
if hasattr(serializer, 'child'):
# If this is a `ListSerializer` then we want to examine the
# underlying child serializer instance instead.
serializer = serializer.child
return self.get_fields(serializer.fields)
def get_fields(self, serializer_fields):
"""
Get fields metadata skipping empty fields
"""
fields = OrderedDict()
for field_name, field in serializer_fields.items():
# Skip tags field in action because it is needed only for resource creation
# See also: WAL-1223
if field_name == 'tags':
continue
info = self.get_field_info(field, field_name)
if info:
fields[field_name] = info
return fields
def get_field_info(self, field, field_name):
"""
Given an instance of a serializer field, return a dictionary
of metadata about it.
"""
field_info = OrderedDict()
field_info['type'] = self.label_lookup[field]
field_info['required'] = getattr(field, 'required', False)
attrs = [
'label', 'help_text', 'default_value', 'placeholder', 'required',
'min_length', 'max_length', 'min_value', 'max_value', 'many'
]
if getattr(field, 'read_only', False):
return None
for attr in attrs:
value = getattr(field, attr, None)
if value is not None and value != '':
field_info[attr] = force_text(value, strings_only=True)
if 'label' not in field_info:
field_info['label'] = field_name.replace('_', ' ').title()
if hasattr(field, 'view_name'):
list_view = field.view_name.replace('-detail', '-list')
base_url = reverse(list_view, request=self.request)
field_info['type'] = 'select'
field_info['url'] = base_url
if hasattr(field, 'query_params'):
field_info['url'] += '?%s' % urlencode(field.query_params)
field_info['value_field'] = getattr(field, 'value_field', 'url')
field_info['display_name_field'] = getattr(field, 'display_name_field', 'display_name')
if hasattr(field, 'choices') and not hasattr(field, 'queryset'):
field_info['choices'] = [
{
'value': choice_value,
'display_name': force_text(choice_name, strings_only=True)
}
for choice_value, choice_name in field.choices.items()
]
return field_info
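# Hypothetical wiring (not part of this module): ActionsMetadata is typically enabled
# per-view via `metadata_class = ActionsMetadata` or globally through DRF settings, e.g.
#
#   REST_FRAMEWORK = {
#       'DEFAULT_METADATA_CLASS': 'path.to.ActionsMetadata',  # dotted path is an assumption
#   }
#
# after which an OPTIONS request on a resource detail URL returns the "actions" metadata
# serialized above.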
|
StarcoderdataPython
|
5149835
|
<filename>src/data/make_dataset.py
import pickle
import re
from os import listdir
from os.path import isfile, join
import pandas as pd
from utils.constants import RAW_DATA_PATH, PROCESSED_DATA_PATH
def make_dataset():
raw_data_file_name = [file_name for file_name in listdir(RAW_DATA_PATH) if isfile(join(RAW_DATA_PATH, file_name))]
    raw_data_file_name = [file_name for file_name in raw_data_file_name if re.search(r'\.csv$', file_name)]
raw_data_files = [pd.read_csv(RAW_DATA_PATH + file_name) for file_name in raw_data_file_name]
final_data_file = pd.concat(raw_data_files, axis=0, ignore_index=True)
final_data_file.to_csv(PROCESSED_DATA_PATH + 'final_data.csv')
with open(PROCESSED_DATA_PATH + 'final_data.pickle', 'wb') as file:
pickle.dump(final_data_file, file)
print("Complete")
|
StarcoderdataPython
|
5193379
|
<reponame>hopelife/mstb
cd C:\dev\projects\mstb
conda activate ml32
python -m unittest tests.test_mongodb_handler
|
StarcoderdataPython
|
218033
|
import logging
import logging.handlers
import os
import subprocess
import time
LENGTH = 2
DELAY = 0.05
logger = logging.getLogger(__name__)
class State(object):
"""Abstract. Contains the state of a set of LEDs."""
def __init__(self, strand, start, end):
self.strand = strand
self.start = start # inclusive
self.end = end # exclusive
self.setup()
def setup(self):
raise NotImplementedError()
def tick(self):
raise NotImplementedError()
class Fade(State):
"""Fades a range of LEDs on and off."""
def setup(self):
self.value = 0
self.step = 1
self.high = 15
self.low = 0
def tick(self):
self.value += self.step
if self.value > self.high:
self.value = self.high - self.step
self.step *= -1
elif self.value < self.low:
self.value = self.low - self.step
self.step *= -1
for i in range(self.start, self.end):
self.strand[i] = (self.value, 0, 0)
class Display(object):
"""Contains the state of the entire LED display."""
def __init__(self, strand, delay):
self.strand = strand
self.states = []
self.delay = delay
self._button_handles = {}
self._button_values = {}
self._on_press = {}
self._on_release = {}
        # report every 5 minutes = 300 s/report = 6000 ticks @ 0.05 s/tick
self.loop_limit = 300 / delay
self.loop_cnt = 0
self.loop_total = 0
self.loop_max = 0
def register_state(self, state_cls, length=0, index=-1):
if index != -1:
if index > len(self.states):
raise ValueError("Invalid index: {} exceeds {}".format(
index, len(self.states)))
self.states = self.states[0:index]
start = 0
if self.states:
start = self.states[-1].end
if start >= len(self.strand):
raise ValueError("Invalid start: {} >= {}".format(
start, len(self.strand)))
if length == 0:
length = len(self.strand) - start
self.states.append(state_cls(self.strand, start, start + length))
def loop(self):
while True:
start = time.time()
self.read()
self.tick()
self.strand.show()
self.sleep(time.time() - start)
def read(self):
for btn, value in self._button_values.items():
button = self._button_handles[btn]
if not value and button.value:
logger.info("button %s pressed", btn)
self._button_values[btn] = True
if btn in self._on_press:
self._on_press[btn]()
elif value and not button.value:
logger.info("button %s released", btn)
self._button_values[btn] = False
if btn in self._on_release:
self._on_release[btn]()
def sleep(self, secs=0):
self.loop_total += secs
self.loop_cnt += 1
if secs > self.loop_max:
self.loop_max = secs
if self.loop_cnt == self.loop_limit:
logger.info("Time states: avg={:0.4f}s, max={:0.4f}s".format(
self.loop_total / self.loop_limit,
self.loop_max
))
self.loop_cnt = 0
self.loop_max = 0
self.loop_total = 0
if self.delay > secs:
time.sleep(self.delay - secs)
def tick(self):
for state in self.states:
state.tick()
def register_onpress(self, pin, action):
btn = str(pin)
logger.info("Registered onpress for %s", btn)
self._button_handles[btn] = pin
self._button_values[btn] = False
self._on_press[btn] = action
def register_onrelease(self, pin, action):
btn = str(pin)
logger.info("Registered onrelease for %s", btn)
self._button_handles[btn] = pin
self._button_values[btn] = False
self._on_release[btn] = action
def clear(self):
for i in range(len(self.strand)):
self.strand[i] = (0, 0, 0)
self.strand.show()
def setup_logger():
logger.setLevel(logging.DEBUG)
log_format = logging.Formatter(
'%(asctime)s %(levelname)7s %(name)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
log_stream = logging.StreamHandler()
log_stream.setFormatter(log_format)
logging.getLogger().addHandler(log_stream)
log_file = logging.handlers.TimedRotatingFileHandler(
'/home/pi/logs/bookshelf-lights.log', when='d', backupCount=30)
log_file.setFormatter(log_format)
logging.getLogger().addHandler(log_file)
def main():
import board
import gpiozero
import neopixel
setup_logger()
logger.info("Starting up... Strand length = %s, delay = %0.3f",
LENGTH, DELAY)
display = Display(neopixel.NeoPixel(board.D18, LENGTH), DELAY)
def button_pressed(button_num):
logger.info("button %s pressed", button_num)
def shutdown():
display.clear()
subprocess.call('halt', shell=False)
display.register_state(Fade, length=1)
display.register_onpress(
gpiozero.Button(4), shutdown)
display.register_onpress(
gpiozero.Button(17), lambda: button_pressed(2))
try:
display.loop()
except BaseException as e:
display.clear()
logger.error("Error: %s", e.__class__.__name__, exc_info=True)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
9613515
|
val = ()
valpar = 0
valparcum = ()
print(type(val))
for c in range(0,4):
    val += (int(input('Enter a number: ')),)
valpar = val[c]
if valpar % 2 == 0:
valparcum += valpar,
print(f'\nYou entered the values {val}')
print(f'The value 9 appeared {val.count(9)} time(s).')
if 3 in val:
    print(f'Position of the first 3 entered: {val.index(3)+1}')
else:
    print('No 3 was entered.')
if valparcum == ():
    print('No even number was entered.')
else:
    print(f'The even numbers were: {valparcum}')
|
StarcoderdataPython
|
1800379
|
from utils import *
from tqdm import tqdm
def pqDist_one(C, N_books, g_x, q_x):
l1, l2 = C.shape
L_word = int(l2/N_books)
D_C = T.zeros((l1, N_books), dtype=T.float32)
q_x_split = T.split(q_x, L_word, 0)
g_x_split = np.split(g_x.cpu().data.numpy(), N_books, 1)
C_split = T.split(C, L_word, 1)
D_C_split = T.split(D_C, 1, 1)
for j in range(N_books):
for k in range(l1):
D_C_split[j][k] =T.norm(q_x_split[j]-C_split[j][k], 2)
#D_C_split[j][k] = T.norm(q_x_split[j]-C_split[j][k], 2).detach() #for PyTorch version over 1.9
if j == 0:
dist = D_C_split[j][g_x_split[j]]
else:
dist = T.add(dist, D_C_split[j][g_x_split[j]])
Dpq = T.squeeze(dist)
return Dpq
def Indexing(C, N_books, X):
l1, l2 = C.shape
L_word = int(l2/N_books)
x = T.split(X, L_word, 1)
y = T.split(C, L_word, 1)
for i in range(N_books):
diff = squared_distances(x[i], y[i])
arg = T.argmin(diff, dim=1)
min_idx = T.reshape(arg, [-1, 1])
if i == 0:
quant_idx = min_idx
else:
quant_idx = T.cat((quant_idx, min_idx), dim=1)
return quant_idx
def Evaluate_mAP(C, N_books, gallery_codes, query_codes, gallery_labels, query_labels, device, TOP_K=None):
num_query = query_labels.shape[0]
mean_AP = 0.0
with tqdm(total=num_query, desc="Evaluate mAP", bar_format='{desc:<15}{percentage:3.0f}%|{bar:10}{r_bar}') as pbar:
for i in range(num_query):
# Retrieve images from database
retrieval = (query_labels[i, :] @ gallery_labels.t() > 0).float()
# Arrange position according to hamming distance
retrieval = retrieval[T.argsort(pqDist_one(C, N_books, gallery_codes, query_codes[i]))][:TOP_K]
# Retrieval count
retrieval_cnt = retrieval.sum().int().item()
# Can not retrieve images
if retrieval_cnt == 0:
continue
# Generate score for every position
score = T.linspace(1, retrieval_cnt, retrieval_cnt).to(device)
# Acquire index
index = (T.nonzero(retrieval == 1, as_tuple=False).squeeze() + 1.0).float().to(device)
mean_AP += (score / index).mean()
pbar.update(1)
mean_AP = mean_AP / num_query
return mean_AP
def DoRetrieval(device, args, net, C):
print("Do Retrieval!")
trainset = torchvision.datasets.CIFAR10(root=args.data_dir, train=True, download=args.if_download, transform=transforms.ToTensor())
Gallery_loader = T.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
testset = torchvision.datasets.CIFAR10(root=args.data_dir, train=False, download=args.if_download, transform=transforms.ToTensor())
Query_loader = T.utils.data.DataLoader(testset, batch_size=args.batch_size, shuffle=False, num_workers=args.num_workers)
net.eval()
with T.no_grad():
with tqdm(total=len(Gallery_loader), desc="Build Gallery", bar_format='{desc:<15}{percentage:3.0f}%|{bar:10}{r_bar}') as pbar:
for i, data in enumerate(Gallery_loader, 0):
gallery_x_batch, gallery_y_batch = data[0].to(device), data[1].to(device)
outputs = net(gallery_x_batch)
gallery_c_batch = Indexing(C, args.N_books, outputs[0])
gallery_y_batch = T.eye(args.num_cls)[gallery_y_batch]
if i == 0:
gallery_c = gallery_c_batch
gallery_y = gallery_y_batch
else:
gallery_c = T.cat([gallery_c, gallery_c_batch], 0)
gallery_y = T.cat([gallery_y, gallery_y_batch], 0)
pbar.update(1)
with tqdm(total=len(Query_loader), desc="Compute Query", bar_format='{desc:<15}{percentage:3.0f}%|{bar:10}{r_bar}') as pbar:
for i, data in enumerate(Query_loader, 0):
query_x_batch, query_y_batch = data[0].to(device), data[1].to(device)
outputs = net(query_x_batch)
query_y_batch = T.eye(args.num_cls)[query_y_batch]
if i == 0:
query_c = outputs[0]
query_y = query_y_batch
else:
query_c = T.cat([query_c, outputs[0]], 0)
query_y = T.cat([query_y, query_y_batch], 0)
pbar.update(1)
mAP = Evaluate_mAP(C, args.N_books, gallery_c.type(T.int), query_c, gallery_y, query_y, device, args.Top_N)
return mAP
|
StarcoderdataPython
|
113369
|
<filename>utils/config.py
import sys
sys.dont_write_bytecode = True
TOKEN = "<KEY>"
PREFIX = "tb!"
STATUS = "Station Bot Beta | v1.1"
OWNERS = [607190287894446081] # list of user ids (ints)
prefixes = "tb!"
|
StarcoderdataPython
|
299026
|
#!/usr/local/bin/python3
class Potencia:
    # Computes a specific power (the exponent is fixed at construction time)
    def __init__(self, expoente):  # default constructor
        # (self) refers to the instance itself - a mandatory parameter
self.expoente = expoente
def __call__(self, base):
return base ** self.expoente
if __name__ == '__main__':
quadrado = Potencia(2)
cubo = Potencia(3)
    if callable(quadrado) and callable(cubo):
        print(f'3Q => {quadrado(3)}')
        print(f'5C => {cubo(5)}')
        print(Potencia(4)(2))  # 2 is the base, 4 is the exponent
|
StarcoderdataPython
|
1718622
|
# -*- coding: utf8 -*-
"""
导出后的文件处理示例
如果你采用集群的方式部署,那么你可能希望在节点导出完成之后上传到文件服务上,然后给页面返回一个URL
提供用户下载。这里作为单机部署的示例,仅移动文件夹到static目录然后输出一个URL来。
如果你采用集群方式,把文件处理后输出下载的URL就行了。至于原理,那就是导出完成后Python调用一下
配置的脚本命令,把文件夹作为参数传递进来而已。
"""
import sys
import os
import uuid
import zipfile
def main():
    if len(sys.argv) < 2:
        print(__file__ + " [Error] Too few arguments", file=sys.stderr)
        exit(1)
    if not os.path.isdir(sys.argv[1]):
        print(__file__ + " [Error] Folder does not exist (" + sys.argv[1] + ")", file=sys.stderr)
        exit(1)
scriptPath = os.path.split(os.path.realpath(__file__))[0]
targetPath = scriptPath + os.sep + os.sep.join(["pltplconf", "static"])
fileName = uuid.uuid1().hex + ".zip"
targetFile = targetPath + os.sep + fileName
with zipfile.ZipFile(targetFile, "w", compression=zipfile.ZIP_DEFLATED, compresslevel=9, strict_timestamps=False) as zipObject:
for file in os.listdir(sys.argv[1]):
zipObject.write(sys.argv[1] + os.sep + file, file)
    # Print the download path. Since this is a single-machine example, a path rooted at the web directory is enough.
print("/static/" + fileName)
if __name__ == '__main__':
main()
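# Hypothetical invocation (not part of the original script; the script name is a placeholder):
# the exporter is expected to call this script with the exported folder as its first argument, e.g.
#
#   python post_export_handler.py /path/to/exported_folder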
|
StarcoderdataPython
|
3243743
|
<reponame>redzhepdx/IWC-Net<gh_stars>1-10
from capsule_layers import *
import tensorflow as tf
import numpy as np
import cv2
def cap_U_encoder(input, K):
x = tf.reshape(input, shape=[-1, 224, 224, 3])
conv1 = tf.layers.conv2d(x, 16, 5, 1, padding="same", kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print("Conv1_Shape : ", conv1.get_shape())
conv1_expanded = tf.expand_dims(conv1, axis=3)
#print("Conv1_Expanded Shape : ", conv1_expanded.get_shape())
conv_caps_1 = SegConvolution(conv1_expanded, 5, 2, 16, strides=2, routings=1)
print("Conv Caps 1 Shape : ", conv_caps_1.get_shape())
conv_caps_2 = SegConvolution(conv_caps_1, 5, 4, 16, strides=1, routings=3)
print("Conv Caps 2 Shape : ", conv_caps_2.get_shape())
conv_caps_3 = SegConvolution(conv_caps_2, 5, 4, 32, strides=2, routings=3)
print("Conv Caps 3 Shape : ", conv_caps_3.get_shape())
conv_caps_4 = SegConvolution(conv_caps_3, 5, 8, 32, strides=1, routings=3)
print("Conv Caps 4 Shape : ", conv_caps_4.get_shape())
conv_caps_5 = SegConvolution(conv_caps_4, 5, 8, 64, strides=2, routings=3)
print("Conv Caps 5 Shape : ", conv_caps_5.get_shape())
conv_caps_6 = SegConvolution(conv_caps_5, 5, 8, 64, strides=1, routings=3)
print("Conv Caps 6 Shape : ", conv_caps_6.get_shape())
conv_caps_7 = SegConvolution(conv_caps_6, 5, 8, 128, strides=2, routings=3)
print("Conv Caps 7 Shape : ", conv_caps_7.get_shape())
conv_caps_8 = SegConvolution(conv_caps_7, 5, 16, 64, strides=1, routings=3)
print("Conv Caps 8 Shape : ", conv_caps_8.get_shape())
deconv_caps_1 = SegConvolution(conv_caps_8, 4, 16, 64, strides=2, routings=3, op='deconv')
print("Deconv 1 Shape : ", deconv_caps_1.get_shape())
concat_1 = tf.concat([deconv_caps_1, conv_caps_6], axis=3)
conv_caps_9 = SegConvolution(concat_1, 5, 8, 32, strides=1, routings=3)
print("Conv Caps 9 Shape : ", conv_caps_9.get_shape())
deconv_caps_2 = SegConvolution(conv_caps_9, 4, 8, 32, strides=2, routings=3, op='deconv')
print("Deconv 2 Shape : ", deconv_caps_2.get_shape())
concat_2 = tf.concat([deconv_caps_2, conv_caps_4], axis=3)
conv_caps_10 = SegConvolution(concat_2, 5, 4, 16, strides=1, routings=3)
print("Conv Caps 10 Shape : ", conv_caps_10.get_shape())
deconv_caps_3 = SegConvolution(conv_caps_10, 4, 4, 16, strides=2, routings=3, op='deconv')
print("Deconv 2 Shape : ", deconv_caps_2.get_shape())
concat_3 = tf.concat([deconv_caps_3, conv_caps_2], axis=3)
conv_caps_11 = SegConvolution(concat_3, 5, 2, 16, strides=1, routings=3)
print("Conv Caps 11 Shape : ", conv_caps_11.get_shape())
deconv_caps_4 = SegConvolution(conv_caps_11, 4, 2, 16, strides=2, routings=3, op='deconv')
print("Deconv 4 Shape : ", deconv_caps_4.get_shape())
concat_4 = tf.concat([deconv_caps_4, conv1_expanded], axis=3)
conv_caps_12 = SegConvolution(concat_4, 1, 1, K, strides=1, routings=3)
print("Conv Caps 12 Shape : ", conv_caps_12.get_shape())
squeeze = tf.squeeze(conv_caps_12, axis=3)
#print("Squeeze Shape : ", squeeze.get_shape())
    softmax = tf.nn.softmax(squeeze)
print("Encode Output Shape : " , softmax.get_shape())
return softmax
def cap_U_decoder(input):
dec_conv1 = tf.layers.conv2d(input, 16, 5, 1, padding="same", kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
print("Conv1_Shape : ", dec_conv1.get_shape())
dec_conv1_expanded = tf.expand_dims(dec_conv1, axis=3)
#print("Conv1_Expanded Shape : ", conv1_expanded.get_shape())
dec_conv_caps_1 = SegConvolution(dec_conv1_expanded, 5, 2, 16, strides=2, routings=1)
print("Conv Caps 1 Shape : ", dec_conv_caps_1.get_shape())
dec_conv_caps_2 = SegConvolution(dec_conv_caps_1, 5, 4, 16, strides=1, routings=3)
print("Conv Caps 2 Shape : ", dec_conv_caps_2.get_shape())
dec_conv_caps_3 = SegConvolution(dec_conv_caps_2, 5, 4, 32, strides=2, routings=3)
print("Conv Caps 3 Shape : ", dec_conv_caps_3.get_shape())
dec_conv_caps_4 = SegConvolution(dec_conv_caps_3, 5, 8, 32, strides=1, routings=3)
print("Conv Caps 4 Shape : ", dec_conv_caps_4.get_shape())
dec_conv_caps_5 = SegConvolution(dec_conv_caps_4, 5, 8, 64, strides=2, routings=3)
print("Conv Caps 5 Shape : ", dec_conv_caps_5.get_shape())
dec_conv_caps_6 = SegConvolution(dec_conv_caps_5, 5, 8, 64, strides=1, routings=3)
print("Conv Caps 6 Shape : ", dec_conv_caps_6.get_shape())
dec_conv_caps_7 = SegConvolution(dec_conv_caps_6, 5, 8, 128, strides=2, routings=3)
print("Conv Caps 7 Shape : ", dec_conv_caps_7.get_shape())
dec_conv_caps_8 = SegConvolution(dec_conv_caps_7, 5, 16, 64, strides=1, routings=3)
print("Conv Caps 8 Shape : ", dec_conv_caps_8.get_shape())
dec_deconv_caps_1 = SegConvolution(dec_conv_caps_8, 4, 16, 64, strides=2, routings=3, op='deconv')
print("Deconv 1 Shape : ", dec_deconv_caps_1.get_shape())
dec_concat_1 = tf.concat([dec_deconv_caps_1, dec_conv_caps_6], axis=3)
dec_conv_caps_9 = SegConvolution(dec_concat_1, 5, 8, 32, strides=1, routings=3)
print("Conv Caps 9 Shape : ", dec_conv_caps_9.get_shape())
dec_deconv_caps_2 = SegConvolution(dec_conv_caps_9, 4, 8, 32, strides=2, routings=3, op='deconv')
print("Deconv 2 Shape : ", dec_deconv_caps_2.get_shape())
dec_concat_2 = tf.concat([dec_deconv_caps_2, dec_conv_caps_4], axis=3)
dec_conv_caps_10 = SegConvolution(dec_concat_2, 5, 4, 16, strides=1, routings=3)
print("Conv Caps 10 Shape : ", dec_conv_caps_10.get_shape())
dec_deconv_caps_3 = SegConvolution(dec_conv_caps_10, 4, 4, 16, strides=2, routings=3, op='deconv')
print("Deconv 3 Shape : ", dec_deconv_caps_3.get_shape())
dec_concat_3 = tf.concat([dec_deconv_caps_3, dec_conv_caps_2], axis=3)
dec_conv_caps_11 = SegConvolution(dec_concat_3, 5, 2, 16, strides=1, routings=3)
print("Conv Caps 11 Shape : ", dec_conv_caps_11.get_shape())
dec_deconv_caps_4 = SegConvolution(dec_conv_caps_11, 4, 2, 16, strides=2, routings=3, op='deconv')
print("Deconv 3 Shape : ", dec_deconv_caps_4.get_shape())
dec_concat_4 = tf.concat([dec_deconv_caps_4, dec_conv1_expanded], axis=3)
dec_conv_caps_12 = SegConvolution(dec_concat_4, 1, 1, 16, strides=1, routings=3)
print("Conv Caps 12 Shape : ", dec_conv_caps_12.get_shape())
dec_squeeze = tf.squeeze(dec_conv_caps_12, axis=3)
#Reconstruction
rec_conv_1 = tf.layers.conv2d(dec_squeeze, 64, 1, 1, padding="same", kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
rec_conv_2 = tf.layers.conv2d(rec_conv_1, 128, 1, 1, padding="same", kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
output = tf.layers.conv2d(rec_conv_2, 3, kernel_size = 1, padding="same")
print("Decoder Output Shape : " , output.get_shape())
return output
if __name__ == "__main__":
batch_size = 1
K = 4
width = 224
height = 224
img = cv2.imread("gdz.png")
img = cv2.resize(img, (width, height))
img = np.array([img])
input = np.random.uniform(size=(1, width, height, 3))
y = np.random.uniform(size=(1, width, height, 3))
image = tf.placeholder(tf.float32, [None, width, height, 3])
segment = tf.placeholder(tf.float32, [None, width, height, 3])
encoder = cap_U_encoder(image, K)
decoder = cap_U_decoder(encoder)
|
StarcoderdataPython
|
76336
|
import os
import yaml
class Config(object):
"""Script configuration file parser.
Attributes
----------
dataset: str
Name of the dataset to train on (i.e., 'omniglot').
num_epochs: int
Number of training epochs.
num_episodes: int
Number of episodes per epoch.
num_ways_train: int
Number of random classes per episode for training.
num_support_train: int
Number of samples per class to use as support for training.
num_query_train: int
Number of samples per class to use as query for training.
num_ways_val: int
Number of random classes per episode for validation.
num_support_val: int
Number of samples per class to use as support for validation.
num_query_val: int
Number of samples per class to use as query for validation.
seed: int
Random seed.
"""
def __init__(self, config_yaml: str) -> None:
if not os.path.exists(config_yaml):
raise ValueError(
f"The config file at {config_yaml} is missing.")
config = yaml.load(open(config_yaml, "r"))
self.dataset = config["dataset"]
self.num_epochs = config.get("num_epochs", 100)
self.num_episodes = config.get("num_episodes", 100)
self.num_ways_train = config.get("num_ways_train", 60)
self.num_support_train = config.get("num_support_train", 5)
self.num_query_train = config.get("num_query_train", 5)
self.num_ways_val = config.get("num_ways_val", 5)
self.num_support_val = config.get("num_support_val", 5)
self.num_query_val = config.get("num_query_val", 15)
self.seed = config.get("seed", 0)
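# Hypothetical usage (not part of the original module), assuming a config.yaml that contains
# at least a "dataset" key, e.g. `dataset: omniglot`:
#
#   cfg = Config("config.yaml")
#   print(cfg.dataset, cfg.num_epochs, cfg.seed)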
|
StarcoderdataPython
|
1813248
|
import random
import argparse
from collections import Counter
parser = argparse.ArgumentParser()
parser.add_argument('--train_size', help='Size for the training set', type=int, default=100000)
parser.add_argument('--dev_size', help='Size for the dev set', type=int, default=1000)
parser.add_argument('--test_size', help='Size for the test set', type=int, default=10000)
parser.add_argument('--gen_size', help='Size for the generalization set', type=int, default=10000)
parser.add_argument('--sentences', help='File of sentences to process', type=str, default=None)
parser.add_argument('--output_prefix', help='Prefix for output file names', type=str, default=None)
args = parser.parse_args()
fi = open(args.sentences, "r")
fo_train = open(args.output_prefix + ".train", "w")
fo_dev = open(args.output_prefix + ".dev", "w")
fo_test = open(args.output_prefix + ".test", "w")
fo_gen = open(args.output_prefix + ".gen", "w")
# Words to delete from the sentences being processed
delList = ["N1", "N2", "N3", "N4", "N5", "N6", "N7", "N8", "AUX1", "AUX2", "AUX3", "AUX4", "AUX5", "AUX6", "AUX7", "AUX8",
"VI1", "VI2", "VI3", "VT1", "VT2", "VT3", "VT4", "VT5", "VI", "VT"]
delDict = {}
for item in delList:
delDict[item] = 1
# Convert a declarative sentence to a counterfactual question with the main auxiliary at the end of the sentence
def questionify(sent):
sent[-2] = "?"
if "AUX4" in sent:
ind = sent.index("AUX4")
else:
ind = sent.index("AUX5")
newSent = sent[:ind + 1] + sent[ind + 2:-2] + [sent[ind + 1]] + sent[-2:]
return newSent
def process(sent):
if sent[-1] == "quest":
quest = 1
else:
quest = 0
newSent = []
for word in sent:
if word not in delDict:
newSent.append(word)
return " ".join(newSent[:-1])
count_orc = 0
count_src = 0
aux_list = ["have", "has", "haven't", "hasn't"]
aux_dict = {}
for aux in aux_list:
aux_dict[aux] = 1
def get_auxes(words):
aux_set = []
for word in words:
if word in aux_dict:
aux_set.append(word)
return aux_set
def get_nouns(words):
noun_set = []
for i, word in enumerate(words):
if word.startswith("N"):
noun_set.append(words[i+1])
return noun_set
# Sentences we've already used (to avoid repeats)
used_dict = {}
count_train = 0
count_dev = 0
count_test = 0
count_gen = 0
count_iid = 0
count_ood = 0
iid_examples = []
ood_examples = []
filter_counts = Counter()
for line in fi:
if count_iid >= args.train_size + args.dev_size + args.test_size and count_ood >= args.gen_size:
break
sent = line.strip()
if sent in used_dict:
continue
used_dict[sent] = 1
words = sent.split()
nouns = get_nouns(words)
#if len(nouns) > len(set(nouns)):
# filter_counts[len(nouns)] += 1
# continue
if words[3] == "that" or words[3] == "who":
rel_on_subj = 1
else:
rel_on_subj = 0
quest = random.getrandbits(1)
if quest:
words.append("quest")
else:
words.append("decl")
if quest:
result = process(words) + " quest" + "\t" + process(questionify(words)) + "\n"
else:
result = process(words) + " decl" + "\t" + process(words) + "\n"
if (not rel_on_subj or not quest) and count_iid < args.train_size + args.dev_size + args.test_size:
iid_examples.append(result)
count_iid += 1
elif rel_on_subj and quest and count_ood < args.gen_size:
words_auxes = get_auxes(words)
if words_auxes == ["have", "haven't"] or words_auxes == ["haven't", "have"] or words_auxes == ["has", "hasn't"] or words_auxes == ["hasn't", "has"]:
if words[5] in aux_dict:
if count_src <= 6666:
ood_examples.append(result)
count_ood += 1
count_src += 1
else:
if count_orc <= 3333:
ood_examples.append(result)
count_ood += 1
count_orc += 1
random.shuffle(iid_examples)
random.shuffle(ood_examples)
train_set = iid_examples[:args.train_size]
dev_set = iid_examples[args.train_size:args.train_size+args.dev_size]
test_set = iid_examples[args.train_size+args.dev_size:args.train_size+args.dev_size+args.test_size]
gen_set = ood_examples[:args.gen_size]
for elt in train_set:
fo_train.write(elt)
for elt in dev_set:
fo_dev.write(elt)
for elt in test_set:
fo_test.write(elt)
for elt in gen_set:
fo_gen.write(elt)
print(count_orc, count_src)
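# Hypothetical invocation (not part of the original script; the file name is a placeholder):
#
#   python make_splits.py --sentences sentences.txt --output_prefix question \
#       --train_size 100000 --dev_size 1000 --test_size 10000 --gen_size 10000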
|
StarcoderdataPython
|
6530569
|
<filename>algorithm/wordifier.py
"""
Module provides Wordifier with main functionality:
wordify number a given phone number
all possible wordification of given phone number
convert wordified number to phone number
"""
from algorithm.dictionary import Dictionary
from utils.utils import find_number_to_character_mapping, string_to_number, \
remove_non_alpha_numeric
class Wordifier:
"""
Main methods includes:
- number_to_words
- all_wordifications
- words_to_number
"""
def __init__(self):
self.dictionary = Dictionary()
def _is_valid_query(self, number_query):
number_query = number_query[2:]
if "0" in number_query or "1" in number_query:
raise ValueError
return True
def all_wordifications(self, number):
"""
Returns All wordification
"""
query = remove_non_alpha_numeric(number)
assert self._is_valid_query(query)
result = []
for i in range(2, len(query)):
query_to_combinations = query[i:]
all_combinations = self.find_all_combinations(query_to_combinations)
words = self.check_words_against_dictionary(all_combinations)
for word in words:
ans = query[:i] + word + query[i+len(word):]
result.append(ans)
return result
@staticmethod
def words_to_number(query_string):
"""
converts wordified phone number into numeric phone number
"""
return string_to_number(query_string)
def number_to_words(self, number):
"""
Return word with biggest match
"""
query = remove_non_alpha_numeric(number)
result = ""
last_word = ""
assert self._is_valid_query(query)
for i in range(2, len(query)):
query_to_combinations = query[i:]
all_combinations = self.find_all_combinations(query_to_combinations)
words = self.check_words_against_dictionary(all_combinations)
for word in words:
ans = query[:i] + word + query[i+len(word):]
if len(word) >= len(last_word):
result = ans
last_word = word
return result
def add_words_to_dictionary(self, filename):
"""
Add words from file in the dictionary
"""
self.dictionary.add_to_dictionary_from_txt(filename)
def add_single_word_to_dictionary(self, word):
"""
Add single word in the dictionary
"""
self.dictionary.add_single_word(word)
def check_words_against_dictionary(self, all_combinations):
"""
Return all the combinations which are present in a Number
"""
found_words = []
for combination in all_combinations:
query = combination.lower()
is_present = self.dictionary.search_multi_words(query)
if is_present:
found_words.append(combination)
return found_words
@staticmethod
def find_all_combinations(query_number):
"""
From the query number, find all the combinations of words possible
"""
all_combinations = []
ans = find_number_to_character_mapping(query_number[0])
curr_ans = []
for iterator in range(1, len(query_number)):
num = query_number[iterator]
chars = find_number_to_character_mapping(num)
for each_ans in ans:
for char in chars:
curr_ans.append(each_ans + str(char))
all_combinations.append(each_ans + str(char))
ans = curr_ans
curr_ans = []
return all_combinations
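# Hypothetical usage (not part of the original module); the word and the number are placeholders:
#
#   wordifier = Wordifier()
#   wordifier.add_single_word_to_dictionary("flowers")
#   wordified = wordifier.number_to_words("1-800-356-9377")
#   number = Wordifier.words_to_number(wordified)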
|
StarcoderdataPython
|
5100744
|
<filename>donkey_gym_wrapper/env.py<gh_stars>0
# Copyright (c) 2018 <NAME>
# MIT License
'''
Hijacked donkey_gym wrapper with VAE.
- Use Z vector as observation space.
- Store raw images in VAE buffer.
Problem that DDPG already well implemented in stable-baselines
and VAE integration will require full reimplementation of DDPG
codebase. Instead we hijack VAE into gym environment.
'''
import os
import numpy as np
import gym
from gym import spaces
from donkey_gym.envs.donkey_env import DonkeyEnv
from donkey_gym.envs.donkey_sim import DonkeyUnitySimContoller
from donkey_gym.envs.donkey_proc import DonkeyUnityProcess
class DonkeyVAEEnv(DonkeyEnv):
def __init__(self, level=0, time_step=0.05, frame_skip=2, z_size=512):
self.z_size = z_size
print("starting DonkeyGym env")
# start Unity simulation subprocess
self.proc = DonkeyUnityProcess()
        try:
            exe_path = os.environ['DONKEY_SIM_PATH']
        except KeyError:
            print("Missing DONKEY_SIM_PATH environment var. Using defaults")
            # you must start the executable on your own
            exe_path = "self_start"
        try:
            port = int(os.environ['DONKEY_SIM_PORT'])
        except (KeyError, ValueError):
            print("Missing DONKEY_SIM_PORT environment var. Using defaults")
            port = 9091
        try:
            headless = os.environ['DONKEY_SIM_HEADLESS'] == '1'
        except KeyError:
            print("Missing DONKEY_SIM_HEADLESS environment var. Using defaults")
            headless = False
self.proc.start(exe_path, headless=headless, port=port)
# start simulation com
self.viewer = DonkeyUnitySimContoller(level=level, time_step=time_step, port=port)
# steering
# TODO(r7vme): Add throttle
self.action_space = spaces.Box(low=np.array([-1.0]), high=np.array([1.0]), dtype=np.float32)
# z latent vector
self.observation_space = spaces.Box(low=np.finfo(np.float32).min,
high=np.finfo(np.float32).max,
shape=(1, self.z_size), dtype=np.float32)
# simulation related variables.
self.seed()
# Frame Skipping
self.frame_skip = frame_skip
# wait until loaded
self.viewer.wait_until_loaded()
def step(self, action):
for i in range(self.frame_skip):
self.viewer.take_action(action)
observation, reward, done, info = self._observe()
return observation, reward, done, info
def reset(self):
self.viewer.reset()
observation, reward, done, info = self._observe()
return observation
def _observe(self):
observation, reward, done, info = self.viewer.observe()
# Solves chicken-egg problem as gym calls reset before we call set_vae.
if not hasattr(self, "vae"):
return np.zeros(self.z_size), reward, done, info
# Store image in VAE buffer.
self.vae.buffer_append(observation)
return self.vae.encode(observation), reward, done, info
def set_vae(self, vae):
self.vae = vae
|
StarcoderdataPython
|
8034706
|
from struct import unpack
class CalFile:
""" class for parsing a single .cal file
==== functions ====
__init__(filename)
a .cal file to open must be specified as string [filename].
parseHeader() initialize global variables. should be executed before other functions are called.
printInfo() print basic info, such as number of compartments, time steps, etc.
getSequence(s,x,y,z)
returns value of symbol [s] in compartment at [x][y][z].
getByID(s,cid) returns value of symbol [s] in [cid]th compartment.
getSeqAverage(s,average=True)
returns total value of symbol [s] throughout the model.
setting [average] to true divides each value by the number of compartments.
==== variables ====
f file handler
symbolNames string list of all symbol names in simulation
symbolUnits string list of all symbol units, stored in same order as symbolNames
symbolOut string list of symbols recorded in file
timeStart start time of simulation
timeEnd end time of simulation
timeDelta time step used in simulation
timeRecord time interval of records
timeN number of recorded timepoints
cStatus contains: number of compartments, number of membranes, compartment length X, Y, Z
c hashes c[(x,y,z)] => compartment ID. used in getSequence()
offsetResults binary offset of simulation results in .cal file
verbose when False, supresses trivial notifications except errors
"""
f = None
symbolNames = []
symbolUnits = []
symbolOut = []
timeStart = 0.0
timeEnd = 0.0
timeDelta = 0.0
timeRecord = 0.0
timeN = 0
cStatus = []
c = {}
offsetResults = None
verbose = False
def __init__(self,filename=None,verbose=False):
""" initialize. 'filename' must be specified
"""
try:
self.f = open(filename,"rb")
except OSError as e:
print("could not open \"{}\"".format(filename))
raise
except:
raise
if(isinstance(verbose,bool)):
self.verbose = verbose
if(self.verbose):
print("opening file: {}".format(filename))
self.parseHeader()
def parseHeader(self):
""" extract basic information from self.f.
this function is time-consuming, and is therefore separated.
"""
# length constants derived from Sim_prog.cpp
LEN_HEADER = 160
LEN_TAB = 16
LEN_STRING = 512
# irrelevant bytes
self.f.seek(LEN_HEADER,0)
# *Information****
self.f.seek(LEN_TAB,1)
size = int.from_bytes(self.f.read(4),byteorder="little")
self.f.seek(size,1)
# *VsimFile*******
self.f.seek(LEN_TAB,1)
self.f.seek(4+LEN_STRING,1)
# *Symbol List****
self.f.seek(LEN_TAB,1)
n = int.from_bytes(self.f.read(4),byteorder="little")
for i in range(n):
size = int.from_bytes(self.f.read(4),byteorder="little")
self.symbolNames.append(self.f.read(size).decode())
size = int.from_bytes(self.f.read(4),byteorder="little")
self.symbolUnits.append(self.f.read(size).decode())
self.f.seek(4*5 + 8*7, 1) # other symbol attributes
# *CalcCondition**
self.f.seek(LEN_TAB,1)
(self.timeStart,self.timeEnd,self.timeDelta,self.timeRecord) = unpack("<dddd",self.f.read(32)) # start, end, calculation interval, output interval
self.timeN = int( ((self.timeEnd - self.timeStart) / self.timeRecord) +1 )
# *OutputSymbols**
self.f.seek(LEN_TAB,1)
n = int.from_bytes(self.f.read(4), byteorder="little")
for i in range(n):
size = int.from_bytes(self.f.read(4), byteorder="little")
self.symbolOut.append( self.f.read(size).decode() )
# *Stimulation****
self.f.seek(LEN_TAB,1)
n = int.from_bytes(self.f.read(4), byteorder="little")
for i in range(n):
            # TODO: should be able to handle cases with stimulation
pass
# *CompartmentSt.*
self.f.seek(LEN_TAB,1)
self.cStatus = unpack("<iiddd",self.f.read(8+24)) # compartments, membranes, compartment length X, Y, Z
for i in range(self.cStatus[0]):
self.c[ unpack("<iiiBii",self.f.read(21))[0:3] ] = i # self.c[x,y,z] = i
# *SymbolValues***
# initial values.
        # TODO: should be able to handle cases with N_TRANS, N_EXTRA, N_INTRA, etc.
self.f.seek(LEN_TAB,1)
self.f.seek(8 * self.cStatus[0] * len(self.symbolNames), 1)
# *results********
self.f.seek(LEN_TAB,1)
self.offsetResults = self.f.tell()
# file size validation
self.f.seek(0,2)
estimate = 8*self.cStatus[0]*len(self.symbolOut)*self.timeN + 4*self.timeN
recordSize = self.f.tell() - self.offsetResults
if( estimate > recordSize ):
x = recordSize / (8*self.cStatus[0]*len(self.symbolOut) + 4)
if(self.verbose):
print("actual file size is smaller than expected, found only {} timepoints (= {:.3f} s)".format(x, (x-1)*self.timeRecord))
self.timeN = int(x)
def printInfo(self):
print( "======== model summary ========")
print( "compartments: {}\nmembranes: {}".format(self.cStatus[0],self.cStatus[1]) )
print( "compartment size:\n {}\n *{}\n *{} m".format(self.cStatus[2],self.cStatus[3],self.cStatus[4]) )
print( "expected time {}~{} s, output interval {} s".format(self.timeStart,self.timeEnd,self.timeRecord) )
print( "recorded time {}~{} s (= {} timepoints)".format(self.timeStart,self.timeRecord*(self.timeN-1),self.timeN) )
print( "recorded symbols:" )
for i in range(len(self.symbolOut)):
print( "\t[{}]\t{}".format(i,self.symbolOut[i]) )
def getSequence(self,s,x,y,z):
""" timecourse of symbol [s] in compartment at [x][y][z].
[s] must be a string.
[x][y][z] must be integers.
returns timecourse as a list of floats.
upon error, raises error and aborts.
"""
# argument validation
try:
assert s in self.symbolOut, "error in getSequence(): could not find symbol \"{}\"".format(s)
assert (x,y,z) in self.c, "error in getSequence(): no compartment at ({},{},{})".format(x,y,z)
except AssertionError as e:
raise
result = []
cid = self.c[(x,y,z)]
index = self.symbolOut.index(s)
for i in range(self.timeN):
self.f.seek( self.offsetResults + i * (len(self.symbolOut)*self.cStatus[0]*8+4) )
self.f.seek( 4,1 )
self.f.seek( index*self.cStatus[0]*8, 1 )
self.f.seek( cid*8, 1 )
result.append( unpack("<d", self.f.read(8))[0] )
return result
def getByID(self,s,cid):
""" timecourse of symbol [s] in compartment at [x][y][z].
[s] must be a string.
[cid] must be integer.
returns timecourse as a list of floats.
upon error, raises error and aborts.
"""
# argument validation
try:
assert s in self.symbolOut, "error in getSequence(): could not find symbol \"{}\"".format(s)
assert (0<=cid and cid<self.cStatus[0]), "error in getSequence(): no such compartment: {}.".format(cid)
except AssertionError as e:
raise
result = []
index = self.symbolOut.index(s)
for i in range(self.timeN):
self.f.seek( self.offsetResults + i * (len(self.symbolOut)*self.cStatus[0]*8+4) )
self.f.seek( 4,1 )
self.f.seek( index*self.cStatus[0]*8, 1 )
self.f.seek( cid*8, 1 )
result.append( unpack("<d", self.f.read(8))[0] )
return result
def getSeqAverage(self,s,average=True):
""" returns amount of symbol [s] as array.
[s] must be a string.
            when [average] is True, each value is divided by the number of compartments.
            beware of rounding errors.
"""
x = []
for i in range(self.cStatus[0]):
x.append( self.getByID(s,i) )
if(average):
result = [sum(unit)/self.cStatus[0] for unit in list(zip(*x))]
else:
result = [sum(unit) for unit in list(zip(*x))]
return result
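# Usage sketch (the enclosing reader class and its constructor are defined above
# this excerpt, so only the methods shown here are referenced; the symbol name
# "Ca" and the (0, 0, 0) coordinates are placeholders):
#
#   reader.printInfo()                          # list compartments and recorded symbols
#   trace = reader.getSequence("Ca", 0, 0, 0)   # timecourse at compartment (0, 0, 0)
#   mean = reader.getSeqAverage("Ca")           # per-compartment average over time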
|
StarcoderdataPython
|
9794356
|
<gh_stars>0
import asyncio
from django.db.models import Count
from usaspending_api.awards.v2.filters.matview_filters import matview_search_filter
from usaspending_api.common.helpers.orm_helpers import generate_raw_quoted_query
from usaspending_api.common.data_connectors.async_sql_query import async_run_select
def fetch_all_category_counts(filters, category_to_model_dict):
loop = asyncio.new_event_loop()
results = {}
for k, v in category_to_model_dict.items():
queryset = matview_search_filter(filters, v).annotate(count=Count("*")).values("count")
sql = generate_raw_quoted_query(queryset)
# Django refuses to provide a viable option to exclude "GROUP BY ..." so it is stripped before running the SQL
remove_groupby_string_index = sql.find("GROUP BY")
results[k] = asyncio.ensure_future(async_run_select(sql[:remove_groupby_string_index]), loop=loop)
all_statements = asyncio.gather(*[value for value in results.values()])
loop.run_until_complete(all_statements)
loop.close()
return {k: v.result()[0]["count"] for k, v in results.items()}
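# Usage sketch (the filter dict and category-to-model mapping below are
# placeholders; real values come from the surrounding usaspending-api code):
#
#   counts = fetch_all_category_counts(
#       filters={"keywords": ["example"]},
#       category_to_model_dict={"awards": SomeMatviewModel},
#   )
#   # -> {"awards": <row count>, ...}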
|
StarcoderdataPython
|
6471081
|
<gh_stars>0
#!/usr/local/bin/python3
# NOTE: The model itself is quite meaningless. The purpose is
# to check some features. Consider it like a unit test.
from simulation.aivika.modeler import *
model = MainModel()
data_type = TransactType(model, 'Transact')
input_stream = uniform_random_stream(data_type, 3, 7)
input_queue = create_queue(model, data_type, 10, name = 'queue', descr = 'The input queue')
input_queue_source = input_queue.add_result_source()
enqueue_stream_or_remove_item(input_queue, input_stream)
server = uniform_random_server(data_type, 1, 2, name = 'server', descr = 'The server')
server_source = server.add_result_source()
arrival_timer = create_arrival_timer(model, name = 'arrivalTimer', descr = 'The arrival timer')
arrival_timer_source = arrival_timer.add_result_source()
output_stream0 = dequeue_stream(input_queue)
output_stream1 = server_stream(server, output_stream0)
output_stream = arrival_timer_stream(arrival_timer, output_stream1)
terminate_stream(output_stream)
specs = Specs(0, 100, 0.1)
views = [ExperimentSpecsView(title = 'Testing Experiment Title',
descr = 'Some long description follows...'),
InfoView(title = 'Testing InfoView Title',
descr = 'Testing InfoView Description',
series = [arrival_timer_source,
input_queue_source,
server_source]),
DeviationChartView(title = 'Testing DeviationChartView Title',
descr = 'Testing DeviationChartView Description',
width = 800,
height = 500,
left_y_series = [arrival_timer_source.processing_time],
right_y_series = [server_source.processing_time],
plot_title = 'Testing Plot Title'),
TimeSeriesView(title = 'Testing TimeSeriesView Title',
descr = 'Testing TimeSeriesView Description',
width = 800,
height = 500,
left_y_series = [arrival_timer_source.processing_time.min_value,
arrival_timer_source.processing_time.max_value],
right_y_series = [arrival_timer_source.processing_time.mean_value],
plot_title = 'Testing Plot Title',
run_plot_title = '$PLOT_TITLE / Run $RUN_INDEX of $RUN_COUNT'),
XYChartView(title = 'Testing XYChartView Title',
descr = 'Testing XYChartView Description',
width = 800,
height = 500,
x_series = arrival_timer_source.processing_time.mean_value,
left_y_series = [arrival_timer_source.processing_time.min_value,
arrival_timer_source.processing_time.max_value],
right_y_series = [arrival_timer_source.processing_time.mean_value],
plot_title = 'Testing Plot Title',
run_plot_title = '$PLOT_TITLE / Run $RUN_INDEX of $RUN_COUNT'),
FinalXYChartView(title = 'Testing FinalXYChartView Title',
descr = 'Testing FinalXYChartView Description',
width = 800,
height = 500,
x_series = arrival_timer_source.processing_time.mean_value,
left_y_series = [arrival_timer_source.processing_time.min_value,
arrival_timer_source.processing_time.max_value],
right_y_series = [arrival_timer_source.processing_time.mean_value],
plot_title = 'Testing Plot Title'),
HistogramView(title = 'Testing HistogramView Title',
descr = 'Testing HistogramView Description',
width = 800,
height = 500,
series = [arrival_timer_source.processing_time.mean_value],
plot_title = 'Testing Plot Title',
run_plot_title = '$PLOT_TITLE / Run $RUN_INDEX of $RUN_COUNT'),
FinalHistogramView(title = 'Testing FinalHistogramView Title',
descr = 'Testing FinalHistogramView Description',
width = 800,
height = 500,
series = [arrival_timer_source.processing_time.mean_value],
plot_title = 'Testing Plot Title')]
renderer = ExperimentRendererUsingDiagrams(views)
experiment = Experiment(renderer, run_count = 3)
model.run(specs, experiment)
|
StarcoderdataPython
|
30743
|
<filename>ConsecutiveCharacters.py
'''
Given a string s, the power of the string is the maximum length of a non-empty substring that contains only one unique character.
Return the power of the string.
Example 1:
Input: s = "leetcode"
Output: 2
Explanation: The substring "ee" is of length 2 with the character 'e' only.
Example 2:
Input: s = "abbcccddddeeeeedcba"
Output: 5
Explanation: The substring "eeeee" is of length 5 with the character 'e' only.
'''
class Solution:
def maxPower(self, s: str) -> int:
current = 1
max_freq = 0
for i in range(1, len(s)):
if s[i] == s[i - 1]:
current += 1
else:
max_freq = max(current, max_freq)
current = 1
return max(max_freq, current)
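# Quick check of the examples from the problem statement above:
if __name__ == "__main__":
    assert Solution().maxPower("leetcode") == 2
    assert Solution().maxPower("abbcccddddeeeeedcba") == 5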
|
StarcoderdataPython
|
105031
|
<filename>moonleap/resource/prop.py
import typing as T
from dataclasses import dataclass
@dataclass(frozen=True)
class Prop:
get_value: T.Callable
set_value: T.Optional[T.Callable] = None
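# Minimal usage sketch (the names below are illustrative, not part of the module):
#
#   title_prop = Prop(get_value=lambda obj: obj["title"])
#   title_prop.get_value({"title": "hello"})  # -> "hello"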
|
StarcoderdataPython
|
8039340
|
from __future__ import division
import json
from datetime import datetime
import gspread
from oauth2client.client import SignedJwtAssertionCredentials
from feemodel.apiclient import client
from feemodeldata.util import retry
from feemodeldata.plotting import logger
# This is deprecated - mining pools are no longer identified.
def get_pools_table():
pe = client.get_poolsobj()
poolitems = sorted(pe.pools.items(),
key=lambda p: p[1].hashrate, reverse=True)
totalhashrate = pe.calc_totalhashrate()
table = [[
name,
pool.hashrate*1e-12,
pool.hashrate/totalhashrate,
pool.maxblocksize,
pool.minfeerate,
pool.mfrstats['abovekn'],
pool.mfrstats['belowkn'],
pool.mfrstats['mean'],
pool.mfrstats['std'],
pool.mfrstats['bias']]
for name, pool in poolitems]
timestamp = (datetime.utcfromtimestamp(pe.timestamp).
strftime("%Y/%m/%d %H:%M"))
misc_stats = [totalhashrate*1e-12, 1 / pe.blockrate, timestamp]
return table, misc_stats
@retry(wait=1, maxtimes=3, logger=logger)
def update_tables(credentials, table, misc_stats):
gc = gspread.authorize(credentials)
spreadsheet = gc.open("Mining Pools")
pools_wks = spreadsheet.worksheet("Pools")
numrows = len(table)
numcols = len(table[0])
pools_wks.resize(rows=numrows+1)
endcell = pools_wks.get_addr_int(numrows+1, numcols)
cell_list = pools_wks.range('A2:' + endcell)
table_list = sum(table, [])
for cell, cellvalue in zip(cell_list, table_list):
cell.value = cellvalue
pools_wks.update_cells(cell_list)
misc_wks = spreadsheet.worksheet("Misc")
cell_list = misc_wks.range("A2:C2")
for cell, cellvalue in zip(cell_list, misc_stats):
cell.value = cellvalue
misc_wks.update_cells(cell_list)
def main(credentialsfile):
table, misc_stats = get_pools_table()
with open(credentialsfile, "r") as f:
json_key = json.load(f)
scope = ['https://spreadsheets.google.com/feeds']
credentials = SignedJwtAssertionCredentials(
json_key['client_email'], json_key['private_key'], scope)
update_tables(credentials, table, misc_stats)
|
StarcoderdataPython
|
3478590
|
# Copyright (c) <NAME>. All rights reserved.
from .builder import OPTIMIZERS, build_optimizer
from .lamb import Lamb
__all__ = ['OPTIMIZERS', 'build_optimizer', 'Lamb']
|
StarcoderdataPython
|
9743988
|
import requests
import json
import clipboard
import time
def main():
temp = None
try:
import tkinter
temp = 1
except:
temp = 0
if temp == 0:
print("No Valid Tkinter installation found. Either tkinter is not installed or tkinter is not supported on this platform.")
if temp == 1:
try:
from tkinter import Tk
from tkinter.filedialog import askopenfilename
Tk().withdraw()
filename = askopenfilename()
myfiles = {'file': open(filename ,'rb')}
url = "https://file.io"
postrequest = requests.post(url, files = myfiles)
jsonloadedresponse = json.loads(postrequest.text)
downloadlink = jsonloadedresponse["link"]
print(downloadlink)
clipboard.copy(downloadlink)
time.sleep(1)
except:
print("Error")
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
3495872
|
from django.conf.urls import url
from .views import search_users, new_connection, accept_connection
urlpatterns = [
url(r'^search/$', search_users, name="connections_user_search"),
url(r'^invite/$', new_connection , name="connections_new_connection"),
url(r'^accept/$', accept_connection, name="connections_accept_connection"),
]
|
StarcoderdataPython
|
3455262
|
<gh_stars>10-100
"""Custom middlewares for the project."""
from __future__ import absolute_import
import re
from django.conf import settings
from django.core.mail import mail_managers
from django.http import HttpResponseRedirect
from django.utils.encoding import force_text
class AjaxRedirectMiddleware(object):
"""
Middleware that sets a made up status code when a redirect has happened.
This is necessary for AJAX calls with jQuery. It seems to set the status
code to 200 when in reality it was a 301 or 302.
If you want to override this behaviour for some of your ajax calls, you
can add `ajax_redirect_passthrough` as a hidden field or as a GET
parameter.
"""
def process_response(self, request, response):
if request.is_ajax():
if request.GET.get('ajax_redirect_passthrough', request.POST.get(
'ajax_redirect_passthrough')):
return response
if type(response) == HttpResponseRedirect:
response.status_code = 278
return response
class ErrorMiddleware(object):
"""Alter HttpRequest objects on Error."""
def process_exception(self, request, exception):
"""
Add user details.
"""
if request.user and hasattr(request.user, 'email'):
request.META['USER'] = request.user.email
class SSLRedirect:
"""
Redirects all non-SSL requests to the SSL versions.
You can add exceptions via the setting ``NO_SSL_URLS``. This allows you to
forward your whole website to the SSL version except for a few URLs that
you need to serve via non-SSL for whatever reason.
"""
def process_request(self, request):
no_ssl_urls = getattr(settings, 'NO_SSL_URLS', [])
urls = tuple([re.compile(url) for url in no_ssl_urls])
secure = False
for url in urls:
if not url.match(request.path):
secure = True
break
if not secure == self._is_secure(request):
return self._redirect(request, secure)
def _is_secure(self, request):
if request.is_secure():
return True
# Handle the Webfaction case until this gets resolved in the
# request.is_secure()
if 'HTTP_X_FORWARDED_SSL' in request.META:
return request.META['HTTP_X_FORWARDED_SSL'] == 'on'
return False
def _redirect(self, request, secure):
protocol = secure and "https" or "http"
if secure:
host = getattr(settings, 'SSL_HOST', request.get_host())
else:
host = getattr(settings, 'HTTP_HOST', request.get_host())
newurl = "%s://%s%s" % (protocol, host, request.get_full_path())
if settings.DEBUG and request.method == 'POST':
raise Exception(
"Django can't perform a SSL redirect while maintaining POST"
" data. Please structure your views so that redirects only"
" occur during GETs.")
return HttpResponseRedirect(newurl)
class CustomBrokenLinkEmailsMiddleware(object):
"""Custom version that adds the user to the error email."""
def process_response(self, request, response):
"""
Send broken link emails for relevant 404 NOT FOUND responses.
"""
if response.status_code == 404 and not settings.DEBUG:
domain = request.get_host()
path = request.get_full_path()
referer = force_text(
request.META.get('HTTP_REFERER', ''), errors='replace')
if not self.is_ignorable_request(request, path, domain, referer):
ua = request.META.get('HTTP_USER_AGENT', '<none>')
ip = request.META.get('REMOTE_ADDR', '<none>')
user = None
if request.user and hasattr(request.user, 'email'):
user = request.user.email
content = (
"Referrer: %s\n"
"Requested URL: %s\n"
"User agent: %s\n"
"IP address: %s\n"
"User: %s\n"
) % (referer, path, ua, ip, user)
if self.is_internal_request(domain, referer):
internal = 'INTERNAL '
else:
internal = ''
mail_managers(
"Broken %slink on %s" % (
internal,
domain
),
content,
fail_silently=True)
return response
def is_internal_request(self, domain, referer):
"""
Returns True if referring URL is the same domain as current request.
"""
# Different subdomains are treated as different domains.
return bool(re.match("^https?://%s/" % re.escape(domain), referer))
def is_ignorable_request(self, request, uri, domain, referer):
"""
Returns True if the given request *shouldn't* notify the site managers.
"""
# '?' in referer is identified as search engine source
if (not referer or (not self.is_internal_request(
domain, referer) and '?' in referer)):
return True
return any(
pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS)
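# Settings sketch for the middlewares above (setting names are taken from the
# code and docstrings; the values are illustrative):
#
#   # settings.py
#   NO_SSL_URLS = [r'^/healthcheck/', r'^/webhooks/']  # keep these on plain HTTP
#   SSL_HOST = 'secure.example.com'                    # optional host override for HTTPS redirects
#   HTTP_HOST = 'www.example.com'                      # optional host override for HTTP redirects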
|
StarcoderdataPython
|
4893615
|
<reponame>affjljoo3581/Expanda
from expanda.shuffling import shuffle
from unittest import mock
from io import BytesIO
class _modified_open_wrapper(object):
def __init__(self):
self.file_table = {}
def __call__(self, path, mode):
# Return already created fake file.
if path in self.file_table:
return self.file_table[path]
        # Modify the `close` method to prevent actually closing the buffer.
def modified_close():
self.file_table[path].seek(0)
        # Create a fake file using 'BytesIO', which behaves similarly to a real file.
self.file_table[path] = BytesIO()
self.file_table[path].close = modified_close
return self.file_table[path]
@mock.patch('os.remove')
@mock.patch('builtins.open')
def test_shuffling_integrity(mock_open, mock_remove):
mock_open.side_effect = _modified_open_wrapper()
# Create target file.
original = list(range(100))
with open('input', 'wb') as fp:
fp.write(b'\n'.join([str(i).encode() for i in original]))
# Shuffle the file and write to `output`.
shuffle('input', 'output', 'tmp')
# Read shuffled file.
with open('output', 'rb') as fp:
shuffled = [int(i.decode()) for i in fp.readlines()]
assert sorted(shuffled) == original
|
StarcoderdataPython
|
3220029
|
<filename>setup.py
# -*- coding: utf-8 -*-
import os
from setuptools import setup
VERSION = '1.3.1'
setup(
name='conllu',
packages=["conllu"],
version=VERSION,
description='CoNLL-U Parser parses a CoNLL-U formatted string into a nested python dictionary',
long_description=open(os.path.join(os.path.dirname(__file__), 'README.md')).read(),
long_description_content_type="text/markdown",
author=u'<NAME>',
author_email='<EMAIL>',
url='https://github.com/EmilStenstrom/conllu/',
install_requires=[],
keywords=['conllu', 'conll', 'conll-u', 'parser', 'nlp'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Operating System :: OS Independent",
],
)
|
StarcoderdataPython
|
8003255
|
<reponame>changwoo-ivy/ivy-fabric
from unittest import TestCase
# noinspection PyUnresolvedReferences
from types import ModuleType, FunctionType
from shell_command import ShellCommand
import re
import utils
class TestShellCommand(TestCase):
def setUp(self):
self.cmd = ShellCommand(
database='wordpress',
user='user',
passwd='<PASSWORD>',
mysql_options='--protocol=tcp',
mysqldump_options='--single-transaction'
)
@staticmethod
def replace_spaces(string):
return re.sub(r'\s+', ' ', string)
def test_redirect(self):
self.assertEqual(self.cmd.redirect_null, self.cmd.redirect())
self.assertEqual('1 ' + self.cmd.redirect_null, self.cmd.redirect(1))
def test_cmd_mysql(self):
self.assertEqual(
'mysql --host=localhost --port=3306 --user=user --password=<PASSWORD> '
'--protocol=tcp --column-type-info wordpress 2 > /dev/null',
self.replace_spaces(self.cmd.cmd_mysql('--column-type-info'))
)
def test_cmd_mysqldump(self):
self.assertEqual(
'mysqldump --host=localhost --port=3306 --user=user --password=<PASSWORD> '
'--single-transaction --skip-opt wordpress 2 > /dev/null',
self.replace_spaces(self.cmd.cmd_mysqldump('--skip-opt'))
)
def test_cmd_snapshot_database(self):
self.assertEqual(
'mysqldump --host=localhost --port=3306 --user=user --password=<PASSWORD> '
'--single-transaction wordpress 2 > /dev/null | gzip > snapshot.sql.gz',
self.replace_spaces(self.cmd.cmd_snapshot_database('snapshot.sql.gz', None))
)
def test_cmd_snapshot_directory(self):
self.assertEqual(
'tar czpf snapshot.tar.gz public_html',
self.replace_spaces(self.cmd.cmd_snapshot_directory('snapshot.tar.gz', 'public_html'))
)
def test_cmd_replace_local_db(self):
self.assertEqual(
'gunzip -c snapshot.sql.gz | '
'mysql --host=localhost --port=3306 --user=user --password=<PASSWORD> '
'--protocol=tcp wordpress 2 > /dev/null',
self.replace_spaces(
self.cmd.cmd_replace_local_db(
snapshot_file='snapshot.sql.gz',
mysql_options='',
pipe=''
)
)
)
self.assertEqual(
'gunzip -c snapshot.sql.gz | sed | pipe-test | '
'mysql --host=localhost --port=3306 --user=user --password=<PASSWORD> '
'--protocol=tcp wordpress 2 > /dev/null',
self.replace_spaces(
self.cmd.cmd_replace_local_db(
snapshot_file='snapshot.sql.gz',
mysql_options='',
pipe=['sed', 'pipe-test']
)
)
)
self.assertEqual(
'gunzip -c snapshot.sql.gz | sed | '
'mysql --host=localhost --port=3306 --user=user --password=<PASSWORD> '
'--protocol=tcp wordpress 2 > /dev/null',
self.replace_spaces(
self.cmd.cmd_replace_local_db(
snapshot_file='snapshot.sql.gz',
mysql_options='',
pipe='sed'
)
)
)
class TestUtils(TestCase):
def test_load_script_module(self):
test_script = utils.load_script_module('test_script')
self.assertIsInstance(test_script, ModuleType)
test_func = getattr(test_script, 'test_func')
self.assertIsInstance(test_func, FunctionType)
self.assertEqual('Hello, World!', test_func())
|
StarcoderdataPython
|
6425777
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
__author__ = '<NAME> (<EMAIL>)'
__license__ = 'MIT'
__version__ = '1.19.1'
|
StarcoderdataPython
|
6646096
|
# -*- coding: utf-8 -*-
# Copyright 2021, SERTIT-ICube - France, https://sertit.unistra.fr/
# This file is part of sertit-utils project
# https://github.com/sertit/sertit-utils
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Script testing string functions """
import argparse
import logging
from datetime import date, datetime
import pytest
from sertit import strings
def test_conversion():
# Str to bool
true_str = (True, "yes", "true", "t", "y", "1")
false_str = (False, "no", "false", "f", "n", "0")
for true, false in zip(true_str, false_str):
assert strings.str_to_bool(true)
assert not strings.str_to_bool(false)
# Str to logging verbosity
debug_str = ("debug", "d", 10)
info_str = ("info", "i", 20)
warn_str = ("warning", "w", "warn", 30)
err_str = ("error", "e", "err", 40)
for debug, info, warn, err in zip(debug_str, info_str, warn_str, err_str):
assert strings.str_to_verbosity(debug) == logging.DEBUG
assert strings.str_to_verbosity(info) == logging.INFO
assert strings.str_to_verbosity(warn) == logging.WARNING
assert strings.str_to_verbosity(err) == logging.ERROR
with pytest.raises(argparse.ArgumentTypeError):
strings.str_to_verbosity("oopsie")
# Str to list of dates
list_of_str_dates = "20200909105055, 2019-08-06;19560702121212\t2020-08-09"
list_of_datetimes = [
datetime(2020, 9, 9, 10, 50, 55),
datetime(2019, 8, 6),
datetime(1956, 7, 2, 12, 12, 12),
datetime(2020, 8, 9),
]
list_of_dates = [
datetime(2020, 9, 9, 10, 50, 55),
date(2019, 8, 6),
datetime(1956, 7, 2, 12, 12, 12),
date(2020, 8, 9),
]
assert list_of_datetimes == strings.str_to_list_of_dates(
list_of_str_dates, date_format="%Y%m%d%H%M%S", additional_separator="\t"
)
assert strings.str_to_list_of_dates(list_of_dates) == list_of_datetimes
assert strings.str_to_list_of_dates(list_of_datetimes) == list_of_datetimes
# Str to list
list_str_up = ["A", "B"]
list_str_low = ["a", "b"]
assert strings.str_to_list(list_str_up) == list_str_up
assert strings.str_to_list(list_str_low, case="upper") == list_str_up
assert strings.str_to_list(list_str_up, case="lower") == list_str_low
def test_str():
"""Test string function"""
tstr = "ThisIsATest"
assert strings.snake_to_camel_case(strings.camel_to_snake_case(tstr)) == tstr
assert strings.to_cmd_string(tstr) == f'"{tstr}"'
|
StarcoderdataPython
|
1880679
|
from anytree import Node, PostOrderIter
DIRECTION = ["inc_", "exc_"]
FILTERS = ["name", "content", "type", "text_longer", "ancestor", "parent"]
LBL_WARNING = """Warning:
This feature assumes that you have a DATABASE which is doing the heavy lifting.
If you do not have that, please use the normal version.
"""
class SearchableMixin(object):
def select(self, node_name):
node = self.find_node(node_name)
if not node:
return
return type(self).from_tree(node, title=node_name)
def get_query(self, query_str:str) -> dict:
return self._build_query(query_str)
def _build_query(self, query_str: str) -> dict:
# init, fix
        query_str = query_str.replace("  ", " ").replace("\n", " ").rstrip().lstrip()
if not query_str.startswith("inc_") and not query_str.startswith("exc_"):
query_str = "inc_content:" + query_str
if "inc_ancestor:" not in query_str:
query_str += " inc_ancestor:" + str(self.tree.name)
# break into lines
for i in DIRECTION:
query_str = query_str.replace(i, "_BREAK_" + i)
values = [
i.rstrip().lstrip()
for i in query_str.split("_BREAK_")
if i.rstrip().lstrip() != ""
]
# build query
query_dict = {}
for line in values:
            u_ind = line.find("_") # underscore index
            c_ind = line.find(":") # colon index
key = line[u_ind + 1 : c_ind]
if key not in FILTERS:
continue
if key not in query_dict:
query_dict[key] = {}
query_dict[key][line[:u_ind]] = [
i.rstrip().lstrip().lower()
for i in line[c_ind + 1 :].rstrip().lstrip().split(",")
if i != ""
]
# fix query
for key, val in query_dict.items():
if "inc" not in val:
query_dict[key]["inc"] = []
if "exc" not in val:
query_dict[key]["exc"] = []
return query_dict
def search(
self,
query_str: str,
title: str = None,
select_from: str = None,
filter_type: str = None,
silent: bool = True,
adv_form: bool = False,
):
if 'inc_' in query_str or 'exc_' in query_str:
raise Exception('Exception: For advanced queries use advanced_search.')
if filter_type and isinstance(filter_type, str):
query_str += ' inc_type:' + filter_type
if select_from and isinstance(select_from, str):
query_str += ' inc_ancestor:' + select_from
if adv_form:
print(query_str)
return self.advanced_search(query_str, title=title, silent=silent)
def advanced_search(
self,
query_str: str,
node_type: Node = None,
title: str = None,
lbl: bool = False,
silent: bool = True,
with_children: bool = False,
) -> Node:
"""Deals with the independent steps of searching
- filter-content
- filter-others
- filter-ancestors
- merge
For performance we can have this feature enabled: late_body_loading
- which considers a potential missing body if loading from db.
This is for cases of large-text bodies that can block the memory.
For this, we do the filtering in the engine of the DATABASE that we are using.
Write WARNING when using this feature.
Otherwise, we load KnowledgeGraph with Body.
Parameters:
full_tree: keep the full tree of ancestors
title: name for the search top
groupby_type: group results by type
keep in mind document:section differentiation
late_body_loading: takes care of late-loading of body for
performance improvement.
silent_lbl_warning: silent the warning of DATABASE required.
"""
query_dict = self._build_query(query_str)
if node_type == None:
node_type = type(self.tree)
nodes = []
# content filtering
if lbl:
if not silent:
print(LBL_WARNING)
# content_filtered_nodes
nodes.append(self.apply_lbl_filters(query_dict, late_body_loading=True))
# nodes based on other filters
for node in PostOrderIter(self.tree):
node.advanced_scoring(
query_dict, late_body_loading=lbl, strict=True
)
nodes.append(set([i for i in PostOrderIter(self.tree) if i.score]))
# ancestor filtering
if "ancestor" in query_dict:
nodes.append(
self.filter_subgraph(
inc_list=query_dict["ancestor"]["inc"],
exc_list=query_dict["ancestor"]["exc"],
)
)
# merging
# get count of nodes found
# quickhack for skipping overlapping nodes
def get_tuple_node(node):
return (node.name, node.parent.name if node.parent else None)
def get_tuple_list(nodes):
return [
set([get_tuple_node(i) for i in j])
for j in nodes
]
incl_tuple = set.intersection(*get_tuple_list(nodes))
incl_nodes = [i for i in nodes[-1] if get_tuple_node(i) in incl_tuple]
if not silent:
print("Total Selected: ", len(nodes[-1]))
print("Total Filtered: ", len(nodes[0]))
print("--------------")
print("Total Found: ", len(incl_nodes))
print()
# post processing
ancestor_node = self.merge_nodes_into_tree(
node_list=incl_nodes,
node_type=node_type,
with_children=with_children,
)
if not title:
title = ancestor_node.name
return type(self).from_tree(ancestor_node, title=title)
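# Usage sketch (assumes `kg` is an instance of a class mixing in SearchableMixin
# with a populated `tree`; the query strings are illustrative):
#
#   hits = kg.search("graph theory", filter_type="section")
#   adv = kg.advanced_search("inc_content:graph exc_name:draft")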
|
StarcoderdataPython
|
9674423
|
from utils import *
from GCNmodel import *
|
StarcoderdataPython
|
6438994
|
<reponame>I4-Projektseminar-HHU-2016/seminar-project-marionline03
# -*- coding: utf-8 -*-
import logging
# for DB usage
import sqlite3
logging.basicConfig(filename='log.txt',level=logging.DEBUG)
#Voc
def make_tabel_voc():
try:
con = sqlite3.connect('game.db')
con.execute('''CREATE TABLE IF NOT EXISTS voc (
id INTEGER PRIMARY KEY,
question CHAR(100) NOT NULL,
answer CHAR(100) NOT NULL,
times_answered_correctly INT,
question_language CHAR(100),
answer_language CHAR(100)
)
''')
# due_on FLOAT
# due BOOL
con.commit()
con.close()
except Exception as msg:
        logging.debug('make_tabel_voc: ERROR: %s', msg)
print(msg)
#make_tabel_voc()
def write_tabel_voc(data):
question, answer, times_answered_correctly, question_language, answer_language = data
try:
con = sqlite3.connect('game.db')
con.execute('INSERT INTO voc (question, answer, times_answered_correctly, question_language, answer_language) VALUES (?,?,?,?,?);',(question, answer, times_answered_correctly, question_language, answer_language))
con.commit()
except Exception as msg:
        logging.debug('write_tabel_voc: ERROR: %s', msg)
finally:
if con:
con.close()
#write_tabel_voc(('Hello','Hallo',0, 'german'))
#write_tabel_voc(('Guten Abend','Good evening',0,'german'))
#write_tabel_voc(('Wo ist ...?','Where is ...?',0,'german'))
#write_tabel_voc(('Ich bin ...','I am ...',0, 'german'))
#write_tabel_voc(('Ich habe Hunger.','I am hungry.',0, 'german'))
#write_tabel_voc(('Help!','Hilfe!',0, 'german'))
#write_tabel_voc(('Bye!','Tschüss!',0, 'german'))
def alter_tabel_voc(data_name,data_content):
try:
con = sqlite3.connect('game.db')
command="UPDATE voc SET {}={};".format(data_name, data_content) # image_path CHAR(200)
con.execute(command)
con.commit()
con.close()
except Exception as msg:
print('alter_tabel_voc: ERROR: ', msg)
        logging.debug('alter_tabel_voc: ERROR: %s', msg)
finally:
if con:
con.close()
#alter_tabel_voc('times_answered_correctly', 3)
def alter_tabel_voc_where(data_name, data_content, where, what):
try:
con = sqlite3.connect('game.db')
command="UPDATE voc SET {}='{}' WHERE {}={};".format(data_name, data_content, where, what) # image_path CHAR(200)
con.execute(command)
con.commit()
con.close()
except Exception as msg:
print('alter_tabel_voc: ERROR: ', msg)
        logging.debug('alter_tabel_voc_where: ERROR: %s', msg)
finally:
if con:
con.close()
#alter_tabel_voc_where('times_answered_correctly', 5, 'id', 2)
def read_everything_from_tabel_voc():
try:
con = sqlite3.connect('game.db')
command ="SELECT * FROM voc"
c = con.cursor()
c.execute(command)
data = c.fetchall()
#logging.debug("read_everything_from_tabel_voc: {}".format(data))
return data
except Exception as msg:
        logging.debug('read_everything_from_tabel_voc: ERROR: %s', msg)
finally:
if con:
con.close()
#data = read_everything_from_tabel_voc()
#print(data)
def read_value_from_tabel_voc(value, where, what):
try:
con = sqlite3.connect('game.db')
command ="SELECT {} FROM voc WHERE {}={}".format(value, where, what)
c = con.cursor()
c.execute(command)
data = c.fetchall()
#logging.debug("read_value_from_tabel_voc: {}".format(data))
#print(data)
return data
except Exception as msg:
        logging.debug('read_value_from_tabel_voc: ERROR: %s', msg)
finally:
if con:
con.close()
|
StarcoderdataPython
|
1620197
|
<reponame>velocist/TS4CheatsInfo<filename>Scripts/simulation/rabbit_hole/career_rabbit_hole.py
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\rabbit_hole\career_rabbit_hole.py
# Compiled at: 2020-08-21 07:36:12
# Size of source mod 2**32: 4199 bytes
import services
from event_testing.resolver import SingleSimResolver
from rabbit_hole.rabbit_hole import RabbitHole, RabbitHolePhase, RabbitHoleTimingPolicy
from sims4.tuning.instances import lock_instance_tunables
class CareerRabbitHole(RabbitHole):
def __init__(self, *args, career_uid=None, **kwargs):
(super().__init__)(*args, **kwargs)
self._career_uid = career_uid
@classmethod
def get_affordance(cls, sim_info, career_uid):
career = services.sim_info_manager().get(sim_info.sim_id).career_tracker.get_career_by_uid(career_uid)
if career is None:
return
resolver = SingleSimResolver(sim_info)
for tested_affordance_tuning in career.tested_affordances:
if tested_affordance_tuning.tests.run_tests(resolver):
return tested_affordance_tuning.affordance
if sim_info.is_at_home:
return career.career_affordance
def select_affordance(self):
if self._selected_affordance is not None:
return self._selected_affordance
sim_info = services.sim_info_manager().get(self.sim_id)
self._selected_affordance = self.get_affordance(sim_info, self._career_uid)
return self._selected_affordance
def is_valid_to_restore(self, sim_info):
career_tracker = sim_info.career_tracker
if career_tracker is None:
return False
career = career_tracker.get_career_by_uid(self._career_uid)
if career is None:
return False
return super().is_valid_to_restore(sim_info)
@classmethod
def get_travel_affordance(cls, sim_info, career_uid):
career = sim_info.career_tracker.get_career_by_uid(career_uid)
return career.go_home_to_work_affordance
def select_travel_affordance(self):
sim_info = services.sim_info_manager().get(self.sim_id)
return self.get_travel_affordance(sim_info, self._career_uid)
def on_activate(self):
super().on_activate()
career = services.sim_info_manager().get(self.sim_id).career_tracker.get_career_by_uid(self._career_uid)
career.attend_work()
def on_remove(self, canceled=False):
super().on_remove(canceled=canceled)
if canceled:
sim_info = services.sim_info_manager().get(self.sim_id)
if sim_info is not None:
if sim_info.career_tracker is not None:
career = sim_info.career_tracker.get_career_by_uid(self._career_uid)
if career is None:
return
elif self.is_active():
career.leave_work(left_early=True)
else:
career.on_inactive_rabbit_hole_canceled()
def save(self, rabbit_hole_data):
super().save(rabbit_hole_data)
rabbit_hole_data.career_uid = self._career_uid
def load(self, rabbit_hole_data):
super().load(rabbit_hole_data)
self._career_uid = rabbit_hole_data.career_uid
lock_instance_tunables(CareerRabbitHole, away_action=None,
time_tracking_policy=(RabbitHoleTimingPolicy.NO_TIME_LIMIT),
affordance=None,
tested_affordances=None,
go_home_and_attend=None)
|
StarcoderdataPython
|
6436093
|
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import copy
from gym.envs.classic_control import rendering
from scipy.special import expit
class DPGCEEnv(gym.Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self):
self.current_state = 0
self._seed()
self.viewer = None
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def _step(self, action):
if self.current_state == 0:
reward = 0
done = False
threshold = expit(-action)
random_number = self.np_random.uniform(low=0.0, high=1.0, size=(1,))
if random_number < threshold:
next_state = 1
else:
next_state = 2
else:
next_state = 0
done = True
if self.current_state == 1:
reward = 2.0*expit(-action)
else:
reward = expit(action)
self.current_state = next_state
return self.current_state, reward, done, {}
def _reset(self):
self.current_state = 0
return self.current_state
def _render(self, mode='human', close=False):
return None
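# Usage sketch (the env follows the old gym API with underscored methods and is
# normally registered and driven through gym.make; it can also be exercised
# directly):
#
#   env = DPGCEEnv()
#   state = env._reset()
#   state, reward, done, info = env._step(0.5)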
|
StarcoderdataPython
|
6581462
|
#<NAME> and <NAME>
print('hello World')
|
StarcoderdataPython
|
3355019
|
import networkx as nx
import numpy as np
from ...Fragment.FragmentChain import FragmentChain
from ..AssemblyMixError import AssemblyMixError
class ConstructsMixin:
"""Mixin for AssemblyMix"""
def compute_random_circular_fragments_sets(
self, staling_cutoff=100, fragments_sets_filters=()
):
"""Return an iterator over all the lists of fragments [f1, f2, f3...fn]
that can assemble into a circular construct.
This means that fragment f1 will clip with f2 (in this order),
f2 with f3... and fn with f1.
Parameters
----------
fragments_sets_filters
A list of test functions of the form (set->True/False) where "set" is
a list of fragments from the mix (which assemble into a circular
construct). Only circular fragments sets passing all these tests
will be returned
randomize
If set to False, the circular fragments sets will be returned one by
one until the last one, in an order implemented by
networkx.simple_cycles.
            If set to True, the circular sets returned will be drawn randomly
(a circular set will only be returned once). This is very practical
to obtain a sample out of a combinatorial assembly mix. However this
feature is a bit experimental, and the iteration will certainly stale
before all cycles have been found, because the randomizer can't find
any new cycle.
randomization_staling_cutoff
If randomize is True, the randomizer will throw an error if the
latest C cycles it has drawn had already been seen before, where C
is the randomization staling cutoff.
"""
def generator():
"""Return random cycles from the connections graph.
            The randomness is introduced by permuting the node names,
            running `networkx.simple_cycles` once, permuting the node
            names again, etc.
"""
graph = self.filtered_connections_graph
seen_hashes = set()
graph_nodes = list(graph.nodes())
node_to_index = {node: i for i, node in enumerate(graph_nodes)}
while True:
permutation = np.arange(len(graph_nodes))
np.random.shuffle(permutation)
antipermutation = np.argsort(permutation)
new_graph = nx.DiGraph(
[
(
permutation[node_to_index[node1]],
permutation[node_to_index[node2]],
)
for node1, node2 in graph.edges()
]
)
counter = 0
for cycle in nx.simple_cycles(new_graph):
cycle = [antipermutation[i] for i in cycle]
fragments = [
self.fragments_dict[graph_nodes[i]] for i in cycle
]
cycle = FragmentChain(
fragments, is_cycle=True
).standardized()
cycle_hash = hash(cycle)
if cycle_hash in seen_hashes:
counter += 1
if counter > staling_cutoff:
raise ValueError(
"Randomization staled. Only randomize when"
" the search space is huge."
)
continue
seen_hashes.add(cycle_hash)
if all(
fl(cycle.fragments) for fl in fragments_sets_filters
):
yield cycle.fragments
break
else:
break
return generator()
def compute_circular_fragments_sets(self, fragments_sets_filters=()):
"""Return an iterator over all the lists of fragments [f1, f2, f3...fn]
that can assemble into a circular construct.
This means that fragment f1 will clip with f2 (in this order),
f2 with f3... and fn with f1.
Parameters
----------
fragments_sets_filters
A list of test functions of the form (set->True/False) where "set" is
a list of fragments from the mix (which assemble into a circular
construct). Only circular fragments sets passing all these tests
will be returned
"""
def generator():
"""Iterate over all circular paths in the connexion graph
using Networkx's `simple_paths`."""
seen_hashes = set()
for cycle in nx.simple_cycles(self.filtered_connections_graph):
cycle = [self.fragments_dict[i] for i in cycle]
cycle = FragmentChain(cycle, is_cycle=True).standardized()
cycle_hash = hash(cycle)
if cycle_hash in seen_hashes:
continue
seen_hashes.add(cycle_hash)
if all(fl(cycle.fragments) for fl in fragments_sets_filters):
yield cycle.fragments
return generator()
def compute_circular_assemblies(
self,
fragments_sets_filters=(),
seqrecord_filters=(),
annotate_parts_homologies=False,
randomize=False,
randomization_staling_cutoff=100,
):
"""Return a generator listing the circular assemblies in the graph.
Parameters
----------
fragments_sets_filters
A list of test functions of the form (set->True/False) where "set" is
a list of fragments from the mix (which assemble into a circular
construct). Only circular fragments sets passing all these tests
will be returned
seqrecord_filters
A list of test functions of the form (record->True/False) where
"record" is the biopython record of a circular assembly found.
Only records passing all these tests will be returned
annotate_parts_homologies
If True, the junctions between assembled fragments will be annotated
in the final record with a feature of type 'homology' and label
equal to the homology (if <8bp), else simply 'homology'.
randomize
If set to False, the circular fragments sets will be returned one by
one until the last one, in an order implemented by
networkx.simple_cycles.
            If set to True, the circular sets returned will be drawn randomly
(a circular set will only be returned once). This is very practical
to obtain a sample out of a combinatorial assembly mix. However this
feature is a bit experimental, and the iteration will certainly stale
before all cycles have been found, because the randomizer can't find
any new cycle.
randomization_staling_cutoff
If randomize is True, the randomizer will throw an error if the
latest C cycles it has drawn had already been seen before, where C
is the randomization staling cutoff.
"""
def assemblies_generator():
if randomize:
fragments = self.compute_random_circular_fragments_sets(
fragments_sets_filters=fragments_sets_filters,
staling_cutoff=randomization_staling_cutoff,
)
else:
fragments = self.compute_circular_fragments_sets(
fragments_sets_filters=fragments_sets_filters
)
for fragments in fragments:
construct = self.assemble(
fragments,
circularize=True,
annotate_homologies=annotate_parts_homologies,
)
if all(fl(construct) for fl in seqrecord_filters):
construct.fragments = fragments
yield construct
return assemblies_generator()
def compute_linear_assemblies(
self,
fragments_sets_filters=(),
min_parts=2,
seqrecord_filters=(),
annotate_parts_homologies=False,
):
"""Return a generator listing the possible linear assemblies.
Parameters
----------
fragments_sets_filters
A list of test functions of the form (set->True/False) where "set" is
a list of fragments from the mix (which assemble into a circular
construct). Only circular fragments sets passing all these tests
will be returned
min_parts
Assemblies with less than this number of parts will be ignored.
seqrecord_filters
A list of test functions of the form (record->True/False) where
"record" is the biopython record of a circular assembly found.
Only records passing all these tests will be returned
annotate_parts_homologies
If True, the junctions between assembled fragments will be annotated
in the final record with a feature of type 'homology' and label
equal to the homology (if <8bp), else simply 'homology'.
randomize
If set to False, the circular fragments sets will be returned one by
one until the last one, in an order implemented by
networkx.simple_cycles.
            If set to True, the circular sets returned will be drawn randomly
(a circular set will only be returned once). This is very practical
to obtain a sample out of a combinatorial assembly mix. However this
feature is a bit experimental, and the iteration will certainly stale
before all cycles have been found, because the randomizer can't find
any new cycle.
randomization_staling_cutoff
If randomize is True, the randomizer will throw an error if the
latest C cycles it has drawn had already been seen before, where C
is the randomization staling cutoff.
Notes
------
This is a bit undertested as there have been little use cases.
"""
seen_hashes = set()
g = self.filtered_connections_graph
for source, targets in nx.shortest_path(g).items():
for target, path in targets.items():
if len(path) < min_parts:
continue
fragments = [self.fragments_dict[f] for f in path]
if not all([fl(fragments) for fl in fragments_sets_filters]):
continue
chain = FragmentChain(fragments).standardized()
chain_hash = hash(chain)
if chain_hash in seen_hashes:
continue
seen_hashes.add(chain_hash)
fragments_assembly = self.assemble(
fragments, annotate_homologies=annotate_parts_homologies
)
if all([fl(fragments_assembly) for fl in seqrecord_filters]):
yield (fragments_assembly)
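# Usage sketch (assumes `mix` is an AssemblyMix subclass instance using this
# mixin; the filter lambda is illustrative):
#
#   for construct in mix.compute_circular_assemblies(
#       fragments_sets_filters=[lambda frags: len(frags) <= 6],
#       annotate_parts_homologies=True,
#   ):
#       print(len(construct), "bp from", len(construct.fragments), "fragments")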
|
StarcoderdataPython
|
11212386
|
import subprocess
import os
from spotdl.encode import EncoderBase
from spotdl.encode.exceptions import EncoderNotFoundError
from spotdl.encode.exceptions import FFmpegNotFoundError
import logging
logger = logging.getLogger(__name__)
# Key: from format
# Subkey: to format
RULES = {
"m4a": {
"mp3": "-codec:v copy -codec:a libmp3lame",
"opus": "-codec:a libopus",
"m4a": "-acodec copy",
"flac": "-codec:a flac",
"ogg": "-codec:a libvorbis -q:a 5",
},
"opus": {
"mp3": "-codec:a libmp3lame",
"m4a": "-cutoff 20000 -codec:a aac",
"flac": "-codec:a flac",
"ogg": "-codec:a libvorbis -q:a 5",
"opus": "-acodec copy",
},
}
class EncoderFFmpeg(EncoderBase):
"""
A class for encoding media files using FFmpeg.
Parameters
----------
encoder_path: `str`
Path to FFmpeg.
must_exist: `bool`
Error out immediately if the encoder isn't found in
``encoder_path``.
Examples
--------
+ Re-encode an OPUS stream from STDIN to an MP3:
>>> import os
>>> input_path = "audio.opus"
>>> target_path = "audio.mp3"
>>> input_path_size = os.path.getsize(input_path)
>>>
>>> from spotdl.encode.encoders import EncoderFFmpeg
>>> ffmpeg = EncoderFFmpeg()
>>> process = ffmpeg.re_encode_from_stdin(
... input_encoding="opus",
... target_path=target_path
... )
>>>
>>> chunk_size = 4096
>>> total_chunks = (input_path_size // chunk_size) + 1
>>>
>>> with open(input_path, "rb") as fin:
... for chunk_number in range(1, total_chunks+1):
... chunk = fin.read(chunk_size)
... process.stdin.write(chunk)
... print("chunks encoded: {}/{}".format(
... chunk_number,
... total_chunks,
... ))
>>>
>>> process.stdin.close()
>>> process.wait()
"""
def __init__(self, encoder_path="ffmpeg", must_exist=True):
_loglevel = "-hide_banner -nostats -v warning"
_additional_arguments = ["-b:a", "192k", "-vn"]
try:
super().__init__(encoder_path, must_exist, _loglevel, _additional_arguments)
except EncoderNotFoundError as e:
raise FFmpegNotFoundError(e.args[0])
self._rules = RULES
def set_trim_silence(self):
self.set_argument("-af silenceremove=start_periods=1")
def get_encoding(self, path):
return super().get_encoding(path)
def _generate_encoding_arguments(self, input_encoding, target_encoding):
initial_arguments = self._rules.get(input_encoding)
if initial_arguments is None:
raise TypeError(
'The input format ("{}") is not supported.'.format(
input_encoding,
))
arguments = initial_arguments.get(target_encoding)
if arguments is None:
raise TypeError(
'The output format ("{}") is not supported.'.format(
target_encoding,
))
return arguments
def set_debuglog(self):
self._loglevel = "-loglevel debug"
def _generate_encode_command(self, input_path, target_path,
input_encoding=None, target_encoding=None):
if input_encoding is None:
input_encoding = self.get_encoding(input_path)
if target_encoding is None:
target_encoding = self.get_encoding(target_path)
arguments = self._generate_encoding_arguments(
input_encoding,
target_encoding
)
command = [self.encoder_path] \
+ ["-y", "-nostdin"] \
+ self._loglevel.split() \
+ ["-i", input_path] \
+ arguments.split() \
+ self._additional_arguments \
+ ["-f", self.target_format_from_encoding(target_encoding)] \
+ [target_path]
return command
def re_encode(self, input_path, target_path, target_encoding=None, delete_original=False):
encode_command = self._generate_encode_command(
input_path,
target_path,
target_encoding=target_encoding
)
logger.debug("Calling FFmpeg with:\n{command}".format(
command=encode_command,
))
process = subprocess.Popen(encode_command)
process.wait()
encode_successful = process.returncode == 0
if encode_successful and delete_original:
os.remove(input_path)
return process
def re_encode_from_stdin(self, input_encoding, target_path, target_encoding=None):
encode_command = self._generate_encode_command(
"-",
target_path,
input_encoding=input_encoding,
target_encoding=target_encoding,
)
logger.debug("Calling FFmpeg with:\n{command}".format(
command=encode_command,
))
process = subprocess.Popen(encode_command, stdin=subprocess.PIPE)
return process
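# Simpler file-to-file usage sketch (complements the STDIN example in the class
# docstring; the paths are placeholders):
#
#   ffmpeg = EncoderFFmpeg()
#   ffmpeg.re_encode("audio.m4a", "audio.mp3", delete_original=False)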
|
StarcoderdataPython
|
8193385
|
<reponame>eduardo98m/GiaDog
"""
Authors: <NAME>, <NAME>
Project: Graduation Thesis: GIAdog
This file contains a demo to control the robot using the bezier gait.
"""
import os, sys
sys.path.append(os.path.dirname(os.path.realpath(f'{__file__}/..')))
import numpy as np
import pathlib
import argparse
from src.kinematics import FTG_handler, solve_leg_IK,transformation_matrices
from src.joystick import XboxController
from src.__env__ import LEG_SPAN, SIM_SECONDS_PER_STEP, STATE_FEATURES, \
PRIVILEGED_STATE_FEATURES, CONTROLLER_LATENCY_STEPS, \
GRAVITY_VECTOR,UPDATE_METHODS, ACTION_SPACE_SIZE,\
ADD_GAIT_DIRECTIONALITY
from src.GymEnvs import TerrainScene, QuadrupedRobot
from time import sleep, time
import numpy as np
from src.GymEnvs import pyBulletPainter as pbp
from pybullet_utils import bullet_client
import pybullet
quadruped_urdf = str(pathlib.Path(__file__).parent.parent.resolve()) +\
'/mini_ros/urdf/spot.urdf'
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Test terrain curriculum.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'-u', '--spot-urdf',
type=str,
default=str(pathlib.Path(__file__).parent.parent.resolve()) +\
'/mini_ros/urdf/spot.urdf',
help='Path to the URDF file of the quadruped robot.',
metavar='PATH'
)
args = parser.parse_args()
# Controller
joy = XboxController()
# Camera settings
cam_dist = 2
cam_yaw = 0
cam_pitch = -30
camera_speed = 1.5
# FTG settings
foot_trajectory_generator = FTG_handler("base")
client = bullet_client.BulletClient(pybullet.GUI)
robot = QuadrupedRobot(
quadruped_urdf,
client,
UPDATE_METHODS,
STATE_FEATURES,
PRIVILEGED_STATE_FEATURES
)
terrain_file = 'terrains/steps.txt'
scene = TerrainScene(
client,
GRAVITY_VECTOR,
SIM_SECONDS_PER_STEP,
CONTROLLER_LATENCY_STEPS
)
client.resetSimulation()
scene.episode_restart()
scene.load_terrain_from_file(terrain_file)
scene.place_terrain_from_file()
robot.add_to_scene(scene, first_execution = True)
robot.reset(np.array([0,0]))
# We create the menu paremeters in pybullet
phase_leg_1 = client.addUserDebugParameter(
'Leg 1',
*(0, 2*np.pi, 0)
)
phase_leg_2 = client.addUserDebugParameter(
'Leg 2',
*(0, 2*np.pi, np.pi)
)
phase_leg_3 = client.addUserDebugParameter(
'Leg 3',
*(0, 2*np.pi, np.pi)
)
phase_leg_4 = client.addUserDebugParameter(
'Leg 4',
*(0, 2*np.pi, 0)
)
base_frequency = client.addUserDebugParameter(
'Base frequency',
*(0, 32., 2.5))
psi = client.addUserDebugParameter(
'psi',
*(-0.001, 0.001, 0.00015)
)
delta = client.addUserDebugParameter(
"delta",
*(-0.10, 0.1, 0.005)
)
reset_id = client.addUserDebugParameter('Reset', 1, 0, 0)
reset_count = 0
z_frac = client.addUserDebugParameter(
"z_frac",
*(0, 1, 0.125))
#foot_trajectory_generator.dt = 1/240
t_o = time()
while True:
t_i = time()
scene.global_step(
prev_step_function_callbacks =
robot.get_previous_step_callback_functions(),
all_steps_function_callbacks =
robot.get_all_steps_callback_functions()
)
robot.elapsed_time = scene.elapsed_time
# Step the simulation
state = robot.state()
# Read the parameters
sigma = np.array([client.readUserDebugParameter(phase_leg_1),
client.readUserDebugParameter(phase_leg_2),
client.readUserDebugParameter(phase_leg_3),
client.readUserDebugParameter(phase_leg_4)
])
#foot_trajectory_generator.delta_base = client.readUserDebugParameter(delta)
#foot_trajectory_generator.psi_base = client.readUserDebugParameter(psi)
robot.foot_trajectory_generator.FTG.f0 = client.readUserDebugParameter(base_frequency)
z_frac_val = client.readUserDebugParameter(z_frac)
if reset_count != client.readUserDebugParameter(reset_id):
reset_count = client.readUserDebugParameter(reset_id)
# Reset robot position
client.resetBasePositionAndOrientation(
robot.robot_id,
(0,0,0.5),
(0,0,0,1)
)
alpha, movement_intensity = joy.get_left_joystick()
l1 = joy.LeftBumper
r1 = joy.RightBumper
cam_dir, intensity_cam = joy.get_right_joystick()
cam_pitch = cam_pitch - intensity_cam*camera_speed*np.sin(cam_dir)
cam_yaw = cam_yaw - intensity_cam*camera_speed*np.cos(cam_dir)
yaw = robot.orientation[2]
dir_angle = np.mod((alpha - np.pi/2), 2*np.pi)
robot.turn_dir = [(l1 - r1)]
robot.dir_angle = dir_angle
timestep = robot.elapsed_time - t_o
if movement_intensity == 0:
robot.foot_trajectory_generator.gait_directionality = False
else:
robot.foot_trajectory_generator.gait_directionality = True
robot.apply_action(np.zeros(ACTION_SPACE_SIZE))
client.resetDebugVisualizerCamera(cam_dist, cam_yaw,
cam_pitch,
[robot.position[0], robot.position[1], 0.2])
t_f = time()
sleep_time = SIM_SECONDS_PER_STEP * CONTROLLER_LATENCY_STEPS - (t_f - t_i)
if sleep_time > 0:
sleep(sleep_time)
|
StarcoderdataPython
|
1701443
|
<reponame>globophobe/crypto-tick-data
import pandas as pd
from ..constants import VOLUME
def is_sample(data_frame, first_index, last_index):
first_row = data_frame.loc[first_index]
last_row = data_frame.loc[last_index]
# For speed, short-circuit
if first_row.timestamp == last_row.timestamp:
if first_row.nanoseconds == last_row.nanoseconds:
if first_row.tickRule == last_row.tickRule:
if "symbol" in data_frame.columns:
if first_row.symbol == last_row.symbol:
return False
else:
return False
return True
def aggregate_trades(data_frame):
df = data_frame.reset_index()
idx = 0
samples = []
total_rows = len(df) - 1
# Were there two or more trades?
if len(df) > 1:
for row in df.itertuples():
index = row.Index
last_index = index - 1
if index > 0:
is_last_iteration = index == total_rows
# Is this the last iteration?
if is_last_iteration:
# If equal, one sample
if not is_sample(df, idx, index):
# Aggregate from idx to end of data frame
sample = df.loc[idx:]
samples.append(agg_trades(sample))
# Otherwise, two samples.
else:
# Aggregate from idx to last_index
sample = df.loc[idx:last_index]
samples.append(agg_trades(sample))
# Append last row.
sample = df.loc[index:]
assert len(sample) == 1
samples.append(agg_trades(sample))
# Is the last row equal to the current row?
elif is_sample(df, last_index, index):
# Aggregate from idx to last_index
sample = df.loc[idx:last_index]
aggregated_sample = agg_trades(sample)
samples.append(aggregated_sample)
idx = index
# Only one trade in data_frame
elif len(df) == 1:
aggregated_sample = agg_trades(df)
samples.append(aggregated_sample)
return pd.DataFrame(samples)
def agg_trades(data_frame):
last_row = data_frame.iloc[-1]
timestamp = last_row.timestamp
last_price = last_row.price
ticks = len(data_frame)
# Is there more than 1 trade to aggregate?
if ticks > 1:
volume = data_frame.volume.sum()
notional = data_frame.notional.sum()
else:
volume = last_row.volume
notional = last_row.notional
data = {
"timestamp": timestamp,
"nanoseconds": last_row.nanoseconds,
"price": last_price,
"volume": volume,
"notional": notional,
"ticks": ticks,
"tickRule": last_row.tickRule,
}
if "symbol" in data_frame.columns:
data.update({"symbol": last_row.symbol})
return data
def aggregate_filter(data_frame, attr=VOLUME, value=1000):
samples = []
total_rows = len(data_frame)
if len(data_frame):
next_index = 0
for row in data_frame.itertuples():
index = row.Index
if getattr(row, attr) >= value:
if index == 0:
sample = data_frame.loc[:index]
else:
sample = data_frame.loc[next_index:index]
samples.append(agg_filter(sample, is_min_val=True))
next_index = index + 1
if next_index < total_rows:
sample = data_frame.loc[next_index:]
samples.append(agg_filter(sample))
return samples
def agg_filter(df, is_min_val=False):
data = {}
if is_min_val:
last_row = df.iloc[-1]
data.update(
{
"timestamp": last_row.timestamp,
"price": last_row.price,
"volume": last_row.volume,
"notional": last_row.notional,
"tickRule": last_row.tickRule,
"ticks": last_row.ticks,
}
)
if not is_min_val or len(df) > 1:
buy_df = df[df.tickRule == 1]
data.update(
{
"high": df.price.max(),
"low": df.price.min(),
"totalBuyVolume": buy_df.volume.sum(),
"totalVolume": df.volume.sum(),
"totalBuyNotional": buy_df.notional.sum(),
"totalNotional": df.notional.sum(),
"totalBuyTicks": buy_df.ticks.sum(),
"totalTicks": df.ticks.sum(),
}
)
return data
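# Usage sketch (expects a DataFrame with the columns referenced above:
# timestamp, nanoseconds, price, volume, notional, tickRule, optionally symbol):
#
#   aggregated = aggregate_trades(raw_trades_df)
#   bars = aggregate_filter(aggregated, attr=VOLUME, value=1000)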
|
StarcoderdataPython
|
3286345
|
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
import matplotlib.animation as animation
from math import pi
from ...spot_micro_stick_figure import SpotMicroStickFigure
from ...utilities import spot_micro_kinematics as smk
d2r = pi/180
r2d = 180/pi
def update_lines(num, coord_data, lines):
line_to_leg__and_link_dict = {4:(0,0),
5:(0,1),
6:(0,2),
7:(1,0),
8:(1,1),
9:(1,2),
10:(2,0),
11:(2,1),
12:(2,2),
13:(3,0),
14:(3,1),
15:(3,2)}
for line, i in zip(lines, range(len(lines))):
if i < 4:
# First four lines are the square body
if i == 3:
ind = -1
else:
ind = i
x_vals = [coord_data[num][ind][0][0], coord_data[num][ind+1][0][0]]
y_vals = [coord_data[num][ind][0][1], coord_data[num][ind+1][0][1]]
z_vals = [coord_data[num][ind][0][2], coord_data[num][ind+1][0][2]]
# NOTE: there is no .set_data() for 3 dim data...
line.set_data(x_vals,z_vals)
line.set_3d_properties(y_vals)
# Next 12 lines are legs
# Leg 1, link 1, link 2, link 3
# Leg 2, link 1, link 2, link 3...
else:
leg_num = line_to_leg__and_link_dict[i][0]
link_num = line_to_leg__and_link_dict[i][1]
x_vals = [coord_data[num][leg_num][link_num][0], coord_data[num][leg_num][link_num+1][0]]
y_vals = [coord_data[num][leg_num][link_num][1], coord_data[num][leg_num][link_num+1][1]]
z_vals = [coord_data[num][leg_num][link_num][2], coord_data[num][leg_num][link_num+1][2]]
line.set_data(x_vals,z_vals)
line.set_3d_properties(y_vals)
return lines
# Attaching 3D axis to the figure
fig = plt.figure()
ax = p3.Axes3D(fig)
ax.set_xlabel('X')
ax.set_ylabel('Z')
ax.set_zlabel('Y')
ax.set_xlim3d([-0.2, 0.2])
ax.set_zlim3d([0, 0.4])
ax.set_ylim3d([-0.2,0.2])
# Set azimtuth and elevation of plot
# ax.view_init(elev=135,azim=0)
# Instantiate spot micro stick figure obeject
sm = SpotMicroStickFigure(x=0,y=0.14,z=0, theta=00*d2r)
# Define absolute position for the legs
l = sm.body_length
w = sm.body_width
l1 = sm.hip_length
l2 = sm.upper_leg_length
l3 = sm.lower_leg_length
desired_p4_points = np.array([ [-l/2, 0, w/2 + l1],
[ l/2 , 0, w/2 + l1],
[ l/2 , 0, -w/2 - l1],
[-l/2 , 0, -w/2 - l1] ])
sm.set_absolute_foot_coordinates(desired_p4_points)
# Set a pitch angle
sm.set_body_angles(theta=00*d2r)
# Get leg coordinates
coords = sm.get_leg_coordinates()
# Initialize empty list top hold line objects
lines = []
# Construct the body of 4 lines from the first point of each leg (the four corners of the body)
for i in range(4):
# For last leg, connect back to first leg point
if i == 3:
ind = -1
else:
ind = i
# Due to mplot3d rotation and view limitations, swap y and z to make the stick figure
# appear oriented better
x_vals = [coords[ind][0][0], coords[ind+1][0][0]]
y_vals = [coords[ind][0][1], coords[ind+1][0][1]]
z_vals = [coords[ind][0][2], coords[ind+1][0][2]]
lines.append(ax.plot(x_vals,z_vals,y_vals,color='k')[0])
# Plot color order for leg links: (hip, upper leg, lower leg)
plt_colors = ['r','c','b']
for leg in coords:
for i in range(3):
# Due to mplot3d rotation and view limitations, swap y and z to make the stick figure
# appear oriented better
x_vals = [leg[i][0], leg[i+1][0]]
y_vals = [leg[i][1], leg[i+1][1]]
z_vals = [leg[i][2], leg[i+1][2]]
lines.append(ax.plot(x_vals,z_vals,y_vals,color=plt_colors[i])[0])
# Create data of robot pitching up and down
num_angles = 25
pitch_angles = np.linspace(-30*d2r,30*d2r,num_angles)
coord_data = []
for theta in pitch_angles:
# Set a pitch angle
sm.set_body_angles(theta=theta)
x = sm.get_leg_angles()
# Get leg coordinates and append to data list
coord_data.append(sm.get_leg_coordinates())
coord_data = coord_data + coord_data[::-1]
# Creating the Animation object
line_ani = animation.FuncAnimation(fig, update_lines, num_angles*2, fargs=(coord_data, lines),
interval=75, blit=False)
plt.show()
|
StarcoderdataPython
|
9637766
|
import pytest
import seval
def test_unsafe():
with pytest.raises(ValueError):
seval.safe_eval('pow(2,3)')
|
StarcoderdataPython
|
390320
|
<reponame>wyaadarsh/LeetCode-Solutions
class Solution:
def maxProfit(self, k, prices):
"""
:type k: int
:type prices: List[int]
:rtype: int
"""
n = len(prices)
if n == 0:
return 0
if k >= n // 2:
return sum(max(0, prices[i + 1] - prices[i]) for i in range(n - 1))
dp = [[0] * n for _ in range(k + 1)]
for i in range(1, k + 1):
mx = -prices[0]
for j in range(1, n):
dp[i][j] = max(dp[i][j - 1], prices[j] + mx)
mx = max(mx, dp[i - 1][j - 1] - prices[j])
return dp[-1][-1]
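# Hedged usage sketch (not part of the original solution): a quick sanity check
# of the DP above on a small made-up price series; with k=2 and these prices the
# expected maximum profit is 7 (buy at 2, sell at 6, buy at 0, sell at 3).
if __name__ == "__main__":
    assert Solution().maxProfit(2, [3, 2, 6, 5, 0, 3]) == 7
    assert Solution().maxProfit(2, []) == 0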
|
StarcoderdataPython
|
9605241
|
import numpy as np
import librosa
import os
import soundfile as sf
import matplotlib.pyplot as plt
"""
This file takes care of computing the inputs and saving the results in the training/testing tasks
"""
def getStats(feature):
"""
:param feature: np array in 3 dimensions
:return: the mean and the std of this array through the axis 1 and 2
"""
return np.array([np.mean(feature, axis=(1, 2)), np.std(feature, axis=(1, 2))])
def getAllInputs(filename):
"""
:param filename: a .wav file
:return: the inputs (not normalized) for the neural network
"""
audio, sr_ = sf.read(filename) # (4800000, 2)
left = audio[::2, 0]
right = audio[::2, 1]
# waveform - (2, 120000)
waveform = audio.T[:, ::4]
# spectrogram - (2, 1025, 469)
spectrogram = np.abs(np.array([librosa.core.stft(left),
librosa.core.stft(right)]))
# rms - (2, 1, 469)
rms = np.array([librosa.feature.rms(left), # Root Mean Square
librosa.feature.rms(right)])
# zcr - (2, 1, 469)
zcr = np.array([librosa.feature.zero_crossing_rate(left), # Zero Crossing Rate
librosa.feature.zero_crossing_rate(right)])
# sc - (2, 1, 469)
sc = np.array([librosa.feature.spectral_centroid(left), # Spectral Centroid
librosa.feature.spectral_centroid(right)])
# sr - (2, 1, 469)
sr = np.array([librosa.feature.spectral_rolloff(left), # Spectral Roll-of
librosa.feature.spectral_rolloff(right)])
# sfm - (2, 1, 469)
sfm = np.array([librosa.feature.spectral_flatness(left), # Spectral Flatness Mesure
librosa.feature.spectral_flatness(right)])
# mel_spectrogram - (2, 100, 469)
n_mels = 100
mel_spectrogram = np.array([librosa.feature.melspectrogram(y=left, sr=sr_, n_mels=n_mels), # (2, 100, 469)
librosa.feature.melspectrogram(y=right, sr=sr_, n_mels=n_mels)])
logmel_spectrogram = librosa.core.amplitude_to_db(mel_spectrogram)
# getStats - (10,)
stats = np.concatenate([getStats(rms), getStats(zcr),
getStats(sc), getStats(sr), getStats(sfm)])
#### Reshape for the neural network #####
# Waveform
waveform = np.reshape(waveform, (2, 120000))
# spectrogram
spectrogram = np.reshape(spectrogram, (2, 1025, 469)) # Not used
# Features
features = np.concatenate(
[
np.reshape(rms, (2, 469)),
np.reshape(zcr, (2, 469)),
np.reshape(sc, (2, 469)),
np.reshape(sr, (2, 469)),
np.reshape(sfm, (2, 469))
],
axis=0
)
features = np.reshape(features, (10, 469))
# Features mstd
fmstd = np.reshape(stats.T, (2, 10)) # (right+left, 2 * nb_features)
##### Create datas #####
data = (
waveform, # (2, 120000)
logmel_spectrogram, # (2, 1025, 469), for mel : (2, 100, 469)
features, # (10, 469)
fmstd # (2, 10)
)
# data = (waveform, spectrogram, rms, zcr, mel_spectrogram, stats)
return data
#### Function to set up the environment
def setEnviromnent():
"""
Creates the folders for the Generated Dataset
"""
if not os.path.isdir('./GeneratedDataset'):
os.mkdir('./GeneratedDataset')
if not os.path.isdir('./GeneratedDataset/train'):
os.mkdir('./GeneratedDataset/train')
if not os.path.isdir('./GeneratedDataset/test'):
os.mkdir('./GeneratedDataset/test')
def setLightEnviromnent():
"""
    Creates the folders for the Generated Dataset with a small number of data (for testing on CPU)
"""
if not os.path.isdir('./GeneratedLightDataset'):
os.mkdir('./GeneratedLightDataset')
if not os.path.isdir('./GeneratedLightDataset/train'):
os.mkdir('./GeneratedLightDataset/train')
if not os.path.isdir('./GeneratedLightDataset/test'):
os.mkdir('./GeneratedLightDataset/test')
def createInputParametersFile(template, fileName, dn_parameters):
"""
    :param template: The template of the dictionary input_parameters
:param fileName: The path where we want to save it
:param dn_parameters: the parameters of the neural network
    Creates the file "fileName" with the dictionary input_parameters filled knowing the architecture of the
Neural Network (known with dn_parameters)
"""
waveform, spectrogram, features, fmstd = getAllInputs('./Dataset/train/audio/0.wav')
template['spectrum']['nb_channels'], template['spectrum']['h'], template['spectrum']['w'] = spectrogram.shape
template['audio']['nb_channels'], template['audio']['len'] = waveform.shape
template['features']['nb_channels'], template['features']['len'] = features.shape
template['fmstd']['len'] = fmstd.shape[0] * fmstd.shape[1]
template['final']['len'] = dn_parameters['spectrum']['size_fc'] + dn_parameters['audio']['size_fc'] + \
dn_parameters['features']['size_fc'] + dn_parameters['fmstd']['layers_size'][-1]
np.save(fileName, template)
def saveFigures(folder, name, summaryDict):
"""
:param folder: the folder where we want to save it
:param name: the name of the figures
:param summaryDict: the data of the training we want to plot
Save the plot of the evolution of the training loss and the testing loss through the epochs
    Save the plot of the evolution of the training accuracy and the testing accuracy through the epochs
"""
loss_train = summaryDict['loss_train']
loss_test = summaryDict['loss_test']
acc_train = summaryDict['acc_train']
acc_test = summaryDict['acc_test']
nb_epochs = summaryDict['nb_epochs']
best_epoch = summaryDict['best_model']['epoch']
best_loss_train = summaryDict['best_model']['loss_train']
best_acc_train = summaryDict['best_model']['acc_train']
best_loss_test = summaryDict['best_model']['loss_test']
best_acc_test = summaryDict['best_model']['acc_test']
min_loss = min(min(loss_train), min(loss_test))
max_loss = max(max(loss_train), max(loss_test))
min_acc = min(min(acc_train), min(acc_test))
max_acc = max(max(acc_train), max(acc_test))
x = np.arange(1, nb_epochs + 1)
# Save of the loss
plt.figure()
plt.plot(x, loss_train, 'steelblue', label='Training Loss')
plt.plot(x, loss_test, 'darkorange', label='Testing Loss')
plt.title('Variation of the Loss through the epochs\n' + name)
plt.xlabel('Epoch')
plt.ylabel('Loss value')
plt.plot([1, nb_epochs], [best_loss_train, best_loss_train], 'steelblue', linestyle='--',
label='Model training loss : {0}'.format(round(best_loss_train, 4)))
plt.plot([1, nb_epochs], [best_loss_test, best_loss_test], color='darkorange', linestyle='--',
label='Model testing loss : {0}'.format(round(best_loss_test, 4)))
plt.plot([best_epoch, best_epoch], [min_loss, max_loss], color='dimgray', linestyle='--',
label='Best Epoch : {0}'.format(best_epoch))
plt.legend()
plt.grid()
plt.savefig(os.path.join(folder, 'LossFigure_' + name + '.png'))
# Save the accuracy
plt.figure()
plt.plot(x, acc_train, 'steelblue', label='Training Accuracy')
plt.plot(x, acc_test, 'darkorange', label='Testing Accuracy')
plt.title('Variation of the Accuracy through the epochs\n' + name)
plt.xlabel('Epoch')
plt.ylabel('Accuracy value (%)')
plt.plot([1, nb_epochs], [best_acc_train, best_acc_train], color='steelblue', linestyle='--',
label='Model train accuracy : {0}'.format(round(best_acc_train, 2)))
plt.plot([1, nb_epochs], [best_acc_test, best_acc_test], color='darkorange', linestyle='--',
label='Model test accuracy : {0}'.format(round(best_acc_test, 2)))
plt.plot([best_epoch, best_epoch], [min_acc, max_acc], color='dimgray', linestyle='--',
label='Best Epoch : {0}'.format(best_epoch))
plt.legend()
plt.grid()
plt.savefig(os.path.join(folder, 'AccuracyFigure_' + name + '.png'))
def saveText(folder, name, summaryDict):
"""
:param folder: the folder where we want to save it
:param name: the name of the figures
:param summaryDict: the data of the training we want to plot
    Save a text file which summarizes the saved model
"""
loss_train = summaryDict['best_model']['loss_train']
loss_test = summaryDict['best_model']['loss_test']
acc_train = summaryDict['best_model']['acc_train']
acc_test = summaryDict['best_model']['acc_test']
nb_epochs = summaryDict['nb_epochs']
best_epoch = summaryDict['best_model']['epoch']
input_used = summaryDict['inputs_used']
iu_txt = ''
flag = False
if input_used[0] == '1':
iu_txt += 'waveform'
flag = True
if input_used[1] == '1':
if flag:
iu_txt += ', spectrogram'
else:
iu_txt += 'spectrogram'
flag = True
if input_used[2] == '1':
if flag:
iu_txt += ', features'
else:
iu_txt += 'features'
flag = True
if input_used[3] == '1':
if flag:
iu_txt += ', fmstd'
else:
iu_txt += 'fmstd'
flag = True
text = 'Summary of {5} :\n\n' \
'Training Loss : {0}\n' \
'Testing Loss : {1}\n' \
'Training Accuracy : {2}\n' \
'Testing Accuracy : {3}\n' \
'Train Epochs : {4}\n' \
'Best Epoch : {8}\n\n' \
'Inputs Used : {7}\t ({6})'\
.format(
loss_train, loss_test, acc_train, acc_test, nb_epochs, name, iu_txt, input_used, best_epoch
)
with open(os.path.join(folder, 'Summary_' + name + '.txt'), 'a') as f:
f.write(text)
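# Hedged usage sketch (not part of the original project): a minimal summary
# dictionary with made-up numbers, containing just the keys read by saveText and
# saveFigures above; the real dictionary is produced by the training loop.
if __name__ == '__main__':
    example_summary = {
        'nb_epochs': 3,
        'inputs_used': '1010',
        'loss_train': [1.0, 0.7, 0.5],
        'loss_test': [1.1, 0.8, 0.6],
        'acc_train': [40.0, 55.0, 70.0],
        'acc_test': [38.0, 52.0, 65.0],
        'best_model': {
            'epoch': 3,
            'loss_train': 0.5,
            'loss_test': 0.6,
            'acc_train': 70.0,
            'acc_test': 65.0,
        },
    }
    saveText('.', 'example_run', example_summary)
    saveFigures('.', 'example_run', example_summary)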
|
StarcoderdataPython
|
8102330
|
<filename>Trakttv.bundle/Contents/Libraries/Shared/oem_framework/models/show.py
from oem_framework.core.helpers import get_attribute
from oem_framework.models.core import BaseMedia, ModelRegistry
import logging
log = logging.getLogger(__name__)
class Show(BaseMedia):
__slots__ = ['names', 'mappings', 'seasons']
__attributes__ = ['names', 'mappings', 'seasons']
def __init__(self, collection, identifiers, names, mappings=None, seasons=None, **kwargs):
super(Show, self).__init__(collection, 'show', identifiers, **kwargs)
self.names = self._parse_names(collection, identifiers, names) or {}
self.mappings = mappings or []
self.seasons = seasons or {}
def to_dict(self, key=None, flatten=True):
result = super(Show, self).to_dict(key=key, flatten=flatten)
if not flatten:
return result
# Flatten "names" attribute
self._flatten_names(self.collection, result)
return result
@classmethod
def from_dict(cls, collection, data, **kwargs):
touched = set()
        # Construct show
show = cls(
collection,
identifiers=get_attribute(touched, data, 'identifiers'),
names=set(get_attribute(touched, data, 'names', [])),
supplemental=get_attribute(touched, data, 'supplemental', {}),
**get_attribute(touched, data, 'parameters', {})
)
# Construct seasons
if 'seasons' in data:
show.seasons = dict([
(k, ModelRegistry['Season'].from_dict(collection, v, key=k, parent=show))
for k, v in get_attribute(touched, data, 'seasons').items()
])
# Ensure all attributes were touched
omitted = [
k for k in (set(data.keys()) - touched)
if not k.startswith('_')
]
if omitted:
log.warn('Show.from_dict() omitted %d attribute(s): %s', len(omitted), ', '.join(omitted))
return show
def __repr__(self):
if self.identifiers and self.names:
service = list(self.identifiers.keys())[0]
return '<Show %s: %r, names: %r>' % (
service,
self.identifiers[service],
self.names
)
if self.identifiers:
service = list(self.identifiers.keys())[0]
return '<Show %s: %r>' % (
service,
self.identifiers[service]
)
if self.names:
return '<Show names: %r>' % (
self.names
)
return '<Show>'
|
StarcoderdataPython
|
11302594
|
<reponame>JohnyEngine/CNC
import iso_read as iso
import sys
# just use the iso reader
class Parser(iso.Parser):
def __init__(self, writer):
iso.Parser.__init__(self, writer)
|
StarcoderdataPython
|
255629
|
import pytest
from src.config import env
class TestTesting:
"""
make sure ENV variable is testing
"""
def test_testing_environment(self):
assert env.IS_TESTING == True
|
StarcoderdataPython
|
11263388
|
class SBDResult(object):
def __init__(self, original_data: list) -> None:
self.original_data = original_data
self.scores = []
self.normal_behavior = []
self.normalized_data = []
def set_computed_values(self, scores: list, normal_behavior: list, normalized_data: list) -> None:
self.scores = scores
self.normal_behavior = normal_behavior
self.normalized_data = normalized_data
|
StarcoderdataPython
|
9782546
|
<gh_stars>0
"""Utility functions for mupub.
"""
__docformat__ = 'reStructuredText'
import os
import argparse
import sys
from clint.textui.validators import ValidationError
import stat
import mupub
def _find_files(folder, outlist):
for entry in os.listdir(path=folder):
# ignore hidden and backup files
if entry.startswith('.') or entry.endswith('~'):
continue
path = os.path.join(folder, entry)
stflags = os.stat(path).st_mode
if stat.S_ISREG(stflags):
outlist.append(path)
elif stat.S_ISDIR(stflags):
# recurse to get files under this folder
outlist = _find_files(path, outlist)
return outlist
def find_files(folder):
"""Return a list of all files in a folder
:param str folder: The top-most folder.
:returns: list of files under folder
:rtype: [str]
"""
return _find_files(folder, [])
def resolve_lysfile(infile):
if os.path.exists(infile):
return infile
base,infile = mupub.resolve_input(infile)
return os.path.join(base+'-lys', infile)
def resolve_input(infile=None):
"""Determine the file naming components for mutopia.
:param str infile: A candidate input file.
:returns: base (usually containing folder name) and infile, as
strings.
:rtype: Tuple
A convenience routine to determine the base name for a mutopia
piece. The mechanism assumes the user's current working directory
is appropriate to the build being acted on. The algorithm then
works on the naming convention:
- If infile==None, determine base and infile from the current
working directory. This will allow for the possibility of the
actual files to be in an ``-lys`` subfolder.
- If infile is given, return the basename of the current working
folder and infile as a tuple.
"""
base = os.path.basename(os.getcwd())
if not infile:
if os.path.exists(base+'.ly'):
infile = base+'.ly'
elif os.path.exists(base+'-lys'):
candidate = os.path.join(base+'-lys', base+'.ly')
if os.path.exists(candidate):
infile = candidate
return base,infile
_BOOLEANS = {'y': True,
'yes': True,
'true': True,
'1': True,
'n': False,
'no': False,
'false': False,
'0': False
}
class BooleanValidator(object):
"""A mechanism to validate valid boolean input.
"""
_message = 'Enter a valid boolean.'
def __init__(self, message=None):
if message is not None:
self._message = message
def __call__(self, value):
try:
return _BOOLEANS[value.strip().lower()]
except KeyError:
raise ValidationError(self._message)
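# Hedged usage sketch (not part of mupub itself): BooleanValidator maps common
# yes/no spellings to Python booleans and raises clint's ValidationError for
# anything it does not recognise.
if __name__ == '__main__':
    validate = BooleanValidator()
    assert validate('Yes ') is True
    assert validate('0') is False
    try:
        validate('maybe')
    except ValidationError:
        print('rejected as expected')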
|
StarcoderdataPython
|
9636906
|
<reponame>RamonvdW/dof
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 <NAME>.
# All rights reserved.
# Licensed under BSD-3-Clause-Clear. See LICENSE file for details.
from django.views.generic import ListView
from django.contrib.auth.mixins import UserPassesTestMixin
from django.db.models import Q, Value
from django.db.models.functions import Concat
from django.http import HttpResponseRedirect
from django.urls import reverse
from .models import LogboekRegel
from urllib.parse import quote_plus
TEMPLATE_LOGBOEK_REST = 'logboek/rest.dtl'
TEMPLATE_LOGBOEK_UITROL = 'logboek/uitrol.dtl'
TEMPLATE_LOGBOEK_ACCOUNTS = 'logboek/accounts.dtl'
RESULTS_PER_PAGE = 50
class LogboekBasisView(UserPassesTestMixin, ListView):
    """ This view shows the logbook """
# class variables shared by all instances
template_name = "" # must override
base_url = "" # must override
paginate_by = RESULTS_PER_PAGE # enable Paginator built into ListView
def test_func(self):
""" called by the UserPassesTestMixin to verify the user has permissions to use this view """
return self.request.user.is_authenticated and self.request.user.is_staff
    def handle_no_permission(self):
        """ user has no access --> redirect to the Plein """
return HttpResponseRedirect(reverse('Plein:plein'))
def _make_link_urls(self, context):
        # prepare a row with next/previous links
        # and direct links to up to 10 pages
links = list()
num_pages = context['paginator'].num_pages
page_nr = context['page_obj'].number
# previous
if page_nr > 1:
tup = ('vorige', self.base_url + '?page=%s' % (page_nr - 1))
links.append(tup)
else:
tup = ('vorige_disable', '')
links.append(tup)
        # block of 10 pages; current page in the middle
range_start = page_nr - 5
range_end = range_start + 9
if range_start < 1:
range_end += (1 - range_start) # 1-0=1, 1--1=2, 1--2=3, etc.
range_start = 1
if range_end > num_pages:
range_end = num_pages
for pgnr in range(range_start, range_end+1):
tup = ('%s' % pgnr, self.base_url + '?page=%s' % pgnr)
links.append(tup)
# for
# next
if page_nr < num_pages:
tup = ('volgende', self.base_url + '?page=%s' % (page_nr + 1))
links.append(tup)
else:
tup = ('volgende_disable', '')
links.append(tup)
return links
def get_queryset(self):
        # get the focused queryset and filter it on an optional search term
qset = self.get_focused_queryset()
zoekterm = self.request.GET.get('zoekterm', '')
if zoekterm:
qset = (qset
.annotate(hele_naam=Concat('actie_door_account__first_name',
Value(' '),
'actie_door_account__last_name'))
.filter(Q(gebruikte_functie__icontains=zoekterm) |
Q(actie_door_account__hele_naam__icontains=zoekterm) |
Q(activiteit__icontains=zoekterm)))
return qset
def get_focused_queryset(self):
# must be implemented by sub-class
raise NotImplementedError() # pragma: no cover
def get_context_data(self, **kwargs):
""" called by the template system to get the context data for the template """
context = super().get_context_data(**kwargs)
context['filter'] = self.filter
if context['is_paginated']:
context['page_links'] = self._make_link_urls(context)
context['active'] = str(context['page_obj'].number)
        # determine extra information, but only for the active page
for obj in context['object_list']:
obj.door = obj.bepaal_door()
# for
context['url_rest'] = reverse('Logboek:rest')
context['url_uitrol'] = reverse('Logboek:uitrol')
context['url_accounts'] = reverse('Logboek:accounts')
context['filter_url'] = self.base_url
        # show an extra button to clear the search term
zoekterm = self.request.GET.get('zoekterm', '')
if zoekterm:
context['zoekterm'] = zoekterm
context['unfiltered_url'] = reverse('Logboek:%s' % self.filter)
zoekterm = "?zoekterm=%s" % quote_plus(zoekterm)
context['url_rest'] += zoekterm
context['url_uitrol'] += zoekterm
context['url_accounts'] += zoekterm
return context
class LogboekRestView(LogboekBasisView):
    """ This view shows the entire logbook """
template_name = TEMPLATE_LOGBOEK_REST
filter = 'rest'
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.base_url = reverse('Logboek:alles')
    def get_focused_queryset(self):
        """ return the data for the template view """
return (LogboekRegel
.objects
.select_related('actie_door_account')
.exclude(Q(gebruikte_functie='maak_beheerder') | # Accounts
Q(gebruikte_functie='Wachtwoord') | # Accounts
Q(gebruikte_functie='Inloggen') |
Q(gebruikte_functie='Inlog geblokkeerd') |
Q(gebruikte_functie='OTP controle') |
Q(gebruikte_functie='Bevestig e-mail') |
Q(gebruikte_functie='Uitrol')) # Uitrol
.order_by('-toegevoegd_op'))
class LogboekAccountsView(LogboekBasisView):
    """ This view shows the logbook entries related to Accounts: creation, login, OTP, etc. """
template_name = TEMPLATE_LOGBOEK_ACCOUNTS
filter = 'accounts'
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.base_url = reverse('Logboek:accounts')
    def get_focused_queryset(self):
        """ return the data for the template view """
return (LogboekRegel
.objects
.select_related('actie_door_account')
.filter(Q(gebruikte_functie='maak_beheerder') |
Q(gebruikte_functie='Inloggen') |
Q(gebruikte_functie='Inlog geblokkeerd') |
Q(gebruikte_functie='OTP controle') |
Q(gebruikte_functie='Bevestig e-mail') |
Q(gebruikte_functie='Wachtwoord'))
.order_by('-toegevoegd_op'))
class LogboekUitrolView(LogboekBasisView):
    """ This view shows the logbook entries related to software deployment """
template_name = TEMPLATE_LOGBOEK_UITROL
filter = 'uitrol'
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.base_url = reverse('Logboek:uitrol')
    def get_focused_queryset(self):
        """ return the data for the template view """
return (LogboekRegel
.objects
.select_related('actie_door_account')
.filter(gebruikte_functie='Uitrol')
.order_by('-toegevoegd_op'))
# end of file
|
StarcoderdataPython
|
3508673
|
import ConfigParser
import re
from elfstatsd.log_record import LogRecord
from elfstatsd import settings
import pytest
from elfstatsd.storage.storage import MetadataStorage, RecordsStorage, ResponseCodesStorage, PatternsMatchesStorage
from elfstatsd.storage.called_method_storage import CalledMethodStorage
#storage key, which is the highest-level key used for differentiating data related to the different access log files
SK = 'apache_log'
@pytest.fixture(scope='function')
def response_codes_storage_setup(monkeypatch):
"""Monkeypatch settings setup for testing ResponseCodesStorage class."""
monkeypatch.setattr(settings, 'RESPONSE_CODES', [200, 404, 500])
return monkeypatch
@pytest.fixture(scope='function')
def called_method_storage_setup(monkeypatch):
    """Monkeypatch settings setup for testing CalledMethodStorage class."""
monkeypatch.setattr(settings, 'RESPONSE_CODES', [200, 404, 500])
monkeypatch.setattr(settings, 'LATENCY_PERCENTILES', [50, 90, 99])
monkeypatch.setattr(settings, 'VALID_REQUESTS',
[
re.compile(r'^/data/(?P<group>[\w.]+)/(?P<method>[\w.]+)[/?%&]?'),
])
return monkeypatch
@pytest.fixture(scope='function')
def patterns_storage_setup(monkeypatch):
    """Monkeypatch settings setup for testing PatternsMatchesStorage class."""
monkeypatch.setattr(settings, 'PATTERNS_TO_EXTRACT',
[
{'name': 'uid',
'patterns': [
re.compile(r'/male_user/(?P<pattern>[\w.]+)'),
re.compile(r'/female_user/(?P<pattern>[\w.]+)'),
]
}
])
return monkeypatch
class TestMetadataStorage():
def test_storage_metadata_get_unset(self):
storage = MetadataStorage()
with pytest.raises(KeyError) as exc_info:
storage.get(SK, 'record_key')
assert exc_info.value.message.find('record_key') > -1
def test_storage_metadata_set_empty(self):
storage = MetadataStorage()
storage.set(SK, 'record_key', 'value')
assert storage.get(SK, 'record_key') == 'value'
def test_storage_metadata_set_twice(self):
storage = MetadataStorage()
storage.set(SK, 'record_key', 'value1')
storage.set(SK, 'record_key', 'value2')
assert storage.get(SK, 'record_key') == 'value2'
def test_storage_metadata_reset(self):
storage = MetadataStorage()
storage.set(SK, 'record_key1', 'value1')
storage.set(SK, 'record_key2', 'value2')
storage.reset(SK)
assert len(storage._storage[SK].keys()) == 0
def test_storage_metadata_dump(self):
storage = MetadataStorage()
dump = ConfigParser.RawConfigParser()
storage.set(SK, 'record_key1', 'value1')
storage.set(SK, 'record_key2', 'value2')
storage.dump(SK, dump)
assert len(dump.sections()) == 1
assert dump.has_section(storage.name)
assert len(dump.options(storage.name)) == 2
assert dump.has_option(storage.name, 'record_key1')
assert dump.has_option(storage.name, 'record_key2')
def test_storage_metadata_update_time(self):
time1 = '2013-10-09 12:00:00'
time2 = '2013-10-09 12:00:01'
storage = MetadataStorage()
storage.update_time(SK, time1)
storage.update_time(SK, time2)
assert storage.get(SK, 'first_record') == time1
assert storage.get(SK, 'last_record') == time2
class TestRecordsStorage():
def test_storage_records_reset(self):
storage = RecordsStorage()
assert len(storage._storage[SK].keys()) == 0
storage.reset(SK)
assert len(storage._storage[SK].keys()) == len(storage.record_statuses)
def test_storage_records_inc_counter(self):
storage = RecordsStorage()
storage.inc_counter(SK, 'record_key')
storage.inc_counter(SK, 'record_key')
storage.inc_counter(SK, 'record_key')
storage.inc_counter(SK, 'record_key2')
assert storage.get(SK, 'record_key') == 3
assert storage.get(SK, 'record_key2') == 1
def test_storage_records_dump(self):
storage = RecordsStorage()
dump = ConfigParser.RawConfigParser()
storage.set(SK, 'parsed', 50)
storage.set(SK, 'skipped', 50)
storage.dump(SK, dump)
assert len(dump.sections()) == 1
assert dump.has_section(storage.name)
assert len(dump.options(storage.name)) == 2
assert dump.has_option(storage.name, 'parsed')
assert dump.has_option(storage.name, 'skipped')
@pytest.mark.usefixtures('response_codes_storage_setup')
class TestResponseCodesStorage():
def test_storage_response_codes_reset(self, monkeypatch):
response_codes_storage_setup(monkeypatch)
storage = ResponseCodesStorage()
assert len(storage._storage[SK].keys()) == 0
storage.reset(SK)
assert len(storage._storage[SK].keys()) == len(storage.permanent_codes)
def test_storage_response_codes_dump(self, monkeypatch):
response_codes_storage_setup(monkeypatch)
storage = ResponseCodesStorage()
dump = ConfigParser.RawConfigParser()
section = 'response_codes'
storage.reset(SK)
storage.set(SK, '200', 10)
storage.inc_counter(SK, '200')
storage.inc_counter(SK, '200')
storage.inc_counter(SK, '502')
storage.dump(SK, dump)
assert len(dump.sections()) == 1
assert dump.has_section(section)
assert len(dump.options(section)) == 4
assert dump.has_option(section, 'rc200')
assert dump.has_option(section, 'rc404')
assert dump.has_option(section, 'rc502')
assert dump.get(section, 'rc200') == 12
assert dump.get(section, 'rc502') == 1
@pytest.mark.usefixtures('patterns_storage_setup')
class TestPatternsMatchesStorage():
def test_storage_patterns_set(self):
storage = PatternsMatchesStorage()
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'yyy')
storage.set(SK, 'pattern', 'yyy')
assert len(storage.get(SK, 'pattern').keys()) == 2
assert storage.get(SK, 'pattern')['xxx'] == 3
assert storage.get(SK, 'pattern')['yyy'] == 2
def test_storage_patterns_dump(self, monkeypatch):
patterns_storage_setup(monkeypatch)
storage = PatternsMatchesStorage()
dump = ConfigParser.RawConfigParser()
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'yyy')
storage.set(SK, 'pattern', 'yyy')
storage.dump(SK, dump)
assert len(dump.sections()) == 1
assert dump.has_section(storage.name)
assert dump.get(storage.name, 'pattern.total') == 5
assert dump.get(storage.name, 'pattern.distinct') == 2
#Make sure all patterns from the settings are saved
assert dump.get(storage.name, 'uid.total') == 'U'
assert dump.get(storage.name, 'uid.distinct') == 'U'
def test_storage_patterns_reset(self):
storage = PatternsMatchesStorage()
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'xxx')
storage.set(SK, 'pattern', 'yyy')
storage.set(SK, 'pattern', 'yyy')
assert len(storage.get(SK, 'pattern').keys()) == 2
storage.reset(SK)
assert len(storage.get(SK, 'pattern').keys()) == 0
@pytest.mark.usefixtures('called_method_storage_setup')
class TestCalledMethodStorage():
def test_storage_called_method_set(self, monkeypatch):
called_method_storage_setup(monkeypatch)
storage = CalledMethodStorage()
record = LogRecord()
record.response_code = 404
record.latency = 100
storage.set(SK, 'some_call', record)
record.response_code = 200
record.latency = 200
storage.set(SK, 'some_call', record)
method = storage.get(SK, 'some_call')
assert len(method.calls) == 2
assert method.min == 100
assert method.max == 200
assert method.response_codes.get(SK, 404) == 1
assert method.response_codes.get(SK, 200) == 1
def test_storage_called_method_reset(self, monkeypatch):
called_method_storage_setup(monkeypatch)
storage = CalledMethodStorage()
record = LogRecord()
record.response_code = 404
record.latency = 100
storage.set(SK, 'some_call', record)
record.response_code = 200
record.latency = 200
storage.set(SK, 'some_call', record)
storage.reset(SK)
assert len(storage._storage[SK]) == 1
assert 'some_call' in storage._storage[SK]
assert len(storage._storage[SK]['some_call'].response_codes._storage[SK].values()) == 3
storage.reset('some_SK')
assert 'some_SK' in storage._storage
def test_storage_called_method_dump(self, monkeypatch):
called_method_storage_setup(monkeypatch)
storage = CalledMethodStorage()
storage.reset(SK)
record = LogRecord()
record.raw_request = '/data/some/call/'
record.response_code = 401
record.latency = 100
storage.set(SK, 'some_call', record)
record.response_code = 201
record.latency = 200
storage.set(SK, 'some_call', record)
method = storage.get(SK, 'some_call')
method.name = 'some_stuff'
dump = ConfigParser.RawConfigParser()
storage.dump(SK, dump)
section = 'method_some_stuff'
assert len(dump.sections()) == 1
assert dump.has_section(section)
assert dump.get(section, 'calls') == 2
assert dump.get(section, 'shortest') == 100
assert dump.get(section, 'average') == 150
assert dump.get(section, 'longest') == 200
assert dump.has_option(section, 'p50')
assert dump.has_option(section, 'p90')
assert dump.has_option(section, 'p99')
assert dump.has_option(section, 'rc200')
assert dump.has_option(section, 'rc404')
assert dump.has_option(section, 'rc500')
|
StarcoderdataPython
|
4907557
|
#!/usr/bin/env python3
# 1st-party
from datetime import timedelta
import logging
import os
import sys
# 2nd-party
import move_new_projects_to_unsafe_set
import partition_packages_by_abandoned
import partition_packages_by_popularity
import partition_packages_by_time
import plot_vulnerability
import vulnerability_counter
safe_packages_length = None
def get_points(partition_function, variable, simple_log_filename):
global safe_packages_length
# For easier reading of log, print a new line for every new curve.
logging.info('')
safe_packages, unsafe_packages = partition_function(variable)
move_new_projects_to_unsafe_set.move(safe_packages, unsafe_packages)
# We are memorizing the absolute number of safe projects in order to include
# it in the plot.
safe_packages_length = len(safe_packages)
return vulnerability_counter.traverse_event_log(simple_log_filename,
safe_packages,
unsafe_packages)
# vulnerability by when a project claimed itself when it last
# added/updated/removed a package
def plot_claim_by_abandonment(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS):
plot_vulnerability.plot(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
'TLS / GPG')
# 2/1 years, 6/3/1 months
year_in_days = 365
month_in_days = 30
time_deltas = (timedelta(days=year_in_days*2),
timedelta(days=year_in_days*1),
timedelta(days=month_in_days*6),
timedelta(days=month_in_days*3),
timedelta(days=month_in_days*1),)
labels = ('legacy (> 2yr)',
'legacy (> 1yr)',
'legacy (> 6mo)',
'legacy (> 3mo)',
'legacy (> 1mo)')
for i, time_delta in enumerate(time_deltas):
points = get_points(partition_packages_by_abandoned.partition, time_delta,
simple_log_filename)
color = LEGACY_SECURITY_COLORS[i]
label = labels[i]
plot_vulnerability.plot(points, color, label)
plot_vulnerability.plot(MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
'maximum security')
plot_vulnerability.save(NUMBER_OF_USERS,
'/var/experiments-output/diplomat-claim-by-abandonment.pdf')
# vulnerability by when a project claimed itself, based on recent activity windows
def plot_claim_over_time(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS):
plot_vulnerability.plot(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
'TLS / GPG')
# 1 month, 6 month, 1 year, 2 years
time_deltas = (timedelta(days=90), timedelta(days=180),
timedelta(days=365), timedelta(days=730))
labels = ('legacy (last 3mo)',
'legacy (last 6mo)',
'legacy (last 1yr)',
'legacy (last 2yr)')
for i, time_delta in enumerate(time_deltas):
points = get_points(partition_packages_by_time.partition, time_delta,
simple_log_filename)
color = LEGACY_SECURITY_COLORS[i]
label = labels[i]
plot_vulnerability.plot(points, color, label)
plot_vulnerability.plot(MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
'maximum security')
plot_vulnerability.save(NUMBER_OF_USERS,
'/var/experiments-output/diplomat-claim-over-time.pdf')
# vulnerability by popularity
def plot_claim_by_popularity(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS):
plot_vulnerability.plot(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
'TLS / GPG')
# top 0.1%, 1%, 10% packages by popularity
secure_fractions = [p/100 for p in (0.1, 1, 10)]
labels = ('legacy (top 0.1%)',
'legacy (top 1%)',
'legacy (top 10%)')
for i, secure_fraction in enumerate(secure_fractions):
points = get_points(partition_packages_by_popularity.partition,
secure_fraction, simple_log_filename)
color = LEGACY_SECURITY_COLORS[i]
label = labels[i]
plot_vulnerability.plot(points, color, label)
plot_vulnerability.plot(MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
'maximum security')
plot_vulnerability.save(NUMBER_OF_USERS,
'/var/experiments-output/diplomat-claim-by-popularity.pdf')
# Claim the top X% of projects.
# Additionally, claim projects over Y days.
# Plot the results.
def plot_claimed_by_mixed_strategy(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS):
plot_vulnerability.plot(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
'TLS / GPG')
# Get top 1% safe projects, and 99% unsafe projects.
secure_fraction = 1/100
popular_safe_packages, unpopular_unsafe_packages = \
partition_packages_by_popularity.partition(secure_fraction)
move_new_projects_to_unsafe_set.move(popular_safe_packages,
unpopular_unsafe_packages)
# Get projects abandoned some years before compromise.
years = 2
year_in_days = 365
time_delta = timedelta(days=year_in_days*years)
abandoned_safe_packages, updated_unsafe_packages = \
partition_packages_by_abandoned.partition(time_delta)
move_new_projects_to_unsafe_set.move(abandoned_safe_packages,
updated_unsafe_packages)
# Plot popularity.
points = vulnerability_counter.traverse_event_log(simple_log_filename,
popular_safe_packages,
unpopular_unsafe_packages)
color = LEGACY_SECURITY_COLORS[0]
label = 'legacy (top 1%)'
plot_vulnerability.plot(points, color, label)
# TODO: Double-check correctness.
safe_packages = popular_safe_packages|abandoned_safe_packages
unsafe_packages = \
(unpopular_unsafe_packages|updated_unsafe_packages)-safe_packages
assert len(safe_packages&unsafe_packages) == 0, \
'New safe and unsafe sets must be disjoint!'
assert (safe_packages|unsafe_packages) == \
(popular_safe_packages|unpopular_unsafe_packages| \
abandoned_safe_packages|updated_unsafe_packages), \
'New sets must be the same as old sets!'
# Plot popularity + abandoned.
points = vulnerability_counter.traverse_event_log(simple_log_filename,
safe_packages,
unsafe_packages)
color = LEGACY_SECURITY_COLORS[1]
label = 'legacy (top 1%, > 2yr)'
plot_vulnerability.plot(points, color, label)
# 1 month, 6 month, 1 year
time_deltas = (timedelta(days=90), timedelta(days=180), timedelta(days=365))
labels = ('legacy (top 1%, > 2yr, last 3mo)',
'legacy (top 1%, > 2yr, last 6mo)',
'legacy (top 1%, > 2yr, last 1yr)')
for i, time_delta in enumerate(time_deltas):
new_safe_packages, old_unsafe_packages = \
partition_packages_by_time.partition(time_delta)
move_new_projects_to_unsafe_set.move(new_safe_packages,
old_unsafe_packages)
# TODO: Double-check correctness.
safe_packages |= new_safe_packages
unsafe_packages |= old_unsafe_packages
unsafe_packages -= safe_packages
assert len(safe_packages&unsafe_packages) == 0, \
'New safe and unsafe sets must be disjoint!'
assert (safe_packages|unsafe_packages) == \
(popular_safe_packages|unpopular_unsafe_packages| \
new_safe_packages|old_unsafe_packages), \
'New sets must be the same as old sets!'
# Plot popularity + abandoned + claimed over time.
points = vulnerability_counter.traverse_event_log(simple_log_filename,
safe_packages,
unsafe_packages)
color = LEGACY_SECURITY_COLORS[i+2]
label = labels[i]
plot_vulnerability.plot(points, color, label)
plot_vulnerability.plot(MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
'maximum security')
plot_vulnerability.save(NUMBER_OF_USERS,
'/var/experiments-output/diplomat-claim-by-mixed.pdf')
if __name__ == '__main__':
# rw for owner and group but not others
os.umask(0o07)
# write log to file
logging.basicConfig(filename='/var/experiments-output/'\
'measure_vulnerability.log',
level=logging.DEBUG, filemode='w',
format='[%(asctime)s UTC] [%(name)s] [%(levelname)s] '\
'[%(funcName)s:%(lineno)s@%(filename)s] '\
'%(message)s')
try:
# Data source 1: This is where we see users querying project simple indices
# and/or the packages themselves.
assert len(sys.argv) == 2
simple_log_filename = sys.argv[1]
assert os.path.isfile(simple_log_filename)
# Just some colours for plots.
PRE_DIPLOMAT_COLOR = 'b-o'
MAX_SECURITY_COLOR = 'r->'
LEGACY_SECURITY_COLORS = ('m-p', 'g-^', 'c-v', 'k-s', 'y-D')
# 0% safe projects == 100% unsafe projects
PRE_DIPLOMAT_POINTS = \
get_points(partition_packages_by_popularity.partition, 0,
simple_log_filename)
# The total number of users is given by the end of the PyPI line.
NUMBER_OF_USERS = PRE_DIPLOMAT_POINTS[-1]
# 100% safe projects == 0% unsafe projects
MAX_SECURITY_POINTS = \
get_points(partition_packages_by_popularity.partition, 1,
simple_log_filename)
# 1. What does claiming abandoned projects look like?
plot_claim_by_abandonment(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS)
# 2. What does claiming top X% projects look like?
plot_claim_by_popularity(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS)
# 3. What does claiming projects over Y days look like?
plot_claim_over_time(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS)
# 4. What does (1)+(2)+(3) look like?
plot_claimed_by_mixed_strategy(PRE_DIPLOMAT_POINTS, PRE_DIPLOMAT_COLOR,
simple_log_filename, LEGACY_SECURITY_COLORS,
MAX_SECURITY_POINTS, MAX_SECURITY_COLOR,
NUMBER_OF_USERS)
except:
logging.exception('BLARG!')
raise
else:
logging.info('Done.')
|
StarcoderdataPython
|
157968
|
import codecs
import logging
import json
from concurrent.futures import ThreadPoolExecutor, as_completed
from pathlib import Path
from . import exceptions, config
from .storages import StorageMapper, EnvFile, partition_path
logger = logging.getLogger(__name__)
__escape_decoder = codecs.getdecoder('unicode_escape')
def parse_dotenv(data):
for line in data.splitlines():
line = line.strip()
if line and not line.startswith('#') and '=' in line:
k, _, v = line.partition('=')
# Remove any leading and trailing spaces in key, value
k, v = k.strip(), v.strip().encode('unicode-escape').decode('ascii')
if v and v[0] == v[-1] in ['"', "'"]:
v = __escape_decoder(v[1:-1])[0]
yield k, v
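# Hedged usage sketch (not part of the original module): parse_dotenv yields
# (key, value) pairs, skipping blank lines and comments and stripping matching
# quotes around values. The sample content below is made up.
def _parse_dotenv_example():
    sample = "# comment line\nDEBUG=1\nNAME='my app'\nEMPTY="
    # -> {'DEBUG': '1', 'NAME': 'my app', 'EMPTY': ''}
    return dict(parse_dotenv(sample))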
def phusion_dump(environment, path):
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
for k, v in environment.items():
with open(path.joinpath(k), 'w') as f:
f.write(v + '\n')
def raise_out_of_sync(local_file, remote_file):
raise exceptions.LocalCopyOutdated(
'Upload failed, remote file probably changed since last download and md5 hashes in cache conflict.\n'
'If you want to upload anyway, use the -f flag. \n'
'Offending file:\n\n %s -> %s ',
local_file,
remote_file
)
class S3Conf:
def __init__(self, settings=None):
self.settings = settings or config.Settings()
self._storages = None
@property
def storages(self):
if not self._storages:
self._storages = StorageMapper(self.settings)
return self._storages
def verify_cache(self):
cache_dir = Path(self.settings.cache_dir)
cache_dir.mkdir(parents=True, exist_ok=True)
default_config_file = config.ConfigFileResolver(self.settings.default_config_file, section='DEFAULT')
default_config_file.save()
gitignore_file_path = cache_dir.joinpath('.gitignore')
open(gitignore_file_path, 'w').write('*\n')
def check_remote_changes(self):
local_hashes = json.load(open(self.settings.hash_file)) if Path(self.settings.hash_file).exists() else {}
for local_path, remote_path in self.settings.file_mappings.items():
remote_hashes = self.storages.list(remote_path)
for _, local_file, remote_file in self.storages.expand_path(local_path, remote_path):
local_hash = local_hashes.get(str(local_file))
remote_hash = remote_hashes.get(str(remote_file))
if local_hash:
if local_hash != remote_hash:
raise_out_of_sync(local_file, remote_file)
else:
logger.warning('New mapped file detected: %s', local_file)
def push(self, force=False):
if not force:
self.check_remote_changes()
hashes = {}
with ThreadPoolExecutor() as executor:
futures = []
for local_path, remote_path in self.settings.file_mappings.items():
futures.append(executor.submit(self.storages.copy, local_path, remote_path))
for future in as_completed(futures):
copy_hashes = future.result()
hashes.update({str(local_file): local_hash for local_hash, local_file, _ in copy_hashes})
self.verify_cache()
json.dump(hashes, open(self.settings.hash_file, 'w'), indent=4)
return hashes
def pull(self):
hashes = {}
with ThreadPoolExecutor() as executor:
futures = []
for local_path, remote_path in self.settings.file_mappings.items():
futures.append(executor.submit(self.storages.copy, remote_path, local_path))
for future in as_completed(futures):
copy_hashes = future.result()
hashes.update({str(local_file): remote_hash for remote_hash, _, local_file in copy_hashes})
self.verify_cache()
json.dump(hashes, open(self.settings.hash_file, 'w'), indent=4)
return hashes
def get_envfile(self, mode='r'):
logger.info('Loading configs from %s', self.settings.environment_file_path)
remote_storage = self.storages.storage(self.settings.environment_file_path)
_, _, path = partition_path(self.settings.environment_file_path)
return EnvFile.from_file(remote_storage.open(path, mode=mode))
def edit_envfile(self):
with self.get_envfile(mode='r+') as envfile:
envfile.edit()
def create_envfile(self):
        logger.info('Trying to create envfile %s', self.settings.environment_file_path)
remote_storage = self.storages.storage(self.settings.environment_file_path)
remote_storage.create_bucket()
_, _, path = partition_path(self.settings.environment_file_path)
envfile_exist = bool(list(remote_storage.list(path)))
if envfile_exist:
            logger.warning('%s already exists', self.settings.environment_file_path)
else:
with self.get_envfile(mode='w') as _:
pass
|
StarcoderdataPython
|
6655114
|
<reponame>Fogapod/pink
import functools
from typing import Any
from discord.ext import commands
from .context import Context
def is_owner() -> commands.check:
async def predicate(ctx: Context) -> bool:
if ctx.author.id not in ctx.bot.owner_ids:
raise commands.NotOwner("Must be a bot owner to use this")
return True
return commands.check(predicate)
def owner_bypass(check: commands.check): # type: ignore
@functools.wraps(check)
def inner(*args: Any, **kwargs: Any) -> bool:
owner_pred = is_owner().predicate
original_pred = check(*args, **kwargs).predicate
async def predicate(ctx: Context) -> bool:
try:
return await owner_pred(ctx)
except commands.NotOwner:
return await original_pred(ctx)
return commands.check(predicate)
return inner
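# Hedged usage sketch (not part of the original module): owner_bypass wraps an
# existing check factory so that bot owners pass even when the wrapped check
# fails. "guild_only" below is a hypothetical example check, not a real PINK check.
@owner_bypass
def guild_only() -> commands.check:
    async def predicate(ctx: Context) -> bool:
        if ctx.guild is None:
            raise commands.NoPrivateMessage()
        return True
    return commands.check(predicate)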
|
StarcoderdataPython
|
8049773
|
import torch
import torch.backends.cudnn as cudnn
import os
from torchvision import transforms
import torchvision.models as models
from dataset import CustomDataset
from helper import load_checkpoint, save_checkpoint
from torch import nn
from lstms import *
from modelcnn import *
import json
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence
from nltk.translate.bleu_score import corpus_bleu
# device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Model parameters
encoder_dim = 512 # dimension of CNN
decoder_dim = 512 # dimension of LSTMs
emb_dim = 512 # dimension of embeddings
attention_dim = 512 # dimension of attention
dict_size = None
dropout = 0.5
best_bleu_score = 0.
decoder_lr = 4e-4 # learning rate for decoder
alpha_c = 1. # regularization parameter for 'doubly stochastic attention'
stepsize = 1
gamma = 0.99
cudnn.benchmark = True  # set to True only if inputs to the model are fixed size; otherwise there is a lot of computational overhead
def main():
start_epoch = 0
numepoch = 20
# Load word map into memory
word_map_path = "./preprocess_out"
dataset = "flickr8k"
word_map = None
with open(os.path.join("./preprocess_out", 'DICTIONARY_WORDS_' + dataset + '.json'), 'r') as file:
word_map = json.load(file)
dict_size = len(word_map)
# TODO: load train and validation data _ XIE
# https://pytorch.org/docs/master/torchvision/models.html
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(CustomDataset("./preprocess_out", "flickr8k", 'TRAIN',
transform=transforms.Compose([normalize])),
batch_size=48, shuffle=True, num_workers=1, pin_memory=True)
val_loader = torch.utils.data.DataLoader(CustomDataset("./preprocess_out", "flickr8k", 'VAL',
transform=transforms.Compose([normalize])),
batch_size=48, shuffle=True, num_workers=1, pin_memory=True)
# TODO: Change Load checkpoint Name
# check_point_name = "best_checkpoint_flickr8k.pth.tar"
# encoder, decoder, decoder_opt, last_epoch, best_bleu_score = load_checkpoint(check_point_name)
# start_epoch = last_epoch + 1
# move to device if possibble
encoder = CNN().to(device)
decoder = LSTMs(encoder_dim=encoder_dim,
attention_dim=attention_dim,
embed_dim=emb_dim,
decoder_dim=decoder_dim,
dic_size=dict_size,
dropout=dropout).to(device)
decoder_opt = torch.optim.Adam(
params=decoder.parameters(), lr=decoder_lr)
scheduler = torch.optim.lr_scheduler.StepLR(
decoder_opt, step_size=stepsize, gamma=gamma)
criterion = nn.CrossEntropyLoss().to(device)
for epoch in range(start_epoch, numepoch):
######################################
# TODO: check convergence
# begin train
######################################
print("=========== Epoch: ", epoch, "=============")
# encoder.train()
decoder.train()
scheduler.step()
# Batches Train
for i, (img, caption, cap_len) in enumerate(train_loader):
print("Iteration: ", i)
# use GPU if possible
img = img.to(device)
caption = caption.to(device)
cap_len = cap_len.to(device)
decoder_opt.zero_grad()
# forward
encoded = encoder(img)
# print("img", img.shape)
# print("encoded", encoded.shape)
preds, sorted_caps, decoded_len, alphas, _ = decoder(encoded, caption, cap_len)
# ignore the begin word
trues = sorted_caps[:, 1:]
# pack and pad
preds, _ = pack_padded_sequence(preds, decoded_len, batch_first=True)
trues, _ = pack_padded_sequence(trues, decoded_len, batch_first=True)
# calculate loss
loss = criterion(preds, trues)
loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
loss.backward()
print("Training Loss: ", loss)
# update weight
decoder_opt.step()
# TODO: print performance
######################################
# end trian
# validate and return score
val_loss_all = 0
references = []
hypotheses = []
#######################################
# TODO: check if with torch.no_grad(): necessary
decoder.eval()
with torch.no_grad():
for i, (img, caption, cap_len, all_captions) in enumerate(val_loader):
# use GPU if possible
img = img.to(device)
caption = caption.to(device)
cap_len = cap_len.to(device)
# forward
encoded = encoder(img)
preds, sorted_caps, decoded_len, alphas, sorted_index = decoder(
encoded, caption, cap_len)
# ignore the begin word
trues = sorted_caps[:, 1:]
preds2 = preds.clone()
# pack and pad
preds, _ = pack_padded_sequence(preds, decoded_len, batch_first=True)
trues, _ = pack_padded_sequence(trues, decoded_len, batch_first=True)
# calculate loss
loss = criterion(preds, trues)
                loss += alpha_c * ((1. - alphas.sum(dim=1)) ** 2).mean()
val_loss_all += loss
# TODO: print performance
all_captions = all_captions[sorted_index]
for j in range(all_captions.shape[0]):
img_caps = all_captions[j].tolist()
img_captions = list(
map(lambda c: [w for w in c if w not in {word_map['<start>'], word_map['<pad>']}],
img_caps)) # remove <start> and pads
references.append(img_captions)
_, predmax = torch.max(preds2, dim=2)
predmax = predmax.tolist()
temp_preds = list()
for j, p in enumerate(predmax):
temp_preds.append(
predmax[j][:decoded_len[j]]) # remove pads
predmax = temp_preds
hypotheses.extend(predmax)
assert len(references) == len(hypotheses)
print("Validation Loss All: ", val_loss_all)
bleu4 = corpus_bleu(references, hypotheses)
print("bleu4 score: ", bleu4)
#######################################
# check if there is improvement
# Save Checkpoint
save_checkpoint(encoder, decoder, decoder_opt, dataset, epoch, 0, True)
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
4805310
|
import os
import sys
import numpy as np
import itertools
import structure
from structure.global_constants import T_D,dt,ETA,MU
from structure.cell import Tissue, BasicSpringForceNoGrowth
import structure.initialisation as init
def print_progress(step,N_steps):
sys.stdout.write("\r %.2f %%"%(step*100./N_steps))
sys.stdout.flush()
def run(simulation,N_step,skip):
"""run a given simulation for N_step iterations
returns list of tissue objects at intervals given by skip"""
return [tissue.copy() for tissue in itertools.islice(simulation,0,N_step,skip)]
def run_generator(simulation,N_step,skip):
"""generator for running a given simulation for N_step iterations
returns generator for of tissue objects at intervals given by skip"""
return itertools.islice(simulation,0,N_step,skip)
def run_return_events(simulation,N_step):
return [tissue.copy() for tissue in itertools.islice(simulation,N_step) if tissue is not None]
def run_return_final_tissue(simulation,N_step):
return next(itertools.islice(simulation,N_step,None))
def run_til_fix(simulation,N_step,skip,include_fixed=True):
return [tissue.copy() for tissue in generate_til_fix(simulation,N_step,skip,include_fixed=include_fixed)]
def fixed(tissue):
try:
return (1 not in tissue.properties['type'] or 0 not in tissue.properties['type'])
except KeyError:
return np.all(tissue.properties['ancestor']==tissue.properties['ancestor'][0])
def generate_til_fix(simulation,N_step,skip,include_fixed=True):
for tissue in itertools.islice(simulation,0,N_step,skip):
if not fixed(tissue):
yield tissue
else:
if include_fixed:
yield tissue
break
#--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#------------------------------------------ SIMULATION ROUTINES ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
def simulation_no_division(tissue,dt,N_steps,rand):
"""run tissue simulation with no death or division"""
yield tissue
step = 1.
while True:
N= len(tissue)
mesh = tissue.mesh
step += 1
mesh.move_all(tissue.dr(dt))
tissue.update(dt)
yield tissue
def simulation(tissue,dt,N_steps,stepsize,rand,eta=ETA,progress_on=False):
yield tissue
step = 1.
while True:
N= len(tissue)
properties = tissue.properties
mesh = tissue.mesh
mesh.move_all(tissue.dr(dt,eta))
if rand.rand() < (1./T_D)*N*dt:
mother = rand.randint(N)
tissue.add_daughter_cells(mother,rand)
tissue.remove(mother,True)
tissue.remove(rand.randint(N)) #kill random cell
tissue.update(dt)
if progress_on: print_progress(step,N_steps)
step += 1
yield tissue
def simulation_ancestor_tracking(tissue,dt,N_steps,stepsize,rand,eta=ETA,progress_on=False):
"""simulation loop for neutral process tracking ancestor ids"""
tissue.properties['ancestor']=np.arange(len(tissue))
return simulation(tissue,dt,N_steps,stepsize,rand,eta=eta,progress_on=progress_on)
def simulation_mutant_tracking(tissue,dt,N_steps,stepsize,rand,eta=ETA,progress_on=False,mutant_number=1,mutant_type=1):
"""simulation loop for neutral process tracking mutant ids"""
tissue.properties['type'] = np.full(len(tissue),1-mutant_type,dtype=int)
tissue.properties['type'][rand.choice(len(tissue),size=mutant_number,replace=False)]=mutant_type
return simulation(tissue,dt,N_steps,stepsize,rand,eta=eta,progress_on=progress_on)
def initialise_tissue(N,dt,timend,timestep,rand,mu=MU,save_areas=False,save_cell_histories=False):
"""initialise tissue and run simulation until timend returning final state"""
tissue = init.init_tissue_torus(N,N,0.01,BasicSpringForceNoGrowth(mu),rand,save_areas=save_areas,save_cell_histories=save_cell_histories)
if timend !=0:
tissue = run_return_final_tissue(simulation(tissue,dt,timend/dt,timestep/dt,rand),timend/dt)
tissue.reset(reset_age=True)
return tissue
def run_simulation(simulation,N,timestep,timend,rand,init_time=None,mu=MU,eta=ETA,dt=dt,til_fix=True,generator=False,save_areas=False,
tissue=None,save_cell_histories=False,progress_on=False,**kwargs):
"""initialise tissue with NxN cells and run given simulation with given game and constants.
    starts with a single cooperator
    ends at time=timend OR, if til_fix=True, when the population is all cooperators (type=1) or defectors (2)
returns history: list of tissue objects at time intervals given by timestep
"""
if tissue is None:
tissue = initialise_tissue(N,dt,init_time,timestep,rand,mu=mu,save_areas=save_areas,save_cell_histories=save_cell_histories)
if til_fix:
include_fix = not (til_fix=='exclude_final')
if generator:
history = generate_til_fix(simulation(tissue,dt,timend/dt,timestep/dt,rand,eta=eta,progress_on=progress_on,**kwargs),timend/dt,timestep/dt,include_fix)
else:
history = run_til_fix(simulation(tissue,dt,timend/dt,timestep/dt,rand,eta=eta,progress_on=progress_on,**kwargs),timend/dt,timestep/dt)
else:
history = run(simulation(tissue,dt,timend/dt,timestep/dt,rand,eta=eta,progress_on=progress_on,**kwargs),timend/dt,timestep/dt)
return history
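# Hedged usage sketch (not part of the original module): build a small tissue,
# then run the neutral ancestor-tracking simulation for a fixed number of
# integer steps, keeping every 100th snapshot. Parameter values are illustrative.
if __name__ == '__main__':
    example_rand = np.random.RandomState(0)
    example_tissue = initialise_tissue(6, dt, 0, 1.0, example_rand)
    example_history = run(simulation_ancestor_tracking(example_tissue, dt, 1000, 100, example_rand), 1000, 100)
    print('kept %d snapshots' % len(example_history))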
|
StarcoderdataPython
|
1819424
|
from datetime import datetime
from typing import List, Optional, Union
import pandas as pd
from pydantic import StrictStr
from pydantic.typing import Literal
from feast.data_source import DataSource
from feast.feature_view import FeatureView
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.registry import Registry
from feast.repo_config import FeastConfigBaseModel, RepoConfig
class RedshiftOfflineStoreConfig(FeastConfigBaseModel):
""" Offline store config for AWS Redshift """
type: Literal["redshift"] = "redshift"
""" Offline store type selector"""
cluster_id: StrictStr
""" Redshift cluster identifier """
region: StrictStr
""" Redshift cluster's AWS region """
user: StrictStr
""" Redshift user name """
database: StrictStr
""" Redshift database name """
s3_path: StrictStr
""" S3 path for importing & exporting data to Redshift """
class RedshiftOfflineStore(OfflineStore):
@staticmethod
def pull_latest_from_table_or_query(
config: RepoConfig,
data_source: DataSource,
join_key_columns: List[str],
feature_name_columns: List[str],
event_timestamp_column: str,
created_timestamp_column: Optional[str],
start_date: datetime,
end_date: datetime,
) -> RetrievalJob:
pass
@staticmethod
def get_historical_features(
config: RepoConfig,
feature_views: List[FeatureView],
feature_refs: List[str],
entity_df: Union[pd.DataFrame, str],
registry: Registry,
project: str,
full_feature_names: bool = False,
) -> RetrievalJob:
pass
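# Hedged usage sketch (not part of the original module): constructing the config
# directly with made-up connection details; in a real feature repo these values
# would normally come from feature_store.yaml.
if __name__ == "__main__":
    example_config = RedshiftOfflineStoreConfig(
        cluster_id="my-redshift-cluster",
        region="us-west-2",
        user="feast_user",
        database="feast_db",
        s3_path="s3://my-bucket/feast-staging",
    )
    print(example_config)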
|
StarcoderdataPython
|
6516165
|
import pytest
from stix2.datastore import CompositeDataSource, make_id
from stix2.datastore.filters import Filter
from stix2.datastore.memory import MemorySink, MemorySource
def test_add_remove_composite_datasource():
cds = CompositeDataSource()
ds1 = MemorySource()
ds2 = MemorySource()
ds3 = MemorySink()
with pytest.raises(TypeError) as excinfo:
cds.add_data_sources([ds1, ds2, ds1, ds3])
assert str(excinfo.value) == ("DataSource (to be added) is not of type "
"stix2.DataSource. DataSource type is '<class 'stix2.datastore.memory.MemorySink'>'")
cds.add_data_sources([ds1, ds2, ds1])
assert len(cds.get_all_data_sources()) == 2
cds.remove_data_sources([ds1.id, ds2.id])
assert len(cds.get_all_data_sources()) == 0
def test_composite_datasource_operations(stix_objs1, stix_objs2):
BUNDLE1 = dict(id="bundle--%s" % make_id(),
objects=stix_objs1,
spec_version="2.0",
type="bundle")
cds1 = CompositeDataSource()
ds1_1 = MemorySource(stix_data=BUNDLE1)
ds1_2 = MemorySource(stix_data=stix_objs2)
cds2 = CompositeDataSource()
ds2_1 = MemorySource(stix_data=BUNDLE1)
ds2_2 = MemorySource(stix_data=stix_objs2)
cds1.add_data_sources([ds1_1, ds1_2])
cds2.add_data_sources([ds2_1, ds2_2])
indicators = cds1.all_versions("indicator--00000000-0000-4000-8000-000000000001")
    # STIX_OBJS2 contains a second version of this indicator with a later
    # 'modified' time, so two versions are returned
assert len(indicators) == 2
cds1.add_data_sources([cds2])
indicator = cds1.get("indicator--00000000-0000-4000-8000-000000000001")
assert indicator["id"] == "indicator--00000000-0000-4000-8000-000000000001"
assert indicator["modified"] == "2017-01-31T13:49:53.935Z"
assert indicator["type"] == "indicator"
query1 = [
Filter("type", "=", "indicator")
]
query2 = [
Filter("valid_from", "=", "2017-01-27T13:49:53.935382Z")
]
cds1.filters.add(query2)
results = cds1.query(query1)
    # STIX_OBJS2 has a version of this indicator with a later 'modified' time and
    # one indicator with a different id; STIX_OBJS1 has the version with the
    # original time, giving three matches in total
assert len(results) == 3
indicator = cds1.get("indicator--00000000-0000-4000-8000-000000000001")
assert indicator["id"] == "indicator--00000000-0000-4000-8000-000000000001"
assert indicator["modified"] == "2017-01-31T13:49:53.935Z"
assert indicator["type"] == "indicator"
    # Only one indicator has a different id. Because the duplicated data is
    # deduplicated across the two sources, all_versions returns just two versions
    # of this indicator (the original and the one with the later 'modified' time).
results = cds1.all_versions("indicator--00000000-0000-4000-8000-000000000001")
assert len(results) == 2
    # Because query2 is already attached to the composite source's filters,
    # querying with an empty filter list returns the same results as
    # cds1.query(query1) above.
results = cds1.query([])
assert len(results) == 3
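    # Equivalent check (sketch, not part of the original test): passing both
    # filter lists explicitly should select the same objects as attaching
    # query2 to the composite source and querying with query1 alone, e.g.
    #     results = cds1.query(query1 + query2)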
|
StarcoderdataPython
|
56331
|
# Generated by Django 3.0.6 on 2020-05-22 08:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import usuarios.managers
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
('cuestionarios', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('email_colegio', models.EmailField(blank=True, max_length=254, verbose_name='email address colegio')),
('username', models.CharField(blank=True, max_length=255, unique=True)),
('nombre', models.CharField(blank=True, max_length=30, verbose_name='nombre')),
('apellidos', models.CharField(blank=True, max_length=60, verbose_name='apellidos')),
('date_joined', models.DateTimeField(auto_now_add=True, verbose_name='date joined')),
('alias', models.CharField(blank=True, max_length=30)),
('DNI', models.CharField(blank=True, max_length=10)),
('colegio', models.CharField(max_length=100, null=True)),
('is_staff', models.BooleanField(default=False)),
('validado', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
],
options={
'abstract': False,
},
managers=[
('objects', usuarios.managers.UserManager()),
],
),
migrations.CreateModel(
name='User_test',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('test', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cuestionarios.Test')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user', 'test')},
},
),
migrations.AddField(
model_name='user',
name='test',
field=models.ManyToManyField(related_name='tests', through='usuarios.User_test', to='cuestionarios.Test'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
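# --- Illustrative sketch (not part of the generated migration) ---
# The custom user model in usuarios/models.py that this migration implies,
# read off the field list above. Base classes, manager wiring and USERNAME_FIELD
# are assumptions based on the standard Django custom-user pattern and may
# differ from the real project:
#
#     from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
#     from django.db import models
#     from .managers import UserManager
#
#     class User(AbstractBaseUser, PermissionsMixin):
#         email = models.EmailField('email address', blank=True)
#         username = models.CharField(max_length=255, unique=True, blank=True)
#         nombre = models.CharField('nombre', max_length=30, blank=True)
#         apellidos = models.CharField('apellidos', max_length=60, blank=True)
#         is_staff = models.BooleanField(default=False)
#         is_active = models.BooleanField(default=True)
#         test = models.ManyToManyField('cuestionarios.Test', through='User_test', related_name='tests')
#         # ... remaining fields mirror the CreateModel operation above
#         objects = UserManager()
#         USERNAME_FIELD = 'username'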
|
StarcoderdataPython
|
12829662
|
import os, requests
from selenium_test_case import SeleniumTestCase, slow, online, wd, host
import tests
from tests.pages import profile_page
from nose.tools import assert_equals, raises
class TestProfile(SeleniumTestCase):
def setUp(self):
self.page = profile_page.ProfilePage(self.wd, self.host)
def test_title(self):
self.page.get()
        print(self.page.name)
assert_equals(self.page.name, "<NAME>")
def test_login_controls(self):
self.page.get()
self.page.login("<EMAIL>")
        print([a.text for a in self.page.controls()])
        # after logging in, the first control should show the signed-in user's name
        assert_equals(self.page.controls()[0].text, "<NAME>")
|
StarcoderdataPython
|