id (stringlengths 1-8) | text (stringlengths 6-1.05M) | dataset_id (stringclasses: 1 value)
---|---|---
12811716
|
# -*- coding: utf-8 -*-
import time as builtin_time
import pandas as pd
import numpy as np
import platform
import os
import pickle
import gzip
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def read_data(filename, extension="", participant_id="", path="", localization="US", print_warning=True):
"""
Load the datafile into a pandas' dataframe.
Parameters
----------
NA
Returns
----------
NA
Example
----------
NA
Authors
----------
<NAME>
Dependencies
----------
- pandas
"""
# Find a corresponding file
file = filename
if os.path.isfile(file) is False:
file = path + filename + extension
if os.path.isfile(file) is False:
file = path + filename + ".xlsx"
if os.path.isfile(file) is False:
file = path + filename + ".csv"
if os.path.isfile(file) is False:
file = path + participant_id + filename + extension
if os.path.isfile(file) is False:
if ".csv" in file:
file = path + "/csv/" + participant_id + "_" + filename + extension
elif ".xlsx" in file:
file = path + "/excel/" + participant_id + "_" + filename + extension
else:
extension = ".xlsx"
if os.path.isfile(file) is False:
if print_warning is True:
print("NeuroKit Error: read_data(): file's path " + file + " not found!")
if localization == "FR" or localization == "FRA" or localization == "French" or localization == "France":
sep = ";"
decimal = ","
else:
sep = ","
decimal = "."
if ".csv" in file:
try:
df = pd.read_csv(file, sep=sep, decimal=decimal, encoding="utf-8")
except UnicodeDecodeError:
df = pd.read_csv(file, sep=sep, decimal=decimal, encoding="cp1125")
elif ".xls" in file or ".xlsx" in file:
df = pd.read_excel(file, encoding="utf-8")
    else:
        df = None
        if print_warning is True:
            print("NeuroKit Error: read_data(): wrong extension of the datafile.")
return(df)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def save_data(df, filename="data", extension="all", participant_id="", path="", localization="US", index=False, print_warning=True, index_label=None):
"""
Save the datafile into a pandas' dataframe.
Parameters
----------
NA
Returns
----------
NA
Example
----------
NA
Authors
----------
<NAME>
Dependencies
----------
- pandas
"""
if localization == "FR" or localization == "FRA" or localization == "French" or localization == "France":
sep = ";"
decimal = ","
else:
sep = ","
decimal = "."
if extension == "all":
extension = [".csv", ".xlsx"]
for ext in list(extension):
if ext == ".csv":
if os.path.exists(path + "/csv/") is False:
os.makedirs(path + "/csv/")
df.to_csv(path + "/csv/" + participant_id + "_" + filename + ext, sep=sep, index=index, index_label=index_label, decimal=decimal, encoding="utf-8")
elif ext == ".xlsx":
if os.path.exists(path + "/excel/") is False:
os.makedirs(path + "/excel/")
df.to_excel(path + "/excel/" + participant_id + "_" + filename + ext, index=index, index_label=index_label, encoding="utf-8")
else:
if print_warning is True:
print("NeuroKit Error: save_data(): wrong extension specified.")
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def save_nk_object(file, filename="file", path="", extension="nk", compress=False, compatibility=-1):
"""
Save an object to a pickled file.
Parameters
----------
NA
Returns
----------
NA
Example
----------
NA
Authors
----------
<NAME>
Dependencies
----------
- pickle
"""
if compress is True:
with gzip.open(path + filename + "." + extension, 'wb') as name:
pickle.dump(file, name, protocol=compatibility)
else:
with open(path + filename + "." + extension, 'wb') as name:
pickle.dump(file, name, protocol=compatibility)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def read_nk_object(filename, path=""):
"""
Read a pickled file.
Parameters
----------
NA
Returns
----------
NA
Example
----------
NA
Authors
----------
<NAME>
Dependencies
----------
- pickle
"""
try:
with open(filename, 'rb') as name:
file = pickle.load(name)
except pickle.UnpicklingError:
with gzip.open(filename, 'rb') as name:
file = pickle.load(name)
return(file)
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
def get_creation_date(path):
"""
    Try to get the date that a file was created, falling back to when it was last modified if that is not possible.
    See the Stack Overflow link under *See Also* for an explanation.
Parameters
----------
path : str
File's path.
Returns
----------
    creation_date : float
        Timestamp (seconds since the epoch) of file creation, or of last modification on Linux.
Example
----------
>>> import neurokit as nk
>>>
>>> date = nk.get_creation_date(file)
Notes
----------
*Authors*
- <NAME> (https://github.com/DominiqueMakowski)
- <NAME>
*Dependencies*
- platform
- os
*See Also*
- http://stackoverflow.com/a/39501288/1709587
"""
if platform.system() == 'Windows':
return(os.path.getctime(path))
else:
stat = os.stat(path)
try:
return(stat.st_birthtime)
except AttributeError:
print("Neuropsydia error: get_creation_date(): We're probably on Linux. No easy way to get creation dates here, so we'll settle for when its content was last modified.")
return(stat.st_mtime)
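# ==============================================================================
# Minimal usage sketch (not part of the original module): round-trip a dataframe
# through save_data()/read_data() and pickle an arbitrary object. File names,
# the "example" participant id and the demo dataframe are illustrative.
if __name__ == "__main__":
    demo = pd.DataFrame({"RT": [512, 430, 601], "Condition": ["A", "B", "A"]})
    save_data(demo, filename="demo", extension=[".csv"], participant_id="example", path=".")
    df = read_data("demo", extension=".csv", participant_id="example", path="./")
    save_nk_object({"df": df, "created": builtin_time.time()}, filename="demo", path="./")
    obj = read_nk_object("./demo.nk")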
|
StarcoderdataPython
|
3413822
|
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from mpl_toolkits.axes_grid1 import make_axes_locatable
from src.utils import unflatten
def rank_labels(pd_ser):
    '''
    Rank the behaviour variables and ignore labels of variables sparsified to zero.
    Return a flattened array of the current values and the corresponding labels.
    '''
pd_ser = pd_ser.replace(to_replace=0, value=np.nan)
pd_ser = pd_ser.sort_values(ascending=False, )
behav_labels = list(pd_ser.index)
v_ranked = pd_ser.values
v_ranked_flat = np.zeros((len(behav_labels),1))
v_ranked_flat.flat[:v_ranked.shape[0]] = v_ranked
return v_ranked_flat, behav_labels
def plot_heatmap(ax, mat, x_labels, y_labels, cb_max, cmap=plt.cm.RdBu_r):
    '''
    Plot a single generic heatmap on a provided axis.
    ax: the axis of the figure
    mat: 2-d matrix
    x_labels, y_labels: lists of labels
    cb_max: maximum absolute value of the color bar
    cmap: matplotlib colormap (default RdBu_r)
    '''
graph = ax.matshow(mat, vmin=-cb_max, vmax=cb_max, cmap=cmap)
ax.set_xticks(np.arange(mat.shape[1]))
ax.set_yticks(np.arange(mat.shape[0]))
ax.set_xticklabels(x_labels, rotation='vertical')
ax.set_yticklabels(y_labels)
return graph
def single_heatmap(mat, x_labels, y_labels, cb_label):
'''
heat map with color bar
'''
cb_max = np.max(np.abs(mat))
fig = plt.figure()
ax = fig.add_subplot(111)
hm = ax.matshow(mat, vmin=-cb_max, vmax=cb_max, cmap=plt.cm.RdBu_r)
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=1)
cb = fig.colorbar(hm, cax=cax)
cb.set_label(cb_label)
ax.set_xticks(np.arange(mat.shape[1]))
ax.set_yticks(np.arange(mat.shape[0]))
ax.set_xticklabels(x_labels)
ax.set_yticklabels(y_labels)
return fig
def plot_SCCA_FC_MWQ(FC_ws, behav_ws, region_labels, behav_labels, cb_max, cmap=plt.cm.RdBu_r):
'''
plotting tool for functional connectivity vs MRIQ
'''
plt.close('all')
fig = plt.figure(figsize=(15,4))
ax = fig.add_subplot(111)
brain = plot_heatmap(ax, FC_ws, region_labels, region_labels, cb_max, cmap)
    # add a dashed line along the diagonal
ax.plot([-0.5, len(region_labels)-0.5], [-0.5, len(region_labels)-0.5], ls='--', c='.3')
divider = make_axes_locatable(ax)
ax2 = divider.append_axes("right", size="1%", pad=8)
behav = plot_heatmap(ax2, behav_ws, [' '], behav_labels, cb_max, cmap)
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", size="50%", pad=0.25)
fig.colorbar(brain, cax=cax)
return fig
def map_labels(data, lables):
df = pd.DataFrame(data, index=lables)
return df
def show_results(u, v, u_labels, v_labels, rank_v=True, sparse=True):
    '''
    For plotting the SCCA decomposition heatmaps.
    u must be from a functional connectivity data set;
    v must be from a data set that can be expressed as a single vector.
    '''
df_v = map_labels(v, v_labels)
n_component = v.shape[1]
    # find the maximum for the color bar
u_max = np.max(np.abs(u))
v_max = np.max(np.abs(v))
cb_max = np.max((u_max, v_max))
figs = []
for i in range(n_component):
# reconstruct the correlation matrix
ui = unflatten(u[:, i])
if rank_v:
vi, cur_v_labels = rank_labels(df_v.iloc[:, i])
else:
            vi = v[:, i:i + 1]  # the input of the plot function must be a 2-d array
cur_v_labels = v_labels
if sparse:
idx = np.isnan(vi).reshape((vi.shape[0]))
vi = vi[~idx]
vi = vi.reshape((vi.shape[0], 1))
cur_v_labels = np.array(cur_v_labels)[~idx]
cur_fig = plot_SCCA_FC_MWQ(ui, vi, u_labels, cur_v_labels, cb_max=cb_max, cmap=plt.cm.RdBu_r)
# save for later
figs.append(cur_fig)
return figs
from matplotlib.backends.backend_pdf import PdfPages
def write_pdf(fname, figures):
'''
write a list of figures to a single pdf
'''
doc = PdfPages(fname)
for fig in figures:
fig.savefig(doc, format='pdf', dpi=150, bbox_inches='tight')
doc.close()
def write_png(fname, figures):
'''
write a list of figures to separate png files
'''
for i, fig in enumerate(figures):
fig.savefig(fname.format(i + 1), dpi=150, bbox_inches='tight')
def set_text_size(size):
    '''
    Set the size of all text in the figures.
    The font is always sans-serif (Arial); the size is the only thing you need to set.
    '''
font = {'family' : 'sans-serif',
'sans-serif' : 'Arial',
'size' : size}
matplotlib.rc('font', **font)
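# Minimal usage sketch (illustrative; assumes the module's own imports, including
# `src.utils.unflatten`, resolve): draw one annotated heatmap from random data
# and write it to a PDF. The labels and output file name are assumptions.
if __name__ == "__main__":
    set_text_size(8)
    rng = np.random.RandomState(0)
    mat = rng.randn(4, 4)
    fig = single_heatmap(mat, x_labels=list("ABCD"), y_labels=list("ABCD"), cb_label="weight")
    write_pdf("example_heatmap.pdf", [fig])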
|
StarcoderdataPython
|
1944697
|
<gh_stars>0
def ascii():
val = input("Enter single char: ")
    print(ord(val))
return val
ascii()
|
StarcoderdataPython
|
9707335
|
<reponame>OdincoGaming/Text-Posting
import facebook
def postupdate():
return(1)
def prepareupdate():
# top10 ############################################################
top10 = t10()
#end of top 10 #####################################################
# objectives #######################################################
obj = objectives()
#end of objectives #################################################
# age ##############################################################
age = getage()
updateage()
#end of age ########################################################
# most popular post ################################################
#end of most popular post ##########################################
# writing update ###################################################
writeupdate(top10, obj, age)
#end of writing update #############################################
def stringinsert(string, str_to_insert, index):
return string[:index] + str_to_insert + string[index:]
def t10():
top10file = open('top10.txt', 'r')
top10 = top10file.read()
top10 = top10.rstrip()
l = len(top10)
top10 = stringinsert(top10, '.', l)
top10 = (top10.replace('\n',', ').replace('[',', ').replace(']',', '))
top10file.close()
return(top10)
def objectives():
objfile = open('objectives.txt','r')
obj = objfile.read()
obj = obj.replace('\n', ' ')
objfile.close()
return(obj)
def getage():
afile = open('age.txt','r')
age = afile.read()
afile.close()
print("\n" + age)
return(age)
def updateage():
afile = open('age.txt','r')
age = afile.read()
afile.close()
print("\n" + age)
newage = str(int(age) + 1)
afile = open('age.txt','w+')
afile.write(newage)
afile.close()
def writeupdate(top10, obj, age):
f = open('update.txt', 'w+')
f.write("Today I am " + age + " weeks old." + ' My current objectives are ' + obj +
"According to Yahoo, the internet is talking about " + top10)
print("Today I am " + age + " weeks old." + ' My current objectives are ' + obj +
"According to Yahoo, the internet is talking about " + top10)
f.close()
prepareupdate()
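# Note (not in the original script): prepareupdate() expects three plain-text
# files in the working directory; hypothetical contents that let it run:
#   age.txt        -> "12"              (an integer number of weeks)
#   objectives.txt -> "learn to post."  (free text)
#   top10.txt      -> "[topic one\ntopic two]"  (one trending topic per line)
# The `facebook` import and postupdate() are unused here; actual posting is
# presumably handled elsewhere.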
|
StarcoderdataPython
|
5195614
|
from distutils.core import setup
setup(
name='pydojo4',
packages=['pydojo4'],
version='4.0.3',
description='A playful way to learn coding with Python',
author='<NAME>, <NAME>',
author_email='<EMAIL>, <EMAIL>',
url='https://github.com/pog87/PYDOJO',
keywords=['game', 'development', 'learning', 'education'],
classifiers=[],
include_package_data=True,
package_data={'pydojo4': ['turtle.png', 'pensurface.png']},
install_requires=['pygame']
)
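# Standard distutils/pip workflow for a setup.py like the one above (generic
# commands, not specific to pydojo4):
#   python setup.py sdist   # build a source distribution into ./dist
#   pip install .           # install the package from the current checkout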
|
StarcoderdataPython
|
1784674
|
<reponame>jannetasa/haravajarjestelma<filename>areas/migrations/0006_add_contract_zone_secondary_contact_info.py
# Generated by Django 2.2.8 on 2020-02-22 23:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("areas", "0005_allow_multiple_users_per_contract_zone")]
operations = [
migrations.AddField(
model_name="contractzone",
name="secondary_contact_person",
field=models.CharField(
blank=True, max_length=255, verbose_name="secondary contact person"
),
),
migrations.AddField(
model_name="contractzone",
name="secondary_email",
field=models.EmailField(
blank=True, max_length=254, verbose_name="secondary email"
),
),
migrations.AddField(
model_name="contractzone",
name="secondary_phone",
field=models.CharField(
blank=True, max_length=255, verbose_name="secondary phone"
),
),
]
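# Standard Django workflow for a migration like the one above (generic commands,
# not project-specific): it is applied with
#   python manage.py migrate areas
# which runs 0006_add_contract_zone_secondary_contact_info after 0005.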
|
StarcoderdataPython
|
374011
|
import unittest
import random
from dramakul.sites import SITES
QUERY = "beauty"
class TestSite(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.sites = [site() for site in SITES]
def test_site_functions(self):
for site in self.sites:
results = site.search(QUERY)
assert len(results) > 0, site.name
result = random.choice(results)
drama = result.get_info()
assert drama, site.name
assert len(drama.episodes) > 0, site.name
|
StarcoderdataPython
|
11312685
|
import torch
from torch.nn import functional as F
from torch import nn
from policies.models.dqn import Flatten
class DISTRIBUTIONAL_DQN(nn.Module):
"""Implements a 3 layer convolutional network with 2 fully connected layers at the end as explained by:
    Bellemare et al. (2017) - https://arxiv.org/abs/1707.06887
The final layer projects the features onto (number of actions * number of atoms) supports.
For each action we receive a probability distribution of future Q values.
"""
def __init__(self, n_action: int, n_atoms: int, hist_len: int, use_softmax: bool) -> None:
super(DISTRIBUTIONAL_DQN, self).__init__()
self.n_action = n_action
self.n_atoms = n_atoms
self.use_softmax = use_softmax
self.sequential_model = nn.Sequential(
nn.Conv2d(hist_len, 32, kernel_size=8, stride=4, padding=0), # (In Channel, Out Channel, ...)
nn.ReLU(),
nn.Conv2d(32, 64, kernel_size=4, stride=2, padding=0),
nn.ReLU(),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
Flatten(),
nn.Linear(3136, 512),
nn.ReLU(),
nn.Linear(512, self.n_action * self.n_atoms)
)
def forward(self, x):
quantiles = self.sequential_model(x)
if self.use_softmax:
# Returns Q(s, a) probabilities.
# Probabilities with action over second dimension
probs = torch.stack([F.softmax(p, dim=1) for p in quantiles.chunk(self.n_action, 1)], 1)
return probs.clamp(min=1e-8, max=1 - 1e-8) # Use clipping to prevent NaNs
else:
            # Returns quantiles: either the pre-softmax representation or the supports for each atom.
quantiles = torch.stack([p for p in quantiles.chunk(self.n_action, 1)], 1)
return quantiles
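# ------------------------------------------------------------------------------
# Minimal forward-pass sketch (illustrative; assumes the `Flatten` import above
# resolves; batch size, action count and atom count are arbitrary choices).
# With 84x84 Atari-style frames the conv stack yields 64 * 7 * 7 = 3136 features,
# matching the first linear layer.
if __name__ == "__main__":
    model = DISTRIBUTIONAL_DQN(n_action=6, n_atoms=51, hist_len=4, use_softmax=True)
    frames = torch.randn(2, 4, 84, 84)   # (batch, history, height, width)
    probs = model(frames)                # -> (2, 6, 51); each action's atoms sum to ~1
    print(probs.shape, probs.sum(dim=-1))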
|
StarcoderdataPython
|
11246167
|
# 19. strip() -> Returns the string with the given characters removed from both ends.
# Note: the argument is a set of characters to strip, not a literal substring.
texto = 'vou Treinar todo Dia Python'
print(texto.strip('vou'))     # ' Treinar todo Dia Python'
print(texto.strip('Python'))  # 'vou Treinar todo Dia '
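# If the goal is to drop a literal leading/trailing word rather than a character
# set, str.removeprefix / str.removesuffix (Python 3.9+) are the closer tools:
print(texto.removeprefix('vou '))     # 'Treinar todo Dia Python'
print(texto.removesuffix(' Python'))  # 'vou Treinar todo Dia'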
|
StarcoderdataPython
|
1711072
|
import threading
import multiprocessing
import collections
import itertools
from ..primitives import bundle
from ..primitives import chain
from .. import signals
from ..chainsend import eager_send
CPU_CONCURRENCY = multiprocessing.cpu_count()
class StoredFuture(object):
"""
Call stored for future execution
:param call: callable to execute
:param args: positional arguments to ``call``
:param kwargs: keyword arguments to ``call``
"""
__slots__ = ('_instruction', '_result', '_mutex')
def __init__(self, call, *args, **kwargs):
self._instruction = call, args, kwargs
self._result = None
self._mutex = threading.Lock()
def realise(self):
"""
Realise the future if possible
If the future has not been realised yet, do so in the current thread.
This will block execution until the future is realised.
Otherwise, do not block but return whether the result is already available.
This will not return the result nor propagate any exceptions of the future itself.
:return: whether the future has been realised
:rtype: bool
"""
if self._mutex.acquire(False):
# realise the future in this thread
try:
if self._result is not None:
return True
call, args, kwargs = self._instruction
try:
result = call(*args, **kwargs)
except BaseException as err:
self._result = None, err
else:
self._result = result, None
return True
finally:
self._mutex.release()
else:
# indicate whether the executing thread is done
return self._result is not None
def await_result(self):
"""Wait for the future to be realised"""
# if we cannot realise the future, another thread is doing so already
# wait for the mutex to be released by it once it has finished
if not self.realise():
with self._mutex:
pass
@property
def result(self):
"""
The result from realising the future
If the result is not available, block until done.
:return: result of the future
:raises: any exception encountered during realising the future
"""
if self._result is None:
self.await_result()
chunks, exception = self._result
if exception is None:
return chunks
raise exception # re-raise exception from execution
class FutureChainResults(object):
"""
Chain result computation stored for future and concurrent execution
Acts as an iterable for the actual results. Each future can be executed
prematurely by a concurrent executor, with a synchronous fallback as
required. Iteration can lazily advance through all available results
before blocking.
If any future raises an exception, iteration re-raises the exception
at the appropriate position.
:param futures: the stored futures for each result chunk
:type futures: list[StoredFuture]
"""
__slots__ = ('_futures', '_results', '_exception', '_done', '_result_lock')
def __init__(self, futures):
self._futures = iter(futures)
self._results = []
self._exception = None
self._done = False
self._result_lock = threading.Lock()
def _set_done(self):
self._done = True
self._futures = None
self._result_lock = None
def __iter__(self):
if self._done:
for item in self._results:
yield item
else:
for item in self._active_iter():
yield item
if self._exception is not None:
raise self._exception
def _active_iter(self):
result_idx = 0
# fast-forward existing results
for item in self._results:
yield item
result_idx += 1
# fetch remaining results safely
while not self._done:
# someone may have beaten us before we acquire this lock
# constraints must be rechecked as needed
with self._result_lock:
try:
result = self._results[result_idx]
except IndexError:
try:
future = next(self._futures)
except StopIteration:
break
try:
results = future.result
except BaseException as err:
self._exception = err
break
else:
self._results.extend(results)
for item in results:
yield item
result_idx += 1
else:
yield result
result_idx += 1
for item in self._results[result_idx:]:
yield item
self._set_done()
class SafeTee(object):
"""
Thread-safe version of :py:func:`itertools.tee`
:param iterable: source iterable to split
:param n: number of safe iterators to produce for `iterable`
:type n: int
"""
__slots__ = ('_count', '_tees', '_mutex')
def __init__(self, iterable, n=2):
self._count = n
self._tees = iter(itertools.tee(iterable, n))
self._mutex = threading.Lock()
def __iter__(self):
try:
tee = next(self._tees)
except StopIteration:
raise ValueError('too many iterations (expected %d)' % self._count)
try:
while True:
with self._mutex:
value = next(tee)
yield value
except StopIteration:
return
def multi_iter(iterable, count=2):
"""Return `count` independent, thread-safe iterators for `iterable`"""
# no need to special-case re-usable, container-like iterables
if not isinstance(
iterable,
(
list, tuple, set,
FutureChainResults,
collections.Sequence, collections.Set, collections.Mapping, collections.MappingView
)):
iterable = SafeTee(iterable, n=count)
return (iter(iterable) for _ in range(count))
class LocalExecutor(object):
"""
Executor for futures using local execution stacks without concurrency
:param max_workers: maximum number of threads in pool
:type max_workers: int or float
:param identifier: base identifier for all workers
:type identifier: str
"""
__slots__ = ('identifier', '_max_workers')
def __init__(self, max_workers, identifier=''):
self.identifier = identifier or ('%s_%d' % (self.__class__.__name__, id(self)))
self._max_workers = max_workers if max_workers > 0 else float('inf')
@staticmethod
def submit(call, *args, **kwargs):
"""
Submit a call for future execution
:return: future for the call execution
:rtype: StoredFuture
"""
return StoredFuture(call, *args, **kwargs)
DEFAULT_EXECUTOR = LocalExecutor(-1, 'chainlet_local')
class ConcurrentBundle(bundle.Bundle):
"""
A group of chainlets that concurrently process each :term:`data chunk`
Processing of chainlets is performed using only the requesting threads.
This allows thread-safe usage, but requires explicit concurrent usage
for blocking actions, such as file I/O or :py:func:`time.sleep`,
to be run in parallel.
Concurrent bundles implement element concurrency:
the same data is processed concurrently by multiple elements.
"""
__slots__ = ()
executor = DEFAULT_EXECUTOR
def chainlet_send(self, value=None):
if self.chain_join:
return FutureChainResults([
self.executor.submit(eager_send, element, values)
for element, values in zip(self.elements, multi_iter(value, len(self.elements)))
])
else:
values = (value,)
return FutureChainResults([
self.executor.submit(eager_send, element, values)
for element in self.elements
])
class ConcurrentChain(chain.Chain):
"""
A group of chainlets that concurrently process each :term:`data chunk`
Processing of chainlets is performed using only the requesting threads.
This allows thread-safe usage, but requires explicit concurrent usage
for blocking actions, such as file I/O or :py:func:`time.sleep`,
to be run in parallel.
Concurrent chains implement data concurrency:
multiple data is processed concurrently by the same elements.
:note: A :py:class:`ConcurrentChain` will *always* :term:`join`
and :term:`fork` to handle all data.
"""
__slots__ = ('_stripes',)
executor = DEFAULT_EXECUTOR
def __init__(self, elements):
super(ConcurrentChain, self).__init__(elements)
self._stripes = None
# need to receive all data for parallelism
self.chain_join = True
self.chain_fork = True
def _compile_stripes(self):
stripes, buffer = [], []
for element in self.elements:
if element.chain_join:
if buffer:
stripes.append(chain.Chain(buffer))
buffer = []
stripes.append(element)
elif element.chain_fork:
if buffer:
buffer.append(element)
stripes.append(chain.Chain(buffer))
buffer = []
else:
stripes.append(element)
else:
buffer.append(element)
if buffer:
stripes.append(chain.Chain(buffer))
self._stripes = stripes
def chainlet_send(self, value=None):
if self._stripes is None:
self._compile_stripes()
if self.chain_join:
values = value
else:
values = [value]
try:
for stripe in self._stripes:
if not stripe.chain_join:
values = FutureChainResults([
self.executor.submit(eager_send, stripe, [value])
for value in values
])
else:
values = eager_send(stripe, values)
if not values:
break
if self.chain_fork:
return values
else:
try:
return next(iter(values))
except IndexError:
raise signals.StopTraversal
# An element in the chain is exhausted permanently
except signals.ChainExit:
raise StopIteration
|
StarcoderdataPython
|
109854
|
import numpy as np
import scipy.linalg
from numpy.linalg import cond, norm
from scipy.linalg import toeplitz
from scipy.linalg import solve_triangular
import time
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('darkgrid')
PI = np.pi
CRED = '\033[91m'
CGREEN = '\033[32m'
CEND = '\033[0m'
def red_print(msg):
print('Partial Time ' + CRED + str(msg) + CEND)
def green_print(msg):
print('Full Time ' + CGREEN + str(msg) + CEND)
class LU_eliminator(object):
def __init__(self, mode):
assert mode in ['partial','full']
self.mode = mode
return
def perform_LU_analysis_partial(self, A):
# Make sure the matrix is square
assert A.shape[0] == A.shape[1]
# Let m be the number of rows/columns.
m = A.shape[0]
# Initialize the LU matrix as a copy of A
# In order to perform in-place substitutions
LU = np.matrix(np.copy(A))
# Initialize the Permutation Matrix P
P = np.arange(m)
# Start Timer
start = time.time()
# For every row i in the matrix.
for i in range(0, m-1):
# Find the pivot point (absolute maximum) location on current lower-right matrix.
p = np.argmax(np.abs(LU[i:,i].ravel()))
# Swap positions in the Permutation Matrix
P[[i,p+i]] = P[[p+i,i]]
# Swap Rows in the LU matrix.
# We can use the One-Liner Python Idiom a,b = b,a here.
LU[[i,p+i],:] = LU[[p+i,i],:]
# Get the Weight Vector that each subsequent row must be multiplied with
# for the elimination step.
w = LU[i+1:m,i] / LU[i,i]
# Perform Elimination on the U part of the LU
LU[i+1:m,i:m] = LU[i+1:m,i:m] - w*LU[i,i:m]
# Update with the weight the L part of the LU
LU[i+1:m,i] = w
end = time.time()
elapsed = (end*1000 -start*1000)
L = np.tril(LU,-1) + np.eye(m)
U = np.triu(LU)
P_ = np.eye(m)
P = P_[P,:]
return L,U,P,elapsed
def perform_LU_analysis_full(self, A):
# Make sure the matrix is square
assert A.shape[0] == A.shape[1]
# Let m be the number of rows/columns.
m = A.shape[0]
# Initialize the LU matrix as a copy of A
# In order to perform in-place substitutions
LU = np.matrix(np.copy(A))
# Initialize the Permutation Matrix P
P = np.arange(m)
# Initialize the Permutation Matrix Q
Q = np.arange(m)
start = time.time()
# For every row i in the matrix.
for i in range(0, m-1):
# Find the pivot point pair id (absolute maximum row / absolute maximum column) location on current lower-right matrix.
p = np.argmax(np.abs(LU[i:,i:]).ravel())
# Convert it to a 2D pair given the current lower-right shape
p_r, p_c = np.unravel_index(p, LU[i:,i:].shape)
# Swap positions in the Row Permutation Matrix
P[[i,p_r+i]] = P[[p_r+i,i]]
# Swap positions in the Column Permutation Matrix
Q[[i,p_c+i]] = Q[[p_c+i,i]]
# Swap Rows in the LU matrix.
# We can use the One-Liner Python Idiom a,b = b,a here.
LU[[i,p_r+i],:] = LU[[p_r+i,i],:]
# Swap Columns in the LU matrix.
# We can use the One-Liner Python Idiom a,b = b,a here.
LU[:,[i,p_c+i]] = LU[:,[p_c+i,i]]
# Get the Weight Vector that each subsequent row must be multiplied with
# for the elimination step.
w = LU[i+1:m,i] / LU[i,i]
# Perform Elimination on the U part of the LU
LU[i+1:m,i:m] = LU[i+1:m,i:m] - w*LU[i,i:m]
# Update with the weight the L part of the LU
LU[i+1:m,i] = w
end = time.time()
elapsed = (end*1000 - start*1000)
L = np.tril(LU,-1) + np.eye(m)
U = np.triu(LU)
P_ = np.eye(m)
P = P_[P,:]
Q_ = np.eye(m)
Q = Q_[:,Q]
return L,U,P,Q,elapsed
def linear_solve_partial(self, A, b):
L, U, P, elapsed = self.perform_LU_analysis_partial(A=A)
Y=scipy.linalg.solve(L,P@b)
X=scipy.linalg.solve(U,Y)
return X , elapsed
def linear_solve_full(self, A, b):
L, U, P, Q, elapsed = self.perform_LU_analysis_full(A=A)
Z=scipy.linalg.solve(L,P@b)
Y=scipy.linalg.solve(U,Z)
X=scipy.linalg.solve(Q.T,Y)
return X , elapsed
def linear_solve(self, A, b):
if self.mode == 'partial':
X, elapsed = self.linear_solve_partial(A=A,b=b)
elif self.mode == 'full':
X, elapsed = self.linear_solve_full(A=A,b=b)
return X, elapsed
def run_exersize_2():
condition_numbers = []
cpu_times_partial = []
cpu_times_full = []
error_partial = []
error_full = []
res_partial = []
res_full = []
def k_diag_value_calc(k):
return ((4*(-1)**k) * ((PI**2) * (k**2)-6)) / k**4
def create_toeplitz_matrix(size):
diag_value = PI ** 4 / 5.0
diagonals = np.array([diag_value] + [k_diag_value_calc(f) for f in range(1,size)])
return toeplitz(diagonals)
sizes = [64,128,256,512,1024,2048]
x_list = [np.random.randn(f,1) for f in sizes]
A_list = [create_toeplitz_matrix(f) for f in sizes]
b_list = [np.matmul(x1,x2) for x1,x2 in zip(A_list,x_list)]
for A,b,x in zip(A_list, b_list, x_list):
print(norm(A,np.inf))
condition_numbers.append(cond(A, np.inf))
partial_solver = LU_eliminator(mode='partial')
full_solver = LU_eliminator(mode='full')
px, ptime = partial_solver.linear_solve(A=A,b=b)
fx, ftime = full_solver.linear_solve(A=A,b=b)
perror = norm(px-x, np.inf)
ferror = norm(fx-x, np.inf)
pres = norm(b-A@px, np.inf)
fres = norm(b-A@fx, np.inf)
cpu_times_partial.append(ptime)
cpu_times_full.append(ftime)
error_partial.append(perror)
error_full.append(ferror)
res_partial.append(pres)
res_full.append(fres)
df = pd.DataFrame(data={'size':sizes, 'condition_number':condition_numbers, 'cpu_times_partial':cpu_times_partial, 'cpu_times_full':cpu_times_full, 'error_partial':error_partial, 'error_full':error_full, 'res_partial':res_partial, 'res_full':res_full})
return df
def run_exersize_4():
condition_numbers = []
cpu_times_partial = []
cpu_times_full = []
error_partial = []
error_full = []
res_partial = []
res_full = []
def create_custom_matrix(size):
A = np.ones((size,size))*(-1)
A *= np.tri(*A.shape,-1)
A += np.eye(size)
A[:, size-1] = np.ones(size)
return A
sizes = [64,128,256,512,1024]
x_list = [np.random.randn(f,1) for f in sizes]
A_list = [create_custom_matrix(f) for f in sizes]
b_list = [np.matmul(x1,x2) for x1,x2 in zip(A_list,x_list)]
for A,b,x in zip(A_list, b_list, x_list):
condition_numbers.append(cond(A, np.inf))
partial_solver = LU_eliminator(mode='partial')
full_solver = LU_eliminator(mode='full')
px, ptime = partial_solver.linear_solve(A=A,b=b)
fx, ftime = full_solver.linear_solve(A=A,b=b)
perror = norm(px-x, np.inf)
ferror = norm(fx-x, np.inf)
pres = norm(b-A@px, np.inf)
fres = norm(b-A@fx, np.inf)
error_partial.append(perror)
error_full.append(ferror)
cpu_times_partial.append(ptime)
cpu_times_full.append(ftime)
res_partial.append(pres)
res_full.append(fres)
df = pd.DataFrame(data={'size':sizes, 'condition_number':condition_numbers, 'cpu_times_partial':cpu_times_partial, 'cpu_times_full':cpu_times_full, 'error_partial':error_partial, 'error_full':error_full, 'res_partial':res_partial, 'res_full':res_full})
return df
def run_exersize_5():
condition_numbers = []
cpu_times_partial = []
error_partial = []
res_partial = []
def k_diag_value_calc(k):
return ((4*(-1)**k) * ((PI**2) * (k**2)-6)) / k**4
def create_toeplitz_matrix(size):
diag_value = PI ** 4 / 5.0
diagonals = np.array([diag_value] + [k_diag_value_calc(f) for f in range(1,size)])
return toeplitz(diagonals)
def l2_normalize(v):
v = v.astype(float)
norm = np.linalg.norm(v)
if norm == 0:
return v
return v / norm
sizes = [64,128,256,512,1024]
x_list = [np.random.randn(f,1) for f in sizes]
u_list = [l2_normalize(np.random.randn(f,1)) for f in sizes]
v_list = [l2_normalize(np.random.randn(f,1)) for f in sizes]
A_list = [create_toeplitz_matrix(f) for f in sizes]
b_list = [np.matmul((A + np.outer(u,v)), x) for A,u,v,x in zip(A_list,u_list,v_list,x_list)]
for A,b,x,u,v in zip(A_list, b_list, x_list, u_list, v_list):
condition_numbers.append(cond(A, np.inf))
partial_solver = LU_eliminator(mode='partial')
L, U, P, _ = partial_solver.perform_LU_analysis_partial(A=A)
        # Start time here because Sherman-Morrison assumes
# Prior Knowledge of LU factorization
# Start Timer
start = time.time()
        # Check that u and v are unit-norm (printed for inspection)
print(norm(u,2))
print(norm(v,2))
# Partial Problem 1 Solve Az = u for z, so z = A^-1 * u
# Forward
pp1 = solve_triangular(L,P@u,lower=True)
# Backward
z = solve_triangular(U,pp1,lower=False)
# Partial Problem 2 Solve Ay = b for y, so y = A^-1 * b
# Forward
pp2 = solve_triangular(L,P@b,lower=True)
# Backward
y = solve_triangular(U,pp2,lower=False)
# Plug-In and solve
vz = v.T@z
vz = vz[0]
vy = v.T@y
vy = vy[0]
calc = vy/(1-vz)
z = calc * z
px = y + z
end = time.time()
elapsed = (end -start) * 1000
perror = norm(px-x, np.inf)
res_part = norm(b-A@px, np.inf)
error_partial.append(perror)
res_partial.append(res_part)
cpu_times_partial.append(elapsed)
df = pd.DataFrame(data={'size':sizes, 'condition_number':condition_numbers, 'error_partial':error_partial, 'cpu_times_partial':cpu_times_partial, 'res_partial':res_partial})
return df
# Exersize 2
# df = run_exersize_2()
# df.to_csv('Exersize2.csv', index=False)
# cn = df['condition_number'].values
# cpu_part = df['cpu_times_partial'].values
# cpu_full = df['cpu_times_full'].values
# error_partial = df['error_partial'].values
# error_full = df['error_full'].values
# res_part = df['res_partial'].values
# res_full = df['res_full'].values
# plt.figure(figsize=(8,8))
# plt.title('Execution Time vs Condition Number')
# plt.plot(cn,cpu_part,'ro--',label='Partial Pivoting Execution Time (ms)')
# plt.plot(cn,cpu_full,'bo--',label='Full Pivoting Execution Time (ms)')
# plt.legend(loc=2)
# plt.savefig('Cpu_Time_vs_CN_2.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Error vs Condition Number')
# plt.plot(cn,error_partial,'ro--',label='Partial Pivoting Error')
# plt.plot(cn,error_full,'bo--',label='Full Pivoting Error')
# plt.legend(loc=2)
# plt.savefig('Error_vs_CN_2.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Residual vs Condition Number')
# plt.plot(cn,res_part,'ro--',label='Partial Pivoting Residual')
# plt.plot(cn,res_full,'bo--',label='Full Pivoting Residual')
# plt.legend(loc=2)
# plt.savefig('Residual_vs_CN_2.png')
# plt.show()
# plt.close()
# Exersize 4
# df = run_exersize_4()
# df.to_csv('Exersize4.csv', index=False)
# cn = df['condition_number'].values
# cpu_part = df['cpu_times_partial'].values
# cpu_full = df['cpu_times_full'].values
# error_partial = df['error_partial'].values
# error_full = df['error_full'].values
# res_part = df['res_partial'].values
# res_full = df['res_full'].values
# plt.figure(figsize=(8,8))
# plt.title('Execution Time vs Condition Number')
# plt.plot(cn,cpu_part,'ro--',label='Partial Pivoting Execution Time (ms)')
# plt.plot(cn,cpu_full,'bo--',label='Full Pivoting Execution Time (ms)')
# plt.legend(loc=2)
# plt.savefig('Cpu_Time_vs_CN_4.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Error vs Condition Number')
# plt.plot(cn,error_partial,'ro--',label='Partial Pivoting Error')
# plt.plot(cn,error_full,'bo--',label='Full Pivoting Error')
# plt.legend(loc=2)
# plt.savefig('Error_vs_CN_4.png')
# plt.show()
# plt.close()
# plt.figure(figsize=(8,8))
# plt.title('Residual vs Condition Number')
# plt.plot(cn,res_part,'ro--',label='Partial Pivoting Residual')
# plt.plot(cn,res_full,'bo--',label='Full Pivoting Residual')
# plt.legend(loc=2)
# plt.savefig('Residual_vs_CN_4.png')
# plt.show()
# plt.close()
# Exersize 5
df = run_exersize_5()
df.to_csv('Exersize5.csv', index=False)
cn = df['condition_number'].values
cpu_part = df['cpu_times_partial'].values
error_partial = df['error_partial'].values
res_part = df['res_partial'].values
plt.figure(figsize=(8,8))
plt.title('Execution Time vs Condition Number')
plt.plot(cn,cpu_part,'ro--',label='Partial Pivoting Execution Time (ms)')
plt.legend(loc=2)
plt.savefig('Cpu_Time_vs_CN_5.png')
plt.show()
plt.close()
plt.figure(figsize=(8,8))
plt.title('Error vs Condition Number')
plt.plot(cn,error_partial,'ro--',label='Partial Pivoting Error')
plt.legend(loc=2)
plt.savefig('Error_vs_CN_5.png')
plt.show()
plt.close()
plt.figure(figsize=(8,8))
plt.title('Residual vs Condition Number')
plt.plot(cn,res_part,'ro--',label='Partial Pivoting Residual')
plt.legend(loc=2)
plt.savefig('Residual_vs_CN_5.png')
plt.show()
plt.close()
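# Optional sanity check (not part of the original exercises): with partial
# pivoting the factors should satisfy L @ U == P @ A up to round-off, and with
# full pivoting L @ U == P @ A @ Q.
A_check = np.random.randn(6, 6)
Lp, Up, Pp, _ = LU_eliminator(mode='partial').perform_LU_analysis_partial(A=A_check)
assert np.allclose(Lp @ Up, Pp @ A_check)
Lf, Uf, Pf, Qf, _ = LU_eliminator(mode='full').perform_LU_analysis_full(A=A_check)
assert np.allclose(Lf @ Uf, Pf @ A_check @ Qf)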
|
StarcoderdataPython
|
335282
|
import os
import numpy as np
import scipy.io as scpio
import tofu as tf
_PATH_HERE = os.path.dirname(__file__)
_PATH_INPUTS = os.path.dirname(_PATH_HERE)
_PATH_SAVE = _PATH_INPUTS
# #############################################################################
# #############################################################################
# routines
# #############################################################################
def extract(save=False):
lf = [
ff for ff in os.listdir(_PATH_HERE)
if ff.endswith('.mat')
        and 'coordinates' in ff
and any([ss in ff for ss in ['vessel', 'limiter']])
and ff.startswith('COMPASS')
]
if len(lf) == 0:
return
# ----------------
# Extract all data
dout = {'Ves': {}, 'PFC': {}}
for ff in lf:
pfe = os.path.join(_PATH_HERE, ff)
out = scpio.loadmat(pfe)
if 'vessel' in ff:
kR, kZ = 'R1', 'Z1'
name = 'InnerV1'
else:
kR, kZ = 'R', 'Z'
name = 'V0'
R = out[kR].ravel()
Z = out[kZ].ravel()
dout['Ves'][name] = tf.geom.Ves(
Poly=np.array([R, Z]),
Name=name,
Exp='COMPASS',
SavePath=_PATH_SAVE,
)
# ---------------
# Derive PFCs
dind = {
'lower': {
'V0': np.arange(129, 194),
'InnerV1': np.arange(9, 20)[::-1],
},
'upper': {
'V0': np.arange(39, 61),
'InnerV1': np.arange(36, 46)[::-1],
},
'inner': {
'V0': np.arange(72, 119),
'InnerV1': np.r_[4, 3, 2, 1, 0, 51, 50],
},
'outer': {
'V0': np.r_[np.arange(197, 231), np.arange(0, 35)],
'InnerV1': np.arange(21, 33)[::-1],
},
}
for k0, v0 in dind.items():
poly = np.concatenate(
(
dout['Ves']['V0'].Poly[:, v0['V0']],
dout['Ves']['InnerV1'].Poly[:, v0['InnerV1']],
),
axis=1,
)
dout['PFC'][k0] = tf.geom.PFC(
Poly=poly,
Name=k0,
Exp='COMPASS',
SavePath=_PATH_SAVE,
)
# ---------------
# Format output
if save:
for cc in dout.keys():
for nn in dout[cc].keys():
dout[cc][nn].save_to_txt(path=None)
return dout
|
StarcoderdataPython
|
3394598
|
"""Calculate the area of a glyph."""
from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from fontTools.pens.basePen import BasePen
class AreaPen(BasePen):
def __init__(self, glyphset=None):
BasePen.__init__(self, glyphset)
self.value = 0
def _moveTo(self, p0):
"""Remember the first point in this contour, in case it's closed. Also
set the initial value for p0 in this contour, which will always refer to
the most recent point.
"""
self._p0 = self._startPoint = p0
def _lineTo(self, p1):
"""Add the signed area beneath the line from the latest point to this
one. Signed areas cancel each other based on the horizontal direction of
the line.
"""
x0, y0 = self._p0
x1, y1 = p1
self.value -= (x1 - x0) * (y1 + y0) * .5
self._p0 = p1
def _qCurveToOne(self, p1, p2):
"""Add the signed area of this quadratic curve.
https://github.com/Pomax/bezierinfo/issues/44
"""
p0 = self._p0
x0, y0 = p0[0], p0[1]
x1, y1 = p1[0] - x0, p1[1] - y0
x2, y2 = p2[0] - x0, p2[1] - y0
self.value -= (x2 * y1 - x1 * y2) / 3
self._lineTo(p2)
self._p0 = p2
def _curveToOne(self, p1, p2, p3):
"""Add the signed area of this cubic curve.
https://github.com/Pomax/bezierinfo/issues/44
"""
p0 = self._p0
x0, y0 = p0[0], p0[1]
x1, y1 = p1[0] - x0, p1[1] - y0
x2, y2 = p2[0] - x0, p2[1] - y0
x3, y3 = p3[0] - x0, p3[1] - y0
self.value -= (
x1 * ( - y2 - y3) +
x2 * (y1 - 2*y3) +
x3 * (y1 + 2*y2 )
) * 0.15
self._lineTo(p3)
self._p0 = p3
def _closePath(self):
"""Add the area beneath this contour's closing line."""
self._lineTo(self._startPoint)
del self._p0, self._startPoint
def _endPath(self):
"""Area is not defined for open contours.
Single-point open contours, which often represent anchors, are allowed.
"""
if self._p0 != self._startPoint:
raise NotImplementedError
del self._p0, self._startPoint
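# Minimal usage sketch (illustrative): trace a 10x10 counter-clockwise square
# through the public BasePen API and read back its signed area.
if __name__ == "__main__":
    pen = AreaPen()
    pen.moveTo((0, 0))
    pen.lineTo((10, 0))
    pen.lineTo((10, 10))
    pen.lineTo((0, 10))
    pen.closePath()
    print(pen.value)  # 100.0 (the sign flips for a clockwise contour)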
|
StarcoderdataPython
|
3303142
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for PyAuto functional tests.
Use the following in your scripts to run them standalone:
# This should be at the top
import pyauto_functional
if __name__ == '__main__':
pyauto_functional.Main()
This script can be used as an executable to fire off other scripts, similar
to unittest.py
python pyauto_functional.py test_script
"""
import os
import sys
def _LocatePyAutoDir():
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir, 'pyautolib'))
_LocatePyAutoDir()
try:
import pyauto
except ImportError:
print >>sys.stderr, 'Cannot import pyauto from %s' % sys.path
raise
class Main(pyauto.Main):
"""Main program for running PyAuto functional tests."""
def __init__(self):
# Make scripts in this dir importable
sys.path.append(os.path.dirname(__file__))
pyauto.Main.__init__(self)
def TestsDir(self):
return os.path.dirname(__file__)
if __name__ == '__main__':
Main()
|
StarcoderdataPython
|
5059261
|
<filename>deeprankcore/tools/pssm_3dcons_to_deeprank.py<gh_stars>0
import glob
import sys
def pssm_3dcons_to_deeprank(pssm_file):
# pssm = open(pssm_file, 'r').readlines()
with open(pssm_file, "r", encoding = "utf-8") as f:
pssm = f.readlines()
pssm_name = pssm_file.rsplit(".", 1)[0]
with open(f"{pssm_name}.deeprank.pssm", "w", encoding = "utf-8") as new_pssm:
firstline = True
for line in pssm:
if firstline is True:
firstline = False
new_pssm.write(
"pdbresi pdbresn seqresi seqresn A R N D C Q E\
G H I L K M F P S T W Y V IC\n"
)
if len(line.split()) == 44:
resid = line[0:6].strip()
resn = line[6]
pssm_content = line[11:90]
ic = line.split()[-1]
new_pssm.write(
"{0:>5} {1:1} {0:>5} {1:1} {2} {3}\n".format( # pylint: disable=consider-using-f-string
resid, resn, pssm_content, ic
)
)
if __name__ == "__main__":
if len(sys.argv) != 2:
print(
"""\n
This scripts converts the 3dcons pssm files into deeprank pssm format
Usage:
python 3dcons_to_deeprank_pssm.py [path_to_pssm]
"""
)
else:
try:
pssm_path = sys.argv[1]
for pssm_file in glob.glob(f"{pssm_path}/*.pssm"):
pssm_3dcons_to_deeprank(pssm_file)
except BaseException:
print("You must provide the path to the pssm files")
|
StarcoderdataPython
|
240506
|
<filename>certego_saas/settings.py
import os
import stripe
from django.conf import settings
from django.test.signals import setting_changed
from rest_framework.settings import APISettings
# placeholder for later
get_secret = os.environ.get
TEST_RUNNER = "tests.timed_runner.TimedRunner"
# stripe-python
STRIPE_LIVE_MODE = (
settings.PUBLIC_DEPLOYMENT and not settings.STAGE_CI and not settings.DEBUG
)
stripe.api_key = str(
get_secret("STRIPE_LIVE_SECRET_KEY", None)
if STRIPE_LIVE_MODE
else get_secret("STRIPE_TEST_SECRET_KEY", None)
)
USER_SETTINGS = getattr(settings, "CERTEGO_SAAS", None)
DEFAULTS = {
# app settings
"AUTH_TOKEN_COOKIE_NAME": "CERTEGO_SAAS_AUTH_TOKEN",
"AUTH_COOKIE_HTTPONLY": True,
"AUTH_COOKIE_SAMESITE": "Strict",
"AUTH_COOKIE_DOMAIN": None,
"FILTER_NOTIFICATIONS_VIEW_FOR_CURRENTAPP": True,
"USER_ACCESS_SERIALIZER": "certego_saas.user.serializers.UserAccessSerializer",
"ORGANIZATION_MAX_MEMBERS": 3,
# app info
"HOST_URI": settings.HOST_URI,
"HOST_NAME": settings.HOST_NAME,
# third party keys
"SLACK_TOKEN": get_secret("SLACK_TOKEN", None),
"SLACK_CHANNEL": get_secret("SLACK_CHANNEL", None),
"TWITTER_CONSUMER_KEY": get_secret("TWITTER_CONSUMER_KEY", None),
"TWITTER_CONSUMER_SECRET": get_secret("TWITTER_CONSUMER_SECRET", None),
"TWITTER_TOKEN_KEY": get_secret("TWITTER_TOKEN_KEY", None),
"TWITTER_TOKEN_SECRET": get_secret("TWITTER_TOKEN_SECRET", None),
"STRIPE_LIVE_MODE": STRIPE_LIVE_MODE,
"STRIPE_WEBHOOK_SIGNING_KEY": get_secret("STRIPE_WEBHOOK_SIGNING_KEY", None),
}
IMPORT_STRINGS = ["USER_ACCESS_SERIALIZER"]
certego_apps_settings = APISettings(USER_SETTINGS, DEFAULTS, IMPORT_STRINGS) # type: ignore
def _reload_settings(*args, **kwargs):
global certego_apps_settings
setting, value = kwargs["setting"], kwargs["value"]
if setting == "CERTEGO_SAAS":
certego_apps_settings = APISettings(value, DEFAULTS, IMPORT_STRINGS)
setting_changed.connect(_reload_settings)
|
StarcoderdataPython
|
4952769
|
# Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
def connect(self, root):
"""
:type root: TreeLinkNode
:rtype: nothing
"""
while root and root.left:
node = root
while node:
node.left.next = node.right
if node.next:
node.right.next = node.next.left
node = node.next
root = root.left
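# Minimal sketch (assumes the TreeLinkNode definition commented out above; shown
# as comments since the judge normally supplies the class): build a perfect tree
# of depth 2 and wire each level's next pointers.
#
#     root = TreeLinkNode(1)
#     root.left, root.right = TreeLinkNode(2), TreeLinkNode(3)
#     root.left.left, root.left.right = TreeLinkNode(4), TreeLinkNode(5)
#     root.right.left, root.right.right = TreeLinkNode(6), TreeLinkNode(7)
#     Solution().connect(root)
#     assert root.left.next is root.right              # 2 -> 3
#     assert root.left.right.next is root.right.left   # 5 -> 6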
|
StarcoderdataPython
|
6686489
|
import limix.modules.data as DATA
import limix.modules.genotype_reader as gr
import limix.modules.phenotype_reader as phr
import scipy as SP
file_name = './../../tutorials/data/smith_2008/smith08.hdf5'
geno_reader = gr.genotype_reader_tables(file_name)
pheno_reader = phr.pheno_reader_tables(file_name)
data = DATA.QTLData(geno_reader=geno_reader,pheno_reader=pheno_reader)
|
StarcoderdataPython
|
5185419
|
<filename>python/cudf/cudf/tests/test_cut.py<gh_stars>0
# Copyright (c) 2021, NVIDIA CORPORATION.
"""
Test related to Cut
"""
import pandas as pd
import numpy as np
from cudf.core.cut import cut
import pytest
from cudf.tests.utils import assert_eq
@pytest.mark.parametrize(
"x", [[1, 7, 5, 4, 6, 3], [1, 7], np.array([1, 7, 5, 4, 6, 3])]
)
@pytest.mark.parametrize("bins", [1, 2, 3])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize(
"ordered", [True]
) # if ordered is False we need labels
@pytest.mark.parametrize("precision", [1, 2, 3])
def test_cut_basic(x, bins, right, include_lowest, ordered, precision):
    # will test optional labels, retbins and duplicates separately
    # as they need more specific parameters to work
pcat = pd.cut(
x=x,
bins=bins,
right=right,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
pindex = pd.CategoricalIndex(pcat)
gindex = cut(
x=x,
bins=bins,
right=right,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
assert_eq(pindex, gindex)
@pytest.mark.parametrize("x", [[1, 7, 5, 4, 6, 3]])
@pytest.mark.parametrize("bins", [3]) # labels must be the same len as bins
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize(
"ordered", [True, False]
) # labels must be unique if ordered=True
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize(
"labels", [["bad", "medium", "good"], ["A", "B", "C"], [1, 2, 3], False]
)
def test_cut_labels(
x, bins, right, include_lowest, ordered, precision, labels
):
pcat = pd.cut(
x=x,
bins=bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
pindex = pd.CategoricalIndex(pcat) if labels else pcat
gindex = cut(
x=x,
bins=bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
assert_eq(pindex, gindex)
@pytest.mark.parametrize("x", [[1, 7, 5, 4, 6, 3]])
@pytest.mark.parametrize("bins", [3]) # labels must be the same len as bins
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize(
"ordered", [False]
) # labels must be unique if ordered=True
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize(
"labels", [["bad", "good", "good"], ["B", "A", "B"], [1, 2, 2], False]
)
def test_cut_labels_non_unique(
x, bins, right, include_lowest, ordered, precision, labels
):
pcat = pd.cut(
x=x,
bins=bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
pindex = pd.CategoricalIndex(pcat) if labels else pcat
gindex = cut(
x=x,
bins=bins,
right=right,
labels=labels,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
assert_eq(pindex, gindex)
@pytest.mark.parametrize(
"x",
[
[1, 7, 5, 4, 6, 3],
[1, 7],
np.array([1, 7, 5, 4, 6, 3]),
np.array([2, 4, 6, 8, 10]),
],
)
@pytest.mark.parametrize(
"bins", [1, 2, 3, [1, 2, 3], [0, 2, 4, 6, 10]],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("precision", [3])
def test_cut_right(x, bins, right, precision):
pcat = pd.cut(x=x, bins=bins, right=right, precision=precision,)
pindex = pd.CategoricalIndex(pcat)
gindex = cut(x=x, bins=bins, right=right, precision=precision,)
assert_eq(pindex, gindex)
@pytest.mark.parametrize(
"x",
[
[1, 7, 5, 4, 6, 3],
[1, 7],
np.array([1, 7, 5, 4, 6, 3]),
np.array([2, 4, 6, 8, 10]),
],
)
@pytest.mark.parametrize(
"bins", [[0, 2, 4, 6, 10, 10], [1, 2, 2, 3, 3]],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize("ordered", [True])
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize("duplicates", ["drop"])
def test_cut_drop_duplicates(
x, bins, right, precision, duplicates, ordered, include_lowest
):
pcat = pd.cut(
x=x,
bins=bins,
right=right,
precision=precision,
duplicates=duplicates,
include_lowest=include_lowest,
ordered=ordered,
)
pindex = pd.CategoricalIndex(pcat)
gindex = cut(
x=x,
bins=bins,
right=right,
precision=precision,
duplicates=duplicates,
include_lowest=include_lowest,
ordered=ordered,
)
assert_eq(pindex, gindex)
@pytest.mark.parametrize(
"x",
[
[1, 7, 5, 4, 6, 3],
[1, 7],
np.array([1, 7, 5, 4, 6, 3]),
np.array([2, 4, 6, 8, 10]),
],
)
@pytest.mark.parametrize(
"bins", [[0, 2, 4, 6, 10, 10], [1, 2, 2, 3, 3]],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize("ordered", [True])
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize("duplicates", ["raises"])
def test_cut_drop_duplicates_raises(
x, bins, right, precision, duplicates, ordered, include_lowest
):
with pytest.raises(ValueError) as excgd:
cut(
x=x,
bins=bins,
right=right,
precision=precision,
duplicates=duplicates,
include_lowest=include_lowest,
ordered=ordered,
)
with pytest.raises(ValueError) as excpd:
pd.cut(
x=x,
bins=bins,
right=right,
precision=precision,
duplicates=duplicates,
include_lowest=include_lowest,
ordered=ordered,
)
assert_eq(str(excgd.value), str(excpd.value))
@pytest.mark.parametrize(
"x",
[
[0, 0.5, 1.5, 2.5, 4.5],
[1, 7, 5, 4, 6, 3],
[1, 7],
np.array([1, 7, 5, 4, 6, 3]),
np.array([2, 4, 6, 8, 10]),
],
)
@pytest.mark.parametrize(
"bins", [pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])],
)
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("precision", [1, 2, 3])
@pytest.mark.parametrize("duplicates", ["drop", "raise"])
def test_cut_intervalindex_bin(x, bins, right, precision, duplicates):
pcat = pd.cut(
x=x,
bins=bins,
right=right,
precision=precision,
duplicates=duplicates,
)
pindex = pd.CategoricalIndex(pcat)
gindex = cut(
x=x,
bins=bins,
right=right,
precision=precision,
duplicates=duplicates,
)
assert_eq(pindex, gindex)
@pytest.mark.parametrize(
"x",
[pd.Series(np.array([2, 4, 6, 8, 10]), index=["a", "b", "c", "d", "e"])],
)
@pytest.mark.parametrize("bins", [1, 2, 3])
@pytest.mark.parametrize("right", [True, False])
@pytest.mark.parametrize("include_lowest", [True, False])
@pytest.mark.parametrize("ordered", [True])
@pytest.mark.parametrize("precision", [3])
def test_cut_series(x, bins, right, include_lowest, ordered, precision):
pcat = pd.cut(
x=x,
bins=bins,
right=right,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
gcat = cut(
x=x,
bins=bins,
right=right,
precision=precision,
include_lowest=include_lowest,
ordered=ordered,
)
assert_eq(pcat, gcat)
|
StarcoderdataPython
|
3481722
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import count
from datetime import datetime
from typing import Tuple, List, Union
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import logging
def send_request(url: str,
params: dict = None,
headers: dict = None,
auth: Tuple[str, str] = None,
max_retries: int = 100
) -> Union[List[dict], dict, None]:
"""Performs HTTP GET request with url, parameters, and headers.
Args:
url: A str of the HTTP endpoint.
params: A dict of HTTP request parameters.
headers: A dict of HTTP request headers.
auth: A tuple of username, token.
Returns:
The json_response can be either a dict or a list of dicts, depending on
the actual returned response. Or None, if an error occurred.
"""
try:
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s '
'[%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO)
session = requests.Session()
retry = Retry(total=max_retries, connect=max_retries,
backoff_factor=0.01,
status_forcelist=[429, 500, 502, 503, 504])
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
response = session.get(
url=url, params=params, headers=headers, auth=auth)
response.raise_for_status()
json_response = response.json()
return json_response
except requests.exceptions.HTTPError as http_error:
logging.error("Http Error:", http_error)
return None
except requests.exceptions.ConnectionError as connection_error:
logging.error("Error Connecting:", connection_error)
raise SystemExit(connection_error)
except requests.exceptions.Timeout as timeout_error:
logging.error("Timeout Error:", timeout_error)
raise SystemExit(timeout_error)
except requests.exceptions.RequestException as request_exception:
logging.error("Request Exception:", request_exception)
raise SystemExit(request_exception)
def send_request_all_pages(url: str,
headers: dict = None,
auth: Tuple[str, str] = None) -> List[dict]:
"""Performs HTTP requests to retrieve responses from all pages.
Args:
url: A str of the HTTP endpoint.
headers: A dict of HTTP request headers.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds a piece of information.
"""
results = []
for page in count(1):
query_parameters = {'page': page, 'per_page': 100}
json_response = send_request(url, query_parameters, headers, auth)
if not json_response:
break
results.extend(json_response)
return results
def get_all_repositories(
username: str, auth: Tuple[str, str] = None) -> List[str]:
"""Retrieves a complete list of repository names by username.
Args:
username: A str of owner login.
auth: A tuple of username, token.
Returns:
A complete list of repository names.
"""
repo_names = []
for page in count(1):
repo_names_by_page = get_repositories_by_page(page, username, auth)
if not repo_names_by_page:
break
repo_names.extend(repo_names_by_page)
return repo_names
def get_repositories_by_page(
page: int, username: str, auth: Tuple[str, str] = None) -> List[str]:
"""Retrieves a list of repository names on certain page.
Args:
page: An integer indicating which page to retrieve.
username: A str of owner login.
auth: A tuple of username, token.
Returns:
A list of repository names.
"""
url = "https://api.github.com/users/%s/repos" % username
query_parameters = {'page': page, 'per_page': 100}
json_response = send_request(url=url, params=query_parameters, auth=auth)
if not json_response:
return []
repo_names_by_page = []
for repo_info in json_response:
repo_name = repo_info['full_name']
repo_names_by_page.append(repo_name)
return repo_names_by_page
def save_repositories(username: str, repo_names: List[str]) -> None:
"""Saves the repository names to text file.
Args:
username: A str of owner login.
repo_names: A list of repository names.
    Returns:
        None.
    """
with open('./%s_repos.txt' % username, 'w') as file:
for repo in repo_names:
file.write(repo)
file.write('\n')
def get_all_pull_requests(repo_name: str,
start_date: str,
end_date: str,
state: str = 'closed',
auth: Tuple[str, str] = None) -> List[dict]:
"""Retrieves a complete list of pull requests information by repository
name.
Args:
repo_name: A str of repository name.
start_date: A str of earliest date to retrieve.
end_date: A str of latest date to retrieve.
state: A str of pull request state. Values can be 'open' or 'closed'.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds a pull request information.
"""
pull_requests = []
for page in count(1):
pull_requests_by_page = get_pull_requests_by_page(
page, repo_name, start_date, end_date, state, auth)
if pull_requests_by_page is None:
break
pull_requests.extend(pull_requests_by_page)
return pull_requests
def get_pull_requests_by_page(page: int,
repo_name: str,
start_date: str,
end_date: str,
state: str = 'closed',
auth: Tuple[str, str] = None
) -> Union[List[dict], None]:
"""Retrieves a list of pull requests information on a certain page.
Args:
page: An integer indicating which page to retrieve.
repo_name: A str of repository name.
start_date: A str of earliest date to retrieve.
end_date: A str of latest date to retrieve.
state: A str of pull request state. Values can be 'open' or 'closed'.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds a pull request information.
"""
url = "https://api.github.com/repos/%s/pulls" % repo_name
query_parameters = {'page': page, 'state': state, 'per_page': 100}
json_response = send_request(url=url, params=query_parameters, auth=auth)
if not json_response:
return None
pull_request_info_list = []
for pull_request_info in json_response:
closed_time = pull_request_info['closed_at']
merged_time = pull_request_info['merged_at']
if not merged_time:
continue
if to_timestamp(start_date) <= to_timestamp(closed_time) \
<= to_timestamp(end_date):
pull_request_info_list.append(pull_request_info)
return pull_request_info_list
def save_pull_requests(repo_name: str, pull_requests: List[dict]) -> None:
"""Saves a list of pull requests information to text file.
Args:
repo_name: A str of repository name.
pull_requests: A list of dicts. Each dict holds a pull request
information.
Returns:
None.
"""
with open('./%s_pull_requests.txt' % repo_name, 'w') as file:
for pull_request in pull_requests:
file.write(str(pull_request))
file.write('\n')
def get_pull_request_info(repo_name: str,
pull_request_number: int,
auth: Tuple[str, str] = None) -> Union[dict, None]:
"""Retrieves pull request information.
Retrieves pull request information of given repository name, pull request
id. Authentication is optional.
Args:
repo_name: A str of repository name.
pull_request_number: An integer of pull request id.
auth: A tuple of username, token.
Returns:
A dict of pull request information.
"""
url = "https://api.github.com/repos/%s/pulls/%s" % (
repo_name, pull_request_number)
return send_request(url=url, auth=auth)
def get_pull_request_review_comments(repo_name: str,
pull_request_number: int,
auth: Tuple[str, str] = None
) -> List[dict]:
"""Retrieves a list of pull request review comments.
Pull request review comments are comments on a portion of the unified diff
made during a pull request review.
Args:
repo_name: A str of repository name.
pull_request_number: An integer of pull request id.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds a pull request review comment
information.
"""
url = "https://api.github.com/repos/%s/pulls/%s/comments" % (
repo_name, pull_request_number)
return send_request_all_pages(url=url, auth=auth)
def get_pull_request_reviews(repo_name: str,
pull_request_number: int,
auth: Tuple[str, str] = None
) -> List[dict]:
"""Retrieves a list of pull request review information.
Pull Request Reviews are groups of Pull Request Review Comments on the Pull
Request, grouped together with a state and optional body comment.
Args:
repo_name: A str of repository name.
pull_request_number: An integer of pull request id.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds a pull request review information.
"""
url = "https://api.github.com/repos/%s/pulls/%s/reviews" % (
repo_name, pull_request_number)
return send_request_all_pages(url=url, auth=auth)
def get_pull_request_commits(repo_name: str,
pull_request_number: int,
auth: Tuple[str, str] = None
) -> List[dict]:
"""Retrieves a list of pull request commits information.
Args:
repo_name: A str of repository name.
pull_request_number: An integer of pull request id.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds a pull request commit information.
"""
url = "https://api.github.com/repos/%s/pulls/%s/commits" % (
repo_name, pull_request_number)
return send_request_all_pages(url=url, auth=auth)
def get_pull_request_files(repo_name: str,
pull_request_number: int,
auth: Tuple[str, str] = None
) -> List[dict]:
url = "https://api.github.com/repos/%s/pulls/%s/files" % (
repo_name, pull_request_number)
return send_request_all_pages(url=url, auth=auth)
def get_pull_request_issue_comments(repo_name: str,
pull_request_number: int,
auth: Tuple[str, str] = None
) -> List[dict]:
"""Retrieves a list of pull request issue comments information.
Args:
repo_name: A str of repository name.
pull_request_number: An integer of pull request id.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds a pull request issue comment
information.
"""
url = "https://api.github.com/repos/%s/issues/%s/comments" % (
repo_name, pull_request_number)
return send_request_all_pages(url=url, auth=auth)
def get_commit_info(repo_name: str,
commit_ref: str,
auth: Tuple[str, str] = None) -> Union[dict, None]:
"""Retrieves a commit information.
Args:
repo_name: A str of repository name.
commit_ref: A str of commit id.
auth: A tuple of username, token.
Returns:
A dict of commit information.
"""
url = "https://api.github.com/repos/%s/commits/%s" % (repo_name, commit_ref)
return send_request(url=url, auth=auth)
def get_commit_check_runs(repo_name: str,
commit_ref: str,
auth: Tuple[str, str] = None) -> Union[dict, None]:
"""Retrieves check run results for a commit.
Args:
repo_name: A str of repository name.
commit_ref: A str of commit id.
auth: A tuple of username, token.
Returns:
A dict of check run results.
"""
url = "https://api.github.com/repos/%s/commits/%s/check-runs" % (
repo_name, commit_ref)
headers = {'Accept': 'application/vnd.github.antiope-preview+json'}
return send_request(url=url, headers=headers, auth=auth)
def is_pull_request_merged(repo_name: str,
pull_request_number: int,
auth: Tuple[str, str] = None) -> bool:
"""Checks whether the pull request is merged.
Args:
repo_name: A str of repository name.
pull_request_number: An integer of pull request id.
auth: A tuple of username, token.
Returns:
A boolean indicating whether the pull request is merged.
"""
url = "https://api.github.com/repos/%s/pulls/%s/merge" % (
repo_name, pull_request_number)
    response = requests.get(url=url, auth=auth)
    return response.status_code == 204
def get_user_public_events(username: str,
auth: Tuple[str, str] = None
) -> List[dict]:
"""Retrieves the public events of a username login.
Args:
username: A str of username login.
auth: A tuple of username, token.
Returns:
A list of dicts. Each dict holds the past events for a user.
"""
url = "https://api.github.com/users/%s/events" % username
return send_request_all_pages(url=url, auth=auth)
def to_timestamp(time_str: str) -> float:
"""Converts ISO time str to timestamp.
Args:
time_str: A str of ISO time.
Returns:
A float of timestamp.
"""
return datetime.fromisoformat(time_str[:-1]).timestamp()
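# --- Illustrative usage sketch (not part of the original module) ---
# A minimal example of how the helpers above could be combined; the username,
# date range, and token below are placeholders, not real values.
def _example_fetch_merged_pull_requests():
    auth = ('octocat', '<personal-access-token>')
    repos = get_all_repositories('octocat', auth=auth)
    for repo_name in repos[:1]:
        pulls = get_all_pull_requests(repo_name,
                                      start_date='2020-01-01T00:00:00Z',
                                      end_date='2020-12-31T00:00:00Z',
                                      auth=auth)
        # repo_name has the form "owner/repo", so flatten the slash before
        # using it as part of a file name.
        save_pull_requests(repo_name.replace('/', '_'), pulls)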
|
StarcoderdataPython
|
3517641
|
"""
File: caesar.py
Name: <NAME>
------------------------------
This program demonstrates the idea of the Caesar cipher.
Users will be asked to input a number to produce a shifted
ALPHABET as the cipher table. After that, any string typed
in will be deciphered.
"""
# This constant shows the original order of alphabetic sequence
ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def main():
"""
First, user enter the secret number in order to know the new_alphabet string will move right how
many times.
Second, enter the ciphered string.
Third, print out the deciphered string.
"""
secret_number = input('Secret number: ')
ciphered_string = input('What\'s the ciphered string? ')
ciphered_string = ciphered_string.upper()
print('The deciphered string is: ' + deciphered(secret_number, ciphered_string))
def deciphered(secret_number, ciphered_string):
"""
First, we loop the alphabet and create the new alphabet_string by utilizing secret number.
Second, we find where the position number of each ciphered_string alphabet is in the new alphabet_string.
Third, we correspond the position number we found with the old alphabet_string position
number, and print out the alphabet of old alphabet_string position.
:param secret_number: int, know the new_alphabet string will move right how many times.
:param ciphered_string: string. The string you want to deciphered.
:return: ans: string. Deciphered ans.
"""
new_alphabet = ''
for i in range(len(ALPHABET)):
new_alphabet += ALPHABET[i - int(secret_number)]
ans = ''
for j in range(len(ciphered_string)):
alp = new_alphabet.find(ciphered_string[j])
if alp == -1:
ans += ciphered_string[j]
else:
ans += ALPHABET[alp]
return ans
##### DO NOT EDIT THE CODE BELOW THIS LINE #####
if __name__ == '__main__':
main()
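# Worked example (illustrative, not executed by the program): with secret
# number 3 the shifted table is new_alphabet = 'XYZABCDEFGHIJKLMNOPQRSTUVW'.
# Each ciphered letter is looked up in new_alphabet and replaced by the letter
# at the same position in ALPHABET, so:
#   deciphered('3', 'EB') -> 'HE'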
|
StarcoderdataPython
|
3304879
|
from pathlib import Path
root_dir = Path(__file__).parent.parent
schema_dir = root_dir / "schema"
vrs_yaml_path = schema_dir / "vrs.yaml"
vrs_json_path = schema_dir / "vrs.json"
|
StarcoderdataPython
|
11252164
|
import unittest
import ramda as R
from .helpers.Maybe import Just
"""
https://github.com/ramda/ramda/blob/master/test/union.js
"""
M = [1, 2, 3, 4]
N = [3, 4, 5, 6]
class TestUnion(unittest.TestCase):
def test_combines_two_lists_into_the_set_of_all_their_elements(self):
self.assertEqual([1, 2, 3, 4, 5, 6], R.union(M, N))
def test_has_R_equals_semantics(self):
        # TODO: ignore neg-zero and pos-zero check for now, for simplicity
self.assertEqual(1, len(R.union([float('nan')], [float('nan')])))
self.assertEqual(1, len(R.union([Just([42])], [Just([42])])))
if __name__ == '__main__':
unittest.main()
|
StarcoderdataPython
|
11236145
|
<filename>spdx/tv_to_rdf.py
#!/usr/bin/env python
# Copyright (C) 2017 BMW AG
# Author: <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import sys
from spdx.parsers.loggers import StandardLogger
from spdx.writers.rdf import write_document
from spdx.parsers.tagvalue import Parser
from spdx.parsers.tagvaluebuilders import Builder
def tv_to_rdf(infile_name, outfile_name):
"""
Convert a SPDX file from tag/value format to RDF format.
Return True on sucess, False otherwise.
"""
parser = Parser(Builder(), StandardLogger())
parser.build()
with open(infile_name) as infile:
data = infile.read()
document, error = parser.parse(data)
if not error:
with open(outfile_name, mode="w") as outfile:
write_document(document, outfile)
return True
else:
print("Errors encountered while parsing RDF file.")
messages = []
document.validate(messages)
print("\n".join(messages))
return False
def main():
args = sys.argv[1:]
if not args:
print(
"Usage: spdx-tv2rdf <tag-value-file> <rdf-file>\n"
"Convert an SPDX tag/value document to RDF."
)
sys.exit(1)
tvfile = args[0]
rdffile = args[1]
success = tv_to_rdf(tvfile, rdffile)
sys.exit(0 if success else 1)
if __name__ == "__main__":
main()
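# Example invocation (the file names are hypothetical):
#   spdx-tv2rdf document.spdx document.rdf.xml
# or, equivalently, from Python: tv_to_rdf('document.spdx', 'document.rdf.xml')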
|
StarcoderdataPython
|
4919605
|
<reponame>aengelke/z-plot<gh_stars>10-100
#! /usr/bin/env python
from zplot import *
# describe the drawing surface
import sys
ctype = 'eps' if len(sys.argv) < 2 else sys.argv[1]
c = canvas(ctype, title='example-multi', dimensions=[300,210])
t = table(file='example-multi.data')
t.addcolumns(columns=['ylower','yhigher'])
t.update(set='ylower=ylo-1')
t.update(set='yhigher=yhi+1')
# lines
d1 = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[10,10], dimensions=[60,40])
axis(drawable=d1, title='Lines', domajortics=False, dolabels=False)
p = plotter()
p.line(drawable=d1, table=t, xfield='x', yfield='y', linewidth=0.5)
# points
d23 = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[80,10], dimensions=[60,40])
axis(drawable=d23, title='Points', domajortics=False, dolabels=False)
p.points(drawable=d23, table=t, xfield='x', yfield='y', style='xline', linewidth=0.5)
# linespoints
d2 = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[150,10], dimensions=[60,40])
axis(drawable=d2, title='Lines + Points', domajortics=False, dolabels=False)
p.line(drawable=d2, table=t, xfield='x', yfield='y', linewidth=0.5)
p.points(drawable=d2, table=t, xfield='x', yfield='y', style='xline', linewidth=0.5)
# filled
d3 = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[220,10], dimensions=[60,40])
axis(drawable=d3, title='Filled', domajortics=False, dolabels=False)
p.verticalfill(drawable=d3, table=t, xfield='x', yfield='y')
p.line(drawable=d3, table=t, xfield='x', yfield='y', linewidth=0.5)
# error bars
da = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[10,80], dimensions=[60,40])
axis(drawable=da, title='Error Bars', domajortics=False, dolabels=False)
p.verticalintervals(drawable=da, table=t, xfield='x', ylofield='ylo', yhifield='yhi', devwidth=4, linewidth=0.5)
p.points(drawable=da, table=t, xfield='x', yfield='y', style='circle', linewidth=0.5, size=0.5)
# box plots
db = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[80,80], dimensions=[60,40])
axis(drawable=db, title='Box Plots', domajortics=False, dolabels=False)
p.verticalintervals(drawable=db, table=t, xfield='x', ylofield='ylower', yhifield='yhigher', devwidth=4, linewidth=0.5)
p.verticalbars(drawable=db, table=t, xfield='x', ylofield='ylo', yfield='yhi', fill=True, fillcolor='lightgrey', linewidth=0.5, barwidth=0.8)
p.points(drawable=db, table=t, xfield='x', yfield='y', style='circle', linewidth=0.5, size=0.5)
# hintervals
dc = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[150,80], dimensions=[60,40])
axis(drawable=dc, title='Intervals', domajortics=False, dolabels=False)
p.horizontalintervals(drawable=dc, table=t, yfield='x', xlofield='ylo', xhifield='yhi', linewidth=0.5, devwidth=4)
# functions
dd = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[220,80], dimensions=[60,40])
axis(drawable=dd, title='Functions', domajortics=False, dolabels=False)
p.function(drawable=dd, function=lambda x: x, xrange=[0,10], step=0.1, linewidth=0.5)
p.function(drawable=dd, function=lambda x: x * x, xrange=[0,3.16], step=0.01, linewidth=0.5)
p.function(drawable=dd, function=lambda x: 2 * x, xrange=[0,5], step=0.1, linewidth=0.5)
c.text(coord=dd.map([1.5,9]), text='y=x*x', size=6)
c.text(coord=dd.map([5.5,8]), text='y=x', size=6)
c.text(coord=dd.map([7.5,5]), text='y=2x', size=6)
# bars
d5 = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[10,150], dimensions=[60,40])
axis(drawable=d5, title='Vertical Bars', domajortics=False, dolabels=False)
p.verticalbars(drawable=d5, table=t, xfield='x', yfield='y', barwidth=0.8, fillcolor='lightgrey', linewidth=0, fill=True)
# stacked bars
d55 = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[80,150], dimensions=[60,40])
axis(drawable=d55, title='Stacked Bars', domajortics=False, dolabels=False)
p.verticalbars(drawable=d55, table=t, xfield='x', yfield='y', barwidth=0.8, fillcolor='lightgrey', linewidth=0, fill=True)
p.verticalbars(drawable=d55, table=t, xfield='x', yfield='ylo', barwidth=0.8, fillcolor='darkgray', linewidth=0, fill=True)
# bars
d6 = drawable(canvas=c, xrange=[0,11], yrange=[0,10], coord=[150,150], dimensions=[60,40])
axis(drawable=d6, title='Horizontal Bars', domajortics=False, dolabels=False)
p.horizontalbars(drawable=d6, table=t, xfield='x', yfield='y', barwidth=0.8, fillcolor='lightgrey', linewidth=0, fill=True)
# heat
#Table -table h -file "file.heat"
h = table(file='example-multi.heat')
d7 = drawable(canvas=c, xrange=[0,6], yrange=[0,6], coord=[220,150], dimensions=[60,40])
p.heat(drawable=d7, table=h, xfield='c0', yfield='c1', hfield='c2', divisor=4.0)
axis(drawable=d7, title='Heat', domajortics=False, dolabels=False)
# finally, output the graph to a file
c.render()
|
StarcoderdataPython
|
307409
|
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 2015-02-27 4174 nabowle Output full stacktrace.
# 2018-10-05 <EMAIL> Fix returned retVal encoding.
#
class SerializableExceptionWrapper(object):
def __init__(self):
self.stackTrace = None
self.message = None
self.exceptionClass = None
self.wrapper = None
def __str__(self):
return self.__repr__()
def __repr__(self):
if not self.message:
self.message = ''
retVal = "" + str(self.exceptionClass) + " exception thrown: " + str(self.message) + "\n"
for element in self.stackTrace:
retVal += "\tat " + str(element) + "\n"
if self.wrapper:
retVal += "Caused by: " + self.wrapper.__repr__()
return str(retVal)
def getStackTrace(self):
return self.stackTrace
def setStackTrace(self, stackTrace):
self.stackTrace = stackTrace
def getMessage(self):
return self.message
def setMessage(self, message):
self.message = message
def getExceptionClass(self):
return self.exceptionClass
def setExceptionClass(self, exceptionClass):
self.exceptionClass = exceptionClass
def getWrapper(self):
return self.wrapper
def setWrapper(self, wrapper):
self.wrapper = wrapper
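# Illustrative sketch (not part of the original module): populating a wrapper
# by hand and printing it; the field values are made up for demonstration.
def _example_wrapper():
    wrapper = SerializableExceptionWrapper()
    wrapper.setExceptionClass('java.lang.IllegalStateException')
    wrapper.setMessage('grid is locked')
    wrapper.setStackTrace(['GridManager.lock(GridManager.java:42)'])
    print(wrapper)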
|
StarcoderdataPython
|
9778182
|
<reponame>kcosta42/Multilayer_Perceptron
import libft.backend.math as M
from libft.initializers.initializer import Initializer
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)` where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`,
samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Arguments:
scale: float, Default: 1.0
Scaling factor.
mode: string, Default: "fan_in"
One of "fan_in", "fan_out", "fan_avg".
distribution: string, Default: "normal"
One of "normal", "uniform".
Random distribution to use.
seed: integer, Default: None
Used to seed the random generator.
Raises:
ValueError:
In case of an invalid value for the "scale", mode" or
"distribution" arguments.
References:
https://github.com/keras-team/keras/blob/master/keras/initializers.py
"""
def __init__(self,
scale=1.0,
mode='fan_in',
distribution='normal',
seed=None):
if scale <= 0.:
raise ValueError(f"`scale` must be a positive float. Got: {scale}")
mode = mode.lower()
if mode not in {'fan_in', 'fan_out', 'fan_avg'}:
raise ValueError("Invalid `mode` argument: "
"expected on of {'fan_in', 'fan_out', 'fan_avg'} "
f"but got {mode}")
distribution = distribution.lower()
if distribution not in {'normal', 'uniform'}:
raise ValueError("Invalid `distribution` argument: "
"expected one of {'normal', 'uniform'} "
f"but got {distribution}")
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
def __call__(self, shape, dtype=None):
fan_in, fan_out = shape[0], shape[1]
scale = self.scale
if self.mode == 'fan_in':
scale /= max(1., fan_in)
elif self.mode == 'fan_out':
scale /= max(1., fan_out)
else:
scale /= max(1., float(fan_in + fan_out) / 2)
if self.distribution == 'normal':
# 0.879... = scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
stddev = M.sqrt(scale) / .87962566103423978
x = M.random_uniform(shape, 0., stddev, seed=self.seed)
else:
limit = M.sqrt(3. * scale)
x = M.random_uniform(shape, -limit, limit, seed=self.seed)
if self.seed is not None:
self.seed += 1
return x
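# Illustrative usage sketch (assumes the libft backend imported above is
# available; the layer shape is arbitrary):
#   init = VarianceScaling(scale=2.0, mode='fan_in', seed=42)
#   weights = init((784, 128))   # shape is interpreted as (fan_in, fan_out)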
|
StarcoderdataPython
|
9760912
|
from units import brokers, sensors, data, ingestionClients
import sys
import argparse
import yaml
import os
# read yaml file and set config obj
def load_config(path):
config = None
with open(path, 'r') as config_file:
config = yaml.load(config_file,Loader=yaml.FullLoader)
return config
def main(path,dryrun):
config = load_config(path)
# generate docker compose sections
broker = brokers.provision(config)
sensor = sensors.provision(config)
ingestionClient = ingestionClients.provision(config)
# set data for sensors
data.provision(config)
    '''
    The assumption is to create a docker-compose file;
    one could also think about Kubernetes.
    '''
docker_compose = {}
docker_compose['version'] = '3'
docker_compose['services'] = {**broker, **sensor, **ingestionClient}
with open('docker-compose.yml', 'w') as outfile:
yaml.dump(docker_compose, outfile)
'''
No deployment in dryrun mode
'''
    if not dryrun:
'''
a simple way to start
'''
os.system('docker-compose up')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--file', help="the configuration file for provisioning")
parser.add_argument('--dryrun', help="dryrun mode",action='store_true')
parser.set_defaults(dryrun=False)
args = parser.parse_args()
main(args.file,args.dryrun)
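# Example invocation (the script and configuration file names are placeholders):
#   python provision.py --file testbed.yaml            # generate docker-compose.yml and start it
#   python provision.py --file testbed.yaml --dryrun   # only generate docker-compose.yml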
|
StarcoderdataPython
|
4898199
|
<filename>build/sphinx/mongoc_common.py
import os
needs_sphinx = '1.6'
author = 'MongoDB, Inc'
# -- Options for HTML output ----------------------------------------------
smart_quotes = False
html_show_sourcelink = False
# Note: http://www.sphinx-doc.org/en/1.5.1/config.html#confval-html_copy_source
# This will degrade the Javascript quicksearch if we ever use it.
html_copy_source = False
# -- Options for manual page output ---------------------------------------
# HACK: Just trick Sphinx's ManualPageBuilder into thinking there are pages
# configured - we'll do it dynamically in process_nodes.
man_pages = [True]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Sphinx customization ---------------------------------------
from docutils.nodes import title
# To publish HTML docs at GitHub Pages, create .nojekyll file. In Sphinx 1.4 we
# could use the githubpages extension, but old Ubuntu still has Sphinx 1.3.
def create_nojekyll(app, env):
if app.builder.format == 'html':
path = os.path.join(app.builder.outdir, '.nojekyll')
with open(path, 'wt') as f:
f.write('foo')
def add_ga_javascript(app, pagename, templatename, context, doctree):
if not app.env.config.analytics:
return
context['metatags'] = context.get('metatags', '') + """<script>
(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push(
{'gtm.start': new Date().getTime(),event:'gtm.js'}
);var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'//www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-JQHP');
</script>"""
def process_nodes(app, doctree):
if man_pages == [True]:
man_pages.pop()
env = app.env
metadata = env.metadata[env.docname]
# A page like installing.rst sets its name with ":man_page: bson_installing"
page_name = metadata.get('man_page')
if not page_name:
print('Not creating man page for %s' % env.docname)
return
page_title = find_node(doctree, title)
man_pages.append((env.docname, page_name, page_title.astext(), [author], 3))
def find_node(doctree, klass):
matches = doctree.traverse(lambda node: isinstance(node, klass))
if not matches:
raise IndexError("No %s in %s" % (klass, doctree))
return matches[0]
def mongoc_common_setup(app):
app.connect('doctree-read', process_nodes)
app.connect('env-updated', create_nojekyll)
app.connect('html-page-context', add_ga_javascript)
# Run sphinx-build -D analytics=1 to enable Google Analytics.
app.add_config_value('analytics', False, 'html')
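# Illustrative sketch: a project's conf.py would typically import this module
# and register the hooks from its setup() function (the import path below is
# an assumption, not defined here):
#   from mongoc_common import *
#   def setup(app):
#       mongoc_common_setup(app)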
|
StarcoderdataPython
|
4943055
|
<gh_stars>1-10
from src.main.beans.items.base_item import BaseItem
from src.main.managers.items.item_manager import ItemManager
class Shield(BaseItem):
CATEGORY_NAME = ItemManager.SHIELD_CATEGORY_NAME
_AVERAGE_DEF_BOOST = 5
_DEF_SPREAD = 1
def __init__(self, attack=None, defense=None, speed=None, spook_rate=None, spook_power=None):
BaseItem.__init__(self, attack, defense, speed, spook_rate, spook_power)
self.PRIMARY_VALUE = self.DEFENSE_BOOST
def get_formatted_name(self):
return "Shield (" + self._get_value_with_sign_prefix(self.DEFENSE_BOOST) + ")"
|
StarcoderdataPython
|
8052542
|
# pylint: disable=no-self-use,invalid-name
import numpy
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField, LabelField, ListField, IndexField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
class TestListField(AllenNlpTestCase):
def setUp(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("this", "words")
self.vocab.add_token_to_namespace("is", "words")
self.vocab.add_token_to_namespace("a", "words")
self.vocab.add_token_to_namespace("sentence", 'words')
self.vocab.add_token_to_namespace("s", 'characters')
self.vocab.add_token_to_namespace("e", 'characters')
self.vocab.add_token_to_namespace("n", 'characters')
self.vocab.add_token_to_namespace("t", 'characters')
self.vocab.add_token_to_namespace("c", 'characters')
for label in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k']:
self.vocab.add_token_to_namespace(label, 'labels')
self.word_indexer = {"words": SingleIdTokenIndexer("words")}
self.words_and_characters_indexers = {"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters",
min_padding_length=1)}
self.field1 = TextField([Token(t) for t in ["this", "is", "a", "sentence"]],
self.word_indexer)
self.field2 = TextField([Token(t) for t in ["this", "is", "a", "different", "sentence"]],
self.word_indexer)
self.field3 = TextField([Token(t) for t in ["this", "is", "another", "sentence"]],
self.word_indexer)
self.empty_text_field = self.field1.empty_field()
self.index_field = IndexField(1, self.field1)
self.empty_index_field = self.index_field.empty_field()
self.sequence_label_field = SequenceLabelField([1, 1, 0, 1], self.field1)
self.empty_sequence_label_field = self.sequence_label_field.empty_field()
super(TestListField, self).setUp()
def test_get_padding_lengths(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
lengths = list_field.get_padding_lengths()
assert lengths == {"num_fields": 3, "list_words_length": 5, "list_num_tokens": 5}
def test_list_field_can_handle_empty_text_fields(self):
list_field = ListField([self.field1, self.field2, self.empty_text_field])
list_field.index(self.vocab)
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(tensor_dict["words"].detach().cpu().numpy(),
numpy.array([[2, 3, 4, 5, 0],
[2, 3, 4, 1, 5],
[0, 0, 0, 0, 0]]))
def test_list_field_can_handle_empty_index_fields(self):
list_field = ListField([self.index_field, self.index_field, self.empty_index_field])
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(tensor.detach().cpu().numpy(), numpy.array([[1], [1], [-1]]))
def test_list_field_can_handle_empty_sequence_label_fields(self):
list_field = ListField([self.sequence_label_field,
self.sequence_label_field,
self.empty_sequence_label_field])
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(tensor.detach().cpu().numpy(),
numpy.array([[1, 1, 0, 1],
[1, 1, 0, 1],
[0, 0, 0, 0]]))
def test_all_fields_padded_to_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_almost_equal(tensor_dict["words"][0].detach().cpu().numpy(),
numpy.array([2, 3, 4, 5, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict["words"][1].detach().cpu().numpy(),
numpy.array([2, 3, 4, 1, 5]))
numpy.testing.assert_array_almost_equal(tensor_dict["words"][2].detach().cpu().numpy(),
numpy.array([2, 3, 1, 5, 0]))
def test_nested_list_fields_are_padded_correctly(self):
nested_field1 = ListField([LabelField(c) for c in ['a', 'b', 'c', 'd', 'e']])
nested_field2 = ListField([LabelField(c) for c in ['f', 'g', 'h', 'i', 'j', 'k']])
list_field = ListField([nested_field1.empty_field(), nested_field1, nested_field2])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
assert padding_lengths == {'num_fields': 3, 'list_num_fields': 6}
tensor = list_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_almost_equal(tensor, [[-1, -1, -1, -1, -1, -1],
[0, 1, 2, 3, 4, -1],
[5, 6, 7, 8, 9, 10]])
def test_fields_can_pad_to_greater_than_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
padding_lengths["list_words_length"] = 7
padding_lengths["num_fields"] = 5
tensor_dict = list_field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(tensor_dict["words"][0].detach().cpu().numpy(),
numpy.array([2, 3, 4, 5, 0, 0, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict["words"][1].detach().cpu().numpy(),
numpy.array([2, 3, 4, 1, 5, 0, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict["words"][2].detach().cpu().numpy(),
numpy.array([2, 3, 1, 5, 0, 0, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict["words"][3].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]))
numpy.testing.assert_array_almost_equal(tensor_dict["words"][4].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]))
def test_as_tensor_can_handle_multiple_token_indexers(self):
# pylint: disable=protected-access
self.field1._token_indexers = self.words_and_characters_indexers
self.field2._token_indexers = self.words_and_characters_indexers
self.field3._token_indexers = self.words_and_characters_indexers
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
tensor_dict = list_field.as_tensor(padding_lengths)
words = tensor_dict["words"].detach().cpu().numpy()
characters = tensor_dict["characters"].detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(words, numpy.array([[2, 3, 4, 5, 0],
[2, 3, 4, 1, 5],
[2, 3, 1, 5, 0]]))
numpy.testing.assert_array_almost_equal(characters[0], numpy.array([[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]))
numpy.testing.assert_array_almost_equal(characters[1], numpy.array([[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 3, 1, 3, 4, 5],
[2, 3, 4, 5, 3, 4, 6, 3, 0]]))
numpy.testing.assert_array_almost_equal(characters[2], numpy.array([[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 4, 1, 5, 1, 3, 1, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]))
def test_as_tensor_can_handle_multiple_token_indexers_and_empty_fields(self):
# pylint: disable=protected-access
self.field1._token_indexers = self.words_and_characters_indexers
self.field2._token_indexers = self.words_and_characters_indexers
self.field3._token_indexers = self.words_and_characters_indexers
list_field = ListField([self.field1.empty_field(), self.field1, self.field2])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
tensor_dict = list_field.as_tensor(padding_lengths)
words = tensor_dict["words"].detach().cpu().numpy()
characters = tensor_dict["characters"].detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(words, numpy.array([[0, 0, 0, 0, 0],
[2, 3, 4, 5, 0],
[2, 3, 4, 1, 5]]))
numpy.testing.assert_array_almost_equal(characters[0], numpy.zeros([5, 9]))
numpy.testing.assert_array_almost_equal(characters[1], numpy.array([[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]]))
numpy.testing.assert_array_almost_equal(characters[2], numpy.array([[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 3, 1, 3, 4, 5],
[2, 3, 4, 5, 3, 4, 6, 3, 0]]))
def test_printing_doesnt_crash(self):
list_field = ListField([self.field1, self.field2])
print(list_field)
def test_sequence_methods(self):
list_field = ListField([self.field1, self.field2, self.field3])
assert len(list_field) == 3
assert list_field[1] == self.field2
assert [f for f in list_field] == [self.field1, self.field2, self.field3]
|
StarcoderdataPython
|
6468405
|
from secrets import token_bytes
from ariadne import MutationType
from classes.user import User
MUTATION = MutationType()
@MUTATION.field("logout")
async def r_logout(user: User, *_, universal: bool = True) -> None:
if universal:
await user.update(key=token_bytes(32)).apply()
return None
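# Illustrative sketch (schema wiring is assumed, not shown here): once MUTATION
# is bound to an executable schema, a client could invoke this resolver with a
# GraphQL document such as:
#   mutation { logout(universal: false) }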
|
StarcoderdataPython
|
3338272
|
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class AnimespiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
    name = scrapy.Field()  # anime title
    follow_num = scrapy.Field()  # number of users following the anime
    pub_info = scrapy.Field()  # episode / publication info
    link = scrapy.Field()  # link to the anime
    info_link = scrapy.Field()  # link to the anime detail page
    play_num = scrapy.Field()  # play count
    fans_num = scrapy.Field()  # number of followers
    review_num = scrapy.Field()  # number of danmaku (bullet comments)
    start_time = scrapy.Field()  # air date
    score = scrapy.Field()  # rating score
    score_num = scrapy.Field()  # number of ratings
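# Illustrative usage sketch (values are placeholders): inside a spider's
# parse() callback, items are filled like dictionaries and then yielded.
def _example_item():
    item = AnimespiderItem()
    item['name'] = 'Some Anime'
    item['follow_num'] = '12345'
    item['link'] = 'https://example.com/anime/1'
    return item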
|
StarcoderdataPython
|
11202246
|
# Copyright (c) 2017-2019, <NAME>
# Copyright (c) 2014-2018, iocage
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted providing that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Clone and promote jails."""
import click
import libioc.errors
import libioc.ZFS
import libioc.Jail
from .shared.click import IocClickContext
__rootcmd__ = True
@click.command(name="promote")
@click.pass_context
@click.argument(
"jail",
nargs=1,
required=True
)
def cli(
ctx: IocClickContext,
jail: str
) -> None:
"""Clone and promote jails."""
logger = ctx.parent.logger
ioc_jail = libioc.Jail.JailGenerator(
dict(id=jail),
logger=logger,
zfs=ctx.parent.zfs,
host=ctx.parent.host
)
try:
ioc_jail.promote()
except libioc.errors.IocException:
exit(1)
|
StarcoderdataPython
|
1755400
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import path
from os.path import abspath
from os.path import join
from os import pardir
from cv2 import imshow
from cv2 import flip
from cv2 import cvtColor
from cv2 import COLOR_BGR2GRAY
path.append(abspath(join(__file__, pardir, pardir, 'src')))
from vcapture import vcap
def process(frm):
frame = flip(frm, 1)
gray = cvtColor(frame, COLOR_BGR2GRAY)
imshow('Live Capture', gray)
def main():
with vcap:
vcap.run(process)
if __name__ == '__main__':
main()
|
StarcoderdataPython
|
1649658
|
###########################################################################
#
## @file valet.py
#
###########################################################################
import os
from concurrent import futures
import grpc
from . import communicate_pb2_grpc
from . import communicate_pb2
from .server import Server
from .environment import Environment
from .intelligence import Intelligence
from .communicate import Communicate
from .database import Database
from ..canonical.configure import Configure
from ..io.cli import CommandLineInterface
#from utils.canonical.microservice import Microservice
###########################################################################
class Valet ( dict, ) :
"""#######################################################################
Valet is the main class of this application - which initializes all core
components of the bot, sets up the environment for running, and starts
execution.
#######################################################################
"""
#######################################################################
#
## Class level variables
#
#######################################################################
preinitialized = False
initialized = False
commandlineinterface = None
def __init__ ( self, **kwargs, ) :
"""#######################################################################
Initialize Valet object with base components necessary to exist - later,
each of the Valet cortices are called and initiated within their own
initializer
@valet / instance of valet itself to interact with main object
#######################################################################
"""
_ = self.__configure ( **kwargs, )
def initialize ( self, ) -> bool :
"""###################################################################
Initialize begins the boot process of Valet - this is separated from
__init__ to control what items Valet needs to exist as an object
(vars, log, config, etc.) and what Valet needs to exist as an AI.
A callable for Valet to initialize all its important components, modules,
micro services, etc. in order to operate. Here, the environment including
logs, variables, containers, etc. will be initialized and managed
Returns / bool > signifying completion
###################################################################
"""
self.log.info ( ".init [ starting... ] \\\\" )
###################################################################
#
## Hippocampus | Make a connection to the necessary databases to
# operate properly - and other model files for memory, here we
# need to open each memory file and attach to self for use in
# any additional processes
#
###################################################################
self.database = Database ( self, self.dbpath, )
###################################################################
#
## BrocosArea is responsible for handling transforming thoughts,
# concepts into output text. Setup the object wrapper for the
# user to communicate with valet and vice-versa
#
###################################################################
self.communicate = Communicate ( self, )
#self.communicate.start ( )
self.log.info ( ".init [ communicate _ brocasarea ]" )
###################################################################
#
## PreFrontalCortex | Initiatilize the Server Manager / Thread Manager
# that valet will utilize to manage related threads. Essentially this
# is Valet's core worker factory where threads are created, managed,
# stored, interacted-with, and retrieved from
#
###################################################################
#self.server = Server ( self, )
##self.server.start ( )
#self.log.info ( ".init [ server _ prefrontalcortex ]" )
###################################################################
#
## Instantiate the main intelligence object which does the core
# analysis between input text, learned and existing abilities,
# and lookup sequences for generating responses to user prompts.
#
# After instantiation, initiate a learning sweep of any source
# file within the intelligence directory not yet learned
#
###################################################################
#self.intelligence = Intelligence ( self, )
##self.intelligence.train ( )
#self.log.info ( ".init [ intelligence _ ]" )
###################################################################
#
## Start the grpc services instantiated and registered with the
# grpc server above
#
###################################################################
self.grpcserver.start()
self.grpcserver.wait_for_termination()
self.initialized = True
self.log.info ( ".init [ complete. ] \\\\" )
return True
def startinterfaces ( self, ) :
"""#######################################################################
Valet's main interface for the user, here the user can ask questions and
receive terminal responses from valet on a command line interface -
interactive console session.
SensoryCortex | Initialize the Interactive Command Prompt that
is Valet's backbone. All commands will bet sent to this CLI -
here all other input sensors are initialized.
#######################################################################
"""
if not self.commandlineinterface :
self.commandlineinterface = CommandLineInterface ( )
self.commandlineinterface.instantiate ( self, )
self.commandlineinterface.cmdloop ( )
def __configure ( self, **kwargs, ) -> bool :
"""#######################################################################
Perform any initial setup needed for Valet to operate prior to setting-up
the Valet components and microservices, copy kwargs to instance
Returns / bool > signifying completion
#######################################################################
"""
###################################################################
#
## Copy kwargs onto the class instance allowing valet to be
# populated with instance variables from passed kwargs
#
###################################################################
self.__dict__.update ( kwargs )
###################################################################
#
## Iterate the conf directory and load any conf/ini file to an
# accessible instance variable using configparser, all loaded
# configs get placed in self.configurations.pool[x]
#
###################################################################
self.ini = Configure ( '.conf', '.ini' )
self.dbpath = os.path.join ( self.ini.pool['db']['main']['locale'],
self.ini.pool['db']['main']['file'],
)
###################################################################
#
## Setup the Environment for valet to run including logging,
# any directories or temporary file architectures for runtime,
# any boottime-sanity checks
#
###################################################################
self.environment = Environment ( self, )
self.log = self.environment.setuplog ( )
###################################################################
#
## Initalize the google RPC (Protocol Buffer) server to host
# concurrent microservices within Valet, to be initialized
# during .initialize() on designated server & port
#
###################################################################
self.grpcserver = grpc.server( futures.ThreadPoolExecutor (
max_workers = int ( self.ini.pool['valet']['main']['workers'] )
)
)
self.grpcserver.add_insecure_port ('%s:%s' % (self.ini.pool['valet']['protobuf']['server'],
int ( self.ini.pool['valet']['protobuf']['port'] ) )
) #('[::]:50051')
###################################################################
#
## Override the __setattr__ method, telling valet to use the
# specified method when a instance property gets changed and
# valet.__setattr__ gets called - position at the end of setup
# to avoid being called for each of the configuration setup
# items here. We need this to be called during the cortex
# invocations
#
###################################################################
self.__class__.__setattr__ = self.__broadcast__
self.preinitialized = True
self.log.info ( ".pre_init [ complete. ] \\\\" )
def __broadcast__( self, item, value, ) -> bool :
"""###################################################################
Broadcast is called upon each Valet class attribute change to the
Valet.__dict__ object, specified in:
> self.__class__.__setattr__ = self.__broadcast__
Returns / bool > signifying completion
###################################################################
"""
self.__dict__ [ item ] = value
###################################################################
#
## For any assigned object attribute within Valet, check it's
# 'listening' property and update object.valet where true. This
# ensures all listening sub objects of Valet have up-to-date
# valet object references for work at all times
#
###################################################################
        for attr in self.__dict__.values ( ) :
            if getattr ( attr, 'listen', False ) and hasattr ( attr, 'valet', ) :
                attr.valet = self
return True
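# Illustrative usage sketch (configuration files, databases, and gRPC services
# are assumed to be in place; note that initialize() blocks on the gRPC server):
#   valet = Valet()
#   valet.initialize()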
|
StarcoderdataPython
|
5124035
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from addnn.controller.proto import controller_pb2 as addnn_dot_controller_dot_proto_dot_controller__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ControllerStub(object):
"""Missing associated documentation comment in .proto file."""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.RegisterNode = channel.unary_unary(
'/addnn.grpc.controller.Controller/RegisterNode',
request_serializer=addnn_dot_controller_dot_proto_dot_controller__pb2.RegisterNodeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.DeregisterNode = channel.unary_unary(
'/addnn.grpc.controller.Controller/DeregisterNode',
request_serializer=addnn_dot_controller_dot_proto_dot_controller__pb2.DeregisterNodeRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.UpdateNodeState = channel.unary_unary(
'/addnn.grpc.controller.Controller/UpdateNodeState',
request_serializer=addnn_dot_controller_dot_proto_dot_controller__pb2.UpdateNodeStateRequest.SerializeToString,
response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
)
self.ListNodes = channel.unary_unary(
'/addnn.grpc.controller.Controller/ListNodes',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=addnn_dot_controller_dot_proto_dot_controller__pb2.ListNodesResponse.FromString,
)
class ControllerServicer(object):
"""Missing associated documentation comment in .proto file."""
def RegisterNode(self, request, context):
"""Register a compute node at the controller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DeregisterNode(self, request, context):
"""Deregisters a compute node at the controller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UpdateNodeState(self, request, context):
"""Update the state of a registrered compute node at the controller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListNodes(self, request, context):
"""Lists all nodes that are currently registered at the controller.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ControllerServicer_to_server(servicer, server):
rpc_method_handlers = {
'RegisterNode': grpc.unary_unary_rpc_method_handler(
servicer.RegisterNode,
request_deserializer=addnn_dot_controller_dot_proto_dot_controller__pb2.RegisterNodeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'DeregisterNode': grpc.unary_unary_rpc_method_handler(
servicer.DeregisterNode,
request_deserializer=addnn_dot_controller_dot_proto_dot_controller__pb2.DeregisterNodeRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'UpdateNodeState': grpc.unary_unary_rpc_method_handler(
servicer.UpdateNodeState,
request_deserializer=addnn_dot_controller_dot_proto_dot_controller__pb2.UpdateNodeStateRequest.FromString,
response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
),
'ListNodes': grpc.unary_unary_rpc_method_handler(
servicer.ListNodes,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=addnn_dot_controller_dot_proto_dot_controller__pb2.ListNodesResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'addnn.grpc.controller.Controller', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Controller(object):
"""Missing associated documentation comment in .proto file."""
@staticmethod
def RegisterNode(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/addnn.grpc.controller.Controller/RegisterNode',
addnn_dot_controller_dot_proto_dot_controller__pb2.RegisterNodeRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DeregisterNode(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/addnn.grpc.controller.Controller/DeregisterNode',
addnn_dot_controller_dot_proto_dot_controller__pb2.DeregisterNodeRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def UpdateNodeState(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/addnn.grpc.controller.Controller/UpdateNodeState',
addnn_dot_controller_dot_proto_dot_controller__pb2.UpdateNodeStateRequest.SerializeToString,
google_dot_protobuf_dot_empty__pb2.Empty.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ListNodes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/addnn.grpc.controller.Controller/ListNodes',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
addnn_dot_controller_dot_proto_dot_controller__pb2.ListNodesResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
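# Illustrative usage sketch (the address is a placeholder): a client opens a
# channel, wraps it in the generated stub, and calls the RPCs defined above.
def _example_list_nodes():
    channel = grpc.insecure_channel('localhost:50051')
    stub = ControllerStub(channel)
    response = stub.ListNodes(google_dot_protobuf_dot_empty__pb2.Empty())
    return response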
|
StarcoderdataPython
|
4906034
|
<reponame>Eroica-cpp/LeetCode
#!/usr/bin/python
# ==============================================================================
# Author: <NAME> (<EMAIL>)
# Date: May 7, 2015
# Question: 075-Sort-Colors
# Link: https://leetcode.com/problems/sort-colors/
# ==============================================================================
# Given an array with n objects colored red, white or blue, sort them so that
# objects of the same color are adjacent, with the colors in the order red,
# white and blue.
# Here, we will use the integers 0, 1, and 2 to represent the color red, white,
# and blue respectively.
# Note:
# You are not supposed to use the library's sort function for this problem.
# ==============================================================================
# Method: Loop
# Time Complexity: O(N)
# Space Complexity: O(1)
# ==============================================================================
class Solution:
# @param {integer[]} nums
# @return {void} Do not return anything, modify nums in-place instead.
def sortColors(self, nums):
nums[:] = [0] * nums.count(0) + [1] * nums.count(1) + [2] * nums.count(2)
if __name__ == '__main__':
nums = [1,2,1,1,2,2,2,2,0,0]
Solution().sortColors(nums)
    print(nums)
|
StarcoderdataPython
|
12800341
|
<reponame>tiagosm1/Python_Nilo_Ney<filename>exercicios_resolvidos3/exercicios3/capitulo 09/exercicio-09-16.py
##############################################################################
# Part of the book Introdução à Programação com Python (Introduction to
# Programming with Python)
# Author: <NAME>
# Publisher Novatec (c) 2010-2020
# First edition - November/2010 - ISBN 978-85-7522-250-8
# Second edition - June/2014 - ISBN 978-85-7522-408-3
# Third edition - January/2019 - ISBN 978-85-7522-718-3
#
# Site: https://python.nilo.pro.br/
#
# File: exercicios3\capitulo 09\exercicio-09-16.py
##############################################################################
# Each record of the agenda is written on one line of the file.
# Fields are separated by the # symbol (hash sign).
# For example:
# Two entries, Nilo and João, are written on 2 lines of text.
# The entry name goes to the left of the # and the phone number to the right.
#
# Nilo#1234
# João#5678
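# A minimal parsing sketch for the format described above (the file name is
# hypothetical):
#   with open('agenda.txt', encoding='utf-8') as agenda:
#       for line in agenda:
#           name, phone = line.rstrip('\n').split('#')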
|
StarcoderdataPython
|
3555670
|
import re
from aocd import get_data
def part1(inp):
out = re.sub(r"!.", "", inp)
out = re.sub(r"<[^>]*>", "", out)
lvl = 1
res = 0
for c in out:
if c == '{':
res += lvl
lvl += 1
elif c == '}':
lvl -= 1
return res
def part2(inp):
out = re.sub(r"!.", "", inp)
out = re.findall(r"<[^>]*>", out)
return sum(len(a) - 2 for a in out)
if __name__ == '__main__':
data = get_data(day=9, year=2017)
inp = data
print(part1(inp))
print(part2(inp))
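# Worked examples from the puzzle description (illustrative):
#   part1('{{{}}}') == 6                  # nested groups score 1 + 2 + 3
#   part1('{{},{}}') == 5                 # 1 + 2 + 2
#   part2('<random characters>') == 17    # garbage length without the angle brackets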
|
StarcoderdataPython
|
11275858
|
# Copyright 2018 Inspur Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Share driver for Inspur AS13000
"""
import functools
import json
import re
import requests
import six
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from manila import exception
from manila.share import driver
from manila.share import utils as share_utils
LOG = logging.getLogger(__name__)
inspur_as13000_opts = [
cfg.HostAddressOpt(
'as13000_nas_ip',
help='As13000 IP address.'),
cfg.IntOpt(
'as13000_api_port',
default=8088,
help='The port that Driver used to send request to the backend.'),
cfg.StrOpt(
'as13000_nas_login',
help='as13000_nas_username'),
cfg.StrOpt(
'as13000_nas_password',
help='as13000_nas_password'),
cfg.ListOpt(
'inspur_as13000_share_pool',
default=['Pool0'],
help='The Storage Pool Manila use.'),
cfg.IntOpt(
'as13000_token_available_time',
default=3600,
help='The valid period of token.'),
cfg.DictOpt(
'directory_protection_info',
default={
'type': 0,
"dc": 2,
"cc": 1,
"rn": 0,
"st": 4},
help='The protection info of directory.')]
CONF = cfg.CONF
CONF.register_opts(inspur_as13000_opts)
def inspur_driver_debug_trace(f):
"""Log the method entrance and exit including active backend name.
    This should only be used on share driver class methods. It depends on
    having a 'self' argument that is an AS13000 driver instance.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
driver = args[0]
cls_name = driver.__class__.__name__
method_name = "%(cls_name)s.%(method)s" % {"cls_name": cls_name,
"method": f.__name__}
# backend_name = driver._update_volume_stats.get('volume_backend_name')
backend_name = driver.configuration.share_backend_name
LOG.debug("[%(backend_name)s] Enter %(method_name)s",
{"method_name": method_name, "backend_name": backend_name})
result = f(*args, **kwargs)
LOG.debug("[%(backend_name)s] Leave %(method_name)s",
{"method_name": method_name, "backend_name": backend_name})
return result
return wrapper
class RestAPIExecutor(object):
def __init__(self, hostname, port, username, password):
self._hostname = hostname
self._port = port
self._username = username
self._password = password
self._token_pool = []
self._token_size = 1
def logins(self):
"""login the AS13000 and store the token in token_pool"""
times = self._token_size
while times > 0:
token = self.login()
self._token_pool.append(token)
times = times - 1
        LOG.debug('Logged in to the AS13000.')
def login(self):
"""login in the AS13000 and return the token"""
method = 'security/token'
params = {'name': self._username, 'password': self._password}
token = self.send_rest_api(method=method, params=params,
request_type='post').get('token')
return token
def logout(self):
method = 'security/token'
self.send_rest_api(method=method, request_type='delete')
def refresh_token(self, force=False):
        if force is True:
            self._token_pool = []
            for i in range(self._token_size):
                token = self.login()
                self._token_pool.append(token)
else:
for i in range(self._token_size):
self.logout()
token = self.login()
self._token_pool.append(token)
LOG.debug('Tokens have been refreshed.')
def send_rest_api(self, method, params=None, request_type='post'):
attempts = 3
while attempts > 0:
attempts -= 1
try:
return self.send_api(method, params, request_type)
except exception.NetworkException as e:
LOG.error(six.text_type(e))
msge = str(six.text_type(e))
self.refresh_token(force=True)
time.sleep(1)
except exception.ShareBackendException as e:
msge = str(six.text_type(e))
break
msg = ('Error running RestAPI: /rest/%(method)s; '
'Error Message:%(msge)s; request_type: %(type)s'
% {'method': method, 'msge': msge, 'type': request_type})
LOG.error(msg)
raise exception.ShareBackendException(msg)
def send_api(self, method, params=None, request_type='post'):
if params is not None:
params = json.dumps(params)
url = ('http://%(hostname)s:%(port)s/%(rest)s/%(method)s'
% {'hostname': self._hostname,
'port': self._port,
'rest': 'rest',
'method': method})
        # the header is not needed when the driver logs in to the backend
        if method == 'security/token':
            # the token won't be returned to the token_pool
if request_type == 'delete':
header = {'X-Auth-Token': self._token_pool.pop(0)}
else:
header = None
else:
if len(self._token_pool) == 0:
self.logins()
token = self._token_pool.pop(0)
header = {'X-Auth-Token': token}
self._token_pool.append(token)
if request_type == 'post':
req = requests.post(url,
data=params,
headers=header)
elif request_type == 'get':
req = requests.get(url,
data=params,
headers=header)
elif request_type == 'put':
req = requests.put(url,
data=params,
headers=header)
elif request_type == 'delete':
req = requests.delete(url,
data=params,
headers=header)
else:
msg = 'Unsupported request_type: %s' % request_type
raise exception.ShareBackendException(msg)
if req.status_code != 200:
msg = 'Error code: %(code)s , API: %(api)s , Message: %(msg)s'\
% {'code': req.status_code, 'api': req.url, 'msg': req.text}
LOG.error(msg)
raise exception.NetworkException(msg)
try:
response = req.json()
code = response.get('code')
if code == 0:
if request_type == 'get':
data = response.get('data')
else:
if method == 'security/token':
data = response.get('data')
else:
data = response.get('message')
data = str(data).lower()
if hasattr(data, 'success'):
return
elif code == 301:
                msg = 'Token has expired.'
LOG.error(msg)
raise exception.NetworkException(msg)
else:
message = response.get('message') # response['message']
msg = ('The RestAPI exception output:'
'Message:%s, Code:%s' % (message, code))
LOG.error(msg)
raise exception.ShareBackendException(msg)
        except ValueError:
            msg = ('The response of RestAPI /rest/%(method)s is not valid '
                   'JSON: %(text)s' % {'method': method, 'text': req.text})
            LOG.error(msg)
            raise exception.ShareBackendException(msg)
        req.close()
        return data
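# A rough usage sketch of the executor above (hypothetical address and credentials,
# for illustration only):
#   rest = RestAPIExecutor('192.168.0.10', 8088, 'admin', 'secret')
#   rest.logins()
#   nodes = rest.send_rest_api(method='cluster/node', request_type='get')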
class AS13000ShareDriver(driver.ShareDriver):
"""AS13000 Share Driver
Version history:
V1.0.0: Initial version
V1.1.0: fix location problem and extend unit_convert
provide more exception info
V1.2.0 delete the shrink_share function,
fix the bugs caused by the as13000 quota module adjustment
"""
VENDOR = 'INSPUR'
VERSION = '1.2.0'
PROTOCOL = 'NFS_CIFS'
def __init__(self, *args, **kwargs):
super(AS13000ShareDriver, self).__init__(False, *args, **kwargs)
self.configuration.append_config_values(inspur_as13000_opts)
self.hostname = self.configuration.as13000_nas_ip
self.port = self.configuration.as13000_api_port
self.username = self.configuration.as13000_nas_login
        self.password = self.configuration.as13000_nas_password
self.token_available_time = (self.configuration.
as13000_token_available_time)
self.storage_pool = None
self.pools = ''
self._token_time = 0
self.ips = []
self._rest = RestAPIExecutor(self.hostname, self.port,
self.username, self.password)
@inspur_driver_debug_trace
def do_setup(self, context):
# get the RestAPIExecutor
# self._rest = RestAPIExecutor(self.hostname, self.port,
# self.username, self.password)
# get tokens for Driver
self._rest.logins()
self._token_time = time.time()
# get pools names from configuration
self.pools = self.configuration.inspur_as13000_share_pool
# Check the pool in conf exist in the backend
self._validate_pools_exist()
# get directory
self.storage_pool = self._get_storage_pool(self.pools[0])
# get all backend node ip
self.ips = self._get_nodes_ips()
@inspur_driver_debug_trace
def check_for_setup_error(self):
# check the required flags in conf
required_flags = [
'as13000_nas_ip',
'as13000_nas_login',
'as13000_nas_password',
'inspur_as13000_share_pool',
'directory_protection_info']
for flag in required_flags:
if not self.configuration.safe_get(flag):
msg = '%s is not set.' % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if self.storage_pool is None:
            msg = 'The storage pool status is abnormal.'
raise exception.ShareBackendException(msg)
if len(self.ips) == 0:
msg = 'All backend nodes status are down'
raise exception.ShareBackendException(msg)
@inspur_driver_debug_trace
def create_share(self, context, share, share_server=None):
"""Create a share."""
pool, share_name, share_size, share_proto = self._get_share_pnsp(share)
# 1.create directory first
share_path = self._create_directory(
share_name=share_name, pool_name=pool)
# 2.set the quota of directory
self._set_directory_quota(share_path, share_size)
# 3.create nfs/cifs share
if share_proto == 'nfs':
self._create_nfs_share(share_path=share_path)
elif share_proto == 'cifs':
self._create_cifs_share(share_name=share_name,
share_path=share_path)
else:
msg = 'Invalid NAS protocol supplied: %s.' % share_proto
LOG.error(msg)
raise exception.InvalidInput(msg)
locations = self._get_location_path(
share_name, share_path, share_proto)
LOG.debug('Create share: name:%(name)s'
                  ' protocol: %(proto)s, location: %(loc)s',
{'name': share_name, 'proto': share_proto, 'loc': locations})
return locations
@inspur_driver_debug_trace
def create_share_from_snapshot(self, context, share, snapshot,
share_server=None):
"""Create a share from snapshot."""
pool, share_name, share_size, share_proto = self._get_share_pnsp(share)
# 1.create directory first
share_path = self._create_directory(share_name=share_name,
pool_name=pool)
# from saturn quota must set when directory is empty
# 1.2 set the quota of directory
self._set_directory_quota(share_path, share_size)
# 2.clone snapshot to dest_path
self._clone_directory_to_dest(snapshot=snapshot, dest_path=share_path)
# 3.create share
if share_proto == 'nfs':
self._create_nfs_share(share_path=share_path)
elif share_proto == 'cifs':
self._create_cifs_share(share_name=share_name,
share_path=share_path)
else:
msg = 'Invalid NAS protocol supplied: %s.' % share_proto
LOG.error(msg)
raise exception.InvalidInput(msg)
locations = self._get_location_path(
share_name, share_path, share_proto)
LOG.debug(
'Create share from snapshot:'
            ' name: %(name)s protocol: %(proto)s, location: %(loc)s',
{'name': share_name, 'proto': share_proto, 'loc': locations})
return locations
@inspur_driver_debug_trace
def delete_share(self, context, share, share_server=None):
"""Delete share."""
pool, share_name, size, share_proto = self._get_share_pnsp(share)
share_path = r'/%s/%s' % (pool, share_name)
if share_proto == 'nfs':
share_backend = self._get_nfs_share(share_path)
if len(share_backend) == 0:
return
else:
self._delete_nfs_share(share_path)
elif share_proto == 'cifs':
share_backend = self._get_cifs_share(share_name)
if len(share_backend) == 0:
return
else:
self._delete_cifs_share(share_name)
else:
msg = 'Invalid NAS protocol supplied: %s.' % share_proto
LOG.error(msg)
raise exception.InvalidInput(msg)
self._delete_directory(share_path)
LOG.debug('Delete share: name:%s', share_name)
@inspur_driver_debug_trace
def extend_share(self, share, new_size, share_server=None):
"""extend share to new size"""
pool, name, size, proto = self._get_share_pnsp(share)
share_path = r'/%s/%s' % (pool, name)
self._set_directory_quota(share_path, new_size)
LOG.debug('extend share:%(name)s to new size %(size)s GB',
{'name': name, 'size': new_size})
# @inspur_driver_debug_trace
# def shrink_share(self, share, new_size, share_server=None):
# """shrink share to new size.
#
# Before shrinking, Driver will make sure
# the new size is larger the share already used
# """
# pool, name, size, proto = self._get_share_pnsp(share)
# share_path = r'/%s/%s' % (pool, name)
# current_quota, used_capacity = self._get_directory_quata(share_path)
# if new_size < used_capacity:
# msg = ('New size for shrink can not be less than used_capacity'
# ' on array. (used_capacity: %s, new: %s)).'
# % (used_capacity, new_size))
# LOG.error(msg)
# raise exception.ShareShrinkingError(
# share_id=share['share_id'], reason=msg)
# self._set_directory_quota(share_path, new_size)
# LOG.debug('shrink share:%(name)s to new size %(size)s GB',
# {'name': name, 'size': new_size})
@inspur_driver_debug_trace
def ensure_share(self, context, share, share_server=None):
"""Ensure that share is exported."""
pool, share_name, share_size, share_proto = self._get_share_pnsp(share)
share_path = '/%s/%s' % (pool, share_name)
if share_proto == 'nfs':
share_backend = self._get_nfs_share(share_path)
elif share_proto == 'cifs':
share_backend = self._get_cifs_share(share_name)
else:
msg = 'Invalid NAS protocol supplied: %s.' % share_proto
LOG.error(msg)
raise exception.InvalidInput(msg)
if len(share_backend) == 0:
raise exception.ShareResourceNotFound(share_id=share['share_id'])
else:
location = self._get_location_path(
share_name, share_path, share_proto)
return location
@inspur_driver_debug_trace
def create_snapshot(self, context, snapshot, share_server=None):
"""create snapshot of share"""
source_share = snapshot['share']
pool, source_name, size, proto = self._get_share_pnsp(source_share)
path = r'/%s/%s' % (pool, source_name)
# format the name of snapshot
snap_name = 'snap_%s' % snapshot['id']
snap_name = self._format_name(snap_name)
method = 'snapshot/directory'
request_type = 'post'
params = {'path': path, 'snapName': snap_name}
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
LOG.debug('Create snapshot %(snap)s of share %(share)s',
{'snap': snap_name, 'share': source_name})
@inspur_driver_debug_trace
def delete_snapshot(self, context, snapshot, share_server=None):
"""delete snapshot of snapshot"""
source_share = snapshot['share']
pool, source_name, size, proto = self._get_share_pnsp(source_share)
path = r'/%s/%s' % (pool, source_name)
        # if there are no snapshots in the backend, do nothing and return
snaps_backend = self._get_snapshots_from_share(path)
if len(snaps_backend) == 0:
return
# format the name of snapshot
snap_name = 'snap_%s' % snapshot['id']
snap_name = self._format_name(snap_name)
method = 'snapshot/directory?path=%s&snapName=%s' % (path, snap_name)
request_type = 'delete'
self._rest.send_rest_api(method=method, request_type=request_type)
        LOG.debug('Delete snapshot %(snap)s of share %(share)s',
{'snap': snap_name, 'share': source_name})
@inspur_driver_debug_trace
def update_access(self, context, share, access_rules, add_rules,
delete_rules, share_server=None):
"""update access of share"""
self._clear_access(share)
pool, share_name, size, proto = self._get_share_pnsp(share)
share_path = r'/%s/%s' % (pool, share_name)
access_clients = []
if proto == 'nfs':
client_type = 0
elif proto == 'cifs':
client_type = 1
for access in access_rules:
access_to = access['access_to']
access_level = access['access_level']
client = {
'name': access_to,
'type': client_type,
'authority': access_level}
access_clients.append(client)
method = 'file/share/%s' % proto
request_type = 'put'
params = {'addedClientList': access_clients,
'deletedClientList': [],
'editedClientList': []}
if proto == 'nfs':
share_backend = self._get_nfs_share(share_path)
params['path'] = share_path
params['pathAuthority'] = share_backend['pathAuthority']
elif proto == 'cifs':
params['path'] = share_path
params['name'] = share_name
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
LOG.debug('Update access of share name:'
' %(name)s, accesses: %(access)s',
{'name': share_name, 'access': access_rules})
@inspur_driver_debug_trace
def _update_share_stats(self, data=None):
"""update the backend stats including driver info and pools info"""
data = {}
backend_name = self.configuration.safe_get('share_backend_name')
data['vendor_name'] = self.VENDOR
data['driver_version'] = self.VERSION
data['storage_protocol'] = self.PROTOCOL
data['share_backend_name'] = backend_name
data['driver_handles_share_servers'] = False
data['snapshot_support'] = True
data['create_share_from_snapshot_support'] = True
pools = []
pools_in_conf = self.pools
for pool_b in pools_in_conf:
pool_stats = self._get_pools_stats(pool_b)
pools.append(pool_stats)
data['pools'] = pools
self._stats = data
        # The driver executes this method every minute, so once the elapsed
        # time since the last refresh exceeds token_available_time, the
        # driver refreshes the token.
time_difference = time.time() - self._token_time
if time_difference > self.token_available_time:
self._rest.refresh_token()
self._token_time = time.time()
LOG.debug('Token of Driver has been refreshed')
LOG.debug('Update share stats : %s', self._stats)
@inspur_driver_debug_trace
def _clear_access(self, share):
"""clear all access of share"""
pool, share_name, size, proto = self._get_share_pnsp(share)
share_path = r'/%s/%s' % (pool, share_name)
if proto == 'nfs':
share_backend = self._get_nfs_share(share_path)
client_list = share_backend['clientList']
elif proto == 'cifs':
share_backend = self._get_cifs_share(share_name)
client_list = share_backend['userList']
method = 'file/share/%s' % proto
request_type = 'put'
params = {'addedClientList': [],
'deletedClientList': client_list,
'editedClientList': []}
if proto == 'nfs':
params['path'] = share_path
params['pathAuthority'] = share_backend['pathAuthority']
elif proto == 'cifs':
params['path'] = share_path
params['name'] = share_name
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
LOG.debug('Clear all the access of share name:%s', share['name'],)
@inspur_driver_debug_trace
def _validate_pools_exist(self):
"""Check the pool in conf exist in the backend"""
pool_list = self._get_directory_list('/')
for pool in self.pools:
if pool not in pool_list:
msg = '%s is not exist in backend storage.' % pool
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
@inspur_driver_debug_trace
def _get_directory_quata(self, path):
"""get the quata of directory"""
method = 'file/quota/directory?path=/%s' % path
request_type = 'get'
data = self._rest.send_rest_api(method=method,
request_type=request_type)
quota = data.get('hardthreshold')
if quota is None:
            # the method '_update_share_stats' checks the quota of the pools.
            # To avoid returning None for the pool info, raise this exception
msg = (r'Quota of pool: /%s is not set, '
r'please set it in GUI of AS13000' % path)
LOG.error(msg)
raise exception.ShareBackendException(msg=msg)
else:
hardunit = data.get('hardunit')
used_capacity = data.get('capacity')
used_capacity = (str(used_capacity)).upper()
used_capacity = self._unit_convert(used_capacity)
if hardunit == 1:
quota = quota * 1024
total_capacity = int(quota)
used_capacity = int(used_capacity)
return total_capacity, used_capacity
@inspur_driver_debug_trace
def _get_pools_stats(self, path):
"""Get the stats of pools. Incloud capacity and other infomations."""
total_capacity, used_capacity = self._get_directory_quata(path)
free_capacity = total_capacity - used_capacity
pool = {}
pool['pool_name'] = path
pool['reserved_percentage'] = 0
pool['max_over_subscription_ratio'] = 20.0
pool['dedupe'] = False
pool['compression'] = False
pool['qos'] = False
pool['thin_provisioning'] = True
pool['total_capacity_gb'] = total_capacity
pool['free_capacity_gb'] = free_capacity
pool['allocated_capacity_gb'] = used_capacity
pool['snapshot_support'] = True
pool['create_share_from_snapshot_support'] = True
return pool
@inspur_driver_debug_trace
def _get_directory_list(self, path):
"""Get all the directory list of target path"""
method = 'file/directory?path=%s' % path
request_type = 'get'
directory_list = self._rest.send_rest_api(method=method,
request_type=request_type)
dir_list = []
for directory in directory_list:
dir_list.append(directory['name'])
return dir_list
@inspur_driver_debug_trace
def _create_directory(self, share_name, pool_name):
"""create a directory for share"""
authority_info = {"user": "root",
"group": "root",
"authority": "rwxrwxrwx"}
protection_info = self.configuration.directory_protection_info
if not protection_info:
msg = 'protection_info is not set!'
LOG.error(msg)
raise exception.ShareBackendException(msg)
protection_type = protection_info.get('type')
if protection_type == 0:
required_flags = ['type', 'dc', 'cc', 'rn', 'st']
for flag in required_flags:
if flag not in protection_info:
msg = '%s is not set.' % flag
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
if protection_type == 1:
required_flags = ['type', 'strategy']
for flag in required_flags:
if flag not in protection_info:
raise exception.InvalidInput(
reason='%s is not set.' % flag)
if protection_info['strategy'] not in [2, 3]:
msg = 'Directory Protection strategy is not 2 or 3.'
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
data_protection = protection_info
method = 'file/directory'
request_type = 'post'
params = {'name': share_name,
'parentPath': '/%s' % pool_name,
'authorityInfo': authority_info,
'dataProtection': data_protection,
'poolName': self.storage_pool}
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
return r'/%(pool)s/%(share)s' % {'pool': pool_name,
'share': share_name}
@inspur_driver_debug_trace
def _delete_directory(self, share_path):
"""delete the directory when delete share"""
method = 'file/directory?path=%s' % share_path
request_type = 'delete'
self._rest.send_rest_api(method=method, request_type=request_type)
@inspur_driver_debug_trace
def _set_directory_quota(self, share_path, quota):
"""set directory quata for share"""
method = 'file/quota/directory'
request_type = 'put'
params = {'path': share_path, 'hardthreshold': quota, 'hardunit': 2}
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
@inspur_driver_debug_trace
def _create_nfs_share(self, share_path):
"""create a NFS share"""
method = 'file/share/nfs'
request_type = 'post'
params = {'path': share_path, 'pathAuthority': 'rw', 'client': []}
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
@inspur_driver_debug_trace
def _delete_nfs_share(self, share_path):
"""Delete the NFS share"""
method = 'file/share/nfs?path=%s' % share_path
request_type = 'delete'
self._rest.send_rest_api(method=method, request_type=request_type)
@inspur_driver_debug_trace
def _get_nfs_share(self, share_path):
"""Get the nfs share in backend"""
method = 'file/share/nfs?path=%s' % share_path
request_type = 'get'
share_backend = self._rest.send_rest_api(method=method,
request_type=request_type)
return share_backend
@inspur_driver_debug_trace
def _create_cifs_share(self, share_name, share_path):
"""Create a CIFS share."""
method = 'file/share/cifs'
request_type = 'post'
params = {'path': share_path,
'name': share_name,
'userlist': []}
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
@inspur_driver_debug_trace
def _delete_cifs_share(self, share_name):
"""Delete the CIFS share."""
method = 'file/share/cifs?name=%s' % share_name
request_type = 'delete'
self._rest.send_rest_api(method=method, request_type=request_type)
@inspur_driver_debug_trace
def _get_cifs_share(self, share_name):
"""Get the CIFS share in backend"""
method = 'file/share/cifs?name=%s' % share_name
request_type = 'get'
share_backend = self._rest.send_rest_api(method=method,
request_type=request_type)
return share_backend
@inspur_driver_debug_trace
def _clone_directory_to_dest(self, snapshot, dest_path):
"""Clone the directory to the new directory"""
source_share = snapshot['share_instance']
pool = share_utils.extract_host(source_share['host'], level='pool')
# format the name of new share
source_name_row = 'share_%s' % snapshot['share_id']
source_name = self._format_name(source_name_row)
# format the name of snapshot
snap_name_row = 'snap_%s' % snapshot['snapshot_id']
snap_name = self._format_name(snap_name_row)
snap_path = '/%s/%s' % (pool, source_name)
method = 'snapshot/directory/clone'
request_type = 'post'
params = {'path': snap_path,
'snapName': snap_name,
'destPath': dest_path}
self._rest.send_rest_api(method=method,
params=params,
request_type=request_type)
LOG.debug(
'clone the directory:%(snap)s to the new directory: %(dest)s', {
'snap': snap_path, 'dest': dest_path})
@inspur_driver_debug_trace
def _get_snapshots_from_share(self, path):
"""get all the snapshot of share"""
method = 'snapshot/directory?path=%s' % path
request_type = 'get'
snaps = self._rest.send_rest_api(
method=method, request_type=request_type)
return snaps
@inspur_driver_debug_trace
def _get_location_path(self, share_name, share_phth, share_proto):
"""return all the location of all nodes"""
if share_proto == 'nfs':
location = [
{'path': r'%(ips)s:%(share_phth)s'
% {'ips': ip, 'share_phth': share_phth}
}
for ip in self.ips
]
elif share_proto == 'cifs':
location = [
{'path': r'\\%(ips)s\%(share_name)s' % {
'ips': ip,
'share_name': share_name}
}
for ip in self.ips
]
else:
msg = 'Invalid NAS protocol supplied: %s.' % share_proto
raise exception.InvalidInput(msg)
return location
@inspur_driver_debug_trace
def _get_nodes_ips(self):
"""Get the all nodes ip of backend """
method = 'cluster/node'
request_type = 'get'
cluster = self._rest.send_rest_api(method=method,
request_type=request_type)
ips = []
for node in cluster:
if node['runningStatus'] == 1 and node['healthStatus'] == 1:
ips.append(node['ip'])
return ips
@inspur_driver_debug_trace
def _get_share_pnsp(self, share):
"""Get pool, share_name, share_size, share_proto of share.
        AS13000 requires that names consist only of letters, numbers,
        and underscores, and begin with a letter.
        The length of a name must also be less than 32 characters.
        The driver uses the share ID as the name in the backend,
        prefixed with 'share_' and with '-' converted to '_'.
"""
pool = share_utils.extract_host(share['host'], level='pool')
share_name_row = 'share_%s' % share['share_id']
share_name = self._format_name(share_name_row)
share_size = share['size']
share_proto = share['share_proto'].lower()
return pool, share_name, share_size, share_proto
@inspur_driver_debug_trace
def _unit_convert(self, capacity):
"""Convert all units to GB"""
capacity = str(capacity)
capacity = capacity.upper()
try:
unit_of_used = re.findall(r'[A-Z]', capacity)
unit_of_used = ''.join(unit_of_used)
except BaseException:
unit_of_used = ''
        capacity = float(capacity.replace(unit_of_used, ''))
if unit_of_used in ['B', '']:
capacity = capacity / units.Gi
elif unit_of_used in ['K', 'KB']:
capacity = capacity / units.Mi
elif unit_of_used in ['M', 'MB']:
capacity = capacity / units.Ki
elif unit_of_used in ['G', 'GB']:
capacity = capacity
elif unit_of_used in ['T', 'TB']:
capacity = capacity * units.Ki
elif unit_of_used in ['E', 'EB']:
capacity = capacity * units.Mi
capacity = '%.0f' % capacity
return float(capacity)
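    # Illustrative values (derived from the conversion rules above, not from the
    # source): _unit_convert('10GB') -> 10.0, _unit_convert('2TB') -> 2048.0,
    # and _unit_convert('100MB') -> 0.0, because the final '%.0f' formatting
    # rounds sub-GB amounts to the nearest whole GB.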
@inspur_driver_debug_trace
def _format_name(self, name):
"""format name to meet the backend requirements"""
name = name[0:32]
name = name.replace('-', '_')
return name
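    # Illustration (hypothetical share ID, not taken from the source): the caller
    # builds 'share_' + share_id, so an ID of '9fe3a1b2-7c4d-4e5f-9a0b-1c2d3e4f5a6b'
    # becomes 'share_9fe3a1b2_7c4d_4e5f_9a0b_1c' after truncation to 32 characters
    # and replacement of '-' with '_'.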
@inspur_driver_debug_trace
def _get_storage_pool(self, directory):
method = 'file/directory/detail?path=/%s' % directory
request_type = 'get'
path_detail = self._rest.send_rest_api(method=method,
request_type=request_type)
storage_pool = path_detail[0]['poolName']
return storage_pool
|
StarcoderdataPython
|
3462428
|
from isitfit.cost.ec2_analyze import Ec2Iterator
from isitfit.utils import logger
import pandas as pd
from tabulate import tabulate
import tempfile
import csv
from collections import OrderedDict
# https://pypi.org/project/termcolor/
from termcolor import colored
def df2tabulate(df):
return tabulate(df.set_index('instance_id'), headers='keys', tablefmt='psql')
#---------------------------
def class2recommendedCore(r):
o = { 'recommended_type': None,
'savings': None
}
if r.classification_1=='Underused':
# FIXME classification 2 will contain if it's a burstable workload or lambda-convertible
# that would mean that the instance is downsizable twice, so maybe need to return r.type_smaller2x
# FIXME add savings from the twice downsizing in class2recommendedType if it's a burstable workload or lambda-convertible,
# then calculate the cost from lambda functions and add it as overhead here
o = { 'recommended_type': r.type_smaller,
'savings': r.cost_3m_smaller-r.cost_3m
}
if r.classification_1=='Idle':
# Maybe idle servers should be recommended to "stop"
o = { 'recommended_type': r.type_smaller,
'savings': r.cost_3m_smaller-r.cost_3m
}
if r.classification_1=='Overused':
# This is costing more
o = {'recommended_type': r.type_larger,
'savings': r.cost_3m_larger-r.cost_3m
}
if r.classification_1=='Normal':
    # Normal: check whether a same-spec but cheaper instance type is available
if 'type_cheaper' in r.index:
o = {'recommended_type': r.type_cheaper,
'savings': r.cost_3m_cheaper - r.cost_3m
}
return o
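# Illustrative input (hypothetical numbers, not from the source): for a row with
# classification_1 == 'Underused', type_smaller == 't3.small', cost_3m == 100 and
# cost_3m_smaller == 50, the function returns
# {'recommended_type': 't3.small', 'savings': -50}; a negative value means money
# saved by downsizing.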
#---------------------------------
def ec2obj_to_name(ec2_obj):
if ec2_obj.tags is None:
return None
ec2_name = [x for x in ec2_obj.tags if x['Key']=='Name']
if len(ec2_name)==0:
return None
return ec2_name[0]['Value']
from isitfit.utils import taglist2str
class CalculatorOptimizeEc2:
def __init__(self, n, thresholds = None):
self.n = n
if thresholds is None:
thresholds = {
'rightsize': {'idle': 3, 'low': 30, 'high': 70},
'burst': {'low': 20, 'high': 80}
}
# iterate over all ec2 instances
self.thresholds = thresholds
self.ec2_classes = []
# for csv streaming
self.csv_fn_intermediate = None
self.csv_fh = None
self.csv_writer = None
def __exit__(self):
self.csv_fh.close()
def _xxx_to_classification(self, xxx_maxmax, xxx_maxavg, xxx_avgmax):
# check if good to convert to burstable or lambda
# i.e. daily data shows few large spikes
thres = self.thresholds['burst']
if xxx_maxmax >= thres['high'] and xxx_avgmax <= thres['low'] and xxx_maxavg <= thres['low']:
return 'Underused', 'Burstable daily'
# check rightsizing
# i.e. no special spikes in daily data
# FIXME: can check hourly data for higher precision here
thres = self.thresholds['rightsize']
if xxx_maxmax <= thres['idle']:
return 'Idle', None
elif xxx_maxmax <= thres['low']:
return 'Underused', None
elif xxx_maxmax >= thres['high'] and xxx_avgmax >= thres['high'] and xxx_maxavg >= thres['high']:
return 'Overused', None
elif xxx_maxmax >= thres['high'] and xxx_avgmax >= thres['high'] and xxx_maxavg <= thres['low']:
return 'Underused', 'Burstable intraday'
return 'Normal', None
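  # Worked example (hypothetical numbers, using the default thresholds above):
  # maxmax=90, maxavg=10, avgmax=15 passes the burst test (90 >= 80, 15 <= 20,
  # 10 <= 20), giving ('Underused', 'Burstable daily'); maxmax=2 falls under the
  # idle threshold of 3 and would yield ('Idle', None).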
def _ec2df_to_classification(self, ec2_df):
# data is daily, so if less than 7 days, just return "Not enough data"
if ec2_df.shape[0] < 7:
return "Not enough data", "%i day(s) available. Minimum is 7 days."%ec2_df.shape[0]
cpu_maxmax = ec2_df.cpu_used_max.max()
cpu_maxavg = ec2_df.cpu_used_avg.max()
cpu_avgmax = ec2_df.cpu_used_max.mean()
cpu_c1, cpu_c2 = self._xxx_to_classification(cpu_maxmax, cpu_maxavg, cpu_avgmax)
#print("ec2_df.{maxmax,avgmax,maxavg} = ", maxmax, avgmax, maxavg)
if pd.isnull(ec2_df.ram_used_max).all():
cpu_c2 = ["No ram", cpu_c2]
cpu_c2 = [x for x in cpu_c2 if x is not None]
cpu_c2 = ", ".join(cpu_c2)
return cpu_c1, cpu_c2
# continue with cpu + ram data
ram_maxmax = ec2_df['ram_used_max'].fillna(value=0).max()
ram_maxavg = ec2_df['ram_used_max'].fillna(value=0).mean()
ram_avgmax = ec2_df['ram_used_avg'].fillna(value=0).max()
ram_c1, ram_c2 = self._xxx_to_classification(ram_maxmax, ram_maxavg, ram_avgmax)
# consolidate ram with cpu
out_c2 = ["CPU+RAM",
"CPU: %s"%(cpu_c2 or "None"),
"RAM: %s"%(ram_c2 or "None")
]
out_c2 = ", ".join([x for x in out_c2 if x is not None])
if cpu_c1=='Overused' or ram_c1=='Overused':
return 'Overused', out_c2
# if cpu_c1=='Overused' or ram_c1=='Overused':
# if (cpu_c1=='Overused' and ram_c1=='Overused':):
# return 'Overused (CPU+RAM bound)', out_c2
# elif cpu_c1=='Overused':
# return 'Overused (CPU-bound)', out_c2
# else:
# return 'Overused (RAM-bound)', out_c2
if cpu_c1=='Normal' or ram_c1=='Normal':
return 'Normal', out_c2
return 'Underused', out_c2
def handle_pre(self, context_pre):
# a csv file handle to which to stream results
from isitfit.dotMan import DotMan
self.csv_fn_intermediate = tempfile.NamedTemporaryFile(prefix='isitfit-intermediate-', suffix='.csv', delete=False, dir=DotMan().tempdir())
import click
click.echo(colored("Intermediate results will be streamed to %s"%self.csv_fn_intermediate.name, "cyan"))
self.csv_fh = open(self.csv_fn_intermediate.name, 'w')
self.csv_writer = csv.writer(self.csv_fh)
# done
return context_pre
def per_ec2(self, context_ec2):
# parse out context keys
ec2_obj, ec2_df, mm = context_ec2['ec2_obj'], context_ec2['ec2_df'], context_ec2['mainManager']
# filter ec2_df for the part matching the latest ec2 size only
from isitfit.utils import pd_subset_latest
ec2_df = ec2_df.copy()
ec2_df = pd_subset_latest(ec2_df, 'instanceType', 'Timestamp')
#print(ec2_obj.instance_id)
ec2_c1, ec2_c2 = self._ec2df_to_classification(ec2_df)
ec2_name = ec2obj_to_name(ec2_obj)
taglist = ec2_obj.tags
# Reported in github issue 8: NoneType object is not iterable
# https://github.com/autofitcloud/isitfit/issues/8
if taglist is None:
taglist = []
taglist = taglist2str(taglist, context_ec2['filter_tags'])
ec2_res = OrderedDict()
ec2_res['region'] = ec2_obj.region_name
ec2_res['instance_id'] = ec2_obj.instance_id
ec2_res['instance_type'] = ec2_obj.instance_type
ec2_res['classification_1'] = ec2_c1
ec2_res['classification_2'] = ec2_c2
ec2_res['tags'] = taglist
# write csv header
if len(self.ec2_classes)==0:
self.csv_writer.writerow(['name'] + [k for k,v in ec2_res.items() if k!='tags'])# save header
# save intermediate result to csv file
# Try to stick to 1 row per instance
# Drop the tags because they're too much to include
csv_row = [ec2_name] + [v.replace("\n", ";") for k,v in ec2_res.items() if k!='tags']
self.csv_writer.writerow(csv_row)
# gathering results
self.ec2_classes.append(ec2_res)
# check if should return early
if self.n!=-1:
sub_underused = [x for x in self.ec2_classes if x['classification_1']=='Underused']
if len(sub_underused) >= self.n:
# break early
from isitfit.utils import IsitfitCliRunnerBreakIterator
raise IsitfitCliRunnerBreakIterator
# done
return context_ec2
from isitfit.cost.base_reporter import ReporterBase
class ReporterOptimizeEc2(ReporterBase):
def __init__(self):
# for final csv file
# DEPRECATED # self.csv_fn_final = None
# members that will contain the results of the optimization
self.df_sort = None
self.sum_val = None
def postprocess(self, context_all):
# unpack
self.analyzer = context_all['analyzer']
self.df_cat = context_all['df_cat']
# process
self._after_all()
# DEPRECATED # self._storecsv_all()
# save to context for aggregator
context_all['df_sort'] = self.df_sort
context_all['sum_val'] = self.sum_val
# DEPRECATED # context_all['csv_fn_final'] = self.csv_fn_final
# done
return context_all
def _after_all(self):
df_all = pd.DataFrame(self.analyzer.ec2_classes)
# if no data
if df_all.shape[0]==0:
self.df_sort = None
self.sum_val = None
return
# merge current type hourly cost
map_cost = self.df_cat[['API Name', 'cost_hourly']]
df_all = df_all.merge(map_cost, left_on='instance_type', right_on='API Name', how='left').drop(['API Name'], axis=1)
# merge the next-smaller instance type from the catalog for instances classified as Underused
map_smaller = self.df_cat[['API Name', 'type_smaller', 'Linux On Demand cost_smaller']].rename(columns={'Linux On Demand cost_smaller': 'cost_hourly_smaller'})
df_all = df_all.merge(map_smaller, left_on='instance_type', right_on='API Name', how='left').drop(['API Name'], axis=1)
# merge next-larger instance type
map_larger = self.df_cat[['API Name', 'type_smaller', 'cost_hourly']].rename(columns={'type_smaller': 'API Name', 'API Name': 'type_larger', 'cost_hourly': 'cost_hourly_larger'})
df_all = df_all.merge(map_larger, left_on='instance_type', right_on='API Name', how='left').drop(['API Name'], axis=1)
# merge same-specs, cheaper
if 'type_same_cheaper' in self.df_cat.columns:
map_cheaper = self.df_cat[['API Name', 'type_same_cheaper', 'Linux On Demand cost_same_cheaper']].rename(columns={'Linux On Demand cost_same_cheaper': 'cost_hourly_cheaper', 'type_same_cheaper': 'type_cheaper'})
df_all = df_all.merge(map_cheaper, left_on='instance_type', right_on='API Name', how='left').drop(['API Name'], axis=1)
# convert from hourly to 3-months
for fx1, fx2 in [('cost_3m', 'cost_hourly'), ('cost_3m_smaller', 'cost_hourly_smaller'), ('cost_3m_larger', 'cost_hourly_larger'), ('cost_3m_cheaper', 'cost_hourly_cheaper')]:
if not fx2 in df_all.columns:
continue
df_all[fx1] = df_all[fx2] * 24 * 30 * 3
df_all[fx1] = df_all[fx1].fillna(value=0).astype(int)
# imply a recommended type
df_rec = df_all.apply(class2recommendedCore, axis=1).apply(pd.Series)
df_all['recommended_type'], df_all['savings'] = df_rec['recommended_type'], df_rec['savings']
df_all['savings'] = df_all.savings.fillna(value=0).astype(int)
# keep a subset of columns
df_all = df_all[['region', 'instance_id', 'instance_type', 'classification_1', 'classification_2', 'cost_3m', 'recommended_type', 'savings', 'tags']]
# display
#df_all = df_all.set_index('classification_1')
#for v in ['Idle', 'Underused', 'Overused', 'Normal']:
# logger.info("\nInstance classification_1: %s"%v)
# if v not in df_all.index:
# logger.info("None")
# else:
# logger.info(df_all.loc[[v]]) # use double brackets to maintain single-row dataframes https://stackoverflow.com/a/45990057/4126114
#
# logger.info("\n")
# main results
self.df_sort = df_all.sort_values(['savings'], ascending=True)
self.sum_val = df_all.savings.sum()
# DEPRECATED
# def _storecsv_all(self, *args, **kwargs):
# if self.df_sort is None:
# return
#
# import tempfile
# from isitfit.dotMan import DotMan
# with tempfile.NamedTemporaryFile(prefix='isitfit-full-ec2-', suffix='.csv', delete=False, dir=DotMan().tempdir()) as csv_fh_final:
# self.csv_fn_final = csv_fh_final.name
# logger.debug(colored("Saving final results to %s"%csv_fh_final.name, "cyan"))
# self.df_sort.to_csv(csv_fh_final.name, index=False)
# logger.debug(colored("Save complete", "cyan"))
# DEPRECATED
# def display(self, context_all):
# if self.df_sort is None:
# logger.info(colored("No EC2 instances found", "red"))
# return context_all
#
# # display
# # Edit 2019-09-25 just show the full list. Will add filtering later. This way it's less ambiguous when all instances are "Normal"
# # self.df_sort.dropna(subset=['recommended_type'], inplace=True)
#
# # if no recommendations
# if self.df_sort.shape[0]==0:
# logger.info(colored("No optimizations from isitfit for this AWS EC2 account", "red"))
# return context_all
#
# # if there are recommendations, show them
# sum_comment = "extra cost" if self.sum_val>0 else "savings"
# sum_color = "red" if self.sum_val>0 else "green"
#
# import click
# #logger.info("Optimization based on the following CPU thresholds:")
# #logger.info(self.thresholds)
# #logger.info("")
# click.echo(colored("Recommended %s: %0.0f $ (over the next 3 months)"%(sum_comment, self.sum_val), sum_color))
# click.echo("")
#
# # display dataframe
# from isitfit.utils import display_df
# display_df(
# "Recommended EC2 size changes",
# self.df_sort,
# self.csv_fn_final,
# self.df_sort.shape,
# logger
# )
#
## with pd.option_context("display.max_columns", 10):
## logger.info("Details")
## if self.df_sort.shape[0]<=10:
## logger.info(df2tabulate(self.df_sort))
## else:
## logger.info(df2tabulate(self.df_sort.head(n=5)))
## logger.info("...")
## logger.info(df2tabulate(self.df_sort.tail(n=5)))
## logger.info("")
## logger.info(colored("Table originally with %i rows is truncated for top and bottom 5 only."%self.df_sort.shape[0], "cyan"))
## logger.info(colored("Consider filtering it with --n=x for the 1st x results or --filter-tags=foo using a value from your own EC2 tags.", "cyan"))
#
# if self.analyzer.n!=-1:
# logger.info(colored("This table has been filtered for only the 1st %i underused results"%self.analyzer.n, "cyan"))
#
# return context_all
def pipeline_factory(ctx, n, filter_tags):
# moved these imports from outside the function to inside it so that `isitfit --version` wouldn't take 5 seconds due to the loading
from isitfit.cost.mainManager import MainManager
from isitfit.cost.cloudtrail_ec2type import CloudtrailCached
# manager of redis-pandas caching
from isitfit.cost.cacheManager import RedisPandas as RedisPandasCacheManager
cache_man = RedisPandasCacheManager()
# 2019-12-16 deprecate direct datadog/cloudwatch listeners in favor of the automatic failover
# from isitfit.cost.metrics_datadog import DatadogListener
# from isitfit.cost.metrics_cloudwatch import CwEc2Listener
from isitfit.cost.metrics_datadog import DatadogCached
from isitfit.cost.metrics_cloudwatch import CloudwatchEc2
from isitfit.cost.metrics_automatic import MetricsListener
ddg = DatadogCached(cache_man)
cloudwatchman = CloudwatchEc2(cache_man)
metrics = MetricsListener(ddg, cloudwatchman)
metrics.set_ndays(ctx.obj['ndays'])
from isitfit.cost.ec2_common import Ec2TagFilter
from isitfit.cost.catalog_ec2 import Ec2Catalog
from isitfit.cost.ec2_common import Ec2Common
from isitfit.tqdmman import TqdmL2Verbose
tqdml2_obj = TqdmL2Verbose(ctx)
ol = CalculatorOptimizeEc2(n)
etf = Ec2TagFilter(filter_tags)
ra = ReporterOptimizeEc2()
mm = MainManager("EC2 cost optimize", ctx)
mm.set_ndays(ctx.obj['ndays'])
ec2_cat = Ec2Catalog(ctx.obj['allow_ec2_different_family'])
ec2_common = Ec2Common()
ec2_it = Ec2Iterator(ctx.obj['filter_region'], tqdml2_obj)
# boto3 cloudtrail data
cloudtrail_manager = CloudtrailCached(mm.EndTime, cache_man, tqdml2_obj)
# update dict and return it
# https://stackoverflow.com/a/1453013/4126114
inject_analyzer = lambda context_all: dict({'analyzer': ol}, **context_all)
# utilization listeners
mm.set_iterator(ec2_it)
mm.add_listener('pre', cache_man.handle_pre)
mm.add_listener('pre', cloudtrail_manager.init_data)
mm.add_listener('pre', ol.handle_pre)
mm.add_listener('pre', ec2_cat.handle_pre)
mm.add_listener('ec2', etf.per_ec2)
mm.add_listener('ec2', metrics.per_host)
mm.add_listener('ec2', cloudtrail_manager.single)
mm.add_listener('ec2', ec2_common._handle_ec2obj)
mm.add_listener('ec2', ol.per_ec2)
mm.add_listener('all', metrics.display_status)
mm.add_listener('all', ec2_common.after_all)
mm.add_listener('all', inject_analyzer)
mm.add_listener('all', ra.postprocess)
#mm.add_listener('all', ra.display)
return mm
|
StarcoderdataPython
|
218152
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
ARIA modeling type definition module
"""
# pylint: disable=too-many-lines, no-self-argument, no-member, abstract-method
from sqlalchemy import (
Column,
Text,
DateTime,
UniqueConstraint
)
from . import mixins
class TypeDefinitionBase(mixins.ModelMixin):
"""
Loaded TypeDefinition.
Usually created by various DSL parsers, such as ARIA's TOSCA extension. However, it can also be
created programmatically.
"""
__tablename__ = 'type_definition'
name = Column(Text, nullable=False, index=True, doc="""
Name of the type definition
:type: :obj:`basestring`
""")
version = Column(Text, nullable=False, doc="""
Version for the type definition
:type: :obj:`basestring`
""")
main_file_name = Column(Text, nullable=False, doc="""
Filename of CSAR or YAML file from which this type definition was parsed.
:type: :obj:`basestring`
""")
uploaded_at = Column(DateTime, nullable=False, doc="""
Timestamp for when the type definition was loaded.
:type: :class:`~datetime.datetime`
""")
__table_args__ = (UniqueConstraint('name', 'version',
name='_type_definition_name_version_unique'),)
|
StarcoderdataPython
|
11200559
|
from keras.models import load_model
from PIL import Image
import numpy as np
def predict_digit(img):
img = Image.open(img)
model = load_model('mnist.h5')
# resize image to 28x28 pixels
img = img.resize((28, 28))
# convert rgb to grayscale
img = img.convert('L')
img = np.array(img)
# reshaping to support our model input and normalizing
img = img.reshape(1, 28, 28, 1)
img = img / 255.0
# predicting the class
res = model.predict([img])[0]
print(np.argmax(res), max(res))
return np.argmax(res), max(res)
predict_digit('5Test.png')
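# A minimal variant (a sketch, assuming the same 'mnist.h5' file): load the model
# once and reuse it across calls instead of re-reading it from disk on every
# prediction, which is the costly part of predict_digit above.
_model_cache = None

def predict_digit_cached(img_path):
    """Same prediction flow as predict_digit, but the model is loaded only once."""
    global _model_cache
    if _model_cache is None:
        _model_cache = load_model('mnist.h5')
    img = Image.open(img_path).convert('L').resize((28, 28))
    arr = np.array(img).reshape(1, 28, 28, 1) / 255.0
    res = _model_cache.predict(arr)[0]
    return np.argmax(res), max(res)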
|
StarcoderdataPython
|
4978737
|
#! /usr/bin/env python
# Convert MH directories (1 message per file) or MMDF mailboxes (4x^A
# delimited) to unix mailbox (From ... delimited) on stdout.
# If -f is given, files contain one message per file (e.g. MH messages)
import rfc822
import sys
import time
import os
import stat
import getopt
import regex
def main():
dofile = mmdf
try:
opts, args = getopt.getopt(sys.argv[1:], 'f')
except getopt.error, msg:
sys.stderr.write('%s\n' % msg)
sys.exit(2)
for o, a in opts:
if o == '-f':
dofile = message
if not args:
args = ['-']
sts = 0
for arg in args:
if arg == '-' or arg == '':
sts = dofile(sys.stdin) or sts
elif os.path.isdir(arg):
sts = mh(arg) or sts
elif os.path.isfile(arg):
try:
f = open(arg)
except IOError, msg:
sys.stderr.write('%s: %s\n' % (arg, msg))
sts = 1
continue
sts = dofile(f) or sts
f.close()
else:
sys.stderr.write('%s: not found\n' % arg)
sts = 1
if sts:
sys.exit(sts)
numeric = regex.compile('[1-9][0-9]*')
def mh(dir):
sts = 0
msgs = os.listdir(dir)
for msg in msgs:
if numeric.match(msg) != len(msg):
continue
fn = os.path.join(dir, msg)
try:
f = open(fn)
except IOError, msg:
sys.stderr.write('%s: %s\n' % (fn, msg))
sts = 1
continue
sts = message(f) or sts
return sts
def mmdf(f):
sts = 0
while 1:
line = f.readline()
if not line:
break
if line == '\1\1\1\1\n':
sts = message(f, line) or sts
else:
sys.stderr.write(
                'Bad line in MMDF mailbox: %r\n' % (line,))
return sts
counter = 0 # for generating unique Message-ID headers
def message(f, delimiter = ''):
sts = 0
# Parse RFC822 header
m = rfc822.Message(f)
# Write unix header line
fullname, email = m.getaddr('From')
tt = m.getdate('Date')
if tt:
t = time.mktime(tt)
else:
sys.stderr.write(
'Unparseable date: %r\n' % (m.getheader('Date'),))
t = os.fstat(f.fileno())[stat.ST_MTIME]
print 'From', email, time.ctime(t)
# Copy RFC822 header
for line in m.headers:
print line,
# Invent Message-ID header if none is present
if not m.has_key('message-id'):
global counter
counter = counter + 1
msgid = "<%s.%d>" % (hex(t), counter)
sys.stderr.write("Adding Message-ID %s (From %s)\n" %
(msgid, email))
print "Message-ID:", msgid
print
# Copy body
while 1:
line = f.readline()
if line == delimiter:
break
if not line:
sys.stderr.write('Unexpected EOF in message\n')
sts = 1
break
if line[:5] == 'From ':
line = '>' + line
print line,
# Print trailing newline
print
return sts
if __name__ == "__main__":
main()
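# A compact modern alternative (a sketch, not a drop-in replacement for the script
# above): Python's standard-library mailbox module can perform the same
# MH-directory to mbox conversion; the paths are placeholders.
import mailbox

def convert_mh_to_mbox(mh_dir, mbox_path):
    # open the source MH directory (must already exist) and the destination mbox
    src = mailbox.MH(mh_dir, create=False)
    dst = mailbox.mbox(mbox_path)
    dst.lock()
    try:
        for message in src:
            dst.add(message)
        dst.flush()
    finally:
        dst.unlock()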
|
StarcoderdataPython
|
33416
|
<reponame>FelixTheoret/Ergocycle<filename>source/StimulationScreen.py<gh_stars>0
"""
Created on Wed March 30 11:00 2022
@author: <NAME>
"""
from tracemalloc import start
from numpy import number
from Screen import Screen as Screen
from PyQt5 import QtWidgets
from PyQt5.QtWidgets import *
from PyQt5.QtGui import QFont, QPixmap
from PIL import Image
#from Ergocycle.source.StartWindow import StartWindow
from StartWindow import StartWindow
from TestingWindow import TestingWindow
from InstructionWindow import InstructionWindow
from Parameters import Parameters
from StimulationWindow import StimulationWindow
from MainWindowStim import MainWindowStim
from DangerPopUp import DangerPopUp
import sys
import datetime
import time
import csv
from CommandButton import CommandButton as CommandButton
# Take the code from main_sef.py and add getters and setters
#def window():
#app = QApplication(sys.argv)
#win = StartWindow()
#win.show()
#sys.exit(app.exec_())
#window()
class StimulationScreen(Screen):
def __init__(self, event_function):
super(StimulationScreen, self).__init__(event_function)
self.event_function = event_function
self.current_menu = 0
self.danger_menu = 0
#self.now = datetime.datetime.now()
    ### 1.1. Manages the windows that appear on the interface. ###
def manage_active_window(self, stim_parameters):
if self.window_counter == 0:
self.current_menu = StartWindow()
self.current_menu.training_button.clicked.connect(lambda : self.event_function("start_training"))
self.current_menu.test_button.clicked.connect(lambda : self.event_function("start_test"))
self.current_menu.show()
# self.connect_buttons(self.current_menu)
elif self.window_counter == -1:
self.current_menu.close()
self.current_menu = TestingWindow()
self.current_menu.increase_amp_button.clicked.connect(lambda : self.event_function("increase_amp"))
self.current_menu.increase_freq_button.clicked.connect(lambda : self.event_function("increase_frequency"))
self.current_menu.increase_imp_button.clicked.connect(lambda : self.event_function("increase_imp"))
self.current_menu.decrease_amp_button.clicked.connect(lambda : self.event_function("decrease_amp"))
self.current_menu.decrease_freq_button.clicked.connect(lambda : self.event_function("decrease_frequency"))
self.current_menu.decrease_imp_button.clicked.connect(lambda : self.event_function("decrease_imp"))
self.current_menu.back_button.clicked.connect(lambda : self.event_function("back_button_clicked"))
self.current_menu.show()
elif self.window_counter == 1:
self.current_menu.close()
self.current_menu = MainWindowStim()
self.current_menu.submit_button.clicked.connect(lambda : self.event_function("submit_button_clicked"))
self.current_menu.submit_final_button.clicked.connect(lambda : self.event_function("submit_final_button_clicked"))
self.current_menu.show()
elif self.window_counter == -2:
self.danger_menu = DangerPopUp(stim_parameters)
self.danger_menu.show()
self.danger_menu.back_to_menu_button.clicked.connect(lambda : self.back_to_menu_button_clicked())
self.danger_menu.continue_button.clicked.connect(lambda : self.continue_button_clicked(stim_parameters))
elif self.window_counter == 2:
self.current_menu.close()
self.current_menu = InstructionWindow(stim_parameters)
self.current_menu.start_button.clicked.connect(lambda : self.event_function("start_stimulations"))
self.current_menu.show()
elif self.window_counter == 3:
self.current_menu.close()
self.current_menu = StimulationWindow(stim_parameters)
self.current_menu.increase_amplitude1_button.clicked.connect(lambda : self.event_function("increase_amplitude1"))
self.current_menu.increase_amplitude2_button.clicked.connect(lambda : self.event_function("increase_amplitude2"))
self.current_menu.increase_amplitude3_button.clicked.connect(lambda : self.event_function("increase_amplitude3"))
self.current_menu.increase_amplitude4_button.clicked.connect(lambda : self.event_function("increase_amplitude4"))
self.current_menu.increase_amplitude5_button.clicked.connect(lambda : self.event_function("increase_amplitude5"))
self.current_menu.increase_amplitude6_button.clicked.connect(lambda : self.event_function("increase_amplitude6"))
self.current_menu.increase_amplitude7_button.clicked.connect(lambda : self.event_function("increase_amplitude7"))
self.current_menu.increase_amplitude8_button.clicked.connect(lambda : self.event_function("increase_amplitude8"))
self.current_menu.decrease_amplitude1_button.clicked.connect(lambda : self.event_function("decrease_amplitude1"))
self.current_menu.decrease_amplitude2_button.clicked.connect(lambda : self.event_function("decrease_amplitude2"))
self.current_menu.decrease_amplitude3_button.clicked.connect(lambda : self.event_function("decrease_amplitude3"))
self.current_menu.decrease_amplitude4_button.clicked.connect(lambda : self.event_function("decrease_amplitude4"))
self.current_menu.decrease_amplitude5_button.clicked.connect(lambda : self.event_function("decrease_amplitude5"))
self.current_menu.decrease_amplitude6_button.clicked.connect(lambda : self.event_function("decrease_amplitude6"))
self.current_menu.decrease_amplitude7_button.clicked.connect(lambda : self.event_function("decrease_amplitude7"))
self.current_menu.decrease_amplitude8_button.clicked.connect(lambda : self.event_function("decrease_amplitude8"))
self.current_menu.increase_frequency1_button.clicked.connect(lambda : self.event_function("increase_frequency1"))
self.current_menu.increase_frequency2_button.clicked.connect(lambda : self.event_function("increase_frequency2"))
self.current_menu.increase_frequency3_button.clicked.connect(lambda : self.event_function("increase_frequency3"))
self.current_menu.increase_frequency4_button.clicked.connect(lambda : self.event_function("increase_frequency4"))
self.current_menu.increase_frequency5_button.clicked.connect(lambda : self.event_function("increase_frequency5"))
self.current_menu.increase_frequency6_button.clicked.connect(lambda : self.event_function("increase_frequency6"))
self.current_menu.increase_frequency7_button.clicked.connect(lambda : self.event_function("increase_frequency7"))
self.current_menu.increase_frequency8_button.clicked.connect(lambda : self.event_function("increase_frequency8"))
self.current_menu.decrease_frequency1_button.clicked.connect(lambda : self.event_function("decrease_frequency1"))
self.current_menu.decrease_frequency2_button.clicked.connect(lambda : self.event_function("decrease_frequency2"))
self.current_menu.decrease_frequency3_button.clicked.connect(lambda : self.event_function("decrease_frequency3"))
self.current_menu.decrease_frequency4_button.clicked.connect(lambda : self.event_function("decrease_frequency4"))
self.current_menu.decrease_frequency5_button.clicked.connect(lambda : self.event_function("decrease_frequency5"))
self.current_menu.decrease_frequency6_button.clicked.connect(lambda : self.event_function("decrease_frequency6"))
self.current_menu.decrease_frequency7_button.clicked.connect(lambda : self.event_function("decrease_frequency7"))
self.current_menu.decrease_frequency8_button.clicked.connect(lambda : self.event_function("decrease_frequency8"))
self.current_menu.increase_imp1_button.clicked.connect(lambda : self.event_function("increase_imp1"))
self.current_menu.increase_imp2_button.clicked.connect(lambda : self.event_function("increase_imp2"))
self.current_menu.increase_imp3_button.clicked.connect(lambda : self.event_function("increase_imp3"))
self.current_menu.increase_imp4_button.clicked.connect(lambda : self.event_function("increase_imp4"))
self.current_menu.increase_imp5_button.clicked.connect(lambda : self.event_function("increase_imp5"))
self.current_menu.increase_imp6_button.clicked.connect(lambda : self.event_function("increase_imp6"))
self.current_menu.increase_imp7_button.clicked.connect(lambda : self.event_function("increase_imp7"))
self.current_menu.increase_imp8_button.clicked.connect(lambda : self.event_function("increase_imp8"))
self.current_menu.decrease_imp1_button.clicked.connect(lambda : self.event_function("decrease_imp1"))
self.current_menu.decrease_imp2_button.clicked.connect(lambda : self.event_function("decrease_imp2"))
self.current_menu.decrease_imp3_button.clicked.connect(lambda : self.event_function("decrease_imp3"))
self.current_menu.decrease_imp4_button.clicked.connect(lambda : self.event_function("decrease_imp4"))
self.current_menu.decrease_imp5_button.clicked.connect(lambda : self.event_function("decrease_imp5"))
self.current_menu.decrease_imp6_button.clicked.connect(lambda : self.event_function("decrease_imp6"))
self.current_menu.decrease_imp7_button.clicked.connect(lambda : self.event_function("decrease_imp7"))
self.current_menu.decrease_imp8_button.clicked.connect(lambda : self.event_function("decrease_imp8"))
self.current_menu.pauseWatch.pressed.connect(lambda : self.event_function("pause_stimulation"))
self.current_menu.stop_button.clicked.connect(lambda : self.event_function("stop_stimulation"))
self.current_menu.show()
else:
self.current_menu.close()
def back_to_menu_button_clicked(self):
self.danger_menu.close()
# self.manage_active_window(stim_parameters)
self.event_function("back_to_menu")
def continue_button_clicked(self, stim_parameters):
self.window_counter = 2
self.manage_active_window(stim_parameters)
self.danger_menu.close()
self.event_function("continue_to_instructions")
    ### 1.2. Creates a CSV file during training to record the stimulation data ###
def create_csv_file(self, matrice):
self.now = datetime.datetime.now()
file = (self.now.strftime("%m-%d-%Y, %H;%M;%S"))
path = "\\home\\pi\\Downloads\\stimulation_data_" # à commenter si vous travaillez sur votre ordinateur
name_of_file = path+file+".csv" # à commenter si vous travaillez sur votre ordi
#name_of_file = (self.now.strftime("%m-%d-%Y, %H;%M;%S"))+" stimulations_data.csv" # à décommenter si vous travaillez sur votre ordinateur
with open(name_of_file, 'w',newline='') as f:
fieldnames = ['Date and time', 'Electrode', 'Amplitude(mA)','Frequence(Hz)', 'Durée dimpulsion(us)', 'muscle']
thewriter = csv.DictWriter(f,fieldnames)
thewriter.writeheader()
now = datetime.datetime.now()
date_time = now.strftime("%m-%d-%Y,%H:%M:%S")
for i in range(8):
muscle_name = self.get_muscle_traduction(matrice[3,i])
thewriter.writerow({'Date and time' : date_time, 'Electrode': str(i+1), 'Amplitude(mA)': str(matrice[0,i]) ,'Frequence(Hz)': str(matrice[1,i]), 'Durée dimpulsion(us)': str(matrice[2,i]), 'muscle': muscle_name})
        # the file is closed automatically by the with-statement
    ### 1.3. Appends training parameter changes to the same CSV file ###
def save_data_in_csv_file(self, matrice):
file = self.now.strftime("%m-%d-%Y, %H;%M;%S")
path = "\\home\\pi\\Downloads\\stimulation_data_" # à commenter si vous travaillez sur votre ordi
name_of_file = path+file+".csv" # à commenter si vous travaillez sur votre ordi
#name_of_file = (self.now.strftime("%m-%d-%Y, %H;%M;%S"))+" stimulations_data.csv" # à décommenter si vous travaillez sur votre ordi
with open(name_of_file, 'a+',newline='') as f:
fieldnames = ['Date and time', 'Electrode', 'Amplitude(mA)','Frequence(Hz)', 'Durée dimpulsion(us)', 'muscle']
thewriter = csv.DictWriter(f,fieldnames)
new_now = datetime.datetime.now()
date_time = new_now.strftime("%m-%d-%Y,%H:%M:%S")
            if len(matrice) == 0:
for i in range(8):
thewriter.writerow({'Date and time' : date_time, 'Electrode': str(i+1), 'Amplitude(mA)': str(0) ,'Frequence(Hz)': str(0), 'Durée dimpulsion(us)': str(0), 'muscle': str(0)})
else:
for i in range(8):
muscle_name = self.get_muscle_traduction(matrice[3,i])
thewriter.writerow({'Date and time' : date_time, 'Electrode': str(i+1), 'Amplitude(mA)': str(matrice[0,i]) ,'Frequence(Hz)': str(matrice[1,i]), 'Durée dimpulsion(us)': str(matrice[2,i]), 'muscle': muscle_name})
    ### 1.4. Translate muscle numbers into names for data recording ###
def get_muscle_traduction(self, muscle_number):
        if muscle_number == 1:
            muscle = "Biceps Brachii"
        elif muscle_number == 2:
            muscle = "Triceps Brachii"
        elif muscle_number == 3:
            muscle = "Deltoide Postérieur"
        elif muscle_number == 4:
            muscle = "Deltoide Antérieur"
        else:
            # covers 0 and any unexpected value, so the method always returns a name
            muscle = "Aucun"
        return muscle
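    # Usage note (inferred from the indexing above, matrice[0..3, i]): the `matrice` argument
    # passed to create_csv_file() and save_data_in_csv_file() appears to be a 4 x 8
    # array-like object, one column per electrode (1-8):
    #   row 0 -> amplitude (mA), row 1 -> frequency (Hz),
    #   row 2 -> pulse width (us), row 3 -> muscle code (see get_muscle_traduction).
    # A hypothetical call, assuming numpy is available as np, could look like:
    #   matrice = np.zeros((4, 8))
    #   self.create_csv_file(matrice)
    #   self.save_data_in_csv_file(matrice)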
|
StarcoderdataPython
|
4935784
|
<reponame>Floogen/jmod-bloodhound<gh_stars>10-100
import praw
import time
import operator
import re
from datetime import datetime
from praw.models import MoreComments
def comment_check(comment_list, subreddit_name, comment_count):
if subreddit_name == '2007scape' and len(comment_list) > 0:
return True
if len(comment_list) > 1 and comment_count > 25:
return True
for comment in comment_list:
if comment.score < 0:
return True
return False
def find_jmod_comments(post):
comment_list = []
jmod_flairs = [
'jagexmod',
'modmatk',
'mod-jagex'
]
while True:
try:
post.comments.replace_more(limit=None)
break
except Exception:
print('Handling replace_more exception')
time.sleep(1)
for comment in post.comments.list():
if comment.author_flair_css_class in jmod_flairs:
comment_list.append(comment)
return comment_list
def find_bot_comment(post):
for comment in post.comments:
if isinstance(comment, MoreComments) or comment.author is None:
continue
if comment.author.name == 'JMOD_Bloodhound' and comment.parent_id == f"t3_{post.id}":
return comment
return None
def create_comment(target_comments, bot_comments, archived_posts):
post_id = target_comments[0].submission.id
for comment in bot_comments:
if comment.submission.id == post_id and comment.parent_id == f"t3_{post_id}":
# bot has commented here before, edit the comment
return edit_comment(target_comments, comment, archived_posts)
bot_comment = find_bot_comment(target_comments[0].submission)
if bot_comment is not None:
return edit_comment(target_comments, bot_comment, archived_posts)
# create comment instead, as no previous comment was found
posted_comment = bloodhound_bot.submission(id=post_id).reply(format_comment(target_comments, True))
formatted_comment_body = format_post(target_comments, posted_comment)
# create archive of comment on subreddit TrackedJMODComments
# have post ID in the archive subreddit contain the post name of original target
# title for each post: [2007scape or Runescape] JMOD Comment(s) On Thread [ThreadName40CharMax...]
# edited comments will be commented on the archived post
title = posted_comment.submission.title
if len(posted_comment.submission.title) > 40:
title = posted_comment.submission.title[:40].rstrip() + '...'
title = '[' + posted_comment.subreddit.display_name + '] (ID:' + posted_comment.submission.id + ') ' \
+ 'JMOD Comments On Thread: ' + title
archive_comments(target_comments
, historian_bot.subreddit('TrackedJMODComments').submit(title=title
, selftext=formatted_comment_body))
return True
def edit_comment(target_comments, past_comment, archived_posts):
    # edit archived, alert of any edits
    # get the archived post id of this submission
    arch_post = None
    for post in archived_posts:
        if re.search(r"ID:(.*?)\)", post.title).group(1) == past_comment.submission.id:
            arch_post = post
    if not arch_post:
        # no archive post exists yet for this submission, so create one before editing
        formatted_comment_body = format_post(target_comments, past_comment)
        title = past_comment.submission.title
        if len(past_comment.submission.title) > 40:
            title = past_comment.submission.title[:40].rstrip() + '...'
        title = '[' + past_comment.subreddit.display_name + '] (ID:' + past_comment.submission.id + ') ' \
                + 'JMOD Comments On Thread: ' + title
        arch_post = historian_bot.subreddit('TrackedJMODComments').submit(title=title, selftext=formatted_comment_body)
    # call format_comment and add additional parameter for initialPass?
    # that way it can have logic for flagging comments that have been edited since last pass through
    # initialPass was False, but set to True to disable tracking and editing of JMOD comments
    past_comment.edit(format_comment(target_comments, True, arch_post))
    arch_post.edit(format_post(target_comments, past_comment))
    archive_comments(target_comments, arch_post)
    return None
def archive_comments(target_comments, archived_post):
for comment in target_comments:
found = False
new_edit = False
target_arch_comment = None
archived_post.comment_sort = 'new'
for arch_comment in reversed(archived_post.comments):
comment_first_line = arch_comment.body.splitlines()[0]
if re.search(r"ID:\[(.*?)\]", comment_first_line).group(1) == comment.id:
found = True
archived_ts = datetime.strptime(arch_comment.body.splitlines()[2].split('on: ')[1].replace('**', '')
, '%Y-%m-%d %H:%M:%S').timestamp()
if comment.edited and archived_ts < comment.edited:
new_edit = True
target_arch_comment = arch_comment
elif comment.edited and archived_ts >= comment.edited:
new_edit = False
if new_edit:
ts = str(datetime.fromtimestamp(comment.edited))
archived_comment = "ID:[" + comment.id + "]\n\nEdited on: **" + ts \
+ "**\n\nComment by: **" + comment.author.name \
+ "**\n\n**[Click here for comment context](" \
+ comment.permalink + "?context=3)**\n\n---\n\n" \
+ comment.body \
+ '\n\n---'
target_arch_comment.edit(archived_comment)
elif not found:
ts = str(datetime.fromtimestamp(comment.created_utc))
archived_comment = "ID:[" + comment.id + "]\n\nCreated on: **" + ts \
+ "**\n\nComment by: **" + comment.author.name \
+ "**\n\n**[Click here for comment context](" \
+ comment.permalink + "?context=3)**\n\n---\n\n" \
+ comment.body \
+ '\n\n---'
historian_bot.submission(id=archived_post.id).reply(archived_comment)
return None
def format_post(target_comments, posted_comment):
previous_author_name = target_comments[0].author.name
bot_post_body = '# I have found the following **J-Mod** comments on the thread [' \
+ posted_comment.submission.title + '](' + posted_comment.submission.permalink + ')\n\n**'\
+ previous_author_name + '**\n\n'
for comment in target_comments:
parsed_comment = comment.body
if '\n' in parsed_comment or len(parsed_comment) > 45:
parsed_comment = parsed_comment[:45].rstrip() + '...'
if '\n' in parsed_comment:
parsed_comment = parsed_comment.splitlines()[0].rstrip() + '...'
if previous_author_name == comment.author.name:
bot_post_body += '- ^^(ID:[' + comment.id + ']) [' + parsed_comment + '](' \
+ comment.permalink + '?context=3)\n\n'
else:
bot_post_body += '\n\n**' + str(comment.author) + '**\n\n- ^^(ID:[' + comment.id + ']) [' \
+ parsed_comment + '](' + comment.permalink + '?context=3)\n\n'
previous_author_name = comment.author.name
return bot_post_body
def format_comment(target_comments, initial_pass, archived_post=None):
target_comments.sort(key=operator.attrgetter('author.name')) # sort target_comments by username
previous_author_name = target_comments[0].author.name
bot_comment_body = '##### Bark bark!\n\nI have found the following **J-Mod** comment(s) in this thread:\n\n**' \
+ previous_author_name + '**\n\n'
for comment in target_comments:
comment_edited_marker = ''
# disabled the tracking and labeling of edited comments
if comment.edited and archived_post and not initial_pass:
edit_counter = 0
# sort by newest, then foreach through list in reverse to get the oldest
archived_post.comment_sort = 'new'
for arch_comment in reversed(archived_post.comments):
# look for id matching comment.id in first line of each comment
# and check for creation/edited time
comment_first_line = arch_comment.body.splitlines()[0]
if re.search(r"ID:\[(.*?)\]", comment_first_line).group(1) == comment.id:
archived_ts = datetime.strptime(arch_comment.body.splitlines()[2].split('on: ')[1].replace('**', '')
, '%Y-%m-%d %H:%M:%S').timestamp()
if archived_ts < comment.edited:
if edit_counter == 0:
comment_edited_marker = ' [^[original ^comment]](' \
+ arch_comment.permalink + ')'
else:
comment_edited_marker += '^(, )[^[edit ^' + str(edit_counter) \
+ ']](' + arch_comment.permalink + ')'
edit_counter += 1
parsed_comment = comment.body
if '\n' in parsed_comment or len(parsed_comment) > 45:
parsed_comment = parsed_comment[:45].rstrip() + '...'
if '\n' in parsed_comment:
parsed_comment = parsed_comment.splitlines()[0].rstrip() + '...'
if previous_author_name == comment.author.name:
bot_comment_body += '- [' + parsed_comment + '](' \
+ comment.permalink + '?context=3)' + comment_edited_marker + '\n\n'
else:
bot_comment_body += '\n\n**' + str(comment.author) + '**\n\n- [' \
+ parsed_comment + '](' + comment.permalink + '?context=3)' \
+ comment_edited_marker + '\n\n'
previous_author_name = comment.author.name
current_time = '{:%m/%d/%Y %H:%M:%S}'.format(datetime.now())
bot_comment_body += "\n\n \n\n^(**Last edited by bot: " + current_time \
+ "**)\n\n---\n\n^(I've been rewritten to use Python! I also now archive JMOD comments.)" \
+ " \n^(Read more about) [^(the update here)](/u/JMOD_Bloodhound/" \
"comments/9kqvis/bot_update_python_archiving/) ^(or see my) [^(Github repo here)]" \
"(/u/JMOD_Bloodhound/comments/8dronr/" \
"jmod_bloodhoundbot_github_repository/)^."
return bot_comment_body
def hunt(subreddit_name):
subreddit = bloodhound_bot.subreddit(subreddit_name)
bot_list = []
for comment in bloodhound_bot.redditor('JMOD_Bloodhound').comments.new(limit=None):
bot_list.append(comment)
tracked_posts_list = []
for submission in historian_bot.subreddit('TrackedJMODComments').new(limit=100):
try:
submission_id = re.search(r"ID:(.*?)\)", submission.title).group(1)
except AttributeError:
submission_id = ''
if submission_id != '':
tracked_posts_list.append(submission)
for submission in subreddit.hot(limit=100):
if submission.author != 'JMOD_Bloodhound':
jmod_list = (find_jmod_comments(submission))
if comment_check(jmod_list, subreddit_name, submission.num_comments):
if create_comment(jmod_list, bot_list, tracked_posts_list):
print(submission.title)
return None
bloodhound_bot = praw.Reddit('JMOD_Bloodhound', user_agent='User Agent - JMOD_Bloodhound Python Script')
historian_bot = praw.Reddit('JMOD_Historian', user_agent='User Agent - JMOD_Historian Python Script')
hunt('2007scape')
hunt('runescape')
|
StarcoderdataPython
|
5101733
|
<gh_stars>0
import subprocess
import os
import sys
if not '/mnt/SSD/sim/python/src/aux/' in sys.path: sys.path.append('/mnt/SSD/sim/python/src/aux/')
import paths
#runs = ['Q kur 0', 'F kur 0', 'Q kur 1', 'F kur 1', 'Q fal 1', 'F fal 1']
runs = ['Q kur 0', 'F kur 0', 'Q fal 1', 'F fal 1']
#runs = ['Q fal 1', 'F fal 1']
os.system('rm -f report')
for run in runs:
args = run.split()
command = 'python ' + paths.pydir + 'var/rings.py ' + args[0] + ' ' + args[1] + ' ' + args[2] + ' &'
proc = subprocess.Popen(command.split())
proc.wait()
f = open('report', 'a+')
f.write(run + '\n')
f.close()
|
StarcoderdataPython
|
5135163
|
import nltk
import sys
def load_fcfg(grammar_filename):
'''
load a grammar file
Args: grammar_filename(str): a filename for a fcfg
    Returns: parser (FeatureEarleyChartParser): an Earley chart parser built from the loaded feature grammar
'''
return nltk.parse.FeatureEarleyChartParser(nltk.data.load(grammar_filename, format='fcfg'))
def load_test_sentences(input_sentence_filename):
'''
load a test sentences
Args: input_sentence_filename(str): a filename of a target input sentences file
Returns: sentences(list): a list of sentences to parse
'''
sentences = []
with open(input_sentence_filename,'r') as f:
for l in f:
sentences.append(l)
return sentences
def parse(parser, sentence):
'''
Try and parse a sentence given a grammar
    Args: parser (FeatureEarleyChartParser): a parser initialized from the loaded fcfg, sentence (str): a sentence to be parsed
    Returns: (str) the simplified semantic representation of the first parse, or an empty string if the sentence does not parse
'''
tokens = nltk.word_tokenize(sentence)
for item in parser.parse(tokens):
return item.label()['SEM'].simplify()
return ''
def main(grammar_filename, input_sentence_filename, output_filename):
'''
Open a grammar file and test input file and produce all possible parses for each given sentence
    Args: grammar_filename(str): a filename for a fcfg file, input_sentence_filename(str): a filename of an input file containing sentences to be parsed, output_filename(str): a filename where results should be written.
    Returns: None
    Opens the grammar and sentence files, reads each sentence, tokenizes it, parses it with the Earley chart parser and writes the simplified semantics to the output file.
'''
parser = load_fcfg(grammar_filename)
sentences = load_test_sentences(input_sentence_filename)
with open(output_filename, 'w') as w:
for sentence in sentences:
if len(sentence) > 1:
w.write(sentence.strip()+'\n')
w.write("{}\n".format(parse(parser, sentence)))
if __name__ == "__main__":
if len(sys.argv) != 4:
print("Usage: hw5_semantics.py <input_grammar_file> <input_sentence_filename> <output_filename>")
exit(-1)
else:
main(sys.argv[1], sys.argv[2], sys.argv[3])
|
StarcoderdataPython
|
1738569
|
<reponame>alexcapstick/minder_utils<filename>minder_utils/evaluate/eval_utils.py
from sklearn.metrics import f1_score, accuracy_score
from sklearn.metrics import confusion_matrix
import numpy as np
from sklearn.model_selection import train_test_split, StratifiedKFold
from ..formatting.format_util import y_to_categorical
def get_scores(y_true, y_pred):
if y_true.ndim > 1:
y_true = np.argmax(y_true, axis=1)
if y_pred.ndim > 1:
y_pred = np.argmax(y_pred, axis=1)
try:
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
except ValueError:
return None, None, None, None
specificity = tn / (tn + fp)
sensitivity = tp / (tp + fn)
acc = accuracy_score(y_true, y_pred)
f1 = f1_score(y_true, y_pred)
return sensitivity, specificity, acc, f1
def split_by_ids(X, y, patient_ids, cat=True, valid_only=True, stratify=True, seed=0):
y[y == 0] = -1
y = y.reshape(-1, )
patient_ids = patient_ids.reshape(-1, )
# make sure the train and test set got both positive and negative patients
y_p_id = []
for p_id in np.unique(patient_ids):
_y = np.unique(y[patient_ids == p_id])
rng = np.random.default_rng(seed)
y_p_id.append(int(_y[0]) if len(_y) < 2 else rng.integers(0,2))
seed += 1
y_p_id = np.array(y_p_id)
y_p_id[y_p_id < 0] = 0
train_ids, test_ids = train_test_split(np.unique(patient_ids), test_size=0.33, random_state=seed, stratify=y_p_id if stratify else None)
test_y = y[np.isin(patient_ids, test_ids)]
if valid_only:
test_filter = np.isin(test_y, [-1, 1])
else:
test_filter = np.isin(test_y, np.unique(test_y))
if cat:
return X[np.isin(patient_ids, train_ids)], y_to_categorical(y[np.isin(patient_ids, train_ids)]), \
X[np.isin(patient_ids, test_ids)][test_filter], y_to_categorical(y[np.isin(patient_ids, test_ids)][
test_filter])
return X[np.isin(patient_ids, train_ids)], y[np.isin(patient_ids, train_ids)], \
X[np.isin(patient_ids, test_ids)], y[np.isin(patient_ids, test_ids)]
class StratifiedKFoldPids:
def __init__(self, n_splits=5, shuffle=False, random_state=None):
'''
This splits the data so that no train and test
split contain the same pid. They will contain
roughly the same number of positive
and negative samples.
        This is based on, and will function in the same way as:
        https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.StratifiedKFold.html
Arguments
---------
- ```n_splits```: ```int```, optional:
The number of splits.
Defaults to ```5```.
- ```shuffle```: ```bool```, optional:
Whether to shuffle the order of the pids before
making the splits.
Defaults to ```False```.
        - ```random_state```: ```int``` or ```None```, optional:
The random state for the random processes in the class.
Defaults to ```None```.
'''
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
return
def get_n_splits(self):
'''
Returns the number of splits
Returns
--------
- ```out```: ```int``` :
The number of splits
'''
return self.n_splits
def split_by_ids(self, y, pids, seed=0):
'''
An internal function that given a set of
labels and PIDs corresponding to the labels,
this function can return the pid values that
should be assigned to the training or testing
set for each split.
Arguments
---------
- ```y```: ```array```:
Labels.
- ```pids```: ```array```:
PIDs corresponding to ```y```.
- ```seed```: ```int```, optional:
The random seed for the random processes.
Defaults to ```0```.
Returns
--------
        - ```out```: ```list``` :
PID values that should be assigned
to the training or testing set for each split.
'''
labels = np.copy(y)
labels[labels == 0] = -1
labels = labels.reshape(-1, )
pids = pids.reshape(-1, )
# make sure the train and test set got both positive and negative patients
y_p_id = []
for p_id in np.unique(pids):
_y = np.unique(y[pids == p_id])
rng = np.random.default_rng(seed)
y_p_id.append(int(_y[0]) if len(_y) < 2 else rng.integers(0,2))
seed += 1
y_p_id = np.array(y_p_id)
y_p_id[y_p_id < 0] = 0
splitter = StratifiedKFold(n_splits=self.n_splits,
shuffle=self.shuffle,
random_state=seed if self.shuffle else None)
splits = list(splitter.split(np.unique(pids), y=y_p_id))
return [[np.unique(pids)[train_idx], np.unique(pids)[test_idx]] for train_idx, test_idx in splits]
def split(self, X, y, pids):
'''
This function produces the splits that can be used for training
and testing.
Arguments
---------
- ```X```: ```array```:
X input. This isn't used and so anything can be passed here.
- ```y```: ```array```:
The labels. This is used to stratify the data.
        - ```pids```: ```array```:
            The PIDs that are used to split the data.
Returns
--------
- ```out```: ```list``` :
List of train-test splits. This
list has length equal to ```n_splits```.
'''
rng = np.random.default_rng(self.random_state)
seed = rng.integers(0,1e6)
list_of_splits_pids = self.split_by_ids(y=y, pids=pids, seed=seed)
list_of_splits = []
for train_pids, test_pids in list_of_splits_pids:
train_idx_new = np.arange(len(pids))[np.isin(pids, train_pids)]
test_idx_new = np.arange(len(pids))[np.isin(pids, test_pids)]
list_of_splits.append([
train_idx_new,
test_idx_new,
])
return list_of_splits
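# Usage sketch (illustrative): assuming X is an (n_samples, n_features) array, y holds the
# labels and pids holds one patient id per row, StratifiedKFoldPids can be used much like
# sklearn's StratifiedKFold:
#
#   splitter = StratifiedKFoldPids(n_splits=5, shuffle=True, random_state=0)
#   for train_idx, test_idx in splitter.split(X, y, pids):
#       X_train, y_train = X[train_idx], y[train_idx]
#       X_test, y_test = X[test_idx], y[test_idx]
#
# Because the folds are built from unique pids, no patient id ends up in both the train and
# test indices of the same split.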
|
StarcoderdataPython
|
1686509
|
class User:
pass
|
StarcoderdataPython
|
6580350
|
<reponame>Chocowaffres/SRE_Tests<gh_stars>0
### https://xang1234.github.io/multi-label/
import sklearn.metrics as metrics
from skmultilearn.dataset import load_dataset, save_to_arff
from skmultilearn.problem_transform import ClassifierChain, BinaryRelevance, LabelPowerset
from sklearn.ensemble import GradientBoostingClassifier, ExtraTreesClassifier, RandomForestClassifier, AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier, ExtraTreeClassifier
from sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier
from sklearn.linear_model import SGDClassifier, LogisticRegression
from sklearn.svm import SVC
from sklearn.multioutput import MultiOutputClassifier
from skmultilearn.adapt import BRkNNaClassifier, MLkNN
from skmultilearn.ensemble import LabelSpacePartitioningClassifier, MajorityVotingClassifier
from skmultilearn.cluster import LabelCooccurrenceGraphBuilder, NetworkXLabelGraphClusterer, FixedLabelSpaceClusterer
from sklearn.multiclass import OneVsRestClassifier
import arff, numpy as np
from scipy import sparse
import cProfile
# --------------------------- Datasets -----------------------------
def load_yeast_dataset():
X_train, Y_train, _, _ = load_dataset("yeast", "train")
X_test, Y_test, _, _ = load_dataset("yeast", "test")
return X_train, Y_train, X_test, Y_test
def load_custom_dataset(dataset_name, label_count, length_train=None, length_test=None):
train_dataset = arff.load(open(dataset_name+'_train.arff', 'r'))
if length_train == None:
length_train = len(train_dataset['data'])
test_dataset = arff.load(open(dataset_name+'_test.arff', 'r'))
if length_test == None:
length_test = len(test_dataset['data'])
X_train = np.array([np.array(train_dataset['data'][i], dtype=float)[:-label_count] for i in range(length_train)])
Y_train = np.array([np.array(train_dataset['data'][i], dtype=int)[-label_count:] for i in range(length_train)])
X_test = np.array([np.array(test_dataset['data'][i], dtype=float)[:-label_count] for i in range(length_test)])
Y_test = np.array([np.array(test_dataset['data'][i], dtype=int)[-label_count:] for i in range(length_test)])
X_train = sparse.lil_matrix(X_train, shape=X_train.shape)
Y_train = sparse.lil_matrix(Y_train, shape=Y_train.shape)
X_test = sparse.lil_matrix(X_test, shape=X_test.shape)
Y_test = sparse.lil_matrix(Y_test, shape=Y_test.shape)
return X_train, Y_train, X_test, Y_test
# ---------------------------- Classifiers ---------------------------
def predict_classifier_chain(base_classifier, X_train, Y_train, X_test):
# Classifier Chains
cc = ClassifierChain(
classifier=base_classifier,
require_dense=[False, True],
)
return cc.fit(X_train, Y_train).predict(X_test)
def predict_binary_relevance(base_classifier, X_train, Y_train, X_test):
# Binary Relevance
br = BinaryRelevance(
classifier=base_classifier,
require_dense=[False, True],
)
return br.fit(X_train, Y_train).predict(X_test)
def predict_label_powerset(base_classifier, X_train, Y_train, X_test):
# Label Powerset
lp = LabelPowerset(
classifier=base_classifier,
require_dense=[False, True],
)
return lp.fit(X_train, Y_train).predict(X_test)
def predict_multilabel_k_nearest_neighbors(X_train, Y_train, X_test):
# Multi-label k-Nearest Neighbors
mlknn = MLkNN(
k=1,
s=0.5,
)
return mlknn.fit(X_train, Y_train).predict(X_test)
def predict_binary_relevance_k_nearest_neighbors(X_train, Y_train, X_test):
# Binary Relevance k-Nearest Neighbors
brknn = BRkNNaClassifier(
k=3,
)
return brknn.fit(X_train, Y_train).predict(X_test)
def predict_label_space_partitioning_classifier(base_classifier, X_train, Y_train, X_test):
graph_builder = LabelCooccurrenceGraphBuilder(weighted=True,
include_self_edges=False)
clusterer = NetworkXLabelGraphClusterer(graph_builder, method='louvain')
lspc = LabelSpacePartitioningClassifier(
classifier=BinaryRelevance(
classifier=base_classifier,
require_dense=[False, True],
),
clusterer=clusterer
)
return lspc.fit(X_train, Y_train).predict(X_test)
def predict_majority_voting_classifier(base_classifier, X_train, Y_train, X_test):
graph_builder = LabelCooccurrenceGraphBuilder(weighted=True,
include_self_edges=False)
clusterer = NetworkXLabelGraphClusterer(graph_builder, method='louvain')
mvc = MajorityVotingClassifier(
classifier=BinaryRelevance(
classifier=base_classifier,
require_dense=[False, True],
),
clusterer=clusterer
)
return mvc.fit(X_train, Y_train).predict(X_test)
# ---------------------------- Metrics ---------------------------
def calculate_metrics(Y_hat, Y_test):
accuracy = metrics.accuracy_score(Y_test, Y_hat)
f1_score = metrics.f1_score(Y_test, Y_hat, average='weighted')
return accuracy, f1_score
# ----------------------------- Main ------------------------------
def main():
dataset_name = 'datasets/dataset_normal'
label_count = 16
    # length_train sets how much data is used for the training part of the model; if the variable is not defined, the whole training set is used
    # length_test sets how much data is used for the test part of the model; if the variable is not defined, the whole test set is used
X_train, Y_train, X_test, Y_test = load_custom_dataset(dataset_name, label_count)
# X_train, Y_train, X_test, Y_test = load_yeast_dataset()
classifier = DecisionTreeClassifier()
cc_Y_hat = predict_classifier_chain(classifier, X_train, Y_train, X_test)
cc_ac, cc_f1 = calculate_metrics(cc_Y_hat, Y_test)
print("############# Classifier Chains ############")
    print("Accuracy: ", cc_ac, "F1-weighted: ", cc_f1)
br_Y_hat = predict_binary_relevance(classifier, X_train, Y_train, X_test)
br_ac, br_f1 = calculate_metrics(br_Y_hat, Y_test)
print("############# Binary Relevance ############")
    print("Accuracy: ", br_ac, "F1-weighted: ", br_f1)
lp_Y_hat = predict_label_powerset(classifier, X_train, Y_train, X_test)
lp_ac, lp_f1 = calculate_metrics(lp_Y_hat, Y_test)
print("############# Label Powerset ############")
    print("Accuracy: ", lp_ac, "F1-weighted: ", lp_f1)
# mlknn_Y_hat = predict_multilabel_k_nearest_neighbors(X_train, Y_train, X_test)
# mlknn_ac, mlknn_f1 = calculate_metrics(mlknn_Y_hat, Y_test)
# print("############# Multi-label k-Nearest Neighbors ############")
# print("Accuracy: ", mlknn_ac, "F1-micro: ", mlknn_f1)
# brknn_Y_hat = predict_binary_relevance_k_nearest_neighbors(X_train, Y_train, X_test)
# brknn_ac, brknn_f1 = calculate_metrics(brknn_Y_hat, Y_test)
# print("############# Binary Relevance k-Nearest Neighbors ############")
# print("Accuracy: ", brknn_ac, "F1-micro: ", brknn_f1)
lspc_Y_hat = predict_label_space_partitioning_classifier(classifier, X_train, Y_train, X_test)
lspc_ac, lspc_f1 = calculate_metrics(lspc_Y_hat, Y_test)
print("############# Label Space Partitioning Classifier ############")
    print("Accuracy: ", lspc_ac, "F1-weighted: ", lspc_f1)
mvc_Y_hat = predict_majority_voting_classifier(classifier, X_train, Y_train, X_test)
mvc_ac, mvc_f1 = calculate_metrics(mvc_Y_hat, Y_test)
print("############# Majority Voting Classifier ############")
    print("Accuracy: ", mvc_ac, "F1-weighted: ", mvc_f1)
if __name__ == '__main__':
cProfile.run('main()')
|
StarcoderdataPython
|
127183
|
from .schema import ReactionNames, ReactionECs, ReactionMetabolites, ReactionAlternatives
def ecs(rid):
query = ReactionECs.select().where(ReactionECs.rid == rid)
return [item.ec for item in query]
def ids(ec):
query = ReactionECs.select().where(ReactionECs.ec == ec)
return [item.rid for item in query]
def reactants(rid):
query = ReactionMetabolites.select().where(
(ReactionMetabolites.rid == rid) & (ReactionMetabolites.stoichiometry < 0)
)
return [(item.stoichiometry, item.mid) for item in query]
def products(rid):
query = ReactionMetabolites.select().where(
(ReactionMetabolites.rid == rid) & (ReactionMetabolites.stoichiometry > 0)
)
return [(item.stoichiometry, item.mid) for item in query]
def alternatives(rid, include_self = True):
query = ReactionAlternatives.select().where(ReactionAlternatives.rid == rid)
out = [rid] if include_self else []
return out + [item.alternative for item in query]
|
StarcoderdataPython
|
11252734
|
from django.contrib import admin
from .models import User, Profile, Application, UserReward
class UserAdmin(admin.ModelAdmin):
model = User
admin.site.register(User, UserAdmin)
admin.site.register(Profile)
admin.site.register(Application)
admin.site.register(UserReward)
|
StarcoderdataPython
|
6659943
|
<gh_stars>0
# BEGIN: Copyright
# Copyright (C) 2019 Rector and Visitors of the University of Virginia
# All rights reserved
# END: Copyright
# BEGIN: License
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# END: License
import os
import re
import tempfile
from datetime import datetime
from shutil import copyfile
import chardet
# from nssacPreCommitHook.git import Git, Status
class Header:
def __init__(self, _git, _copyright, _license = None):
self.copyrights = _copyright
for c in self.copyrights:
if not "startYear" in c:
c["startYear"] = 0
else:
c["startYear"] = int(c["startYear"])
self.git = _git
self.copyrights = sorted(self.copyrights, key = lambda i: i["startYear"], reverse = True)
self.license = _license
self.commentStart = ""
self.commentEnd = ""
self.output = None
def updateHeader(self, inFile, commentStart, commentEnd = "", prolog = [], mode = "now"):
self.commentStart = commentStart
self.commentEnd = commentEnd
skipExistingLicense = False
skipExistingCopyright = False
TmpFile = tempfile.mktemp()
copyfile(inFile, TmpFile)
self.output = open(inFile, "w")
# Try to work around choking on ISO-8859-1 files (etc.)
with open(TmpFile, "rb") as fh:
guess_encoding = chardet.detect(fh.read())['encoding']
Input = open(TmpFile, "r", encoding=guess_encoding)
self.skipProlog(Input, prolog)
if self.copyrights:
if mode == "now":
CurrentYear = datetime.now().year
else:
Out, Err, Code = self.git("log", "-1", "--date=short", "--pretty=format:\"%ad\"", inFile)
Result = Out.splitlines()
if not Result:
CurrentYear = datetime.now().year
else:
CurrentYear = int(Result[0].strip('"')[0:4])
Out, Err, Code = self.git("log", "--reverse", "--date=short", "--pretty=format:\"%ad\"", inFile)
Result = Out.splitlines()
if not Result:
FirstYear = CurrentYear
else:
FirstYear = int(Result[0].strip('"')[0:4])
self.writeCopyright(FirstYear, CurrentYear)
skipExistingCopyright = True
if self.license:
self.writeLicense()
skipExistingLicense = True
if skipExistingLicense and skipExistingCopyright:
SectionStart = re.compile("{:s} BEGIN: (License|Copyright) *{:s}".format(self.commentStart, self.commentEnd))
SectionEnd = re.compile("{:s} END: (License|Copyright) *{:s}".format(self.commentStart, self.commentEnd))
elif skipExistingLicense:
SectionStart = re.compile("{:s} BEGIN: License *{:s}".format(self.commentStart, self.commentEnd))
SectionEnd = re.compile("{:s} END: License *{:s}".format(self.commentStart, self.commentEnd))
elif skipExistingCopyright:
SectionStart = re.compile("{:s} BEGIN: Copyright *{:s}".format(self.commentStart, self.commentEnd))
SectionEnd = re.compile("{:s} END: Copyright *{:s}".format(self.commentStart, self.commentEnd))
else:
self.write(Input.read())
return
EmptyLine = re.compile('^\\s*$')
Skip = False
SkipEmpty = True
for Line in Input:
if Skip:
if SectionEnd.search(Line):
Skip = False
SkipEmpty = True
continue
if SkipEmpty:
if EmptyLine.search(Line):
continue
else:
SkipEmpty = False
if SectionStart.search(Line):
Skip = True
continue
self.write(Line)
Input.close()
self.output.close()
os.remove(TmpFile)
def skipProlog(self, file, prolog):
if not prolog: return
LinesToWrite = 0
for p in prolog:
MaxLines = p["maxLines"] if "maxLines" in p else 0
Unlimited = (MaxLines == 0)
PrologEnd = re.compile(p["end"])
Finished = False
LinesToWrite = 0
for Line in file:
if not Unlimited and MaxLines <= 0:
break
LinesToWrite += 1
MaxLines -= 1
if PrologEnd.search(Line):
Finished = True
break
file.seek(0)
if Finished:
break
if Finished:
AppendLine = False
while LinesToWrite > 0:
AppendLine = True
LinesToWrite -= 1
self.output.write(file.readline())
if AppendLine:
self.output.write("\n")
def writeCopyright(self, firstYear, lastYear):
self.writeComment("BEGIN: Copyright")
for c in self.copyrights:
if lastYear > c["startYear"]:
FirstYear = max([firstYear, c["startYear"]])
if FirstYear == lastYear:
Range = "{:d}".format(FirstYear)
else:
Range = "{:d} - {:d}".format(FirstYear, lastYear)
for t in c["text"]:
self.writeComment(t.format(Range))
if firstYear >= c["startYear"]:
break
lastYear = c["startYear"] - 1
self.write("\n")
self.writeComment("END: Copyright")
self.write("\n")
def writeLicense(self):
self.writeComment("BEGIN: License")
for t in self.license:
self.writeComment(t)
self.writeComment("END: License")
self.write("\n")
def write(self, line):
self.output.write(line)
def writeComment(self, line):
self.output.write("{:s} {:s} {:s}\n".format(self.commentStart, line, self.commentEnd))
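# Configuration sketch (illustrative, inferred from Header.__init__ and writeCopyright above):
# `_copyright` is expected to be a list of dicts with an optional "startYear" and a "text"
# list whose entries may contain a format placeholder for the year range, and `_license` a
# plain list of lines. `_git` is a callable that runs git and returns (out, err, code).
#
#   copyright_cfg = [
#       {"startYear": 2019, "text": ["Copyright (C) {} Example Institution",
#                                    "All rights reserved"]},
#   ]
#   license_cfg = ["Licensed under the Apache License, Version 2.0 (the 'License');"]
#   header = Header(git_runner, copyright_cfg, license_cfg)
#   header.updateHeader("some_file.py", commentStart="#")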
|
StarcoderdataPython
|
1947469
|
<gh_stars>0
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
print("\nData: \n\n", data)
print("\nType of data: \n\n", type(data))
#Code starts here
census= np.concatenate((new_record,data))
print("census: \n\n", census)
print(data.shape)
print(census.shape)
#We often associate the potential of a country based on the age distribution of the people residing there. We too want to do a simple analysis of the age distribution.
age= (census[:,0])
max_age= max(age)
min_age= min(age)
age_mean= np.mean(age)
age_std= np.std(age)
print(age)
print(max_age)
print(min_age)
print(age_mean)
print(age_std)
#The constitution of the country tries it's best to ensure that people of all races are able to live harmoniously. Let's check the country's race distribution to identify the minorities so that the government can help them.
race_0= census[census[:,2]==0]
race_1= census[census[:,2]==1]
race_2= census[census[:,2]==2]
race_3= census[census[:,2]==3]
race_4= census[census[:,2]==4]
len_0= len(race_0)
len_1= len(race_1)
len_2= len(race_2)
len_3= len(race_3)
len_4= len(race_4)
race_list=[len_0,len_1,len_2,len_3,len_4]
minority_race=race_list.index(min(race_list))
print(minority_race)
#As per the new govt. policy, all citizens above age 60 should not be made to work more than 25 hours per week. Let us look at the data and see if that policy is followed.
senior_citizens= census[census[:,0]>60]
working_hours_sum= senior_citizens.sum(axis=0)[6]
print(working_hours_sum)
senior_citizens_len= len(senior_citizens)
print(senior_citizens_len)
avg_working_hours= working_hours_sum/senior_citizens_len
print(avg_working_hours)
#Our parents have repeatedly told us that we need to study well in order to get a good(read: higher-paying) job. Let's see whether the higher educated people have better pay in general.
high= census[census[:,1]>10]
low= census[census[:,1]<=10]
print(high)
print(low)
avg_pay_high= high[:,7].mean()
avg_pay_low= low[:,7].mean()
print(avg_pay_high)
print(avg_pay_low)
|
StarcoderdataPython
|
6596820
|
<gh_stars>0
import re
from typing import Optional
from il2fb.ds.events.definitions.cheating import CheatingInfo
from il2fb.ds.events.definitions.cheating import CheatingDetectedEvent
from .base import PlainLineParser
from ._utils import export
CHEATING_REGEX = re.compile(
r"^socket channel '(?P<channel_no>\d+)' Cheater was detected! Reason=(?P<cheat_code>-?\d+): '(?P<cheat_details>.+)'\s*$"
)
@export
class CheatingLineParser(PlainLineParser):
"""
Parses cheating detection messages.
Examples of input lines:
"socket channel '203' Cheater was detected! Reason=8: 'Cheat-Engine'"
"socket channel '87' Cheater was detected! Reason=-557645630: 'Unknow'"
"socket channel '751' Cheater was detected! Reason=118227478: 'Unknow'"
"socket channel '145' Cheater was detected! Reason=7: 'Il2trainerstable'"
"""
def parse_line(self, line: str) -> Optional[CheatingDetectedEvent]:
match = CHEATING_REGEX.match(line)
if not match:
return
channel_no = int(match.group('channel_no'))
cheat_code = int(match.group('cheat_code'))
cheat_details = match.group('cheat_details')
return CheatingDetectedEvent(CheatingInfo(
channel_no=channel_no,
cheat_code=cheat_code,
cheat_details=cheat_details,
))
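# Usage sketch (illustrative, based on the docstring examples above):
#
#   parser = CheatingLineParser()
#   event = parser.parse_line(
#       "socket channel '203' Cheater was detected! Reason=8: 'Cheat-Engine'"
#   )
#   # event is a CheatingDetectedEvent wrapping CheatingInfo(channel_no=203, cheat_code=8,
#   # cheat_details='Cheat-Engine'); lines that do not match the regex return None.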
|
StarcoderdataPython
|
3360880
|
"""
The :mod:`kavica.parser` module includes data file parsers.
"""
from .prvparse import (ControlCZInterruptHandler,
ExtensionPathType,
ParsedArgs,
Parser)
__all__ = ['ControlCZInterruptHandler',
'ExtensionPathType',
'ParsedArgs',
'Parser']
|
StarcoderdataPython
|
6490865
|
import requests
from bs4 import BeautifulSoup
for _ in range(200):
r = requests.post('http://howstat.com/cricket/Quiz/Quiz.asp',data = {'cboCategory': 'J', 'txtAction': 'Start'})
s = BeautifulSoup(r.text, 'lxml')
x = s.find_all('td', {'class': 'TextCrimsonBold10'})
question = x[0].text.strip()
answers = [x[1].text.strip(), x[2].text.strip(), x[3].text.strip()]
print(question)
    for answer in answers:
        print(answer)
print('\n')
|
StarcoderdataPython
|
12826031
|
<reponame>patmloi/PalettePal
# Copyright 2021, <NAME>, mailto:<EMAIL>
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Signing of executables.
"""
from nuitka.Tracing import postprocessing_logger
from .Execution import executeToolChecked
from .FileOperations import withMadeWritableFileMode
_macos_codesign_usage = "The 'codesign' is used to remove invalidated signatures on macOS and required to be found."
def removeMacOSCodeSignature(filename):
"""Remove the code signature from a filename.
Args:
filename - The file to be modified.
Returns:
None
Notes:
This is macOS specific.
"""
with withMadeWritableFileMode(filename):
executeToolChecked(
logger=postprocessing_logger,
command=["codesign", "--remove-signature", "--all-architectures", filename],
absence_message=_macos_codesign_usage,
)
def addMacOSCodeSignature(filename, identity, entitlements_filename, deep):
extra_args = []
# Weak signing is supported.
if not identity:
identity = "-"
command = [
"codesign",
"-s",
identity,
"--force",
"--timestamp",
"--all-architectures",
]
# hardened runtime unless no good identify
if identity != "-":
extra_args.append("--options=runtime")
if entitlements_filename:
extra_args.append("--entitlements")
extra_args.append(entitlements_filename)
if deep:
extra_args.append("--deep")
command.append(filename)
with withMadeWritableFileMode(filename):
executeToolChecked(
logger=postprocessing_logger,
command=command,
absence_message=_macos_codesign_usage,
)
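# Usage sketch (illustrative; the path below is hypothetical): ad-hoc re-signing of a built
# binary with the helpers above could look like this. A falsy identity falls back to "-"
# (ad-hoc signing) inside addMacOSCodeSignature.
#
#   removeMacOSCodeSignature("dist/app.bin")
#   addMacOSCodeSignature("dist/app.bin", identity=None, entitlements_filename=None, deep=False)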
|
StarcoderdataPython
|
6627271
|
<reponame>kaichengyan/amplify-ci-support<filename>src/integ_test_resources/ios/sdk/integration/cdk/cdk_integration_tests_ios/polly_stack.py
from aws_cdk import aws_iam, aws_s3, core
from common.common_stack import CommonStack
from common.platforms import Platform
from common.region_aware_stack import RegionAwareStack
class PollyStack(RegionAwareStack):
def __init__(self, scope: core.Construct, id: str, common_stack: CommonStack, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
self._supported_in_region = self.is_service_supported_in_region()
self.create_bucket(common_stack)
all_resources_policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=[
"polly:DeleteLexicon",
"polly:GetSpeechSynthesisTask",
"polly:ListSpeechSynthesisTasks",
"polly:PutLexicon",
"polly:StartSpeechSynthesisTask",
"polly:SynthesizeSpeech",
],
resources=["*"],
)
common_stack.add_to_common_role_policies(self, policy_to_add=all_resources_policy)
self.save_parameters_in_parameter_store(platform=Platform.IOS)
def create_bucket(self, common_stack):
bucket_name = self.get_bucket_name("output")
bucket = aws_s3.Bucket(
self,
"integ_test_polly_output_bucket",
bucket_name=bucket_name,
removal_policy=core.RemovalPolicy.DESTROY,
)
self._parameters_to_save["s3_output_bucket_name"] = bucket.bucket_name
policy = aws_iam.PolicyStatement(
effect=aws_iam.Effect.ALLOW,
actions=["s3:PutObject"],
resources=[f"arn:aws:s3:::{bucket_name}/*"],
)
common_stack.add_to_common_role_policies(self, policy_to_add=policy)
|
StarcoderdataPython
|
9732894
|
<reponame>BDonnot/grid2op_pp_baseline
__all__ = [
"PandapowerOPFAgent",
"evaluate",
]
from l2rpn_baselines.PandapowerOPFAgent.PandapowerOPFAgent import PandapowerOPFAgent
from l2rpn_baselines.PandapowerOPFAgent.evaluate import evaluate
"""
In the __init__ file, it is expected to export 3 classes with names that depends on the name you gave to your baseline.
For example, say you chose to write a baseline with the awesome name "XXX" (what an imagination!) you should export
in this __init__.py file:
- `XXX` [**mandatory**] contains the definition of your baseline. It must follow the directives
given in "Template.py"
- `evaluate` [**mandatory**] contains the script to evaluate the performance of this baseline. It must
follow the directive in "evaluate.py"
- `train` [**optional**] contains the script to train your baseline. If provided, it must follow
the directives given in "train.py"
See the import above for an example on how to export your scripts properly.
"""
|
StarcoderdataPython
|
112934
|
# Copyright 2019 Toyota Research Institute. All rights reserved.
import importlib
import os
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py
from setuptools.command.develop import develop
from setuptools.command.install import install
def build_protos():
SETUP_DIR = os.path.dirname(os.path.abspath(__file__))
from grpc.tools import command
command.build_package_protos(SETUP_DIR)
class CustomBuildPyCommand(build_py):
def run(self):
build_protos()
build_py.run(self)
class CustomInstallCommand(install):
def run(self):
build_protos()
install.run(self)
class CustomDevelopCommand(develop):
def run(self):
build_protos()
develop.run(self)
__version__ = importlib.import_module('dgp').__version__
with open('requirements.txt') as f:
requirements = f.read().splitlines()
packages = find_packages(exclude=['tests'])
setup(
name="dgp",
version=__version__,
description="TRI Dataset Governance Policy",
long_description=open('README.md', encoding='utf-8').read(),
long_description_content_type='text/markdown',
author="Toyota Research Institute",
author_email='<EMAIL>',
url="https://github.com/TRI-ML/dgp",
packages=packages,
entry_points={'console_scripts': [
'dgp_cli=dgp.cli:main',
]},
include_package_data=True,
setup_requires=['cython==0.29.10', 'grpcio==1.21.1', 'grpcio-tools==1.21.1'],
install_requires=requirements,
zip_safe=False,
python_requires='>=3.6',
cmdclass={
'install': CustomInstallCommand,
'develop': CustomDevelopCommand,
'build_py': CustomBuildPyCommand
}
)
|
StarcoderdataPython
|
11257083
|
<reponame>Michael8968/skulpt<gh_stars>1-10
import turtle
for i in range(6):
turtle.forward(100)
turtle.right(60)
turtle.done()
|
StarcoderdataPython
|
3570688
|
import unittest
import pytest
from pyalink.alink import *
class TestEnvironment(unittest.TestCase):
@pytest.mark.pyflink
def test_batch_get_table(self):
source = CsvSourceBatchOp() \
.setSchemaStr(
"sepal_length double, sepal_width double, petal_length double, petal_width double, category string") \
.setFilePath("https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv")
split = SplitBatchOp().setFraction(0.3).linkFrom(source)
t0 = TableSourceBatchOp(split.getOutputTable())
t1 = TableSourceBatchOp(split.getSideOutput(0).getOutputTable())
df0, df1 = collectToDataframes(t0, t1)
print(df0)
print(df1)
@pytest.mark.pyflink
def test_stream_get_table(self):
source = CsvSourceStreamOp() \
.setSchemaStr(
"sepal_length double, sepal_width double, petal_length double, petal_width double, category string") \
.setFilePath("https://alink-test-data.oss-cn-hangzhou.aliyuncs.com/iris.csv")
split = SplitStreamOp().setFraction(0.3).linkFrom(source)
t0 = TableSourceStreamOp(split.getOutputTable())
t1 = TableSourceStreamOp(split.getSideOutput(0).getOutputTable())
t0.print()
t1.print()
StreamOperator.execute()
|
StarcoderdataPython
|
6706669
|
import tensorflow as tf
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
|
StarcoderdataPython
|
1999982
|
<gh_stars>10-100
from al_services.alsvc_mcafee.mcafee import McAfee
|
StarcoderdataPython
|
12831340
|
from __future__ import division
inner_phil_str = """\
scanbox_windows = 101 51 51
#.type = ints(size_min=1, size_max=3, value_min=10)
# future: variable number of window passes
.type = ints(size=3, value_min=10)
.help = "Integer scanbox sizes for calculating background,"
"for cycles 1,2, and 3, respectively."
"Program defaults are 101, 51, and 51 pixels."
peripheral_margin = 20
.type = int(value_min=0)
.help = "No spot detection inside margin; width in pixels."
"""
phil_str = """\
spotfinder {
%s
}
""" % inner_phil_str
labelit_related_commands = """\
#Preparation
wedgelimit = 2
.type=int
.help="maximum number of images to use for labelit indexing"
.expert_level=2
goniometer_rotation = ''
.type=str
.help="Special types are Pringle-Shen, ..."
.expert_level=3
#Coordinate systems
convention_override=None
.type=int
.help="if defined, override the image-format specific behavior (spot_convention number)"
.expert_level=3
spot_convention=None
.type=int
.help="must be set by the calling program; no default"
.expert_level=3
#Spotfinder
override_pickled_spotfinders = True
.type=bool
.help="automatically erase any existing DISTL_pickle file"
.expert_level=3
spotfinder_header_tests = True
.type=bool
.expert_level=3
spotfinder_mode = 'distl'
.type=str
.expert_level=3
spotfinder_verbose = False
.type=bool
.expert_level=2
force_method2_resolution_limit = None
.type=float
.help="override resolution analysis based on spot count falloff; force spots at least this far out."
.expert_level=2
distl_lowres_limit = 50.0
.type=float
.help="don't pick spots inside this resolution limit"
.expert_level=3
distl_highres_limit = None
.type=float
.help="don't pick spots outside this resolution limit"
.expert_level=3
distl_binned_image_spot_size = 4
.type=int
.expert_level=2
distl_maximum_number_spots_for_indexing = 300
.type=int
.expert_level=3
distl_minimum_number_spots_for_indexing = 40
.type=int
.expert_level=3
distl_profile_bumpiness = 2
.type=int
.help="maximum number of local maxima in good Bragg spots"
.expert_level=2
distl_report_overloads = True
.type=bool
.expert_level=3
distl_keep_Zdata = True
.type=bool
.expert_level=3
percent_overlap_forcing_detail = 30.
.type=float
.help="detail examination of spots with nearest neighbor analysis and overlap likelihood"
.expert_level=3
overlapping_spot_criterion = 1.2
.type=float
.help="in multiples of the semimajor axis"
.expert_level=3
spots_pickle = './DISTL_pickle'
.type=str
.expert_level=3
distl_spotcenter_algorithm = 'center_of_mass'
.type=str
.help="either center_of_mass or maximum_pixel"
.expert_level=3
distl_permit_binning=True
.type=bool
.multiple=False
.help="Permit binning for large images; set False for Web-Ice since diffimage always renders unbinned."
.expert_level=2
distl_force_binning=False
.type=bool
.multiple=False
.help="Force binning for all images; only used for development and troubleshooting."
.expert_level=2
#Data Parameters to Autoindex; Some Affect Spotfinder Also
autoindex_override_beam = None
.type=floats(size=2)
.help="x and y coordinates of the direct beam in mm"
.expert_level=1
autoindex_override_distance = None
.type=float
.help="crystal-to-detector distance in mm"
.expert_level=1
autoindex_override_wavelength = None
.type=float
.help="incident wavelength in Angstroms"
.expert_level=1
autoindex_override_twotheta = None
.type=float
.help="detector swing angle in degrees"
.expert_level=1
autoindex_override_deltaphi = None
.type=float
.help="single-shot rotation angle in degrees"
.expert_level=1
image_specific_osc_start = None
.type=str
.help="A lambda x expression giving the rotation in degrees given the image number,
such as lambda x: x-1.0"
.expert_level=1
codecamp {
maxcell = None
.type=float
.multiple=False
.help="Directly specify max unit cell; potentially allow contiguous images"
.expert_level=2
minimum_spot_count = None
.type=int
.help="For determining spot masks on single images, minimum allowable spot count"
.expert_level=2
}
pdf_output {
file=""
.type=str
.multiple=False
.help="If given, specify a file path to output a picture of the sublattice model."
.expert_level=4
box_selection="all"
.type=str
.multiple=False
.help="index: show original superlattice | coset: show spots unique to the sublattice | all: default, show both"
.expert_level=4
enable_legend=False
.type=bool
.multiple=False
.help="Print the Miller indices, in the triclinic sublattice basis system"
.expert_level=4
enable_legend_font_size=10
.type=float
.multiple=False
.help="Print the Miller indices, font size in points"
.expert_level=4
enable_legend_ink_color=black
.type=str
.multiple=False
.help="Print the Miller indices, ink color"
.expert_level=4
enable_legend_vertical_offset=10
.type=float
.multiple=False
.help="Print the Miller indices, vertical legend offset"
.expert_level=4
box_linewidth=0.04
.type=float
.multiple=False
.help="Line width for the rectangular box enclosing the spot profile"
.expert_level=4
window_fraction=0.666666
.type=float
.multiple=False
.help="Fractional length of image x,y dimensions rendered to pdf; use fraction for x,y"
.expert_level=4
window_offset_x=0.16667
.type=float
.multiple=False
.help="Fractional offset of image x dimension for the window rendered to pdf"
.expert_level=4
window_offset_y=0.16667
.type=float
.multiple=False
.help="Fractional offset of image y dimension for the window rendered to pdf"
.expert_level=4
markup_inliers=True
.type=bool
.multiple=False
.help="Markup the filtered Bragg candidates, peak and profile center"
.expert_level=4
render_all=False
.type=bool
.multiple=False
.help="Show spot predictions for all possible sublattices on separate pages"
.expert_level=4
profile_shrink=0
.type=int
.multiple=False
.help="For clarity of view, shrink the profile box by # of pixels"
.expert_level=4
}
"""
|
StarcoderdataPython
|
9682790
|
theAnswer=42
def quote_marvin():
print('I\'m Sam , how are u ?')
|
StarcoderdataPython
|
6654753
|
<reponame>timgates42/lore<gh_stars>1000+
import inspect
import importlib
import json
import logging
import pkgutil
import lore
import lore.util
import lore.env
from lore.env import require
from lore.util import timer
require(
lore.dependencies.PANDAS +
lore.dependencies.FLASK
)
import pandas
from flask import Flask, request
app = Flask(lore.env.APP)
logger = logging.getLogger(__name__)
@app.route('/')
def index():
names = str([name for _, name, _ in pkgutil.iter_modules([lore.env.APP + '/' + 'models'])])
return 'Hello %s!' % lore.env.APP + '\n' + names
for module_finder, module_name, _ in pkgutil.iter_modules([lore.env.APP + '/' + 'models']):
module = importlib.import_module(lore.env.APP + '.models.' + module_name)
for class_name, member in inspect.getmembers(module):
if not (inspect.isclass(member) and issubclass(member, lore.models.base.Base)):
continue
qualified_name = module_name + '.' + class_name
with timer('load %s' % qualified_name):
best = member.load()
        def predict(best=best):  # bind this iteration's model now; otherwise every route would use the last loaded model
logger.debug(request.args)
data = {arg: request.args.getlist(arg) for arg in request.args.keys()}
try:
data = pandas.DataFrame(data)
except ValueError:
return 'Malformed data!', 400
logger.debug(data)
try:
result = best.predict(data)
except KeyError as ex:
return 'Missing data!', 400
return json.dumps(result.tolist()), 200
predict.__name__ = best.name + '.predict'
rule = '/' + qualified_name + '/predict.json'
logger.info('Adding url rule for prediction: %s' % rule)
app.add_url_rule(rule, view_func=predict)
|
StarcoderdataPython
|
11222703
|
<gh_stars>0
# coding: utf-8
from .admin_forms import ChangeProductCategoryXMLForm, ChangeProductPrintTypeForm, \
ChangeCategoryXMLCategorySiteForm, ChangeBrandMakerBrandForm, \
ChangePrintTypeMakerPrintTypeForm, ChangeCategoryXMLMakerForm, \
ChangeProductBrandForm, ChangeProductMakerForm, ChangeProductStatusForm
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.contrib import admin
def product_add_category_xml(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeProductCategoryXMLForm(request.POST)
if form.is_valid():
category_xml = form.cleaned_data['category_xml']
count = 0
for item in queryset:
item.category_xml.add(category_xml)
item.save()
count += 1
modeladmin.message_user(request, 'Категория от поставщика {} '
'применена к {} товарам.'.
format(category_xml, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeProductCategoryXMLForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/product_add_category_xml.html',
{
'items': queryset,
'form': form,
'title': 'Добавление категории '
'от поставщика'
}
)
product_add_category_xml.short_description = 'Добавить КАТЕГОРИЮ ОТ ПОСТАВЩИКА'
def product_clear_category_xml_s(modeladmin, request, queryset):
for item in queryset:
item.category_xml.clear()
product_clear_category_xml_s.short_description = 'Очистить поле КАТЕГОРИИ ОТ ПОСТАВЩИКА'
def product_add_print_type(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeProductPrintTypeForm(request.POST)
if form.is_valid():
print_type = form.cleaned_data['print_type']
count = 0
for item in queryset:
item.print_type.add(print_type)
item.save()
count += 1
modeladmin.message_user(request, 'Вид нанесения от поставщика {} '
'применен к {} товарам.'.
format(print_type, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeProductPrintTypeForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/product_add_print_type.html',
{
'items': queryset,
'form': form,
'title': 'Добавить вид нанесения '
'от поставщика'
}
)
product_add_print_type.short_description = 'Добавить ВИД НАНЕСЕНИЯ ОТ ПОСТАВЩИКА'
def product_clear_print_type_s(modeladmin, request, queryset):
for item in queryset:
item.print_type.clear()
product_clear_print_type_s.short_description = 'Очистить поле ВИДЫ НАНЕСЕНИЯ ОТ ПОСТАВЩИКА'
def product_add_brand(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeProductBrandForm(request.POST)
if form.is_valid():
brand = form.cleaned_data['brand']
count = 0
for item in queryset:
item.brand = brand
item.save()
count += 1
modeladmin.message_user(request, 'Бренд от поставщика {} '
'применен к {} товарам.'.
format(brand, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeProductBrandForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/product_add_brand.html',
{
'items': queryset,
'form': form,
'title': 'Добавить бренд '
'от поставщика'
}
)
product_add_brand.short_description = 'Добавить БРЕНД ОТ ПОСТАВЩИКА'
def product_clear_brand(modeladmin, request, queryset):
for item in queryset:
item.brand.clear()
product_clear_brand.short_description = 'Очистить поле БРЕНД ОТ ПОСТАВЩИКА'
def product_add_status(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeProductStatusForm(request.POST)
if form.is_valid():
status = form.cleaned_data['status']
count = 0
for item in queryset:
item.status = status
item.save()
count += 1
modeladmin.message_user(request, 'Статус {} '
'применен к {} товарам.'.
format(status, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeProductStatusForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/product_add_status.html',
{
'items': queryset,
'form': form,
'title': 'Добавить статус'
}
)
product_add_status.short_description = 'Добавить СТАТУС'
def product_clear_status(modeladmin, request, queryset):
for item in queryset:
item.status.clear()
product_clear_status.short_description = 'Очистить поле СТАТУС'
def product_add_maker(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeProductMakerForm(request.POST)
if form.is_valid():
maker = form.cleaned_data['maker']
count = 0
for item in queryset:
item.maker = maker
item.save()
count += 1
modeladmin.message_user(request, 'Поставщик {} '
'применен к {} товарам.'.
format(maker, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeProductMakerForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/product_add_maker.html',
{
'items': queryset,
'form': form,
'title': 'Добавить поставщика'
}
)
product_add_maker.short_description = 'Добавить ПОСТАВЩИКА'
def product_clear_maker(modeladmin, request, queryset):
for item in queryset:
item.maker.clear()
product_clear_maker.short_description = 'Очистить поле ПОСТАВЩИКА'
def category_xml_add_category_site(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeCategoryXMLCategorySiteForm(request.POST)
if form.is_valid():
category_site = form.cleaned_data['category_site']
count = 0
for item in queryset:
item.category_site = category_site
item.save()
count += 1
modeladmin.message_user(request, 'Категория на сайте {} '
'применена к {} категориям от поставщика.'.
format(category_site, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeCategoryXMLCategorySiteForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/category_xml_add_category_site.html',
{
'items': queryset,
'form': form,
'title': 'Добавление категории '
'на сайте'
}
)
category_xml_add_category_site.short_description = 'Добавить КАТЕГОРИЮ НА САЙТЕ'
def category_xml_clear_category_site(modeladmin, request, queryset):
    for item in queryset:
        item.category_site = None
        item.save()
category_xml_clear_category_site.short_description = 'Очистить поле КАТЕГОРИЯ НА САЙТЕ'
def category_xml_add_maker(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeCategoryXMLMakerForm(request.POST)
if form.is_valid():
maker = form.cleaned_data['maker']
count = 0
for item in queryset:
item.maker = maker
item.save()
count += 1
modeladmin.message_user(request, 'Поставщик {} '
'применен к {} категориям от поставщика.'.
format(maker, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeCategoryXMLMakerForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/category_xml_add_maker.html',
{
'items': queryset,
'form': form,
'title': 'Изменение поставщика'
}
)
category_xml_add_maker.short_description = 'Изменить ПОСТАВЩИКА'
def brand_maker_add_brand(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangeBrandMakerBrandForm(request.POST)
if form.is_valid():
brand = form.cleaned_data['brand']
count = 0
for item in queryset:
item.brand = brand
item.save()
count += 1
modeladmin.message_user(request, 'Бренд на сайте {} '
'применен к {} брендам от поставщика.'.
format(brand, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangeBrandMakerBrandForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/brand_maker_add_brand.html',
{
'items': queryset,
'form': form,
'title': 'Добавление бренда '
'на сайт'
}
)
brand_maker_add_brand.short_description = 'Добавить БРЕНД НА САЙТЕ'
def brand_maker_clear_brand(modeladmin, request, queryset):
    for item in queryset:
        item.brand = None
        item.save()
brand_maker_clear_brand.short_description = 'Очистить поле БРЕНД НА САЙТЕ'
def print_type_maker_add_print_type(modeladmin, request, queryset):
form = None
if 'apply' in request.POST:
form = ChangePrintTypeMakerPrintTypeForm(request.POST)
if form.is_valid():
print_type = form.cleaned_data['print_type']
count = 0
for item in queryset:
item.print_type = print_type
item.save()
count += 1
modeladmin.message_user(request, 'Вид нанесения от поставщика {} '
'применен к {} видам нанесения на сайте.'.
format(print_type, count))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = ChangePrintTypeMakerPrintTypeForm(initial={'_selected_action': request.
POST.getlist(admin.ACTION_CHECKBOX_NAME)})
return render(request, 'admin/catalog/actions/print_type_maker_add_print_type.html',
{
'items': queryset,
'form': form,
                      'title': 'Добавление вида нанесения '
'на сайт'
}
)
print_type_maker_add_print_type.short_description = 'Добавить ВИД НАНЕСЕНИЯ НА САЙТЕ'
def print_type_maker_clear_print_type(modeladmin, request, queryset):
    for item in queryset:
        item.print_type = None
        item.save()
print_type_maker_clear_print_type.short_description = 'Очистить поле ВИД НАНЕСЕНИЯ НА САЙТЕ'
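# Hypothetical wiring sketch (the ModelAdmin and model names below are assumptions,
# not taken from this file): the actions defined above are meant to be listed in the
# ``actions`` attribute of the corresponding ModelAdmin, for example:
#
# class ProductAdmin(admin.ModelAdmin):
#     actions = [product_add_brand, product_clear_brand, product_add_status,
#                product_clear_status, product_add_maker, product_clear_maker]
#
# admin.site.register(Product, ProductAdmin)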
|
StarcoderdataPython
|
8056803
|
<filename>binarytree_impl.py
from trees.binarytree import BinaryTree
r = BinaryTree('a')
print(r.get_root_value())
print(r.get_left_child())
r.insert_left('b')
print(r.get_left_child())
print(r.get_left_child().get_root_value())
r.insert_right('c')
print(r.get_right_child())
print(r.get_right_child().get_root_value())
r.get_right_child().set_root_value('hello')
print(r.get_right_child().get_root_value())
|
StarcoderdataPython
|
93470
|
from Spread.stddevct import StdDevCT
from Operations.differencepower import DifferencePower
from Spread.generalizedvariance import GeneralizedVariance
class StandardDeviation (GeneralizedVariance):
def __init__ (self, length, min_value, max_value, arithmetic_mean):
GeneralizedVariance.__init__ (self, length, min_value, max_value,
StdDevCT, DifferencePower, arithmetic_mean)
#def update (self, elem):
#def finish (self):
# GeneralizedVariance.finish (self)
# self.value = sqrt (self.ct.value)
#self.ct.finish ()
#self.value = sqrt (self.ct.value)
#Spread.finish (self)
#def validate (self):
#def cupdates (self, elem):
|
StarcoderdataPython
|
6662473
|
<reponame>emmo-repo/EMMO-python<filename>tests/test_basic.py
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from ontopy.ontology import Ontology
def test_basic(emmo: "Ontology") -> None:
from ontopy import get_ontology
emmo.sync_reasoner()
onto = get_ontology('onto.owl')
onto.imported_ontologies.append(emmo)
onto.base_iri = 'http://emmo.info/examples/test#'
# Add entity directly
onto.new_entity('Hydrogen', emmo.Atom)
with onto:
# Add entity using python classes
class Oxygen(emmo.Atom):
"""Oxygen atom."""
class H2O(emmo.Molecule):
"""Water molecule."""
emmo.hasSpatialDirectPart.exactly(2, onto.Hydrogen)
emmo.hasSpatialDirectPart.exactly(1, Oxygen)
# Create some
H1 = onto.Hydrogen()
H2 = onto.Hydrogen()
O = Oxygen()
water = H2O()
water.hasSpatialDirectPart = [H1, H2, O]
name_prefix = "myonto_"
onto.sync_attributes(name_policy='sequential', name_prefix=name_prefix)
assert f"{onto.base_iri}{name_prefix}0" in onto
assert f"{onto.base_iri}{name_prefix}6" in onto
name_prefix = "onto_"
onto.sync_attributes(name_policy='uuid', name_prefix=name_prefix)
assert water.name.startswith('onto_')
# A UUID is 32 chars long + 4 `-` chars = 36 chars
assert len(water.name) == len(name_prefix) + 36
|
StarcoderdataPython
|
8026732
|
<reponame>sekilas13/Python<filename>Basic/21_regex/regex_code.py
import re
text = "@robot9 "
print(re.findall(r"\d", text))
print(re.findall(r"\w", text))
print(re.findall(r"\s", text))
angka = "1234"
text = "Budi suka makan buah apel"
print(re.findall(r"\d+", angka))
print(re.findall(r"\w+", text))
text = "<NAME>"
print(re.findall(r"[aiueo]", text))
text = "Air, api, tanah, udara"
print(re.findall(r"Air|Water", text))
|
StarcoderdataPython
|
8100875
|
##Task
##The provided code stub reads two integers from STDIN,a and b. Add code to print three lines where:
##1.The first line contains the sum of the two numbers.
##2.The second line contains the difference of the two numbers (first - second).
##3.The third line contains the product of the two numbers.
if __name__ == '__main__':
a = int(input())
b = int(input())
print(a+b)
print(a-b)
print(a*b)
|
StarcoderdataPython
|
8037911
|
import test
# Return value test
def square(x):
return x*x
test.testEqual(square(3), 9)
# Side effect test
def update_counts(letters, counts_d):
    for c in letters:
        if c in counts_d:
            counts_d[c] = counts_d[c] + 1
        else:
            counts_d[c] = 1
counts = {'a': 3, 'b': 2}
update_counts("aaab", counts)
# 3 more occurrences of a, so 6 in all
test.testEqual(counts['a'], 6)
# 1 more occurrence of b, so 3 in all
test.testEqual(counts['b'], 3)
|
StarcoderdataPython
|
5030398
|
import sys
import logging
import tensorflow as tf
import pandas as pd
class Recorder(object):
'''
TF 2.0 Recorder
'''
def __init__(self, cp_dir, log_dir, excel_dir, logger2file, model=None):
self.writer = tf.summary.create_file_writer(log_dir)
self.checkpoint = tf.train.Checkpoint(policy=model)
self.saver = tf.train.CheckpointManager(self.checkpoint, directory=cp_dir, max_to_keep=5, checkpoint_name='rb')
self.excel_writer = pd.ExcelWriter(excel_dir + '/data.xlsx')
self.logger = self.create_logger(
name='logger',
console_level=logging.INFO,
console_format='%(levelname)s : %(message)s',
logger2file=logger2file,
file_name=log_dir + 'log.txt',
file_level=logging.WARNING,
file_format='%(lineno)d - %(asctime)s - %(module)s - %(funcName)s - %(levelname)s - %(message)s'
)
def create_logger(self, name, console_level, console_format, logger2file, file_name, file_level, file_format):
logger = logging.Logger(name)
logger.setLevel(level=console_level)
stdout_handle = logging.StreamHandler(stream=sys.stdout)
stdout_handle.setFormatter(logging.Formatter(console_format if console_level > 20 else '%(message)s'))
logger.addHandler(stdout_handle)
if logger2file:
logfile_handle = logging.FileHandler(file_name)
logfile_handle.setLevel(file_level)
logfile_handle.setFormatter(logging.Formatter(file_format))
logger.addHandler(logfile_handle)
return logger
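# Minimal usage sketch (directory paths are placeholders; ``model`` may be a
# tf.keras.Model or None, exactly as it is forwarded to tf.train.Checkpoint above):
#
# recorder = Recorder(cp_dir='./ckpt', log_dir='./logs/', excel_dir='./excel',
#                     logger2file=True, model=my_policy_network)
# recorder.logger.info('training started')
# recorder.saver.save()  # writes a checkpoint named 'rb-<step>' under ./ckpt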
|
StarcoderdataPython
|
9634083
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-08-21 06:29
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0002_auto_20170817_0530'),
]
operations = [
migrations.AlterField(
model_name='patch',
name='pool_size',
field=models.BigIntegerField(default=9223372036854775807),
),
]
|
StarcoderdataPython
|
1987924
|
"""
TODO: Exit out of all demon programs when quit
"""
import pathlib as pathlib
import world as world
import enemies as enemies
import command_parser as command_parser
verbs_path = pathlib.Path.cwd() / 'Resources' / 'verbs.txt'
with verbs_path.open(mode='r') as file:
verbs = file.readlines()
verbs = [x.strip() for x in verbs]
def link_terminal(terminal):
global terminal_output
terminal_output = terminal
def do_action(action_input, character):
if len(action_input) == 0:
terminal_output.print_text("")
return
kwargs = command_parser.parser(action_input)
DoActions.do_action(kwargs['action_verb'], character, **kwargs)
class DoActions:
def __init__(self, character, **kwargs):
self.character = character
do_actions = {}
@classmethod
def register_subclass(cls, action):
"""Catalogues actions in a dictionary for reference purposes"""
def decorator(subclass):
cls.do_actions[action] = subclass
return subclass
return decorator
@classmethod
def do_action(cls, action, character, **kwargs):
"""Method used to initiate an action"""
if action not in cls.do_actions:
terminal_output.print_text("I am sorry, I did not understand.")
return
return cls.do_actions[action](character, **kwargs)
@DoActions.register_subclass('ask')
class Ask(DoActions):
"""\
Certain npcs have information that is valuable for you. The ASK verb allows you to interact with these npcs
and obtain that information.
Usage:
ASK <npc> about <subject>\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.ask(**kwargs)
@DoActions.register_subclass('attack')
class Attack(DoActions):
"""\
ATTACK allows you to engage in combat with an enemy. Provided you are not in round time, ATTACK swings
the weapon in your right hand (or your bare fist if there is no weapon) at the enemy. You will not be able
to attack anyone other than enemies.
Usage:
ATTACK <enemy> : Engages an enemy and begins combat.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.attack(**kwargs)
@DoActions.register_subclass('drop')
class Drop(DoActions):
"""\
DROP sets an object within your environment. This verb works the same as PUT <item>.
Usage:
DROP <item> : Places an item within an environment.
DROP <item> in <object/item> : Will put an item within an object or within another item if that object or item
is a container and if that object or item has enough room within it.
DROP <item> on <object/item> : Will put an item on top of an object or on top of another item if that object
or item is stackable.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.put(**kwargs)
@DoActions.register_subclass('east')
@DoActions.register_subclass('e')
class East(DoActions):
"""\
Moves you east, if you can move in that direction.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
if world.tile_exists(x=self.character.location_x + 1, y=self.character.location_y, area=self.character.area):
self.character.move_east()
else:
terminal_output.print_text("You cannot find a way to move in that direction.")
@DoActions.register_subclass('flee')
class Flee(DoActions):
"""\
FLEE sends you in a random direction in your environment. FLEE can only be used when not in round time.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.flee(**kwargs)
@DoActions.register_subclass('get')
@DoActions.register_subclass('take')
class Get(DoActions):
"""\
GET retrieves an item from your surroundings. Many objects cannot be moved from their current position.
The item will be taken by your right hand, therefore you right hand will need to be empty. This
verb functions the same as TAKE.
Usage:
GET <item>\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.get(**kwargs)
@DoActions.register_subclass('give')
class Give(DoActions):
"""\
GIVE allows you to exchange items between you and various npcs. In order to give an item to an npc, you
must have the item in your right hand.
Usage:
GIVE <item> to <npc> : Gives the item to the npc if the npc has the ability to accept the item.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.give(**kwargs)
@DoActions.register_subclass('go')
class Go(DoActions):
"""\
GO allows you to move toward a certain object. If the object can be passed through, you will pass through it.
Usage:
GO <object> : move toward or through an object.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.go(**kwargs)
@DoActions.register_subclass('help')
class Help(DoActions):
"""\
Provides help on all parts of the game
Usage:
HELP <subject> : Output help on a specific subject.
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
if kwargs['subject_verb'] == None:
terminal_output.print_text('''
Below are the list of actions with which you can ask for help:
{}
'''.format(verbs))
elif kwargs['subject_verb'] in DoActions.do_actions:
terminal_output.print_text(DoActions.do_actions[kwargs['subject_verb']].__doc__)
else:
terminal_output.print_text("I'm sorry, what did you need help with?")
@DoActions.register_subclass('inventory')
class Inventory(DoActions):
"""\
INVENTORY allows you to view your inventory. It will list all items you have in your possession. INVENTORY
will not list the items within any containers you have.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.see_inventory(**kwargs)
@DoActions.register_subclass('look')
@DoActions.register_subclass('l')
class Look(DoActions):
"""\
View the environment and objects or items within your environment.
Usage:
LOOK : shows the descriptions of the environment around you.
LOOK <object/item> : shows the description of the object at which you want to look.
LOOK <npc> : shows the description of the npc at which you want to look.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.look(**kwargs)
@DoActions.register_subclass('north')
@DoActions.register_subclass('n')
class North(DoActions):
"""\
Moves you north, if you can move in that direction.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
if world.tile_exists(x=self.character.location_x, y=self.character.location_y - 1, area=self.character.area):
self.character.move_north()
else:
terminal_output.print_text('You cannot find a way to move in that direction.')
@DoActions.register_subclass('put')
class Put(DoActions):
"""\
PUT sets an object within your environment. This usage works the same as DROP <item>.
Usage:
PUT <item> : Places an item within an environment.
PUT <item> in <object/item> : Will put an item within an object or within another item if that object or item
is a container and if that object or item has enough room within it.
PUT <item> on <object/item> : Will put an item on top of an object or on top of another item if that object
or item is stackable.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.put(**kwargs)
@DoActions.register_subclass('quit')
class Quit(DoActions):
"""\
Exits the game.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
terminal_output.print_text("You will need to find a way to exit the game.")
@DoActions.register_subclass('save')
class Save(DoActions):
    """\
    SAVE stores your character's current progress.\
    """
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.save()
@DoActions.register_subclass('search')
class Search(DoActions):
"""\
SEARCH allows you to explore your environment if the object, enemy, or area can be explored.
Usage:
SEARCH : Searches the environment around you and uncovers hidden items or objects.
SEARCH <enemy> : Searches an enemy, uncovers any potential items that the enemy could be hiding, and places
them in your environment.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.search(**kwargs)
@DoActions.register_subclass('sell')
class Sell(DoActions):
"""\
SELL allows you to exchange items for gulden. Certain merchants look for items you may find in the wilds.
Different merchants look for different items. The item must be in your right hand.
Usage:
SELL <item> to <npc> : Exchanges items for gulden with an npc if an item can be exchanged.
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
        self.character.sell(**kwargs)
@DoActions.register_subclass('skills')
class Skills(DoActions):
"""\
SKILLS displays the skills available to you as well as the skill rating for your character. Different skills
allow you to accomplish different tasks.
Usage:
SKILLS: Shows your available skills and their rating.
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.skills(**kwargs)
@DoActions.register_subclass('skin')
class Skin(DoActions):
"""\
Many enemies are able to be skinned for various pelts, hides, etc. The SKIN verb allows you to skin enemies.
if successful the resulting item will be places within the environment. Not all enemies are able to be skinned.
Usage:
SKIN <enemy> : Skins an enemy and, if successful, leaves a skin.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.skin(**kwargs)
@DoActions.register_subclass('south')
@DoActions.register_subclass('s')
class South(DoActions):
"""\
Moves you south, if you can move in that direction.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
if world.tile_exists(x=self.character.location_x, y=self.character.location_y + 1, area=self.character.area):
self.character.move_south()
else:
terminal_output.print_text("You cannot find a way to move in that direction.")
@DoActions.register_subclass('stats')
class Stats(DoActions):
"""\
Displays your general statistics.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.stats(**kwargs)
@DoActions.register_subclass('target')
class Target(DoActions):
"""\
When in combat, you must TARGET an enemy before you can ATTACK them. Use the TARGET verb to set the enemy
for which you want to ATTACK. TARGET only needs to be set once for the duration of the combat. The enemy
does not have to be within sight in order for you to TARGET it.
Usage:
TARGET <enemy> : Targets an enemy.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
self.character.target_enemy(**kwargs)
@DoActions.register_subclass('west')
@DoActions.register_subclass('w')
class West(DoActions):
"""\
Moves you west, if you can move in that direction.\
"""
def __init__(self, character, **kwargs):
DoActions.__init__(self, character, **kwargs)
if world.tile_exists(x=self.character.location_x - 1, y=self.character.location_y, area=self.character.area):
self.character.move_west()
else:
terminal_output.print_text("You cannot find a way to move in that direction.")
class Action:
def __init__(self, method, name, action, **kwargs):
self.method = method
self.name = name
self.action = action
self.kwargs = kwargs
def __str__(self):
return "{}: {}".format(self.action, self.name)
class MoveNorthEnemy(Action):
def __init__(self, **kwargs):
super().__init__(method=enemies.Enemy.move_north,
name='Move North',
action=['north'],
kwargs=kwargs)
class MoveSouthEnemy(Action):
def __init__(self, **kwargs):
super().__init__(method=enemies.Enemy.move_south,
name='Move South',
action=['south'],
kwargs=kwargs)
class MoveEastEnemy(Action):
def __init__(self, **kwargs):
super().__init__(method=enemies.Enemy.move_east,
name='Move East',
action=['east'],
kwargs=kwargs)
class MoveWestEnemy(Action):
def __init__(self, **kwargs):
super().__init__(method=enemies.Enemy.move_west,
name='Move West',
action=['west'],
kwargs=kwargs)
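# Minimal dispatch sketch (``terminal`` and ``character`` are hypothetical objects
# created elsewhere in the game; they are not defined in this module):
#
# link_terminal(terminal)
# do_action('look', character)   # parsed by command_parser, routed via DoActions.do_actions
# do_action('north', character)  # moves north only if world.tile_exists(...) allows it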
|
StarcoderdataPython
|
399384
|
<gh_stars>1-10
import random
from typing import Any, Dict, List, Tuple
from src.rankings.event_pred import read
from src.rankings.event_pred.models import (
elo as elo_model,
opr as opr_model,
rps as rps_model,
tiebreakers,
)
def get_dicts(event_key: str, year: int):
key = str(year) + event_key
sd_score, mean_score = read.get_year_dict(year=year)
sd_score, mean_score = sd_score or 1, mean_score or 1
teams, team_stats = read.get_teams_dict(key)
matches = read.get_matches_dict(key)
team_matches = read.get_team_matches_dict(key)
# currently empty elims
oprs, ils = opr_model.get_oprs(
year, matches, [], teams, team_stats, team_matches, mean_score
)
elos = elo_model.get_elos(matches, teams, team_stats, team_matches)
teams = oprs.keys()
rps = rps_model.get_rps(matches, teams)
ties = tiebreakers.get_tiebreakers(year, matches, teams)
return teams, matches, sd_score, oprs, ils, elos, rps, ties
def get_curr_stats(
index: int,
teams: List[int],
oprs: Dict[int, List[Any]],
ils: Dict[int, List[Any]],
elos: Dict[int, List[Any]],
) -> Tuple[Dict[int, Any], Dict[int, Any], Dict[int, Any]]:
oprs_curr: Dict[int, Any] = {}
ils_curr: Dict[int, Any] = {}
elos_curr: Dict[int, Any] = {}
for team in teams:
oprs_curr[team] = oprs[team][index]
ils_curr[team] = ils[team][index]
elos_curr[team] = elos[team][index]
return oprs_curr, ils_curr, elos_curr
def get_preds(
index: int,
quals: List[Dict[str, Any]],
oprs_curr: Dict[int, Any],
elos_curr: Dict[int, Any],
ils_curr: Dict[int, Any],
year: int,
sd_score: float,
):
team_matches: Dict[
int, Any
] = {} # for each match i after index, red and blue teams
preds: Dict[
int, List[float]
] = {} # for each match i after index , win_prob, red rps, blue rps
for i in range(index, len(quals)):
m = quals[i]
red, blue = m["red"], m["blue"]
red_score = sum([oprs_curr[t][0] for t in red])
blue_score = sum([oprs_curr[t][0] for t in blue])
red_elo = sum([elos_curr[t] for t in red])
blue_elo = sum([elos_curr[t] for t in blue])
elo_prob = elo_model.win_prob(red_elo, blue_elo)
elo_margin = elo_model.win_margin(red_elo, blue_elo, sd_score)
opr_prob = opr_model.win_prob(red_score, blue_score, year, sd_score)
win_prob = (elo_prob + opr_prob) / 2
win_margin = (elo_margin + (red_score - blue_score)) / 2
        mean_score_pred = (red_score + blue_score) / 2
        red_score = mean_score_pred + win_margin / 2
        blue_score = mean_score_pred - win_margin / 2
red_rp_1 = opr_model.rp_prob([ils_curr[t][0] for t in red])
red_rp_2 = opr_model.rp_prob([ils_curr[t][1] for t in red])
blue_rp_1 = opr_model.rp_prob([ils_curr[t][0] for t in blue])
blue_rp_2 = opr_model.rp_prob([ils_curr[t][1] for t in blue])
ties_pred = tiebreakers.get_opr_tiebreakers(oprs_curr, red, blue, year)
red_tie, blue_tie = ties_pred
team_matches[i] = [red, blue]
preds[i] = [
round(float(red_score)),
round(float(blue_score)),
round(float(win_prob), 2),
round(red_rp_1, 2),
round(red_rp_2, 2),
round(blue_rp_1, 2),
round(blue_rp_2, 2),
round(red_tie, 2),
round(blue_tie, 2),
]
return team_matches, preds
def _mean_sim(
index: int,
matches: List[Dict[str, Any]],
teams: List[int],
team_matches: Dict[int, Any],
preds: Dict[int, List[float]],
rps: Dict[int, Any],
ties: Dict[int, Any],
):
rps_out: Dict[int, float] = {}
ties_out: Dict[int, float] = {}
for team in teams:
rps_out[team] = rps[team][index][0]
ties_out[team] = ties[team][index][0]
for i in range(index, len(matches)):
red, blue = team_matches[i]
red_rps = preds[i][2] * 2 + preds[i][3] + preds[i][4]
blue_rps = (1 - preds[i][2]) * 2 + preds[i][5] + preds[i][6]
for team in teams:
if team in red:
rps_out[team] += red_rps
ties_out[team] += preds[i][7]
if team in blue:
rps_out[team] += blue_rps
ties_out[team] += preds[i][8]
for team in teams:
rps_out[team] = round(rps_out[team], 2)
ties_out[team] = round(ties_out[team], 2)
return rps_out, ties_out
def mean_sim(year: int, event_key: str, i: int):
teams, matches, sd_score, oprs, ils, elos, rps, ties = get_dicts(event_key, year)
for t in teams: # gets specific stats based on current index
oprs[t], ils[t], elos[t] = oprs[t][i], ils[t][i], elos[t][i]
team_matches, preds = get_preds(i, matches, oprs, elos, ils, year, sd_score)
rps_out, ties_out = _mean_sim(i, matches, teams, team_matches, preds, rps, ties)
return {"mean_rps": rps_out, "mean_tiebreakers": ties_out}
def single_sim(
index: int,
matches: List[Dict[str, Any]],
teams: List[int],
team_matches: Dict[int, Any],
preds: Dict[int, List[float]],
rps: Dict[int, Any],
mean_ties: Dict[int, Any],
):
rps_out: Dict[int, float] = {}
for team in teams:
rps_out[team] = rps[team][index][0]
for i in range(index, len(matches)):
red, blue = team_matches[i]
red_rps, blue_rps = 0, 0
if preds[i][2] > random.uniform(0, 1):
red_rps += 2
else:
blue_rps += 2
if preds[i][3] > random.uniform(0, 1):
red_rps += 1
if preds[i][4] > random.uniform(0, 1):
red_rps += 1
if preds[i][5] > random.uniform(0, 1):
blue_rps += 1
if preds[i][6] > random.uniform(0, 1):
blue_rps += 1
for team in red:
rps_out[team] += red_rps
for team in blue:
rps_out[team] += blue_rps
rps_dict: Dict[int, Tuple[float, float]] = {}
for team in teams:
rps_dict[team] = (rps_out[team], mean_ties[team])
rps_list = sorted(rps_dict, key=lambda k: rps_dict[k], reverse=True)
ranks_out = {rps_list[i]: i + 1 for i in range(len(rps_list))}
return rps_out, ranks_out
def _index_sim(
index: int,
iterations: int,
teams: List[int],
matches: List[Dict[str, Any]],
team_matches: Dict[int, Any],
preds: Dict[int, List[float]],
rps: Dict[int, Any],
ties: Dict[int, Any],
):
mean_rps, mean_ties = _mean_sim(
index, matches, teams, team_matches, preds, rps, ties
)
T = len(teams)
avg_rps: Dict[int, float] = {}
ranks: Dict[int, List[float]] = {}
for team in teams:
avg_rps[team] = 0
ranks[team] = [0 for _ in range(T)]
for _ in range(iterations):
rps_ind, ranks_ind = single_sim(
index, matches, teams, team_matches, preds, rps, mean_ties
)
for team in teams:
avg_rps[team] += rps_ind[team]
ranks[team][ranks_ind[team] - 1] += 1
avg_ranks: Dict[int, float] = {}
for team in teams:
avg_rps[team] /= iterations
ranks[team] = [round(freq / iterations, 2) for freq in ranks[team]]
avg_ranks[team] = round(1 + sum([i * ranks[team][i] for i in range(T)]), 2)
return mean_rps, mean_ties, avg_rps, avg_ranks, ranks
def index_sim(year: int, event_key: str, i: int, iterations: int):
teams, matches, sd_score, oprs, ils, elos, rps, ties = get_dicts(event_key, year)
for t in teams: # gets specific stats based on current index
oprs[t], ils[t], elos[t] = oprs[t][i], ils[t][i], elos[t][i]
team_matches, preds = get_preds(i, matches, oprs, elos, ils, year, sd_score)
mean_rps, mean_ties, avg_rps, avg_ranks, ranks = _index_sim(
i, iterations, teams, matches, team_matches, preds, rps, ties
)
dict_ranks = {}
for team in ranks:
dict_ranks[team] = {}
for i in range(len(ranks[team])):
dict_ranks[team][i + 1] = ranks[team][i]
return {
"mean_rps": mean_rps,
"mean_tiebreaker": mean_ties,
"sim_rps": avg_rps,
"sim_ranks": avg_ranks,
"sim_rank_probs": dict_ranks,
}
def quick_sim(year: int, event_key: str):
teams, matches, sd_score, oprs, ils, elos, rps, ties = get_dicts(event_key, year)
out: Dict[int, Dict[str, Dict[int, Any]]] = {}
for i in range(len(matches) + 1):
oprs_c, ils_c, elos_c = get_curr_stats(i, teams, oprs, ils, elos)
team_matches, preds = get_preds(
i, matches, oprs_c, elos_c, ils_c, year, sd_score
)
mean_rps, mean_ties = _mean_sim(
i, matches, teams, team_matches, preds, rps, ties
)
sub_out: Dict[str, Dict[int, Any]] = {"mean_rps": {}, "mean_tiebreaker": {}}
for team in teams:
sub_out["mean_rps"][team] = mean_rps[team]
sub_out["mean_tiebreaker"][team] = mean_ties[team]
out[i] = sub_out
return out
def sim(year: int, event_key: str, iterations: int = 100):
teams, matches, sd_score, oprs, ils, elos, rps, ties = get_dicts(event_key, year)
out: Dict[int, Dict[str, Dict[int, Any]]] = {}
for i in range(len(matches) + 1):
oprs_c, ils_c, elos_c = get_curr_stats(i, teams, oprs, ils, elos)
team_matches, preds = get_preds(
i, matches, oprs_c, elos_c, ils_c, year, sd_score
)
mean_rps, mean_ties, avg_rps, avg_ranks, ranks = _index_sim(
i, iterations, teams, matches, team_matches, preds, rps, ties
)
sub_out: Dict[str, Dict[int, Any]] = {
"mean_rps": {},
"mean_tiebreaker": {},
"sim_rps": {},
"sim_ranks": {},
"sim_rank_probs": {},
}
for team in teams:
sub_out["mean_rps"][team] = mean_rps[team]
sub_out["mean_tiebreaker"][team] = mean_ties[team]
sub_out["sim_rps"][team] = avg_rps[team]
sub_out["sim_ranks"][team] = avg_ranks[team]
sub_out["sim_rank_probs"][team] = ranks[team]
out[i] = sub_out
return out
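# Example call (the event key is a placeholder): ``sim`` returns, for every match
# index, the mean/simulated ranking points, ranks and rank probabilities per team.
#
# results = sim(2019, 'event_key', iterations=100)
# print(results[0]['sim_ranks'])  # predicted final ranks before any matches are played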
|
StarcoderdataPython
|
308158
|
<reponame>TiagoJLeandro/maquininha-de-troco<filename>tests/test_calculate_change.py
import pytest
from calculate_change import calculate_change
default_coins_list = [
200, 100, 50, 20, 10, 5,
2, 1, 0.5, 0.25, 0.10, 0.05, 0.01
]
@pytest.mark.parametrize('prod_value,received,expected',
[(100, 0, []),
(388.91, 777.82, [1,1,1,1,1,1,1,1,1,1,1,1,1]),
(0.10, 0.14, [0,0,0,0,0,0,0,0,0,0,0,0,4]),
(0.10, 0.16, [0,0,0,0,0,0,0,0,0,0,0,1,1]),
(0, 200, [1,0,0,0,0,0,0,0,0,0,0,0,0]),
(0, 100, [0,1,0,0,0,0,0,0,0,0,0,0,0]),
(0, 50, [0,0,1,0,0,0,0,0,0,0,0,0,0]),
(0, 20, [0,0,0,1,0,0,0,0,0,0,0,0,0]),
(0, 10, [0,0,0,0,1,0,0,0,0,0,0,0,0]),
(0, 5, [0,0,0,0,0,1,0,0,0,0,0,0,0]),
(0, 2, [0,0,0,0,0,0,1,0,0,0,0,0,0]),
(0, 1, [0,0,0,0,0,0,0,1,0,0,0,0,0]),
(0, 0.5, [0,0,0,0,0,0,0,0,1,0,0,0,0]),
(0, 0.25, [0,0,0,0,0,0,0,0,0,1,0,0,0]),
(0, 0.10, [0,0,0,0,0,0,0,0,0,0,1,0,0]),
(30.80, 50.80, [0,0,0,1,0,0,0,0,0,0,0,0,0])
]
)
def test_if_change_was_decomposed(prod_value, received, expected):
assert calculate_change(
default_coins_list, prod_value, received) == expected
|
StarcoderdataPython
|
3361233
|
<filename>tests/bitly/test_bitly_history.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import ecstasy
import os
import pytest
import requests
import time
from collections import namedtuple
import tests.paths
import lnk.bitly.history
VERSION = 3
API = 'https://api-ssl.bitly.com/v{0}'.format(VERSION)
with open(os.path.join(tests.paths.TEST_PATH, 'bitly', 'token')) as source:
ACCESS_TOKEN = source.read()
def timestamp(time_range):
seconds = {
"minute": 60,
"hour": 3600,
"day": 86400,
"week": 604800,
"month": 2629740,
"year": 31556900
}
offset = time_range[0] * seconds[time_range[1]]
return int(time.time() - offset)
def request_history(start=None, end=None, limit=None):
if start:
start = timestamp(start)
if end:
end = timestamp(end)
response = requests.get('{0}/user/link_history'.format(API),
params=dict(access_token=ACCESS_TOKEN,
created_after=start,
created_before=end,
limit=limit))
data = response.json()['data']
return [i['link'] for i in data['link_history']]
def request_expansion(url):
response = requests.get('{0}/expand'.format(API),
params=dict(access_token=ACCESS_TOKEN,
shortUrl=url))
data = response.json()['data']
return data['expand'][0]['long_url']
@pytest.fixture(scope='module')
def fixture():
Fixture = namedtuple('Fixture', [
'history',
'forever_data',
'last',
'last_data',
'ranges',
'ranges_data',
'template',
'url',
'expanded'
])
history = lnk.bitly.history.History(raw=True)
forever_data = request_history()
last = [(4, 'week'), (5, 'month')]
last_data = [request_history(i) for i in last]
ranges = [(5, 'month', 4, 'day'), (7, 'year', 1, 'day')]
ranges_data = [request_history(i[:2], i[2:]) for i in ranges]
template = ecstasy.beautify(' <+> {0}', ecstasy.Color.Red)
url = 'http://bit.ly/1OQM9nA'
expanded = request_expansion(url)
return Fixture(history,
forever_data,
last,
last_data,
ranges,
ranges_data,
template,
url,
expanded)
def test_initializes_well(fixture):
assert hasattr(fixture.history, 'raw')
assert hasattr(fixture.history, 'link')
assert hasattr(fixture.history, 'seconds')
assert isinstance(fixture.history.seconds, dict)
def test_request_works(fixture):
expected = request_history()
result = fixture.history.request()
print(result, expected)
assert sorted(result) == sorted(expected)
def test_lineify_does_nothing_if_pretty_false(fixture):
result = fixture.history.lineify('cat', False, False, False)
assert result == 'cat'
def test_lineify_prettifies_if_pretty_true(fixture):
result = fixture.history.lineify('cat', False, False, True)
expected = fixture.template.format('cat')
assert result == expected
def test_lineify_returns_only_expanded_if_expanded_true(fixture):
result = fixture.history.lineify(fixture.url, True, False, False)
assert result == fixture.expanded
def test_lineify_returns_both_if_both_true(fixture):
result = fixture.history.lineify(fixture.url, False, True, False)
expected = '{0} => {1}'.format(fixture.url, fixture.expanded)
assert result == expected
def test_timestamp_works(fixture):
now = time.time()
result = fixture.history.timestamp((1, 'minute'), now)
expected = int(now - 60)
assert result == expected
def test_timestamp_works_if_endswith_s(fixture):
now = time.time()
result = fixture.history.timestamp((1, 'minutes'), now)
expected = int(now - 60)
assert result == expected
def test_parse_time_works_without_upper_bound(fixture):
now = time.time()
result = fixture.history.parse_time((1, 'minute'), base=now)
expected = int(now - 60)
assert result['created_after'] == expected
assert result['created_before'] is None
def test_parse_time_works_with_upper_bound(fixture):
now = int(time.time())
result = fixture.history.parse_time((2, 'minute'), (1, 'minute'), now)
assert result['created_after'] == now - 120
assert result['created_before'] == now - 60
def test_last_works_for_single_range(fixture):
result = fixture.history.last([fixture.last[0]], False, False, False)
expected = fixture.last_data[0]
assert result == expected
def test_last_works_for_many_ranges(fixture):
result = fixture.history.last(fixture.last, False, False, False)
expected = fixture.last_data[0] + fixture.last_data[1]
assert result == expected
def test_ranges_works_for_single_range(fixture):
result = fixture.history.ranges([fixture.ranges[0]], False, False, False)
expected = fixture.ranges_data[0]
assert result == expected
def test_ranges_works_for_many_ranges(fixture):
result = fixture.history.ranges(fixture.ranges, False, False, False)
expected = [j for i in fixture.ranges_data for j in i]
assert sorted(result) == sorted(expected)
def test_forever_works(fixture):
result = fixture.history.forever(False, False, False)
expected = fixture.forever_data
assert result == expected
def test_pretty_works_for_forever(fixture):
result = fixture.history.forever(False, False, True)
expected = ['Since forever:']
expected += [fixture.template.format(i) for i in fixture.forever_data]
assert result == expected + ['']
def test_pretty_works_for_last(fixture):
result = fixture.history.last(fixture.last, False, False, True)
expected = []
for time_point, data in zip(fixture.last, fixture.last_data):
header = 'Last {0} {1}:'.format(time_point[0], time_point[1])
if not data:
header += ' None'
expected.append(header)
for item in data:
expected.append(fixture.template.format(item))
expected.append('')
assert sorted(result) == sorted(expected)
def test_ranges_header_removes_unit_if_both_equal(fixture):
after = (7, 'days')
before = (2, 'days')
result = fixture.history.ranges_header(after, before, True)
expected = 'Between 7 and 2 days ago:'
assert result == expected
def test_pretty_works_for_ranges(fixture):
result = fixture.history.ranges(fixture.ranges, False, False, True)
expected = []
for time_point, data in zip(fixture.ranges, fixture.ranges_data):
header = 'Between {0} {1} '.format(time_point[0], time_point[1])
header += 'and {0} {1} ago:'.format(time_point[2], time_point[3])
if not data:
header += ' None'
expected.append(header)
for item in data:
expected.append(fixture.template.format(item))
expected.append('')
assert sorted(result) == sorted(expected)
def test_fetch_works_only_for_forever(fixture):
result = fixture.history.fetch(None, None, True, None, False, False, False)
expected = fixture.forever_data
assert result == expected
def test_fetch_removes_last_line(fixture):
result = fixture.history.fetch(None, None, True, None, False, False, True)
expected = len(['Since forever:'] + fixture.forever_data)
assert len(result) == expected
assert result[-1] != ''
def test_fetch_works_for_all_ranges(fixture):
result = fixture.history.fetch(fixture.last,
fixture.ranges,
True,
None,
False,
False,
False)
expected = [fixture.forever_data] + fixture.last_data + fixture.ranges_data
expected = [j for i in expected for j in i]
assert sorted(result) == sorted(expected)
def test_fetch_limits_well(fixture):
result = fixture.history.fetch(None, None, True, 3, False, False, False)
expected = fixture.forever_data[:3]
assert len(result) <= 3
assert result == expected
|
StarcoderdataPython
|
12839994
|
<gh_stars>0
# Generated by Django 2.0.6 on 2018-07-02 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('parkingsystem', '0015_auto_20180629_1215'),
]
operations = [
migrations.AlterField(
model_name='parkinglot',
name='latitude',
field=models.DecimalField(decimal_places=6, max_digits=9),
),
migrations.AlterField(
model_name='parkinglot',
name='longitude',
field=models.DecimalField(decimal_places=6, max_digits=9),
),
migrations.AlterField(
model_name='standaloneparkingspace',
name='latitude',
field=models.DecimalField(decimal_places=6, max_digits=9),
),
migrations.AlterField(
model_name='standaloneparkingspace',
name='longitude',
field=models.DecimalField(decimal_places=6, max_digits=9),
),
]
|
StarcoderdataPython
|
8136277
|
<gh_stars>1-10
###############################################################################
#
# Copyright 2009-2011, Universitat P<NAME>
#
# This file is part of Wok.
#
# Wok is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Wok is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
class BatchInsert(object):
"""Helper to do batch inserts"""
def __init__(self, cursor, table, columns = None, batch_size = 1000, lock_table = None):
self.__cursor = cursor
self.table = table
self.columns = columns
if columns is not None:
self.__insert_sql = u"INSERT INTO {} ({}) VALUES".format(table, ", ".join(columns))
else:
self.__insert_sql = u"INSERT INTO {} VALUES".format(table)
self.batch_size = batch_size
if lock_table is not None:
lock_table = lock_table.upper()
if lock_table not in ["READ", "WRITE"]:
raise Exception("lock_table should be one of READ or WRITE")
self.lock_table = lock_table
self.count = 0
self.__sql = []
if self.lock_table is not None:
self.__cursor.execute("LOCK TABLES {} {}".format(self.table, self.lock_table))
def __execute(self):
if len(self.__sql) == 0:
return
sql = u"".join(self.__sql).encode("utf-8", "replace")
#print sql
self.__cursor.execute(sql)
#print "Affected rows: {0}".format(self.__cursor.rowcount)
def __marshall_data(self, data):
sb = []
for v in data:
if v is None:
sb += [u"NULL"]
elif isinstance(v, basestring):
sb += [u"'" + v.replace("'", "''") + u"'"]
else:
sb += [str(v)]
return u",".join(sb)
def insert(self, *data):
if self.count % self.batch_size == 0:
self.__execute()
self.__sql = [self.__insert_sql, u"\n\t(", self.__marshall_data(data), u")"]
else:
self.__sql += [u",\n\t(", self.__marshall_data(data), u")"]
self.count += 1
def close(self):
self.__execute()
if self.lock_table is not None:
self.__cursor.execute("UNLOCK TABLES")
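# Minimal usage sketch (cursor, table and column names are placeholders; any
# DB-API cursor such as MySQLdb's should work):
#
# bi = BatchInsert(cursor, "genes", columns=["id", "name"], batch_size=500)
# for gene_id, gene_name in rows:
#     bi.insert(gene_id, gene_name)
# bi.close()  # flushes the final partial batch and unlocks tables if a lock was taken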
|
StarcoderdataPython
|
261101
|
<filename>keystone/contrib/oauth1/migrate_repo/versions/001_add_oauth_tables.py
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sql
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
meta = sql.MetaData()
meta.bind = migrate_engine
consumer_table = sql.Table(
'consumer',
meta,
sql.Column('id', sql.String(64), primary_key=True, nullable=False),
sql.Column('description', sql.String(64), nullable=False),
sql.Column('secret', sql.String(64), nullable=False),
sql.Column('extra', sql.Text(), nullable=False))
consumer_table.create(migrate_engine, checkfirst=True)
request_token_table = sql.Table(
'request_token',
meta,
sql.Column('id', sql.String(64), primary_key=True, nullable=False),
sql.Column('request_secret', sql.String(64), nullable=False),
sql.Column('verifier', sql.String(64), nullable=True),
sql.Column('authorizing_user_id', sql.String(64), nullable=True),
sql.Column('requested_project_id', sql.String(64), nullable=False),
sql.Column('requested_roles', sql.Text(), nullable=False),
sql.Column('consumer_id', sql.String(64), nullable=False, index=True),
sql.Column('expires_at', sql.String(64), nullable=True))
request_token_table.create(migrate_engine, checkfirst=True)
access_token_table = sql.Table(
'access_token',
meta,
sql.Column('id', sql.String(64), primary_key=True, nullable=False),
sql.Column('access_secret', sql.String(64), nullable=False),
sql.Column('authorizing_user_id', sql.String(64),
nullable=False, index=True),
sql.Column('project_id', sql.String(64), nullable=False),
sql.Column('requested_roles', sql.Text(), nullable=False),
sql.Column('consumer_id', sql.String(64), nullable=False),
sql.Column('expires_at', sql.String(64), nullable=True))
access_token_table.create(migrate_engine, checkfirst=True)
|
StarcoderdataPython
|
6522336
|
<reponame>rexsimiloluwah/Python-Experiments
from django.views.generic import View
from django.http import HttpResponse
from app.models import Movie
import json
from .mixins import CSRFExemptMixin
from app.mixins import HttpResponseMixin
from .forms import MovieModelForm
from .utilities import validate_json
# Using Class Based Views
class MovieDetailView(HttpResponseMixin, CSRFExemptMixin, View):
# GET
def get(self, request, id, *args, **kwargs):
obj = Movie.objects.get(id = id)
if obj is None:
error_data = json.dumps({"message" : "Object not Found !"})
            return self.render_to_response(error_data, status=404)  # 404 is used because the requested resource is not found
json_data = obj.serialize()
print(json_data)
return HttpResponse(json_data, content_type="application/json")
# POST
def post(self, request, *args, **kwargs):
response = json.dumps({
"message" : "Method not Allowed, Please use the /api/movies endpoint"
})
return self.render_to_response(response, status = 405) # 405 is the status code for METHOD NOT ALLOWED
# UPDATE
def put(self, request, id, *args, **kwargs):
obj = Movie.objects.get(id = id)
if obj is None:
error_data = json.dumps({
"message" : "Object not found !"
})
return self.render_to_response(error_data, status = 404) # 404 is used for the NOT FOUND HTTP Status code
valid_json = validate_json(request.body)
if not valid_json:
error_data = json.dumps({
"message" : "Invalid Request !"
})
return self.render_to_response(error_data, status = 400) # 400 is used for the BAD REQUEST HTTP Status code
# print(json.loads(request.body))
data = json.loads(obj.serialize())
passed_data = json.loads(request.body)
for k,v in passed_data.items():
data[0]["fields"][k] = v
form = MovieModelForm(data[0]["fields"], instance = obj)
if form.is_valid():
obj = form.save(commit = True)
json_data = obj.serialize()
return self.render_to_response(json_data, status = 200) # 200 is used for a SUCCESSFUL Request
elif form.errors:
json_data = json.dumps(form.errors)
return self.render_to_response(json_data, status = 400 ) # 400 is for BAD REQUEST
# DELETE
def delete(self, request, id, *args, **kwargs):
obj = Movie.objects.get(id = id)
if obj is None:
error_data = json.dumps({
"message" : "Object not found !"
})
return self.render_to_response(error_data, status = 404) # 404 is used for the NOT FOUND HTTP Status code
deleted_ = obj.delete()
print(deleted_)
json_data = json.dumps({
"message" : "Deleted Successfully !"
})
return self.render_to_response(json_data, status = 200) # 200 for the OK (SUCCESS) HTTP Status code
class MovieListView(HttpResponseMixin, CSRFExemptMixin, View):
is_json = True
def get(self,request, *args, **kwargs):
obj = Movie.objects.filter(id__gte = 2)
json_data = obj.serialize()
print(json_data)
return self.render_to_response(json_data, status = 200)
def post(self, request, *args, **kwargs):
print(request.POST)
form = MovieModelForm(request.POST)
if form.is_valid():
obj = form.save(commit = True)
json_data = obj.serialize()
return self.render_to_response(json_data, status = 201) #201 is for CREATED HTTP status code
elif form.errors:
data = json.dumps(form.errors)
return self.render_to_response(data, status = 400) # 400 is for Bad Request
data = json.dumps({
"message" : "NOT ALLOWED"
})
return self.render_to_response(data, status = 405)
def delete(self, request, *args, **kwargs):
data = json.dumps({
"message" : "Forbidden Request"
})
return self.render_to_response(data, status = 403)
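# Hypothetical URL wiring for these views (route names are assumptions, not part
# of this module):
#
# from django.urls import path
# urlpatterns = [
#     path('api/movies/', MovieListView.as_view()),
#     path('api/movies/<int:id>/', MovieDetailView.as_view()),
# ]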
|
StarcoderdataPython
|
9689821
|
import argparse
import codecs
import json
import pandas as pd
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--ideology', nargs='+', type=str,
help='set <input.csv> (required) and <output.js> (optional, \"ideologies.js\" by default)',
metavar=('input.csv', 'output.js'))
parser.add_argument('-q', '--question', nargs='+', type=str,
help='set <input.csv> (required) and <output.js> (optional, \"questions.js\" by default)',
metavar=('input.csv', 'output.js'))
parser.add_argument('-s', '--special', nargs='+', type=str,
help='set <input.csv> (required) and <output.js> (optional, \"specials.js\" by default)',
metavar=('input.csv', 'output.js'))
return parser
def convert_ideology(input: str, output: str = 'ideologies.js'):
ideo_name = '意识形态'
econ_name = '经济'
govt_name = '政治'
scty_name = '社会'
desc_name = '描述'
link_name = '链接'
df = pd.read_csv(input)
print(df)
ideologies = []
for _, row in df.iterrows():
if pd.isna(row[ideo_name]):
continue
item = {
'name': row[ideo_name],
'stats': {
'econ': int(row[econ_name]),
'govt': int(row[govt_name]),
'scty': int(row[scty_name]),
},
'desc': row[desc_name],
'link': row[link_name]
}
ideologies.append(item)
json_str = json.dumps(ideologies, ensure_ascii=False, indent=4)
with codecs.open(output, 'w', encoding='utf-8') as f:
f.write('ideologies = ' + json_str + ';\n')
def convert_question(input: str, output: str = 'questions.js'):
ques_name = '内容'
axes_name_to_id = {
'平等': 'econ',
'自由': 'govt',
'进步': 'scty',
'生态': 'envo',
'入关学': 'ruguan',
'阴谋论': 'yinmou',
'一神论': 'onegod',
'儒家': 'rujia',
'工业党': 'gongye',
'毛粉': 'mao',
'皇汉': 'han',
'解体论': 'jieti',
'民国派': 'minguo',
'女权主义': 'nvquan',
'政治冷感': 'suijing',
'全盘西化': 'dengta',
'逆向民族主义': 'nimin',
'地方主义': 'difang',
'社会达尔文主义': 'sheda',
'中国特色社会主义': 'tese',
'海盗党': 'haidao',
'加速主义': 'jiasu',
'社群主义': 'shequn',
}
df = pd.read_csv(input)
print(df)
questions = []
for _, row in df.iterrows():
if pd.isna(row[ques_name]):
continue
item = {
'question': row[ques_name],
'effect': {}
}
for name, id in axes_name_to_id.items():
if not pd.isna(row[name]):
item['effect'][id] = int(row[name])
questions.append(item)
json_str = json.dumps(questions, ensure_ascii=False, indent=4)
with codecs.open(output, 'w', encoding='utf-8') as f:
f.write('questions = ' + json_str + ';\n')
def convert_special(input: str, output: str = 'specials.js'):
spec_name = '特性'
spec_id = 'id'
spec_desc = '描述'
df = pd.read_csv(input)
print(df)
specials = []
for _, row in df.iterrows():
if pd.isna(row[spec_name]):
continue
item = {
'id': row[spec_id],
'name': row[spec_name],
'desc': row[spec_desc]
}
specials.append(item)
json_str = json.dumps(specials, ensure_ascii=False, indent=4)
with codecs.open(output, 'w', encoding='utf-8') as f:
f.write('specials = ' + json_str + ';\n')
if __name__ == '__main__':
parser = get_parser()
args = vars(parser.parse_args())
if not any(args.values()):
parser.print_help()
if args['ideology'] is not None:
ideology = args['ideology']
if len(ideology) <= 2:
convert_ideology(*ideology)
else:
parser.error('argument -i/--ideology: expected 1 or 2 arguments')
if args['question'] is not None:
question = args['question']
if len(question) <= 2:
convert_question(*question)
else:
parser.error('argument -q/--question: expected 1 or 2 arguments')
if args['special'] is not None:
special = args['special']
if len(special) <= 2:
convert_special(*special)
else:
parser.error('argument -s/--special: expected 1 or 2 arguments')
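    # Example invocations (file names are placeholders; use whatever name this
    # script is saved under):
    #   python convert.py -i ideologies.csv ideologies.js
    #   python convert.py -q questions.csv -s specials.csv specials.js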
|
StarcoderdataPython
|
11341682
|
import logging
import os
import shutil
import time
from pathlib import Path
log = logging.getLogger(__name__)
class CompletedSeed:
"""
Object representing a download that has completed seeding
determines if any post-processing of files is required and executes steps including:
1. deleting unwanted files by file extension
2. removing the directory structure of the download to put
all files in one directory for the category
"""
def __init__(
self,
config,
qbitclient,
name,
hash,
content_path,
save_path,
completion_on,
genre,
):
self.qbitclient = qbitclient
self.name = name
self.hash = hash
self.content_path = Path(
content_path
) # path of torrent content (root path for multi-file torrents, absolute file path for single-file torrents)
self.save_path = Path(save_path)
self.time_complete = self.elapsed_seconds(completion_on)
self.keep_dir_structure = config["genres"][genre]["keepDirStructure"]
self.delete_from_client = config["genres"][genre]["deleteFromClientWhenDone"]
self.file_exts_to_keep = tuple(config["genres"][genre]["keepSpecificFileTypes"])
@staticmethod
def elapsed_seconds(given_time_since_epoch):
"""seconds between now and given time in seconds
elapsed_seconds(1636115660) --> 955
"""
return time.time() - given_time_since_epoch
@staticmethod
def delete_non_filetype_recursively(dir_path, keep_extensions: tuple):
"""deletes all files that do not have matching extension(s)
Args:
dir_path: path to directory to delete files from
keep_extensions: tuple of file extensions including period ex: (".py", ".log")
Returns:
none
"""
for root, dirs, files in os.walk(dir_path, topdown=False):
[
(Path(root, file).unlink(), log.debug(f"Deleted file: {file}"))
for file in files
if not file.endswith(keep_extensions)
]
@staticmethod
def delete_empty_dirs_recursively(dir_path):
"""deletes empty directories recursively
Args:
dir_path: top directory in tree
Returns:
none
"""
for root, dirs, files in os.walk(dir_path, topdown=False):
[
(Path(root, dir).rmdir(), log.debug(f"Deleted empty directory: {dir}"))
for dir in dirs
if not any(Path(root, dir).iterdir())
]
@staticmethod
def move_all_files_in_tree_to_another_dir(source_dir, dest_dir):
"""moves all files in directory tree to a directory in another tree
Args:
source_dir: path to directory to get files
dest_dir: path to directory to move files to
Returns:
none
"""
for root, dirs, files in os.walk(source_dir, topdown=False):
[
(
shutil.move(Path(root, file), dest_dir),
log.debug(f"Moved file: {file}"),
)
for file in files
]
@staticmethod
def move_single_file(source, dest):
"""moves single file from source to destination
Args:
source: source path
dest: destination path
Returns:
None
"""
shutil.move(source, dest)
log.debug(f"Moved file: {source}")
def delete_in_client(self):
"""deletes torrent from queue or adds 'Processed' tag"""
if self.delete_from_client:
self.qbitclient.torrents_delete(
delete_files=False, torrent_hashes=self.hash
)
log.debug(f"Deleted from client: {self.name}")
else:
self.qbitclient.torrents_add_tags(
tags="Processed", torrent_hashes=self.hash
)
log.debug(f"Added 'Processed' tag for {self.name}")
def process_completed_seed(self, ignore_age):
"""performs class functions based on config.
ignores downloads older than specified time to avoid race conditions with periodic cleaner
Args:
ignore_age: time in seconds since download completion to ignore
Returns:
None
"""
if self.time_complete < ignore_age:
return
if self.file_exts_to_keep and self.content_path.is_dir():
self.delete_non_filetype_recursively(
self.content_path, self.file_exts_to_keep
)
if not self.keep_dir_structure:
if self.content_path.is_dir():
self.move_all_files_in_tree_to_another_dir(
self.content_path, self.save_path
)
self.content_path.rmdir()
log.debug(f"Deleted dir: {self.content_path}")
if self.content_path.is_file():
self.move_single_file(self.content_path, self.save_path)
self.content_path.parent.rmdir()
log.debug(f"Deleted dir for: {self.content_path}")
self.delete_in_client()
class Cleaner:
"""
reviews completed seeds for post-processing steps and creates CompletedSeed objects
"""
def __init__(self, config, qbitclient):
self.config = config
self.qbitclient = qbitclient
def get_completed_seeds(self):
"""returns list of torrents that are done seeding
Args:
None
Returns:
list of completed seeds
"""
completed_list = self.qbitclient.torrents_info(status_filter="completed")
seeding_list = self.qbitclient.torrents_info(status_filter="seeding")
return [
i
for i in completed_list
if i not in seeding_list
and "Processed" not in i.tags
and self.get_genre(i.save_path)
]
def get_genre(self, save_path):
"""gets genre of torrent
Args:
save_path: torrent save_path
Returns:
genre from config or False
"""
genre_path = Path(save_path).parent
for key, val in self.config["genres"].items():
if Path(val["moveToDir"]) == genre_path:
return key
return False
def clean_seeds(self, ignore_age=120):
"""creates objects and tells them to process themselves"""
if not self.get_completed_seeds():
log.info("No completed seeds to clean")
else:
for i in [
CompletedSeed(
self.config,
self.qbitclient,
i.name,
i.hash,
i.content_path,
i.save_path,
i.completion_on,
self.get_genre(i.save_path),
)
for i in self.get_completed_seeds()
]:
i.process_completed_seed(ignore_age)
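# Minimal usage sketch (``config`` is the parsed configuration dict used by the
# genre lookups above; ``qbitclient`` is assumed to be a qbittorrent-api Client):
#
# cleaner = Cleaner(config, qbitclient)
# cleaner.clean_seeds(ignore_age=120)  # skips anything finished in the last 2 minutes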
|
StarcoderdataPython
|
29897
|
from typing import List
from django.shortcuts import render
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from assignment.models import Assignment
from course.models import Course
class CourseListView(ListView):
template_name = 'course/course_list.html'
model = Course
context_object_name = 'course'
class CourseDetailView(DetailView):
template_name = 'course/course_detail.html'
model = Course
context_object_name = 'course'
def get(self, request, *args, **kwargs):
self.pk = kwargs["pk"]
return super().get(request, *args, **kwargs)
def get_context_data(self, **kwargs):
kwargs["assignment"] = Assignment.objects.filter(course__id=self.pk)
return super().get_context_data(**kwargs)
|
StarcoderdataPython
|
1638548
|
"""Top-level package for utilities for bootcamp."""
from .na_utils import *
from .bioinfo_dicts import *
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__version__ = '0.0.1'
|
StarcoderdataPython
|
1942447
|
<filename>xschem/fractional_n_divider/tests/lfsr_fib/test_lfsr_fib_coverage.py
import cocotb
from cocotb.triggers import RisingEdge, ReadOnly
from cocotb_coverage.coverage import *
# auxiliary relation function to define bins matching within a range
range_relation = lambda val_, bin_ : bin_[0] <= val_ <= bin_[1]
class LFSRFibStatus():
"""
Object representing modulator status
"""
def __init__(self, dut):
self.dut = dut
# @cocotb.coroutine
def update(self):
self.i_in = int(self.dut.i_in.value)
self.i_ce = int(self.dut.i_ce.value)
        self.o_bit = int(self.dut.o_bit.value)
#functional coverage - check that i_in, i_ce and o_bit have each been
#observed both low and high
LFSRFibCoverage = coverage_section (
CoverPoint(
name = "top.i_in",
xf = lambda dut, status, input_data : status.i_in,
bins = [0, 1],
bins_labels = ["low", "high"]
),
CoverPoint(
name = "top.i_ce",
xf = lambda dut, status, input_data : status.i_ce,
bins = [0, 1],
bins_labels = ["low", "high"]
),
CoverPoint(
name = "top.o_bit",
xf = lambda dut, status, input_data : status.o_bit,
bins = [0, 1],
bins_labels = ["low", "high"]
)
)
# procedure of processing data
# coverage sampled here - at each function call
@LFSRFibCoverage
async def process_data(dut, status, input_data):
# provide new data and read old
dut.i_in <= input_data["in"]
dut.i_ce <= input_data["ce"]
await RisingEdge(dut.i_clk)
output_data = int(dut.o_bit)
return output_data
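# A hypothetical cocotb test (not part of the original bench) showing how the
# coverage-decorated process_data() could be driven with random stimulus.
# Assumptions: cocotb >= 1.6 for start_soon, a clock on dut.i_clk with an
# arbitrary 10 ns period, no reset needed, and an illustrative export filename.
import random
from cocotb.clock import Clock
from cocotb_coverage.coverage import coverage_db

@cocotb.test()
async def test_lfsr_fib_random(dut):
    status = LFSRFibStatus(dut)
    cocotb.start_soon(Clock(dut.i_clk, 10, units="ns").start())
    # drive known values once so the sampled signals are resolved
    dut.i_in <= 0
    dut.i_ce <= 0
    await RisingEdge(dut.i_clk)
    for _ in range(200):
        status.update()
        input_data = {"in": random.randint(0, 1), "ce": random.randint(0, 1)}
        await process_data(dut, status, input_data)
    # dump the collected functional coverage
    coverage_db.export_to_yaml(filename="coverage_lfsr_fib.yml")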
|
StarcoderdataPython
|
5118459
|
<gh_stars>0
import random
import math
def quickselect(l, k, pivot_fn = random.choice):
if (len(l) == 1):
assert k==0
return l[0]
pivot = pivot_fn(l)
less = [i for i in l if i < pivot]
greater = [i for i in l if i > pivot]
pivots = [i for i in l if i == pivot]
if (k < len(less)): return quickselect(less, k, pivot_fn)
elif (k < len(less)+len(pivots)): return pivots[0]
else: return quickselect(greater, k-len(less)-len(pivots), pivot_fn)
def median(l):
return quickselect(l, math.floor(len(l)/2))
'''
def quickselect(l, k, pivot_fn=random.choice):
"""
Select the kth element in l (0 based)
:param l: List of numerics
:param k: Index
:param pivot_fn: Function to choose a pivot, defaults to random
:return: The kth element of l
"""
if len(l) == 1:
assert k == 0
return l[0]
pivot = pivot_fn(l)
lows = [el for el in l if el < pivot]
highs = [el for el in l if el > pivot]
pivots = [el for el in l if el == pivot]
if k < len(lows):
return quickselect(lows, k, pivot_fn)
elif k < len(lows) + len(pivots):
# We got lucky and guessed the median
return pivots[0]
else:
return quickselect(highs, k - len(lows) - len(pivots), pivot_fn)
'''
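# Quick sanity check (not part of the original file): quickselect returns the
# k-th smallest element (0-based), and median() returns the upper median for
# even-length lists because of the floor(len/2) index.
if __name__ == "__main__":
    values = [7, 1, 5, 3, 9, 4]
    print(quickselect(values, 0))  # -> 1
    print(median(values))          # -> 5 (upper median of [1, 3, 4, 5, 7, 9])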
|
StarcoderdataPython
|
1644114
|
<reponame>MalteIwanicki/simple_thread
from threading import Thread
class SimpleThread(Thread):
"""Creates and starts a thread with the given function and parameter"""
def __init__(self, function, parameters=()):
if not isinstance(parameters, tuple):
parameters = (parameters,)
Thread.__init__(self, target=function, args=parameters)
self._return = None
self.start()
def run(self):
if self._target is not None:
self._return = self._target(*self._args, **self._kwargs)
def join(self, *args):
Thread.join(self, *args)
return self._return
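# Hypothetical usage (not part of the original module): the thread starts in
# __init__, and join() hands back the target's return value.
if __name__ == "__main__":
    def add(a, b):
        return a + b

    print(SimpleThread(add, (2, 3)).join())  # -> 5
    # a single non-tuple argument is wrapped into a tuple automatically
    print(SimpleThread(len, "hello").join())  # -> 5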
|
StarcoderdataPython
|
6623859
|
from setuptools import setup
from meta import __version__
setup(
name='mergeyaml',
version=__version__,
author="<NAME>",
py_modules=['mergeyaml'],
license="MIT",
install_requires=[
"click==6.7",
"PyYAML==3.12",
"oyaml>=0.4",
],
entry_points='''
[console_scripts]
mergeyaml=mergeyaml:cli
''',
)
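# Hypothetical install/run commands (not part of setup.py); the console_scripts
# entry point above exposes mergeyaml.cli as the `mergeyaml` command:
#   pip install .
#   mergeyaml --help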
|
StarcoderdataPython
|
59039
|
<filename>setup.py
#
# This file is part of the Fonolo Python Wrapper package.
#
# (c) Foncloud, Inc.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name='fonolo',
version='1.0.2',
description='A Python wrapper for the Fonolo Call-Back Service',
author='<NAME>',
author_email='<EMAIL>',
url='https://fonolo.com/',
license='MIT',
install_requires=[
'requests >= 2.1.0',
'six >= 1.9.0'
],
packages=[
'fonolo',
'fonolo.api',
'fonolo.exception'
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
StarcoderdataPython
|
6682179
|
<reponame>jphacks/D_2010
#coding: utf-8
import datetime
from django.db import models
from django.utils import timezone
#manages live stream URLs (IDs)
class Live(models.Model):
liveName = models.CharField(verbose_name='liveName', max_length=100)
liveUser = models.CharField(verbose_name='liveUser', max_length=100)
liveId = models.CharField(verbose_name='liveId', max_length=100)
pub_date = models.DateTimeField()
def __str__(self):
return self.liveId
#manages reactions
class Reactions(models.Model):
live = models.ForeignKey(Live, on_delete=models.CASCADE)
reaction = models.CharField(max_length=100)
reactionCount = models.IntegerField(default=0)
def __str__(self):
return self.reaction
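# Hypothetical usage from a Django shell (not part of this models module);
# the field values are placeholders.
#
#   live = Live.objects.create(liveName="demo stream", liveUser="alice",
#                              liveId="lv123456", pub_date=timezone.now())
#   Reactions.objects.create(live=live, reaction="clap", reactionCount=1)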
|
StarcoderdataPython
|
6631777
|
<reponame>scotthavens/smrf
'''
2015-11-30 <NAME>
updated 2015-12-31 <NAME>
    - start using pandas dataframes to help keep track of stations
Distributed forcing data over a grid using different methods
'''
import numpy as np
class IDW:
'''
    Inverse distance weighting class for distributing input data. Available
    options are:
* Standard IDW
* Detrended IDW
'''
def __init__(self, mx, my, GridX, GridY, mz=None, GridZ=None,
power=2, zeroVal=-1):
"""
Args:
mx: x locations for the points
my: y locations for the points
GridX: x locations in grid to interpolate over
GridY: y locations in grid to interpolate over
mz: elevation for the points
GridZ: Elevation values for the points to interpolate over for
trended data
            power: power of the inverse distance weighting
            zeroVal: value assigned to measurements flagged as zero before
                detrending (default -1)
        """
# measurement point locations
self.mx = mx
self.my = my
self.mz = mz
self.npoints = len(mx)
# grid information
self.GridX = GridX
self.GridY = GridY
self.GridZ = GridZ
# data information
self.data = None
self.nan_val = []
# IDW parameters
self.power = power
self.zeroVal = zeroVal
# calculate the distances
self.calculateDistances()
# calculate the weights
self.calculateWeights()
def calculateDistances(self):
'''
Calculate the distances from the measurement locations to the
grid locations
'''
# preallocate
self.distance = np.empty((self.GridX.shape[0],
self.GridX.shape[1],
self.npoints))
for i in range(self.npoints):
self.distance[:, :, i] = np.sqrt((self.GridX - self.mx[i])**2 +
(self.GridY - self.my[i])**2)
        # guard against zero distances (a grid cell sitting exactly on a
        # measurement point) by replacing them with the smallest non-zero
        # distance, so the weights stay finite
        nonzero = self.distance[self.distance > 0]
        if nonzero.size > 0:
            self.distance[self.distance == 0] = np.min(nonzero)
def calculateWeights(self):
'''
        Calculate the inverse distance weights for each grid cell
'''
# calculate the weights
self.weights = 1.0/(np.power(self.distance, self.power))
# if there are Inf values, set to 1 as the distance was 0
# self.weights[np.isinf(self.weights)] = 100
def calculateIDW(self, data, local=False):
'''
Calculate the IDW of the data at mx,my over GridX,GridY
Inputs:
            data - is the same size as mx,my
'''
nan_val = ~np.isnan(data)
w = self.weights[:, :, nan_val]
data = data[nan_val]
v = np.nansum(w * data, 2) / np.sum(w, 2)
return v
def detrendedIDW(self, data, flag=0, zeros=None, local=False):
'''
Calculate the detrended IDW of the data at mx,my over GridX,GridY
Inputs:
            data - is the same size as mx,my
'''
self.detrendData(data, flag, zeros)
v = self.calculateIDW(self.dtrend, local)
# vtmp = v.copy()
v = self.retrendData(v)
if zeros is not None:
v[v < 0] = 0
return v
def detrendData(self, data, flag=0, zeros=None):
'''
        Detrend the data using the measurement elevations self.mz
        data - is the same size as mx,my
flag - 1 for positive, -1 for negative, 0 for any trend imposed
'''
# calculate the trend on any real data
nan_val = np.isnan(data)
pv = np.polyfit(self.mz[~nan_val], data[~nan_val], 1)
# apply trend constraints
if flag == 1 and pv[0] < 0:
pv = np.array([0, 0])
elif (flag == -1 and pv[0] > 0):
pv = np.array([0, 0])
self.pv = pv
# detrend the data
el_trend = self.mz * pv[0] + pv[1]
if zeros is not None:
data[zeros] = self.zeroVal
self.dtrend = data - el_trend
def retrendData(self, idw):
'''
Retrend the IDW values
'''
# retrend the data
return idw + self.pv[0]*self.GridZ + self.pv[1]
|
StarcoderdataPython
|
9697798
|
<reponame>xuwenyihust/warp-gallery<filename>bilibili/run.py
from bilibili.WordCloudGenerator import WordCloudGenerator
from bilibili.UserInfo import user_info_map
def run(uid, mask_file_path):
word_cloud_generator = WordCloudGenerator()
videos = word_cloud_generator.get_videos_by_user(uid, 100)
barrages_file_path = word_cloud_generator.get_barrages_by_uid(uid, videos)
# barrages_file_path = "resources/barrages/barrage_by_uid_777536.csv"
word_cloud_generator.generate_graph_from_file(barrages_file_path, str(uid), mask_file_path)
def run_media_storm():
    # 影视飓风 (MediaStorm)
uid = user_info_map["media_storm"]["mid"]
mask_file_path = user_info_map["media_storm"]["mask"]
run(uid, mask_file_path)
def run_xiaomi():
    # 小米公司 (Xiaomi Corporation)
uid = user_info_map["xiaomi"]["mid"]
mask_file_path = user_info_map["xiaomi"]["mask"]
run(uid, mask_file_path)
# Failed
def run_luoxiang():
    # 罗翔说刑法 (Luo Xiang on criminal law)
uid = user_info_map["luoxiang"]["mid"]
mask_file_path = user_info_map["luoxiang"]["mask"]
run(uid, mask_file_path)
def run_lexburner():
uid = user_info_map["lexburner"]["mid"]
mask_file_path = user_info_map["lexburner"]["mask"]
run(uid, mask_file_path)
if __name__ == '__main__':
run_lexburner()
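# To generate a cloud for another configured channel, call run() with that
# channel's entry (hypothetical example using the existing "xiaomi" key):
#   run(user_info_map["xiaomi"]["mid"], user_info_map["xiaomi"]["mask"])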
|
StarcoderdataPython
|
1730332
|
<gh_stars>0
def to_rna(dna_strand):
map_dna_to_rna = {
"G": "C",
"C": "G",
"T": "A",
"A": "U",
}
rna = []
for dna in dna_strand:
rna.append(map_dna_to_rna.get(dna, ""))
return "".join(rna)
|
StarcoderdataPython
|