the-stack_0_18386 | #! /usr/bin/env python3
__author__ = 'Nina VERSTRAETE, Jacques TOEN & Nicolas JEANNE'
__copyright__ = 'GNU General Public License'
__version__ = '1.0.0'
__email__ = '[email protected]'
import argparse
import sys
import os
import logging
import subprocess
import concurrent.futures
import time
import parse_uniprot
import midi_operations
import parse_pdb
import protein_movie
def restricted_tempo(tempo_value):
'''
Check range for tempo argument, must be between 60 and 250.
param str tempo_value: value of the tempo argument in BPM.
return: the tempo.
rtype: int
'''
tempo_value = int(tempo_value)
if tempo_value < 60 or tempo_value > 250:
raise argparse.ArgumentTypeError('{} not in range 60 to 250.'.format(tempo_value))
return tempo_value
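# restricted_tempo is used below as the argparse 'type=' callable for --tempo,
# so out-of-range values are rejected at argument-parsing time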
if __name__ == '__main__':
prg_id = os.path.splitext(os.path.basename(__file__))[0]
descr = '''
{} v.{}
Created by {}.
Contact: {}
{}
Create a MIDI file from a protein entry of the UniProt database
(https://www.uniprot.org/).
If the data are available in the UniProt entry, a movie file of the 3D
representation of the protein will also be created.
'''.format(prg_id, __version__, __author__, __email__, __copyright__)
# Parse arguments
parser = argparse.ArgumentParser(description=descr,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-o', '--out', required=True, help='path to the results directory.')
parser.add_argument('-s', '--score', required=False, action='store_true',
help='''use musescore software to create the score
corresponding to the MIDI file.''')
parser.add_argument('-p', '--play', required=False, action='store_true',
help='play the music with Timidity, just for tests.')
parser.add_argument('-t', '--tempo', required=False, type=restricted_tempo,
help='set the tempo in BPM. Value between 60 and 250.')
parser.add_argument('-i', '--instruments', required=False, nargs=3,
help='''set channel 0, 1 and 2 instruments,
restricted to 3 values between 0 and 127
separated by spaces. Default is 0: Acoustic Grand,
42: Cello and 65: Alto Sax.
See: http://www.pjb.com.au/muscript/gm.html#patch for details.''')
parser.add_argument('-d', '--debug', required=False, action='store_true',
help='''debug mode, create a log file which details each
entry of the MIDI file.''')
parser.add_argument('uniprot_AN',
help='''the protein Accession Number in the UniProt
database. Example: Human Interleukin-8 > P10145''')
args = parser.parse_args()
# check if instruments are between 0 and 127
if args.instruments:
for i in range(len(args.instruments)):
instru = int(args.instruments[i])
if instru < 0 or instru > 127:
raise argparse.ArgumentTypeError('{} should be 3 integers between 0 and 127.'.format(args.instruments))
args.instruments[i] = instru
instrus = args.instruments
else:
instrus = [0, 42, 65]
# tempo
if args.tempo:
tempo = int(args.tempo)
else:
tempo = 100 # In BPM
# MIDI notes in major mode correspondence with AA sorted by decreasing
# frequency in the sequence. Keys are set as DO (48, 60, 72) degrees I,
# SOL (55, 67) degrees V, FA (53, 65) degrees IV, RE (50, 62) degrees II,
# MI (52, 64) degrees III, LA (57, 69) degrees VI and
# SI (59, 71) degrees VII. Finally, we add 7 alterations '#' following the
# ascending fifths (54, 66, 49, 61, 56, 68, 51)
initial_midi_keys = [48, 60, 72, 55, 67, 53, 65, 50, 62, 52, 64, 57, 69,
59, 71, 54, 66, 49, 61, 56, 68, 51]
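# e.g. the most frequent AA in the sequence is assigned MIDI key 48, the
# second most frequent 60, and so on through the list above (the actual
# assignment happens further below, once the AA frequencies have been computed)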
midi_keys = {}
# Physico-chemical properties of AA
AA_PHY_CHI = {'A': {'hydrophobic', 'small'},
'R': {'polar', 'pos_charged'},
'N': {'polar', 'small'},
'D': {'polar', 'small', 'neg_charged'},
'C': {'hydrophobic', 'polar', 'small'},
'E': {'polar', 'neg_charged'},
'Q': {'polar'},
'G': {'hydrophobic', 'small'},
'H': {'hydrophobic', 'polar', 'pos_charged', 'aromatic'},
'I': {'hydrophobic', 'aliphatic'},
'L': {'hydrophobic', 'aliphatic'},
'K': {'hydrophobic', 'polar', 'pos_charged'},
'M': {'hydrophobic'},
'F': {'hydrophobic', 'aromatic'},
'P': {'small'},
'S': {'polar', 'small'},
'T': {'hydrophobic', 'polar', 'small'},
'W': {'hydrophobic', 'polar', 'aromatic'},
'Y': {'hydrophobic', 'polar', 'aromatic'},
'V': {'hydrophobic', 'small', 'aliphatic'}}
# create the output directory
out_dir = os.path.abspath(args.out)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
# create the log file
log_path = os.path.join(out_dir, '{}.log'.format(prg_id))
if os.path.exists(log_path):
os.remove(log_path)
logging.basicConfig(filename=log_path,
level=logging.DEBUG,
format='%(asctime)s\t%(levelname)s:\t%(message)s',
datefmt='%Y/%m/%d %H:%M:%S')
logger = logging.getLogger(__name__)
logger.info(' '.join(sys.argv))
logger.info('Output directory: {}'.format(out_dir))
logger.info('Tempo: {} BPM'.format(tempo))
logger.info('Instruments: {} (general MIDI patch numbers, see: http://www.pjb.com.au/muscript/gm.html#patch)'.format(', '.join(map(str, instrus))))
logger.info('Create score: {}'.format(args.score))
# parsing of uniprot entry
protein = parse_uniprot.parse_entry(args.uniprot_AN, logger)
logger.info('UniProt accession number: {}'.format(args.uniprot_AN))
logger.info('Protein: {}'.format(protein['entry_name']))
logger.info('Organism: {}'.format(protein['organism']))
if 'PDB' in protein:
logger.info('PDB: {} (Protein Data Bank accession number)'.format(protein['PDB']))
else:
logger.info('PDB: No accession number in UniProt entry')
sequence = protein['seq']
sequence_length = len(sequence)
protein['seq'] = {}
if args.debug:
logger.info('AA sequence ({} AA): {}'.format(sequence_length,
sequence))
for i in range(sequence_length):
protein['seq'][i] = sequence[i]
# frequency of AA in the sequence
set_AA = set(''.join(sequence))
proportion_AA = {}
for aa in set_AA:
proportion_AA[aa] = sequence.count(aa) / sequence_length
# sort by decreasing frequency
proportion_AA = sorted(proportion_AA.items(),
key=lambda kv: kv[1],
reverse=True)
for idx, aa_proportion in enumerate(proportion_AA):
midi_keys[aa_proportion[0]] = initial_midi_keys[idx]
# set the result files base name
file_base_name = '{}_{}_{}_{}bpm_instrus'.format(args.uniprot_AN,
protein['entry_name'],
protein['organism'],
tempo)
for instru in instrus:
file_base_name = '{}-{}'.format(file_base_name, instru)
# create the MIDI file
midi_file_path = os.path.join(out_dir, '{}.midi'.format(file_base_name))
keys_duration = midi_operations.create_midi(midi_file_path, protein,
midi_keys, tempo, instrus,
AA_PHY_CHI, logger, args.debug)
print('MIDI file for {} {} ({}) created: {}'.format(protein['entry_name'],
protein['organism'],
args.uniprot_AN,
midi_file_path))
if 'PDB' in protein:
# create the directories for PDB data and frames
pdb_dir = os.path.join(os.path.abspath(args.out), 'pdb', '{}_{}'.format(protein['accession_number'], protein['PDB']))
frames_dir = os.path.join(pdb_dir, 'frames')
if not os.path.exists(frames_dir):
os.makedirs(frames_dir)
# get data from the PDB file
pdb_data = parse_pdb.get_pdb_info(protein,
pdb_dir,
logger)
# create a frame without colored AA for all AA outside the PDB data
existing_frames = sorted([png for png in os.listdir(frames_dir)])
if '{}_no-idx.png'.format(protein['PDB']) not in existing_frames:
print('\nCreating {} ({}) protein frame, please wait..'.format(protein['entry_name'],
protein['PDB']))
logger.info('Creating {} ({}) protein frame.'.format(protein['entry_name'],
protein['PDB']))
cmd_no_color = './create_pdb_frames.py -p {} -c {} -n {} -i {} {}'.format(pdb_dir,
pdb_data['chain'],
'no-idx',
1,
protein['PDB'])
logger.info(cmd_no_color)
sub = subprocess.run(cmd_no_color, shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# capturing the output
if sub.stdout:
logger.info(sub.stdout.decode('utf-8'))
print('Done!')
if sub.stderr:
logger.error(sub.stderr.decode('utf-8'))
print('Error!')
# create the commands for the python script which generates the pymol
# pictures with colored AA
cmd_list = []
for aa_idx, frame_idx in enumerate(pdb_data['frames_idx']):
if '{}_{}.png'.format(protein['PDB'],
frame_idx) not in existing_frames:
cmd = './create_pdb_frames.py -p {} -c {} -n {} -i {} --color_aa {}'.format(pdb_dir,
pdb_data['chain'],
frame_idx,
aa_idx + 1,
protein['PDB'])
cmd_list.append(cmd)
# threading to run the commands
if cmd_list:
nb_threads_to_do = len(cmd_list)
nb_threads_done = 0
errors = 0
print('\nCreating {} ({}) protein colored AA frames, please wait..'.format(protein['entry_name'],
protein['PDB']))
logger.info('Creating {} ({}) protein colored AA frames.'.format(protein['entry_name'],
protein['PDB']))
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
for cmd in cmd_list:
logger.info(cmd)
thread = executor.submit(subprocess.run,
cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# capturing the output
if thread.result().stdout:
logger.info(thread.result().stdout.decode('utf-8'))
nb_threads_done += 1
if thread.result().stderr:
logger.error(thread.result().stderr.decode('utf-8'))
nb_threads_done += 1
errors += 1
print('{}/{} threads ({} errors)'.format(nb_threads_done,
nb_threads_to_do,
errors))
# check if all frames are created else wait
while len(os.listdir(frames_dir)) != (len(pdb_data['frames_idx']) + 1):
time.sleep(1)
# create the movie
movie_path = os.path.join(out_dir, '{}.avi'.format(file_base_name))
if not os.path.exists(movie_path):
protein_movie.create_movie(movie_path, frames_dir, keys_duration,
midi_file_path, logger)
else:
msg = 'Movie file already exists: {}'.format(movie_path)
print(msg)
logger.info(msg)
# create the score
if args.score:
print('Creating the score:')
score_basename = '{}_{}_{}_{}bpm_score.pdf'.format(args.uniprot_AN,
protein['entry_name'],
protein['organism'],
tempo)
score_output = os.path.join(args.out, score_basename)
cmd = 'mscore -o {} {}'.format(score_output, midi_file_path)
subprocess.run(cmd, shell=True)
print('Score created at {}'.format(score_output))
# play the file with timidity if asked
if args.play:
cmd = 'timidity {}'.format(midi_file_path)
subprocess.run(cmd, shell=True)
|
the-stack_0_18389 | import tensorflow as tf
from tensorflow.keras.layers import Conv2D, MaxPool2D
class LocalisationNet(tf.keras.layers.Layer):
def __init__(self):
super(LocalisationNet, self).__init__()
self.conv_1 = Conv2D(8, kernel_size=7, activation="relu", kernel_initializer="he_normal")
self.maxpool_1 = MaxPool2D(strides=2)
self.conv_2 = Conv2D(10, kernel_size=5, activation="relu", kernel_initializer="he_normal")
self.maxpool_2 = MaxPool2D(strides=2)
def call(self, inputs):
x = self.conv_1(inputs)
x = self.maxpool_1(x)
x = self.conv_2(x)
x = self.maxpool_2(x)
return x |
the-stack_0_18390 | import basic
# keep reading input from the keyboard
while True:
text = input("basic >")
res, error = basic.run('<stdin>', text) # stdin comes from the keyboard
if error:
print(error.as_string())
else:
print(res)
|
the-stack_0_18391 | import numpy as np
import scipy.special
stdErf=scipy.special.erf(1/np.sqrt(2))
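# erf(1/sqrt(2)) ~= 0.6827, the probability mass within one standard deviation
# of a normal distribution; used in fit() below to size the search window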
def splitNormal(x,mu,sigma,cigma):
epsilon=cigma/sigma
alphas=sigma*np.ones_like(x)
alphas[x>mu]=cigma
return (1/np.sqrt(2*np.pi*sigma**2))*(2/(1+epsilon))*np.exp(-0.5*((x-mu)/alphas)**2)
def cdf(x,mu,sigma,cigma):
epsilon=cigma/sigma
alphas=sigma*np.ones_like(x)
alphas[x>mu]=cigma
betas=np.ones_like(x)
betas[x>mu]=epsilon
return (1/(1+epsilon))*(1 + betas*scipy.special.erf((x-mu) / np.sqrt(2 * alphas**2)) )
def inverse(F,mu,sigma,cigma):
epsilon=cigma/sigma
alphas=sigma*np.ones_like(F)
alphas[F>1/(1+epsilon)]=cigma
betas=np.ones_like(F)
betas[F>1/(1+epsilon)]=1/epsilon
return mu + np.sqrt(2 * alphas**2) * scipy.special.erfinv(betas*((1+epsilon)*F -1))
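# random() below draws samples by inverse-transform sampling: uniform deviates
# F in [0, 1) are mapped through inverse() above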
def random(n,mu,sigma,cigma):
Fs=np.random.random(n)
xs=inverse(Fs,mu,sigma,cigma)
return xs
def fit(xs):
xs=np.sort(xs)
N=xs.size
Delta=int(N*stdErf) #hardcoded version of erf(1/sqrt(2))
js=np.arange(0,N-Delta-1)
w_js=xs[js+Delta]-xs[js]
J=np.argmin(w_js)
w_J=w_js[J]
x_J=xs[J]
ks=np.arange(J+1,J+Delta-2)
theta_ks=(ks/N) - ((xs[ks]-x_J)/w_J)
K=ks[np.argmin(np.abs(theta_ks))]
mu=xs[K]
sigma=mu-x_J
cigma=w_J-sigma
return mu,sigma,cigma |
the-stack_0_18392 | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class ActionContractInput(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
ActionContractInput - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'input': 'PostInputContract',
'output': 'PostOutputContract'
}
self.attribute_map = {
'input': 'input',
'output': 'output'
}
self._input = None
self._output = None
@property
def input(self):
"""
Gets the input of this ActionContractInput.
Execution input contract
:return: The input of this ActionContractInput.
:rtype: PostInputContract
"""
return self._input
@input.setter
def input(self, input):
"""
Sets the input of this ActionContractInput.
Execution input contract
:param input: The input of this ActionContractInput.
:type: PostInputContract
"""
self._input = input
@property
def output(self):
"""
Gets the output of this ActionContractInput.
Execution output contract
:return: The output of this ActionContractInput.
:rtype: PostOutputContract
"""
return self._output
@output.setter
def output(self, output):
"""
Sets the output of this ActionContractInput.
Execution output contract
:param output: The output of this ActionContractInput.
:type: PostOutputContract
"""
self._output = output
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_json(self):
"""
Returns the model as raw JSON
"""
return json.dumps(sanitize_for_serialization(self.to_dict()))
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
the-stack_0_18394 | import pytest
from pydicom.dataset import Dataset
from dicomtrolley.core import DICOMObjectReference, Instance, Series, Study
@pytest.fixture
def a_study():
study = Study(uid="stu1", data=Dataset(), series=[])
series = Series(uid="ser2", data=Dataset(), parent=study, instances=[])
instance1 = Instance(uid="ins3", data=Dataset(), parent=series)
instance2 = Instance(uid="ins4", data=Dataset(), parent=series)
study.series = (series,)
series.instances = (instance1, instance2)
return study
def test_object_get(a_study):
study = a_study
series = a_study["ser2"]
instance = a_study["ser2"]["ins3"]
str(study.reference())
str(series.reference())
str(instance.reference())
assert len(study.all_instances()) == 2
assert len(series.all_instances()) == 2
assert len(instance.all_instances()) == 1
assert str(study) == "Study stu1"
assert str(series) == "Series ser2"
assert str(instance) == "Instance ins3"
assert instance.root().uid == study.uid
assert series.root().uid == study.uid
assert study.root().uid == study.uid
def test_object_exceptions(a_study):
with pytest.raises(KeyError):
_ = a_study["unknown"]
with pytest.raises(KeyError):
_ = a_study["ser2"]["unknown"]
def test_reference():
"""Incomplete references should yield an error"""
with pytest.raises(ValueError):
DICOMObjectReference(study_uid="foo", instance_uid="baz")
|
the-stack_0_18395 | from ..util import create_element
from .common import EWSAccountService, create_item_ids_element
class MarkAsJunk(EWSAccountService):
"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/markasjunk"""
SERVICE_NAME = 'MarkAsJunk'
def call(self, items, is_junk, move_item):
return self._chunked_get_elements(self.get_payload, items=items, is_junk=is_junk, move_item=move_item)
@staticmethod
def _get_elements_in_container(container):
from ..properties import MovedItemId
return container.findall(MovedItemId.response_tag())
def get_payload(self, items, is_junk, move_item):
# Takes a list of items and returns either success or raises an error message
mark_as_junk = create_element(
'm:%s' % self.SERVICE_NAME,
attrs=dict(IsJunk='true' if is_junk else 'false', MoveItem='true' if move_item else 'false')
)
item_ids = create_item_ids_element(items=items, version=self.account.version)
mark_as_junk.append(item_ids)
return mark_as_junk
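# The element built above corresponds to the MarkAsJunk request described in the
# MSDN reference at the top of the class: a MarkAsJunk node carrying the IsJunk and
# MoveItem attributes, with the list of item ids appended as its child element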
|
the-stack_0_18396 | from datamatch.filters import DissimilarFilter, NonOverlappingFilter
import unittest
import pandas as pd
from pandas.testing import assert_frame_equal
from datamatch.indices import ColumnsIndex, NoopIndex
from datamatch.matchers import ThresholdMatcher
from datamatch.scorers import AbsoluteScorer, MaxScorer, SimSumScorer
from datamatch.similarities import JaroWinklerSimilarity, StringSimilarity
from datamatch.variators import Swap
class TestThresholdMatcher(unittest.TestCase):
def test_match(self):
cols = ["a", "b"]
dfa = pd.DataFrame(
[
["ab", "cd"],
["rtx", "qw"]
],
columns=cols
)
dfb = pd.DataFrame(
[
["ab", "cd"],
["ae", "vb"],
["rt", "qw"]
],
columns=cols
)
matcher = ThresholdMatcher(
NoopIndex(), {"a": StringSimilarity()}, dfa, dfb
)
self.assertEqual(matcher._pairs, [(0.8, 1, 2), (1.0, 0, 0)])
self.assertEqual(
matcher.get_index_pairs_within_thresholds(), [(1, 2), (0, 0)])
assert_frame_equal(
matcher.get_sample_pairs(),
pd.DataFrame.from_records([
{"score_range": "1.00-0.95", "pair_idx": 0,
"sim_score": 1.0, "row_key": 0, "a": "ab", "b": "cd"},
{"score_range": "1.00-0.95", "pair_idx": 0,
"sim_score": 1.0, "row_key": 0, "a": "ab", "b": "cd"},
{"score_range": "0.85-0.80", "pair_idx": 0,
"sim_score": 0.8, "row_key": 1, "a": "rtx", "b": "qw"},
{"score_range": "0.85-0.80", "pair_idx": 0,
"sim_score": 0.8, "row_key": 2, "a": "rt", "b": "qw"},
], index=["score_range", "pair_idx", "sim_score", "row_key"])
)
assert_frame_equal(
matcher.get_sample_pairs(include_exact_matches=False),
pd.DataFrame.from_records([
{"score_range": "0.85-0.80", "pair_idx": 0,
"sim_score": 0.8, "row_key": 1, "a": "rtx", "b": "qw"},
{"score_range": "0.85-0.80", "pair_idx": 0,
"sim_score": 0.8, "row_key": 2, "a": "rt", "b": "qw"},
], index=["score_range", "pair_idx", "sim_score", "row_key"])
)
assert_frame_equal(
matcher.get_all_pairs(),
pd.DataFrame.from_records([
{"pair_idx": 0, "sim_score": 1.0,
"row_key": 0, "a": "ab", "b": "cd"},
{"pair_idx": 0, "sim_score": 1.0,
"row_key": 0, "a": "ab", "b": "cd"},
{"pair_idx": 1, "sim_score": 0.8,
"row_key": 1, "a": "rtx", "b": "qw"},
{"pair_idx": 1, "sim_score": 0.8,
"row_key": 2, "a": "rt", "b": "qw"},
], index=["pair_idx", "sim_score", "row_key"])
)
assert_frame_equal(
matcher.get_all_pairs(include_exact_matches=False),
pd.DataFrame.from_records([
{"pair_idx": 1, "sim_score": 0.8,
"row_key": 1, "a": "rtx", "b": "qw"},
{"pair_idx": 1, "sim_score": 0.8,
"row_key": 2, "a": "rt", "b": "qw"},
], index=["pair_idx", "sim_score", "row_key"])
)
def test_ensure_unique_index(self):
dfa = pd.DataFrame(
[[1, 2], [3, 4]], index=["a", "a"]
)
dfb = pd.DataFrame(
[[5, 6], [7, 8]], index=["a", "b"]
)
with self.assertRaisesRegex(
ValueError,
"Dataframe index contains duplicates. Both frames need to have index free of duplicates."):
ThresholdMatcher(NoopIndex(), {"a": StringSimilarity()}, dfa, dfb)
def test_ensure_same_columns(self):
dfa = pd.DataFrame(
[[1, 2], [3, 4]], columns=["a", "c"]
)
dfb = pd.DataFrame(
[[5, 6], [7, 8]], columns=["a", "b"]
)
with self.assertRaisesRegex(
ValueError,
"Dataframe columns are not equal."):
ThresholdMatcher(NoopIndex(), {"a": StringSimilarity()}, dfa, dfb)
def test_deduplicate(self):
cols = ['last', 'first']
df = pd.DataFrame([
['beech', 'freddie'],
['beech', 'freedie'],
['dupas', 'demia'],
['dupas', 'demeia'],
['brown', 'latoya'],
['bowen', 'latoya'],
['rhea', 'cherri'],
['rhea', 'cherrie'],
['be', 'freedie'],
['du', 'demeia'],
['teneisha', 'green'],
['tyler', 'green'],
['te neisha', 'green'],
['t', 'green'],
], columns=cols)
matcher = ThresholdMatcher(NoopIndex(), {
'last': JaroWinklerSimilarity(),
'first': JaroWinklerSimilarity()
}, df)
self.assertEqual(
matcher.get_index_clusters_within_thresholds(0.83),
[
frozenset({6, 7}),
frozenset({4, 5}),
frozenset({2, 3, 9}),
frozenset({10, 12, 13}),
frozenset({0, 8, 1}),
],
)
self.maxDiff = None
self.assertEqual(
matcher.get_clusters_within_threshold(0.83).to_string(),
'\n'.join([
' last first',
'cluster_idx pair_idx sim_score row_key ',
'0 0 0.990522 6 rhea cherri',
' 7 rhea cherrie',
'1 0 0.985297 10 teneisha green',
' 12 te neisha green',
' 1 0.878609 10 teneisha green',
' 13 t green',
' 2 0.876863 12 te neisha green',
' 13 t green',
'2 0 0.980748 2 dupas demia',
' 3 dupas demeia',
' 1 0.923472 3 dupas demeia',
' 9 du demeia',
' 2 0.902589 2 dupas demia',
' 9 du demeia',
'3 0 0.941913 4 brown latoya',
' 5 bowen latoya',
'4 0 0.939581 0 beech freddie',
' 1 beech freedie',
' 1 0.923472 1 beech freedie',
' 8 be freedie',
' 2 0.857679 0 beech freddie',
' 8 be freedie',
]),
)
def test_swap_variator(self):
cols = ['last', 'first']
df = pd.DataFrame([
['blake', 'lauri'],
['lauri', 'blake'],
['robinson', 'alexis'],
['robertson', 'alexis'],
['haynes', 'terry'],
['terry', 'hayes']
], columns=cols)
matcher = ThresholdMatcher(NoopIndex(), {
'last': JaroWinklerSimilarity(),
'first': JaroWinklerSimilarity()
}, df, variator=Swap('first', 'last'))
self.assertEqual(
matcher.get_index_pairs_within_thresholds(),
[(2, 3), (4, 5), (0, 1)]
)
def test_filters(self):
cols = ['uid', 'first', 'agency', 'start', 'end']
df = pd.DataFrame([
['1', 'john', 'slidell pd', 0, 10],
['2', 'john', 'slidell pd', 10, 20],
['3', 'john', 'slidell pd', 20, 30],
['4', 'john', 'gretna pd', 11, 21],
['5', 'john', 'gretna pd', 0, 7],
['6', 'john', 'gretna pd', 10, 18],
], columns=cols)
matcher = ThresholdMatcher(NoopIndex(), {
'first': JaroWinklerSimilarity()
}, df, filters=[
DissimilarFilter('agency'),
NonOverlappingFilter('start', 'end')
])
self.assertEqual(
matcher.get_index_pairs_within_thresholds(),
[(0, 3), (1, 4), (2, 4), (2, 5)]
)
def test_scorer(self):
columns = ['first_name', 'attract_id']
df = pd.DataFrame([
['john', 5],
['jim', 5],
['ted', 3],
['tedd', 2]
], columns=columns)
matcher = ThresholdMatcher(NoopIndex(), MaxScorer([
AbsoluteScorer('attract_id', 1),
SimSumScorer({
'first_name': JaroWinklerSimilarity()
})
]), df)
self.assertEqual(
matcher.get_clusters_within_threshold().to_string(),
'\n'.join([
' first_name attract_id',
'cluster_idx pair_idx sim_score row_key ',
'0 0 1.000000 0 john 5',
' 1 jim 5',
'1 0 0.941667 2 ted 3',
' 3 tedd 2',
]),
)
self.assertEqual(
matcher.get_clusters_within_threshold(
include_exact_matches=False).to_string(),
'\n'.join([
' first_name attract_id',
'cluster_idx pair_idx sim_score row_key ',
'1 0 0.941667 2 ted 3',
' 3 tedd 2',
]),
)
def test_func_scorer(self):
self.maxDiff = None
df = pd.DataFrame([
['j', 'john', 20],
['j', 'jim', 20],
['b', 'bill', 19],
['b', 'bob', 21]
], columns=['fc', 'name', 'age'])
matcher = ThresholdMatcher(
index=ColumnsIndex('fc'),
scorer=lambda a, b: 1.0 if a.age == b.age else 0.8,
dfa=df
)
self.assertEqual(
matcher.get_clusters_within_threshold().to_string(),
'\n'.join([
' fc name age',
'cluster_idx pair_idx sim_score row_key ',
'0 0 1.0 0 j john 20',
' 1 j jim 20',
'1 0 0.8 2 b bill 19',
' 3 b bob 21',
])
)
|
the-stack_0_18397 | # coding=utf-8
# Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import functools
import os
import re
from pants.backend.native.config.environment import Platform
from pants.backend.native.targets.external_native_library import ExternalNativeLibrary
from pants.backend.native.targets.packaged_native_library import PackagedNativeLibrary
from pants.backend.native.tasks.conan_prep import ConanPrep
from pants.base.build_environment import get_pants_cachedir
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import mergetree, safe_file_dump, safe_mkdir
from pants.util.memo import memoized_property
class ConanFetch(SimpleCodegenTask):
deprecated_scope = 'native-third-party-fetch'
deprecated_scope_removal_version = '1.16.0.dev0'
gentarget_type = ExternalNativeLibrary
sources_globs = ('include/**/*', 'lib/*',)
@property
def validate_sources_present(self):
return False
def synthetic_target_type(self, target):
return PackagedNativeLibrary
default_remotes = {
'conan-center': 'https://conan.bintray.com',
}
@classmethod
def register_options(cls, register):
super(ConanFetch, cls).register_options(register)
register('--conan-remotes', type=dict, default=cls.default_remotes, advanced=True,
fingerprint=True,
help='The conan remotes to download conan packages from.')
@classmethod
def implementation_version(cls):
return super(ConanFetch, cls).implementation_version() + [('ConanFetch', 1)]
@classmethod
def prepare(cls, options, round_manager):
super(ConanFetch, cls).prepare(options, round_manager)
round_manager.require_data(ConanPrep.tool_instance_cls)
class ConanConfigError(TaskError): pass
class ConanFetchError(TaskError): pass
@property
def _remotes_txt_content(self):
"""Generate a file containing overrides for Conan remotes which get applied to registry.json."""
return '{}\n'.format('\n'.join(
'{name} {url} {is_ssl}'.format(
name=name,
url=url,
is_ssl=re.match(r'^https://', url) is not None)
for name, url in self.get_options().conan_remotes.items()))
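# With the default remotes above, the generated remotes.txt content would look
# roughly like: 'conan-center https://conan.bintray.com True'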
def _conan_user_home(self, conan, in_workdir=False):
"""Create the CONAN_USER_HOME for this task fingerprint and initialize the Conan remotes.
See https://docs.conan.io/en/latest/reference/commands/consumer/config.html#conan-config-install
for docs on configuring remotes.
"""
# This argument is exposed so tests don't leak out of the workdir.
if in_workdir:
base_cache_dir = self.workdir
else:
base_cache_dir = get_pants_cachedir()
user_home_base = os.path.join(base_cache_dir, 'conan-support', 'conan-user-home')
# Locate the subdirectory of the pants shared cachedir specific to this task's option values.
user_home = os.path.join(user_home_base, self.fingerprint)
conan_install_base = os.path.join(user_home, '.conan')
# Conan doesn't copy remotes.txt into the .conan subdir after the "config install" command, it
# simply edits registry.json. However, it is valid to have this file there, and Conan won't
# touch it, so we use its presence to detect whether we have appropriately initialized the
# Conan installation.
remotes_txt_sentinel = os.path.join(conan_install_base, 'remotes.txt')
if not os.path.isfile(remotes_txt_sentinel):
safe_mkdir(conan_install_base)
# Conan doesn't consume the remotes.txt file just by being in the conan directory -- we need
# to create another directory containing any selection of files detailed in
# https://docs.conan.io/en/latest/reference/commands/consumer/config.html#conan-config-install
# and "install" from there to our desired conan directory.
with temporary_dir() as remotes_install_dir:
# Create an artificial conan configuration dir containing just remotes.txt.
remotes_txt_for_install = os.path.join(remotes_install_dir, 'remotes.txt')
safe_file_dump(remotes_txt_for_install, self._remotes_txt_content, mode='w')
# Configure the desired user home from this artificial config dir.
argv = ['config', 'install', remotes_install_dir]
workunit_factory = functools.partial(
self.context.new_workunit,
name='initial-conan-config',
labels=[WorkUnitLabel.TOOL])
env = {
'CONAN_USER_HOME': user_home,
}
cmdline, exit_code = conan.run(workunit_factory, argv, env=env)
if exit_code != 0:
raise self.ConanConfigError(
'Error configuring conan with argv {} and environment {}: exited non-zero ({}).'
.format(cmdline, env, exit_code),
exit_code=exit_code)
# Generate the sentinel file so that we know the remotes have been successfully configured for
# this particular task fingerprint in successive pants runs.
safe_file_dump(remotes_txt_sentinel, self._remotes_txt_content, mode='w')
return user_home
@memoized_property
def _conan_os_name(self):
return Platform.current.resolve_for_enum_variant({
'darwin': 'Macos',
'linux': 'Linux',
})
@property
def _copy_target_attributes(self):
basic_attributes = [a for a in super(ConanFetch, self)._copy_target_attributes
if a != 'provides']
return basic_attributes + [
'include_relpath',
'lib_relpath',
'native_lib_names',
]
def execute_codegen(self, target, target_workdir):
"""
Invoke the conan pex to fetch conan packages specified by a
`ExternalNativeLibrary` target.
:param ExternalNativeLibrary target: a target containing conan package specifications.
:param str target_workdir: where to copy the installed package contents to.
"""
conan = self.context.products.get_data(ConanPrep.tool_instance_cls)
# TODO: we should really be able to download all of these in one go, and we should make an
# upstream PR to allow that against Conan if not.
for conan_requirement in target.packages:
# See https://docs.conan.io/en/latest/reference/commands/consumer/install.html for
# documentation on the 'install' command.
argv = [
'install',
conan_requirement.pkg_spec,
'--settings', 'os={}'.format(self._conan_os_name),
]
for remote in self.get_options().conan_remotes:
argv.extend(['--remote', remote])
workunit_factory = functools.partial(
self.context.new_workunit,
name='install-conan-{}'.format(conan_requirement.pkg_spec),
labels=[WorkUnitLabel.TOOL])
# CONAN_USER_HOME is somewhat documented at
# https://docs.conan.io/en/latest/mastering/sharing_settings_and_config.html.
user_home = self._conan_user_home(conan)
env = {
'CONAN_USER_HOME': user_home,
}
with conan.run_with(workunit_factory, argv, env=env) as (cmdline, exit_code, workunit):
if exit_code != 0:
raise self.ConanFetchError(
'Error performing conan install with argv {} and environment {}: exited non-zero ({}).'
.format(cmdline, env, exit_code),
exit_code=exit_code)
# Read the stdout from the read-write buffer, from the beginning of the output, and convert
# to unicode.
conan_install_stdout = workunit.output('stdout').read_from(0).decode('utf-8')
pkg_sha = conan_requirement.parse_conan_stdout_for_pkg_sha(conan_install_stdout)
installed_data_dir = os.path.join(
user_home,
'.conan', 'data',
conan_requirement.directory_path,
'package',
pkg_sha)
# Copy over the contents of the installed package into the target output directory. These
# paths are currently hardcoded -- see `ExternalNativeLibrary`.
mergetree(os.path.join(installed_data_dir, conan_requirement.include_relpath),
os.path.join(target_workdir, 'include'))
mergetree(os.path.join(installed_data_dir, conan_requirement.lib_relpath),
os.path.join(target_workdir, 'lib'))
|
the-stack_0_18398 | #!/usr/bin/python3
import sys
import numpy as np
import matplotlib.pyplot as plt
if __name__ == '__main__':
datafile = sys.argv[1] if len(sys.argv) > 1 else "src/results/contention.txt"
data = []
for s in open(datafile):
size, time = s.split()
time = float(time) / 2.59 # convert to nanoseconds
data.append([int(size), time])
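# NOTE: the 2.59 divisor above presumably converts CPU cycles to nanoseconds,
# i.e. it assumes a ~2.59 GHz clock; adjust for the machine the data came from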
data = np.array(data, dtype=object)
plt.plot(data[:, 0], data[:, 1])
plt.xlabel("Number of Processes")
plt.ylabel("Block Read Time (ns)")
plt.show()
|
the-stack_0_18400 | import proj_pipeline
import proj_analysis
import streamlit as st
import pandas as pd
import numpy as np
import random
import plotly.express as px
import plotly.graph_objs as go
from plotly.subplots import make_subplots
from plotly.colors import n_colors
import requests
from PIL import Image, ImageFilter
from PIL.ImageFilter import (
BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE,
EMBOSS, FIND_EDGES, SMOOTH, SMOOTH_MORE, SHARPEN
)
import spotipy
from spotipy.oauth2 import SpotifyOAuth, SpotifyClientCredentials
import spotipy.oauth2 as oauth2
# THEME SETTINGS
st.set_page_config(
page_title='Spotify Analysis Dashboard',
page_icon=Image.open(requests.get('https://i.imgur.com/vbzB30k.png', stream=True).raw),
layout='wide'
)
st.set_option('deprecation.showPyplotGlobalUse', False)
st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)
# TITLE SECTION
st.sidebar.title('Spotify Analysis Dashboard')
# GATHER INPUT DATA
with st.sidebar.beta_expander('Enter Input Data', True):
playlist_input = st.text_area(
'Playlist URL(s)',
'https://open.spotify.com/playlist/37i9dQZEVXbLp5XoPON0wI')
ready_button = st.checkbox('Gather DataFrame')
# DATAFRAME FUNCTIONS
radio_page = st.empty()
submit = st.empty()
search_filter = st.empty()
def raw_dataframe():
'''
Gather RAW dataframe using user input from sidebar configuration;
raw due to intentional lack of cleaning, done to catch faulty
data that is later reported to user
Parameters:
|| NONE ||
Returns:
|| playlist_df (pd.DataFrame) ||
dataframe containing various labels and statistics
related to obtainable observations
'''
if playlist_input and ready_button:
# user = proj_pipeline.SpotifyUser(client_id, client_secret)
if len(playlist_input.split(',')) > 1:
playlist_df = proj_pipeline.pipeline_multip_spotify(playlist_input)
else:
playlist_df = proj_pipeline.pipeline_single_spotify(playlist_input)
return playlist_df
def retrieve_dataframe():
'''
Cleans raw dataframe; drops unnecessary columns,
preserves first instance of duplicates, and resets index
Parameters:
|| NONE ||
Returns:
|| _ (pd.DataFrame) ||
dataframe containing various labels and statistics
related to obtainable observations ready for filtering
'''
return raw_dataframe(
).drop(columns=['inv_dt', 'imp_dt']
).drop_duplicates(subset=['title', 'artist'], keep='first'
).reset_index(drop=True)
def alter_dataframe(df):
'''
Returns dataframe ready for analysis
Parameters:
|| df (pd.DataFrame) ||
cleaned dataframe derived from previous function
retrieve_dataframe()
Returns:
|| _ (pd.DataFrame) ||
dataframe containing various labels and statistics
related to obtainable observations ready for analysis
'''
if ready_button:
with st.sidebar.beta_expander("Select Data", False):
global radio_page
global submit
global search_filter
search_filter = st.radio(label='', options=['Filter', 'Search'])
if search_filter == 'Filter':
artist_year = pd.to_datetime(df['artist_date']).dt.year
st.subheader('Filter Parameters')
filter_playlist = st.multiselect(
'Playlist',
options=list(np.sort(data['playlist'].unique()))
)
filter_release = st.slider(
'Release Date',
min_value=int(artist_year.min()),
max_value=int(artist_year.max()),
value=(int(artist_year.min()), int(artist_year.max())),
step=1
)
st.subheader('Dashboard Views')
radio_page = st.selectbox(
label='Select at least one',
options=['Brief History', 'Tracks', 'Artists + Albums', 'Listening Trends', 'Random Statistics', 'Recommendations [Beta]']
)
submit = st.checkbox(label='Filter')
if submit:
return proj_analysis.analysis_filter_dataframe(df, filter_playlist, filter_release)
if search_filter == 'Search':
st.subheader('Search Parameters')
filter_by = st.radio(label='Filter by', options=['Song', 'Artist', 'Album'])
if filter_by == 'Song':
search_song = st.selectbox('Song', sorted(df['title'].unique()))
if df[df['title'] == search_song].shape[0] == 1:
search_artist = ''
search_album = ''
else:
search_artist = st.selectbox('Artist', sorted(df[df['title'] == search_song]['artist'].unique()))
search_album = st.selectbox('Album', sorted(df[(df['title'] == search_song) & (df['artist'] == search_artist)]['album'].unique()))
if filter_by == 'Artist':
search_artist = st.selectbox('Artist', sorted(df['artist'].unique()))
if df[df['artist'] == search_artist].shape[0] == 1:
search_song = ''
search_album = ''
else:
search_song = st.selectbox('Song', sorted(df[df['artist'] == search_artist]['title'].unique()))
search_album = st.selectbox('Album', sorted(df[df['artist'] == search_artist]['album'].unique()))
if filter_by == 'Album':
search_album = st.selectbox('Album', sorted(df['album'].unique()))
if df[df['album'] == search_album].shape[0] == 1:
search_song = ''
search_artist = ''
else:
search_song = ''
search_artist = st.selectbox('Artist', sorted(df[df['album'] == search_album]['artist'].unique()))
submit = st.checkbox(label='Search')
if submit:
return proj_analysis.analysis_search_dataframe(df, search_song, search_artist, search_album)
def project_pretty_time(time):
'''
Converts milliseconds to {}h{}m{}s time format
Parameters:
|| time (np.ndarray) ||
array containing duration of song in milliseconds
Returns:
|| _ (string) ||
string denoting sum of time in user readable manner
'''
duration_mins = time / 60000
hours = int(duration_mins // 60)
minutes = int(((duration_mins / 60) - hours) * 60)
seconds = abs(round((((duration_mins / 60) - hours) * 60 - round(((duration_mins / 60) - hours) * 60)) * 60))
if hours >= 1:
return f'{hours}h{minutes}m{seconds}s'
elif minutes >= 1:
return f'{minutes}m{seconds}s'
elif seconds >= 1:
return f'{seconds}s'
else:
return 'unspecified'
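# e.g. project_pretty_time(3730000) returns '1h2m10s' (3,730,000 ms is 1 h 2 min 10 s)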
# PAGE SKELETONS
def project_welcm_page():
'''
Welcome page for Spotify Analysis Dashboard
Parameters:
|| NONE ||
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
with st.sidebar.beta_expander('Resources', False):
st.write("[GitHub Documentation](https://github.com/lucas-nelson-uiuc/academia_epidemia/tree/main/spotipy_analysis)")
st.write("[Spotify Developer Dashboard](https://developer.spotify.com/dashboard/)")
st.write("[Stats for Spotify](https://www.statsforspotify.com/)")
st.markdown("<h1 style='text-align: center;'>Welcome to Spotify Analysis Dashboard</h1>", unsafe_allow_html=True)
st.markdown("<h2 style='text-align: center;'>The data hub for all your music listening...</h2>", unsafe_allow_html=True)
img_cols = st.beta_columns((0.2,1,0.2))
img_cols[1].image('https://images.prismic.io/soundcharts/727545d02420e55c5c6a376f633a1f02ebc59dc5_mapspot2.png?auto=compress,format')
data_grp = st.beta_columns((1,1,1))
data_grp[0].markdown("<h3><b>01. How to Access Your Data</b></h3>", unsafe_allow_html=True)
data_grp[0].markdown('''
Data for your dashboard is gathered by providing playlist URLs
Visit the [`Walkthrough Document`](https://github.com/lucas-nelson-uiuc/spotipy_analysis/blob/main/docs/walk_through.md)
to learn how to properly prepare your input data
''')
data_grp[1].markdown("<h3><b>02. Manipulating Your Data</b></h3>", unsafe_allow_html=True)
data_grp[1].markdown('''
Filter to include all observations that strictly match your criteria
Search to obtain statistics of an individual song, artist, or album
''')
data_grp[2].markdown("<h3><b>03. Dashboard Interaction</b></h3>", unsafe_allow_html=True)
data_grp[2].markdown('''
Analyze your music listening habits of the past and present
Leverage your unique insights to develop curtailed recommendations for future listening
''')
def project_dataq_page(r_df):
'''
Data review page for Spotify Analysis Dashboard
Parameters:
|| r_df (pd.DataFrame) ||
raw dataframe, used for relaying raw data to user
prior to analysis - mainly a way for the user to
review what data, if any, is faulty
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
page_cols = st.beta_columns((6,4,4))
page_cols[0].title('Gathered Data')
page_cols[0].markdown('''
This is the page to view your requested data before it is shipped off for analysis.
You can always come back to get a second glance, or you can assume that the data gathering
process works perfectly all the time
However, even if you are confident the request is perfect, you might be better equipped
to learn more about your music taste by taking a brief glance at your unique dataset
''')
duration = project_pretty_time(r_df['duration'].sum())
playlists = r_df['playlist'].unique().shape[0]
titles = r_df['title'].unique().shape[0]
artists = r_df['artist'].unique().shape[0]
albums = r_df['album'].unique().shape[0]
genres = r_df[r_df['genre'] != 'NA']['genre'].unique().shape[0]
items = [duration, playlists, titles, artists, albums, genres]
labels = ['Duration', 'Playlists', 'Songs', 'Artists', 'Albums', 'Genres']
colors = n_colors('rgb(4,74,4)', 'rgb(96,223,96)', 6, colortype='rgb')
page_cols[1].title(''); page_cols[2].title('')
for i, color, item, label in zip(range(len(items)), colors, items, labels):
if i % 2 == 0:
page_cols[1].markdown(f'''
<h3 style="background-color:{color};color:#ffffff;style=padding-left:20px;"> {item}</h3>
<p style="background-color:{color};color:#ffffff;"> {label}</p>
''', unsafe_allow_html=True)
if i % 2 == 1:
page_cols[2].markdown(f'''
<h3 style="background-color:{color};color:#ffffff;border-radius:2%;"> {item}</h3>
<p style="background-color:{color};color:#ffffff;border-radius:2%;"> {label}</p>
''', unsafe_allow_html=True)
page_cols = st.beta_columns((4,4,6))
page_cols[2].title('Faulty Data')
page_cols[2].markdown('''
Given the limitations of Spotify's API, some inaccuracies may occur,
most commonly incorrect or broad genre labeling, imprecise artist release dates, and
missing data entries
`NA` entries are replaced by either a proxy or a zero whereas duplicate entries are
reduced to the first instance
''')
attributes = [
'popularity', 'danceability', 'energy', 'loudness',
'acousticness', 'instrumentalness', 'liveness', 'valence'
]
skipped = len(playlist_input.split(',')) - r_df['playlist'].unique().shape[0]
un_gnr = r_df[r_df['genre'] == 'NA'].shape[0]
na_entr = r_df[r_df[attributes] == 0].notna().sum().sum()
dups = r_df.shape[0] - r_df.drop_duplicates(subset=['title', 'artist'], keep='first').reset_index(drop=True).shape[0]
inv_dt = r_df['inv_dt'].sum()
imp_dt = r_df['imp_dt'].sum()
items = [skipped, un_gnr, na_entr, dups, inv_dt, imp_dt]
labels = ['Skipped Playlists', 'Unlabeled Genres', 'NA Entries', 'Duplicate Entries', 'Invalid Dates', 'Imprecise Dates']
colors = n_colors('rgb(74,4,4)', 'rgb(223,96,96)', 6, colortype='rgb')
page_cols[0].title(''); page_cols[1].title('')
for i, color, item, label in zip(range(len(items)), colors, items, labels):
if i % 2 == 0:
page_cols[0].markdown(f'''
<h3 style="background-color:{color};color:#ffffff;style=padding-left:20px;"> {item}</h3>
<p style="background-color:{color};color:#ffffff;"> {label}</p>
''', unsafe_allow_html=True)
if i % 2 == 1:
page_cols[1].markdown(f'''
<h3 style="background-color:{color};color:#ffffff;border-radius:2%;"> {item}</h3>
<p style="background-color:{color};color:#ffffff;border-radius:2%;"> {label}</p>
''', unsafe_allow_html=True)
cols = st.beta_columns((6,4,4))
cols[0].title('Data Fixer')
cols[0].markdown('''
*Coming soon...*
Although the analyses attempt to mitigate these differences, now is your chance to label
data as you see best fit
''')
items = ['Enter genre for ...', 'Enter statistic for ...', 'Enter release date for ...', 'Many more ...']
labels = ['Indie', '83.3', '2013-05-16', 'User input here']
colors = n_colors('rgb(0,76,153)', 'rgb(0,128,255)', 4, colortype='rgb')
cols[1].title(''); cols[2].title('')
for i, color, item, label in zip(range(len(items)), colors, items, labels):
if i % 2 == 0:
cols[1].markdown(f'''
<h3 style="background-color:{color};color:#ffffff;style=padding-left:20px;"> {item}</h3>
<p style="background-color:{color};color:#ffffff;"> {label}</p>
''', unsafe_allow_html=True)
if i % 2 == 1:
cols[2].markdown(f'''
<h3 style="background-color:{color};color:#ffffff;border-radius:2%;"> {item}</h3>
<p style="background-color:{color};color:#ffffff;border-radius:2%;"> {label}</p>
''', unsafe_allow_html=True)
def project_histry_page(df):
'''
Brief History page for Spotify Analysis Dashboard
Parameters:
|| df (pd.DataFrame) ||
clean dataframe used for analysis
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
st.title('Spotify Activity Over Time')
with st.beta_expander('Description...'):
desc_cols = st.beta_columns((1,1))
desc_cols[0].subheader('Total Songs by User Date Added')
desc_cols[0].write('Returns graph of all songs, artists, albums, and genres that were added by the user in the provided `User Year` selectbox')
desc_cols[1].subheader('Total Songs by Release Date')
desc_cols[1].write('Returns graph of all songs, artists, albums, and genres that were released by all artists in the provided `User Year` selectbox')
desc_cols[0].subheader('Percentage by User Date Added')
desc_cols[0].write('Returns graph of all songs, artists, albums, and genres as a percentage of the sum of songs, artists, albums, and genres, respectively, as a function of the provided `User Year` selectbox')
desc_cols[1].subheader('Percentage by Release Date')
desc_cols[1].write('Returns graph of all songs, artists, albums, and genres as a percentage of the sum of songs, artists, albums, and genres, respectively, as a function of the provided `User Year` selectbox')
option_cols = st.beta_columns((1,1,1,1))
year = option_cols[0].selectbox(
'User Year',
options=['All Years']
+ sorted(df['user_date'].astype(str).str[:4].unique()),
key='13:58_0804')
if year != 'All Years':
genre = option_cols[1].selectbox(
'Genre',
options=['All Genres']
+ [attr.title() for attr in sorted(df[(df['genre'] != 'NA')
& (df['user_date'].astype(str).str[:4] == year)]['genre'].unique())],
key='13:36_0804')
else:
genre = option_cols[1].selectbox('Genre',
options=['All Genres']
+ [attr.title() for attr in sorted(df[df['genre'] != 'NA']['genre'].unique())],
key='13:36_0804')
option_01 = option_cols[2].radio(
label='Metric',
options=['Total', 'Percentage'],
key='tomato17:12')
option_02 = option_cols[3].radio(
label='By',
options=['User Date Added', 'Artist Release Date'],
key='tomato17:13')
# plot 1,colspan(2)
if year == 'All Years':
px_df = df.copy()
if year != 'All Years':
px_df = df[df['user_date'].astype(str).str[:4] == year].copy()
if genre != 'All Genres':
px_df = px_df[px_df['genre'] == genre.lower()]
if option_01 == 'Total':
y_axis_title = 'Count (#)'
if option_01 == 'Percentage':
y_axis_title = 'Percentage (%)'
if option_02 == 'User Date Added':
px_df = px_df[['user_date', 'title', 'artist', 'album', 'genre']]
px_df['date'] = px_df['user_date'].astype(str).str[:4].astype(int)
if option_02 == 'Artist Release Date':
px_df = px_df[['artist_date', 'title', 'artist', 'album', 'genre']]
px_df['date'] = px_df['artist_date'].astype(str).str[:4].astype(int)
px_df = px_df.groupby('date').agg({
'title': lambda x : x.nunique(),
'artist': lambda x : x.nunique(),
'album': lambda x : x.nunique(),
'genre': lambda x : x.nunique()
})
if option_01 == 'Percentage':
for col in px_df.columns:
px_df[col] = px_df[col].apply(lambda x : x / px_df[col].sum() * 100).round(2)
fig = make_subplots(rows=2, cols=1)
for i in range(len(px_df.columns)):
fig.add_trace(
go.Bar(
name=px_df.columns[i].title(),
x=list(px_df.index),
y=px_df[px_df.columns[i]],
text=px_df[px_df.columns[i]],
textposition='auto'
),
row=1, col=1
)
fig.update_layout(
yaxis_title=y_axis_title,
legend_title="Legend"
)
# plot 2,1
px_df = df.copy()
px_df['user_mnth'] = px_df['user_date'].astype(str).str[5:7].astype(int)
px_df['artist_mnth'] = px_df['artist_date'].astype(str).str[5:7].astype(int)
if year != 'All Years':
if option_02 == 'User Date Added':
sng_df = px_df[px_df['user_date'].astype(str).str[:4] == year].groupby('user_mnth').agg({'title':'nunique'})
art_df = px_df[px_df['user_date'].astype(str).str[:4] == year].groupby('user_mnth').agg({'artist':'nunique'})
alb_df = px_df[px_df['user_date'].astype(str).str[:4] == year].groupby('user_mnth').agg({'album':'nunique'})
gnr_df = px_df[px_df['user_date'].astype(str).str[:4] == year].groupby('user_mnth').agg({'genre':'nunique'})
else:
sng_df = px_df[px_df['artist_date'].astype(str).str[:4] == year].groupby('artist_mnth').agg({'title':'nunique'})
art_df = px_df[px_df['artist_date'].astype(str).str[:4] == year].groupby('artist_mnth').agg({'artist':'nunique'})
alb_df = px_df[px_df['artist_date'].astype(str).str[:4] == year].groupby('artist_mnth').agg({'album':'nunique'})
gnr_df = px_df[px_df['artist_date'].astype(str).str[:4] == year].groupby('artist_mnth').agg({'genre':'nunique'})
else:
if option_02 == 'User Date Added':
sng_df = px_df.groupby('user_mnth').agg({'title':'nunique'})
art_df = px_df.groupby('user_mnth').agg({'artist':'nunique'})
alb_df = px_df.groupby('user_mnth').agg({'album':'nunique'})
gnr_df = px_df.groupby('user_mnth').agg({'genre':'nunique'})
else:
sng_df = px_df.groupby('artist_mnth').agg({'title':'nunique'})
art_df = px_df.groupby('artist_mnth').agg({'artist':'nunique'})
alb_df = px_df.groupby('artist_mnth').agg({'album':'nunique'})
gnr_df = px_df.groupby('artist_mnth').agg({'genre':'nunique'})
yr_df = pd.DataFrame(
data=[[df.loc[i].item() if i in list(df.index) else 0 for i in range(1,13)] for df in [sng_df, art_df, alb_df, gnr_df]],
index=['Titles', 'Artists', 'Albums', 'Genres']
)
yr_df = yr_df.rename(columns={
0:'January', 1:'February', 2:'March', 3:'April', 4:'May', 5:'June',
6:'July', 7:'August', 8:'September', 9:'October', 10:'November', 11:'December'
})
if option_01 == 'Total':
xaxis = 'Aggregate Count (#)'
yaxis = 'Month'
yr_df = yr_df.transpose().cumsum()
if option_01 == 'Percentage':
xaxis = 'Aggregate Percentage (%)'
yaxis = 'Month'
yr_df = yr_df.transpose()
for column in yr_df.columns:
yr_df[column] = yr_df[column].apply(lambda x : x / yr_df[column].sum() * 100).round(0)
test_df = pd.DataFrame({
'month': ['Jan ', 'Feb ', 'Mar ', 'Apr ', 'May ', 'Jun ', 'Jul ', 'Aug ', 'Sep ', 'Oct ', 'Nov ', 'Dec '] * 4,
'type': ['song'] * 12 + ['artist'] * 12 + ['album'] * 12 + ['genre'] * 12,
'value': [yr_df.iloc[i][col] for col in list(yr_df.columns) for i in range(12)]
})
fig.add_trace(
go.Bar(
x=test_df['value'],
y=test_df['month'],
orientation='h',
marker=dict(
color=['#636EFA'] * 12 + ['#EF553B'] * 12 + ['#00CC96'] * 12 + ['#AB63FA'] * 12
),
showlegend=False
),
row=2, col=1
)
fig['layout']['xaxis2']['title'] = xaxis
fig['layout']['yaxis2']['title'] = yaxis
fig.update_traces(texttemplate='%{text:.2s}', textposition='outside')
fig.update_layout(uniformtext_minsize=8, uniformtext_mode='hide')
st.plotly_chart(fig, use_container_width=True)
##############################################
# polar graph
st.title('Taste of Music Over Time')
with st.beta_expander('Description'):
cols = st.beta_columns((1,1))
cols[0].subheader('Attribute Distribution')
cols[0].write('Distribution of Spotify-generated attributes over all songs added by the user in the provided `Year` range')
cols[1].subheader('Attribute Polar Plot')
cols[1].write('Spread of attribute scores (from 0 to 100) over all songs added by the user in the provided `Year` range and grouped by top ten artists, top ten genres, or as an average')
options_cols = st.beta_columns((1,1))
year = options_cols[0].selectbox('Year', options=['All Years'] + sorted(df['user_date'].astype(str).str[:4].unique()), key='15:41_0802')
by = options_cols[1].radio('Group', options=['Artist', 'Genre', 'None'])
px_df = df[df['genre'] != 'NA'].copy()
if year != 'All Years':
px_df = px_df[px_df['user_date'].astype(str).str[:4] == year]
if by != 'None':
if by == 'Artist':
key = 'artist'
tt_artist = px_df.groupby('artist').size().sort_values(ascending=False)[:10].index
tt_filter = sorted(tt_artist)
df_art = px_df[px_df['artist'].isin(tt_filter)]
polar_df = df_art.groupby('artist')[['popularity', 'danceability', 'energy', 'loudness', 'acousticness', 'instrumentalness', 'liveness', 'valence']].agg('mean')
if by == 'Genre':
key = 'genre'
tt_genre = px_df.groupby('genre').size().sort_values(ascending=False)[:10].index
tt_filter = sorted(tt_genre)
df_gnr = px_df[px_df['genre'].isin(tt_filter)]
polar_df = df_gnr.groupby('genre')[['popularity', 'danceability', 'energy', 'loudness', 'acousticness', 'instrumentalness', 'liveness', 'valence']].agg('mean')
for col in polar_df.columns:
if col not in ['popularity', 'loudness', 'artist']:
polar_df[col] = polar_df[col].multiply(100)
if col == 'loudness':
polar_df[col] = polar_df[col].multiply(-(100/60))
polar_df = pd.melt(polar_df)
polar_df[key] = list(tt_filter) * 8
attributes = [
'popularity', 'danceability', 'energy', 'loudness', 'acousticness',
'instrumentalness', 'liveness', 'valence'
]
colors = n_colors('rgb(0,153,76)', 'rgb(153,255,204)', 10, colortype='rgb')
fig = px.line_polar(polar_df, r="value", theta="variable",
color=key, line_close=True,
color_discrete_sequence=colors[::-1],
template='plotly_dark'
)
if by == 'None':
key = ''
tt_filter = ['Average']
polar_df = px_df[['popularity', 'danceability', 'energy', 'loudness', 'acousticness', 'instrumentalness', 'liveness', 'valence']]
for col in polar_df.columns:
if col not in ['popularity', 'loudness', 'artist']:
polar_df[col] = polar_df[col].multiply(100)
if col == 'loudness':
polar_df[col] = polar_df[col].multiply(-(100/60))
polar_df.loc['mean'] = polar_df.mean()
polar_df = pd.melt(polar_df)
polar_df = polar_df.groupby('variable').agg('mean').reset_index()
polar_df[key] = ['Average'] * 8
fig = px.bar_polar(polar_df, r="value", theta="variable",
color_discrete_sequence=px.colors.sequential.PuBuGn[::-1],
template='plotly_dark')
plot_cols = st.beta_columns((1,1))
plot_cols[1].plotly_chart(fig, use_container_width=True)
# violin plot
attributes = [
'popularity', 'danceability', 'energy', 'loudness', 'acousticness',
'instrumentalness', 'liveness', 'valence'
]
px_df = df.copy()
if year != 'All Years':
px_df = df[df['user_date'].astype(str).str[:4] == year].copy()
px_df = px_df[attributes]
for col in px_df:
if col not in ['popularity', 'loudness']:
px_df[col] = px_df[col].multiply(100)
if col == 'loudness':
px_df[col] = px_df[col].multiply(-(100/60))
colors = n_colors('rgb(0, 102, 51)', 'rgb(5, 255, 164)', 8, colortype='rgb')
fig = go.Figure()
for col, color in zip(list(px_df.columns)[::-1], colors):
fig.add_trace(go.Violin(x=px_df[col], line_color=color, name=col.title()))
fig.update_traces(orientation='h', side='positive', width=3, points=False)
fig.update_layout(xaxis_showgrid=True, showlegend=False)
plot_cols[0].plotly_chart(fig, use_container_width=True)
########################################################
# heat map absolute
st.title('Change in Attribute Preferences Over Time')
with st.beta_expander('Description'):
cols = st.beta_columns((1,1))
cols[0].subheader('Attribute Scores per Year')
cols[0].write('Color coding based on attribute scores by `User Year`')
cols[1].subheader('Attribute Derivatives')
cols[1].write('Color coding based on percent change in the attribute scores of the previous `User Year`')
heat_cols = st.beta_columns((1,1))
min_year = df['user_date'].astype(str).str[:4].astype(int).min()
max_year = df['user_date'].astype(str).str[:4].astype(int).max()
group_labels = [str(year) for year in range(min_year, max_year + 1)]
heat_data = []
for year in range(min_year, max_year + 1):
loop_df = df[df['user_date'].astype(str).str[:4].astype(int) == year]
pop_avg = loop_df['popularity'].mean().round(2)
dnc_avg = loop_df['danceability'].multiply(100).mean().round(2)
nrg_avg = loop_df['energy'].multiply(100).mean().round(2)
lud_avg = loop_df['loudness'].multiply(-(100/60)).mean().round(2)
acs_avg = loop_df['acousticness'].multiply(100).mean().round(2)
ins_avg = loop_df['instrumentalness'].multiply(100).mean().round(2)
liv_avg = loop_df['liveness'].multiply(100).mean().round(2)
val_avg = loop_df['valence'].multiply(100).mean().round(2)
heat_data.append([pop_avg, dnc_avg, nrg_avg, lud_avg, acs_avg, ins_avg, liv_avg, val_avg])
fig = px.imshow(heat_data,
labels=dict(x='Attribute', y='User Year', color='Attr Score'),
x=[
'Popularity', 'Danceability', 'Energy', 'Loudness',
'Acousticness', 'Instrumentalness',
'Liveness', 'Valence'
],
y=group_labels,
color_continuous_scale='Agsunset')
fig.update_xaxes(side='top')
heat_cols[0].plotly_chart(fig, use_container_width=True)
# heat map derivative
prct_data = [[0,0,0,0,0,0,0,0]]
for i in range(1, len(heat_data)):
row_data = []
for j in range(len(heat_data[i])):
row_data.append(((heat_data[i][j] - heat_data[i-1][j]) / heat_data[i-1][j]) * 100)
prct_data.append(row_data)
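    # Each cell is the percent change versus the previous year, e.g. an average energy
    # of 50 one year and 55 the next shows as (55 - 50) / 50 * 100 = 10 (illustrative).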
fig = px.imshow(prct_data,
labels=dict(x='Attribute', y='User Year', color='Pct Change'),
x=[
'Popularity', 'Danceability', 'Energy', 'Loudness',
'Acousticness', 'Instrumentalness',
'Liveness', 'Valence'
],
y=group_labels,
color_continuous_scale='haline')
fig.update_xaxes(side='top')
heat_cols[1].plotly_chart(fig, use_container_width=True)
def project_tracks_page(df):
'''
Tracks page for Spotify Analysis Dashboard
Parameters:
|| df (pd.DataFrame) ||
clean dataframe used for analysis
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
# data transformations
for col in df.columns:
if col in ['danceability', 'energy', 'acousticness', 'instrumentalness', 'liveness', 'valence']:
df[col] = df[col].multiply(100).round(2)
if col == 'loudness':
df[col] = df[col].multiply(-(100/60)).round(2)
# locating and creating instances for specific categories
attributes = ['duration', 'duration', 'artist_date', 'artist_date', 'user_date', 'user_date', 'user_time', 'user_time']
categories = ['Longest Track', 'Shortest Track', 'Newest Track', 'Oldest Track', 'Added Most Recently', 'Added Least Recently',
'Added Latest in Day', 'Added Earliest in Day']
for i, cat, attr in zip(range(8), categories, attributes):
if i % 4 == 0:
cols_group = st.beta_columns((1,1,1,1))
if i % 2 == 0:
if attr == 'duration':
cols_group[i % 4].title('{}'.format(project_pretty_time(df.loc[df['duration'].argmax(), 'duration'])))
cols_group[i % 4].image('{}'.format(df.loc[df['duration'].argmax(), 'img_url']))
cols_group[i % 4].subheader('Longest Track')
cols_group[i % 4].write('{}'.format(df.loc[df['duration'].argmax(), 'title']))
else:
cols_group[i % 4].title('{}'.format(df[df[attr] == max(df[attr])][attr].iloc[0,]))
cols_group[i % 4].image('{}'.format(df[df[attr] == max(df[attr])]['img_url'].iloc[0,]))
cols_group[i % 4].subheader(cat)
cols_group[i % 4].write('{}'.format(df[df[attr] == max(df[attr])]['title'].iloc[0,]))
if i % 2 != 0:
if attr == 'duration':
cols_group[i % 4].title('{}'.format(project_pretty_time(df.loc[df['duration'].argmin(), 'duration'])))
cols_group[i % 4].image('{}'.format(df.loc[df['duration'].argmin(), 'img_url']))
cols_group[i % 4].subheader('Shortest Track')
cols_group[i % 4].write('{}'.format(df.loc[df['duration'].argmin(), 'title']))
else:
cols_group[i % 4].title('{}'.format(df[df[attr] == min(df[attr])][attr].iloc[0,]))
cols_group[i % 4].image('{}'.format(df[df[attr] == min(df[attr])]['img_url'].iloc[0,]))
cols_group[i % 4].subheader(cat)
cols_group[i % 4].write('{}'.format(df[df[attr] == min(df[attr])]['title'].iloc[0,]))
# locating and creating instances for highest/lowest attribute score
attributes = ['popularity', 'danceability', 'energy', 'loudness', 'instrumentalness', 'acousticness', 'liveness', 'valence', 'tempo']
descriptions = [
'''The popularity of a track is a value between 0 and 100, with 100 being the most popular.
The popularity is calculated by algorithm and is based, in the most part, on the total
number of plays the track has had and how recent those plays are.''',
'''Danceability describes how suitable a track is for dancing based on a
combination of musical elements including tempo, rhythm stability, beat strength,
and overall regularity. A value of 0 is least danceable and 100 is most danceable.''',
'''Energy is a measure from 0 to 100 and represents a perceptual measure of
intensity and activity. Typically, energetic tracks feel fast, loud, and noisy.
For example, death metal has high energy, while a Bach prelude scores low on the
scale. Perceptual features contributing to this attribute include dynamic range,
perceived loudness, timbre, onset rate, and general entropy.''',
'''Overall loudness of a track in adjusted decibels (dB * -100/60). Loudness values
are averaged across the entire track and are useful for comparing relative loudness
of tracks. Loudness is the quality of a sound that is the primary psychological
correlate of physical strength (amplitude).''',
'''Predicts whether a track contains no vocals. Rap or spoken word tracks are clearly “vocal”.
The closer the instrumentalness value is to 100, the greater likelihood the track contains
no vocal content. Values above 50 are intended to represent instrumental tracks,
but confidence is higher as the value approaches 100.''',
'''Confidence measure from 0 to 100 of whether the track is acoustic.
100 represents high confidence the track is acoustic.
        0 represents low confidence the track is acoustic.''',
'''Detects the presence of an audience in the recording. Higher liveness values
represent an increased probability that the track was performed live.''',
'''Measure from 0 to 100 describing the musical positiveness conveyed by a track.
Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while
tracks with low valence sound more negative (e.g. sad, depressed, angry).''',
'''Overall estimated tempo of a track in beats per minute (BPM). In musical terminology,
tempo is the speed or pace of a given piece and derives directly from the average beat
duration.'''
]
for attr, desc in zip(attributes, descriptions):
if attr != 'tempo':
x_axis_range = [0, 100]
else:
x_axis_range = (df[df['tempo'] > 0]['tempo'].min(), df['tempo'].max())
attr_group = st.beta_columns((2,1,1))
# attribute desription and tabs (expanders)
attr_group[0].title(attr.title())
with attr_group[0].beta_expander('How does Spotify determine...'):
st.write(desc)
with attr_group[0].beta_expander('{} rankings...'.format(attr.title())):
rank_df = pd.DataFrame(df[['title', 'artist', attr]].set_index(['title', 'artist'])[attr].sort_values(ascending=False).rank(method='max', ascending=False)).rename(columns={attr:'rank'})
vals_df = df[['title', 'artist', attr]].set_index(['title', 'artist'])[attr].sort_values(ascending=False)
rank_df['value'] = vals_df.values
rank_df['rank'] = rank_df['rank'].astype(int)
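            # Rank 1 is the highest value for this attribute; method='max' assigns tied
            # tracks the largest rank position in their group (illustrative note).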
st.dataframe(rank_df)
with attr_group[0].beta_expander('{} distribution...'.format(attr.title())):
fig = px.histogram(df[attr])
fig.update_xaxes(range=x_axis_range)
st.plotly_chart(fig, use_container_width=True)
# "best" (highest) attribute score track
attr_group[1].title('{}'.format(df.loc[df[attr].argmax(), attr]))
attr_group[1].image('{}'.format(df.loc[df[attr].argmax(), 'img_url']))
attr_group[1].subheader('Highest {}'.format(attr.title()))
attr_group[1].write('{}'.format(df.loc[df[attr].argmax(), 'title']))
# "worst" (lowest) attribute score track
arg_df = df[df[attr] > 0].reset_index(drop=True)
attr_group[2].title('{}'.format(arg_df.loc[arg_df[attr].argmin(), attr]))
attr_group[2].image('{}'.format(arg_df.loc[arg_df[attr].argmin(), 'img_url']))
attr_group[2].subheader('Lowest {}'.format(attr.title()))
attr_group[2].write('{}'.format(arg_df.loc[arg_df[attr].argmin(), 'title']))
def project_artist_page(df):
'''
    Artist + Albums page for Spotify Analysis Dashboard
Parameters:
|| df (pd.DataFrame) ||
clean dataframe used for analysis
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
st.title('Top Artists by User Date Added')
with st.beta_expander('Description...'):
cols = st.beta_columns((1,1,1))
cols[0].subheader('Song Count')
cols[0].write('Filters the dataset to include all data from the top ten artists by song count')
cols[1].subheader('Duration')
cols[1].write('Filters the dataset to include all data from the top ten artists by song duration (milliseconds)')
cols[2].subheader('Album Count')
cols[2].write('Filters the dataset to include all data from the top ten artists by album count')
temp_df = df.copy()
temp_df['user_date'] = temp_df['user_date'].astype(str).str[:4].astype(int)
columns = st.beta_columns((1,1))
yr_options = ['All Years'] + [year for year in set(temp_df['user_date'])]
yr_slctbox = columns[0].selectbox(label='User Year', options=yr_options, key='time_select')
radio = columns[1].radio(label='Parameter', options=['Song Count', 'Song Duration', 'Album Count'], key='time_radio')
if yr_slctbox == 'All Years':
if 'Song Count' in radio:
pie_df = temp_df.groupby('artist').size().sort_values(ascending=False)[:10]
values = pie_df
if 'Song Duration' in radio:
pie_df = temp_df.groupby('artist').agg({'duration':'sum'}).sort_values('duration', ascending=False)[:10]
values = pie_df['duration']
if 'Album Count' in radio:
pie_df = temp_df.groupby('artist').agg({'album':'nunique'}).sort_values('album', ascending=False)[:10]
values = pie_df['album']
else:
temp_df = temp_df[temp_df['user_date'] == yr_slctbox]
if 'Song Count' in radio:
pie_df = temp_df.groupby('artist').size().sort_values(ascending=False)[:10]
values = pie_df
if 'Song Duration' in radio:
pie_df = temp_df.groupby('artist').agg({'duration':'sum'}).sort_values('duration', ascending=False)[:10]
values = pie_df['duration']
if 'Album Count' in radio:
pie_df = temp_df.groupby('artist').agg({'album':'nunique'}).sort_values('album', ascending=False)[:10]
values = pie_df['album']
labels = list(pie_df.index)
colors = px.colors.diverging.curl
fig = make_subplots(rows=1, cols=2, specs=[[{"type": "pie"}, {"type": "bar"}]])
fig.add_trace(
go.Pie(
values=values,
labels=labels
),
row=1, col=1
).update_traces(
hoverinfo='label', textinfo='percent', textfont_size=14,
marker=dict(colors=colors, line=dict(color='#000000', width=2))
)
fig.add_trace(
go.Bar(
x=labels,
y=values,
marker=dict(color=values, colorscale=px.colors.diverging.curl, reversescale=True)
),
row=1, col=2
)
st.plotly_chart(fig, use_container_width=True)
####################
st.title('Attribute Distribution by Artist')
with st.beta_expander('Description...'):
cols = st.beta_columns((1.5,3))
cols[0].subheader('Parameters')
cols[0].write('Similar filtering capabilities to previous graph.')
cols[1].subheader('Interpretation')
cols[1].write('''
Assuming each artist obtained the same value for the specified parameter, this is how their attribute distributions
would compare to one another.
            Although there are no labels on the x-axis denoting what the colors mean, the labels
and their corresponding values can be found in the dataframe box just above the graph.
''')
attributes = ['popularity', 'danceability', 'energy', 'loudness', 'instrumentalness', 'acousticness', 'liveness', 'valence']
temp_df = df[attributes + ['title', 'artist', 'user_date', 'duration', 'album']].copy()
temp_df['user_date'] = temp_df['user_date'].astype(str).str[:4].astype(int)
columns = st.beta_columns((1,1))
yr_options = ['All Years'] + [year for year in set(temp_df['user_date'])]
yr_slctbox = columns[0].selectbox(label='User Year', options=yr_options, key='keytime')
radio = columns[1].radio(label='Parameter', options=['Song Count', 'Song Duration', 'Album Count'], key='keytime2')
if yr_slctbox == 'All Years':
if 'Song Count' in radio:
tt_artist = temp_df.groupby('artist').size().sort_values(ascending=False)[:10].index
if 'Song Duration' in radio:
tt_artist = temp_df.groupby('artist').agg({'duration':'sum'}).sort_values('duration', ascending=False)[:10].index
if 'Album Count' in radio:
tt_artist = temp_df.groupby('artist').agg({'album':'nunique'}).sort_values('album', ascending=False)[:10].index
else:
temp_df = temp_df[temp_df['user_date'] == yr_slctbox]
if 'Song Count' in radio:
tt_artist = temp_df.groupby('artist').size().sort_values(ascending=False)[:10].index
if 'Song Duration' in radio:
tt_artist = temp_df.groupby('artist').agg({'duration':'sum'}).sort_values('duration', ascending=False)[:10].index
if 'Album Count' in radio:
tt_artist = temp_df.groupby('artist').agg({'album':'nunique'}).sort_values('album', ascending=False)[:10].index
temp_df['loudness'] = temp_df['loudness'].multiply(-(100/60))
for col in temp_df.columns:
if col not in ['popularity', 'loudness', 'artist', 'title', 'user_date', 'album']:
temp_df[col] = temp_df[col].multiply(100)
wk_df = temp_df[temp_df['artist'].isin(tt_artist)].groupby('artist').agg({
'title':'size',
'popularity':'mean',
'danceability':'mean',
'energy':'mean',
'loudness':'mean',
'instrumentalness':'mean',
'acousticness':'mean',
'liveness':'mean',
'valence':'mean'
}).sort_values('title', ascending=False).drop(columns='title')
for i in range(len(wk_df.index)):
wk_df.iloc[i] = wk_df.iloc[i] * 100 / wk_df.iloc[i].sum()
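    # Each artist's row is rescaled to sum to 100, so the stacked bars compare the
    # relative weight of the eight attribute means per artist rather than raw values.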
with st.beta_expander('DataFrame'):
st.dataframe(wk_df)
colors = px.colors.sequential.Cividis
x_data = [list(wk_df.iloc[wk_df.shape[0] - 1 - i].values) for i in range(wk_df.shape[0])]
y_data = tt_artist[::-1]
fig = go.Figure()
for i in range(0, len(x_data[0])):
for xd, yd in zip(x_data, y_data):
fig.add_trace(go.Bar(
x=[xd[i]], y=[yd],
orientation='h',
marker=dict(
color=colors[i],
line=dict(color='rgb(248, 248, 249)', width=1)
)
))
fig.update_layout(
xaxis=dict(
showgrid=False,
showline=False,
showticklabels=False,
zeroline=False,
domain=[0.15, 1]
),
yaxis=dict(
showgrid=False,
showline=False,
showticklabels=False,
zeroline=False,
),
barmode='stack',
margin=dict(l=120, r=10, t=140, b=80),
showlegend=False,
)
annotations = []
for yd, xd in zip(y_data, x_data):
annotations.append(dict(xref='paper', yref='y',
x=0.14, y=yd,
xanchor='right',
text=str(yd),
font=dict(family='Arial', size=14,
color='rgb(255,255,255)'),
showarrow=False, align='right'))
fig.update_layout(annotations=annotations)
st.plotly_chart(fig, use_container_width=True)
####################
st.title('Artist Album Workshop')
with st.beta_expander('Description...'):
st.write('''
At its core, music is a form of art. However, one medium of the art that often goes overlooked is the album
cover that artists use as a symbol of their music.
This neat feature allows you to not only explore the art that artists attach to their music but also alter
various features to derive a piece of art that the artist may have considered otherwise.
At the end of the day, it's just a little fun to have while learning more about your taste of music.
''')
wksp = st.beta_columns((1,2))
filters = ['RGB Inverse', 'Blur', 'Contour', 'Detail', 'Edges', 'Enhance', 'Enhance+', 'Emboss', 'Smooth', 'Smooth+', 'Sharpen']
#BLUR, CONTOUR, DETAIL, EDGE_ENHANCE, EDGE_ENHANCE_MORE,EMBOSS, FIND_EDGES, SMOOTH, SMOOTH_MORE, SHARPEN
option = wksp[0].radio(label='Operation', options=filters)
artist = wksp[1].selectbox('Artist', options=list(sorted(df['artist'].unique())), key='it_is_1:56_dude')
pict_df = df.copy()
img_list = []
if artist != 'None':
art_df = pict_df[pict_df['artist'] == artist]
if art_df['img_url'].unique().shape[0] > 1:
album = wksp[1].selectbox('Album', options=list(art_df['album'].unique()), key='it_is_2:05_dude')
img_url = art_df[art_df['album'] == album]['img_url'].iloc[0]
img_list.append(img_url)
else:
img_url = art_df['img_url'].iloc[0]
album = wksp[1].selectbox('Album', options=list(art_df['album'].unique()), key='it_is_2:05_dude')
img_list.append(img_url)
wksp = st.beta_columns((1,1))
if option == 'RGB Inverse':
image = Image.open(requests.get(img_list[0], stream=True).raw)
wksp[0].subheader('Before Operation')
wksp[0].image(image)
r, g, b = image.split()
image = Image.merge("RGB", (b, g, r))
wksp[1].subheader('After Operation')
wksp[1].image(image)
if option == 'Blur':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(BLUR))
if option == 'Contour':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(CONTOUR))
if option == 'Detail':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(DETAIL))
if option == 'Edges':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(FIND_EDGES))
if option == 'Enhance':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(EDGE_ENHANCE))
if option == 'Enhance+':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(EDGE_ENHANCE_MORE))
if option == 'Emboss':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(EMBOSS))
if option == 'Smooth':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(SMOOTH))
if option == 'Smooth+':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(SMOOTH_MORE))
if option == 'Sharpen':
image = Image.open(requests.get(img_url, stream=True).raw)
wksp[0].subheader('Before Operation'); wksp[0].image(image)
wksp[1].subheader('After Operation'); wksp[1].image(image.filter(SHARPEN))
def project_trends_page(df):
'''
Listening Trends page for Spotify Analysis Dashboard
Parameters:
|| df (pd.DataFrame) ||
clean dataframe used for analysis
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
st.title('Attribute Trends Over Time')
with st.beta_expander('Description...'):
        st.write('Average attribute scores by user year (top) and the year-over-year percent change of each attribute (bottom).')
px_df = df.copy()
px_df['user_date'] = px_df['user_date'].astype(str).str[:4]
px_df = px_df.groupby('user_date').agg('mean').drop(columns=['duration', 'explicit', 'tempo', 'signature'])
for col in px_df.columns:
if col not in ['popularity', 'loudness']:
px_df[col] = px_df[col].multiply(100)
if col == 'loudness':
px_df[col] = px_df[col].multiply(-100/60)
pct_df = px_df.pct_change().fillna(0).round(2).multiply(100)
fig = make_subplots(rows=2, cols=1,
shared_xaxes=True,
# vertical_spacing=0.02,
subplot_titles=("Attribute Score", "Percentage Change"))
colors = n_colors('rgb(255,102,102)', 'rgb(102,102,255)', 8, colortype='rgb')
for i, ncolor, px_col, pct_col in zip(range(px_df.shape[1]), colors, px_df, pct_df):
fig.add_trace(go.Scatter(x=list(px_df.index), y=px_df[px_col],
name=px_col.title(),
legendgroup='group'+f'{i}',
line=dict(color=ncolor)),
row=1, col=1)
fig.add_trace(go.Bar(x=list(pct_df.index), y=pct_df[pct_col],
name=pct_col.title(),
legendgroup='group'+f'{i}',
marker_color=ncolor,
showlegend=False),
row=2, col=1)
st.plotly_chart(fig, use_container_width=True)
##############################################
st.title('Attribute Distributions Over Time')
with st.beta_expander('Description...'):
        st.write('Violin plots showing the distribution of each attribute, filtered by the year, genre and fixed-attribute range selected below.')
attributes = ['popularity', 'danceability', 'energy', 'loudness', 'acousticness',
'instrumentalness', 'liveness', 'valence']
cols_group = st.beta_columns((1,1,1,1))
year = cols_group[0].selectbox(label='Year', options=['All Years'] + sorted(df['user_date'].astype(str).str[:4].astype(int).unique()))
if year != 'All Years':
genre = cols_group[1].selectbox(label='Genre', options=['All Genres'] + sorted(df.fillna('NA')[(df['genre'] != 'NA') & (df['user_date'].astype(str).str[:4].astype(int) == year)]['genre'].unique()))
else:
genre = cols_group[1].selectbox(label='Genre', options=['All Genres'] + sorted(df[df['genre'] != 'NA']['genre'].unique()))
attr = cols_group[2].selectbox('Fixed Attribute', options=['None'] + [atbt.title() for atbt in attributes])
if attr != 'None':
attr_slid = cols_group[3].slider('Fixed Range', 0, 100, (0,100))
if attr == 'None':
attr_slid = cols_group[3].info('No fixed attribute')
    px_df = df[df['genre'] != 'NA'][attributes + ['user_date', 'genre']].copy()
    for col in attributes:
        if col not in ['popularity', 'loudness']:
px_df[col] = px_df[col].multiply(100)
if col == 'loudness':
px_df[col] = px_df[col].multiply(-(100/60))
if (year != 'All Years') & (genre != 'All Genres'):
px_df = px_df[(px_df['user_date'].astype(str).str[:4].astype(int) == year) & (px_df['genre'] == genre)][attributes]
if attr != 'None':
px_df = px_df[(px_df[attr.lower()] >= int(attr_slid[0])) & (px_df[attr.lower()] <= int(attr_slid[1]))]
elif (year != 'All Years') & (genre == 'All Genres'):
px_df = px_df[px_df['user_date'].astype(str).str[:4].astype(int) == year][attributes]
if attr != 'None':
px_df = px_df[(px_df[attr.lower()] >= int(attr_slid[0])) & (px_df[attr.lower()] <= int(attr_slid[1]))]
elif (year == 'All Years') & (genre != 'All Genres'):
px_df = px_df[px_df['genre'] == genre][attributes]
if attr != 'None':
px_df = px_df[(px_df[attr.lower()] >= int(attr_slid[0])) & (px_df[attr.lower()] <= int(attr_slid[1]))]
elif (year == 'All Years') & (genre == 'All Genres'):
px_df = px_df[attributes]
if attr != 'None':
px_df = px_df[(px_df[attr.lower()] >= int(attr_slid[0])) & (px_df[attr.lower()] <= int(attr_slid[1]))]
else:
st.error('Combination does not exist')
colors = n_colors('rgb(0, 102, 51)', 'rgb(5, 255, 164)', 8, colortype='rgb')
fig = go.Figure()
for col, color in zip(list(px_df.columns)[::-1], colors):
fig.add_trace(go.Violin(x=px_df[col], line_color=color, name=col.title()))
fig.update_traces(orientation='h', side='positive', width=3, points=False)
fig.update_layout(xaxis_showgrid=True, showlegend=False, xaxis_title='Attribute Score')
st.plotly_chart(fig, use_container_width=True)
##############################################
st.title('Attribute Correlations Over Time')
with st.beta_expander('Description...'):
        st.write('Scatter plots (2D, 3D or pairplot) showing how pairs of attributes relate across your top genres and over time.')
cols_group = st.beta_columns((2.4,1.2,1.8,1.8))
group = cols_group[0].radio(label='Group', options=['Genre', 'Time', 'Pairplot'], key='20:49_0803')
top_ten_genre = list(df[df['genre'] != 'NA'].groupby('genre').size().sort_values(ascending=False)[:10].index)
if 'Pairplot' in group:
        scat_df = df[df['genre'].isin(top_ten_genre)].copy()
scat_cols = ['popularity', 'danceability', 'energy', 'loudness', 'acousticness']
for col in scat_cols:
if col not in ['popularity', 'loudness']:
scat_df[col] = scat_df[col].multiply(100)
if col == 'loudness':
scat_df[col] = scat_df[col].multiply(-(100/60))
fig = px.scatter_matrix(scat_df, dimensions=scat_cols,
color='genre', color_discrete_sequence=px.colors.sequential.RdBu)
st.plotly_chart(fig, use_container_width=True)
if 'Genre' in group:
dimension = cols_group[1].radio(label='Dimensions', options=['2D', '3D'], key='tomato1')
if '2D' in dimension:
attributes = ['Popularity', 'Danceability', 'Energy', 'Loudness', 'Instrumentalness',
'Acousticness', 'Liveness', 'Valence']
x = cols_group[2].selectbox(label='Attribute 01',
options = attributes,
key='tomato2')
y = cols_group[3].selectbox(label='Attribute 02',
options = attributes,
key='tomato3')
px_df = df[(df['genre'] != 'NA') & (df['genre'].isin(top_ten_genre))].copy()
px_df['loudness'] = px_df['loudness'].multiply(-100/60)
fig = px.scatter(px_df, x=x.lower(), y=y.lower(), color='genre')
st.plotly_chart(fig, use_container_width=True)
if '3D' in dimension:
attributes = ['Popularity', 'Danceability', 'Energy', 'Loudness', 'Instrumentalness',
'Acousticness', 'Liveness', 'Valence']
x = cols_group[2].selectbox(label='Attribute 01',
options = attributes,
key='tomato4')
y = cols_group[3].selectbox(label='Attribute 02',
options = attributes,
key='tomato5')
px_df = df[(df['genre'] != 'NA') & (df['genre'].isin(top_ten_genre))].copy()
px_df['loudness'] = px_df['loudness'].multiply(-100/60)
px_df['user_year'] = px_df['user_date'].astype(str).str[:4].astype(int)
px_df['user_month'] = px_df['user_date'].astype(str).str[5:7].astype(int)
px_df['user_day'] = px_df['user_date'].astype(str).str[-2:].astype(int)
px_df['user_ym'] = px_df['user_year'] + (px_df['user_month'] / 12) + (px_df['user_day'] / 365)
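            # Approximate fractional year for the z-axis, e.g. 2020-07-15 maps to roughly
            # 2020 + 7/12 + 15/365 ~ 2020.62 (month and day fractions overlap slightly; illustrative).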
fig = px.scatter_3d(px_df, x=x.lower(), y=y.lower(), z='user_ym', color='genre')
st.plotly_chart(fig, use_container_width=True)
if 'Time' in group:
        dimension = cols_group[1].radio(label='Dimensions', options=['2D', '3D'], key='tomato6')
if '2D' in dimension:
attributes = ['Popularity', 'Danceability', 'Energy', 'Loudness', 'Instrumentalness',
'Acousticness', 'Liveness', 'Valence']
x = cols_group[2].selectbox(label='Attribute 01',
options = attributes,
key='tomato7')
y = cols_group[3].selectbox(label='Attribute 02',
options = attributes,
key='tomato8')
px_df = df.copy()
px_df['loudness'] = px_df['loudness'].multiply(-100/60)
px_df['user_year'] = px_df['user_date'].astype(str).str[:4].astype(int)
fig = px.scatter(px_df, x=x.lower(), y=y.lower(), color='user_year')
st.plotly_chart(fig, use_container_width=True)
if '3D' in dimension:
attributes = ['Popularity', 'Danceability', 'Energy', 'Loudness', 'Instrumentalness',
'Acousticness', 'Liveness', 'Valence']
x = cols_group[2].selectbox(label='Attribute 01',
options = attributes,
key='tomato9')
y = cols_group[3].selectbox(label='Attribute 02',
options = attributes,
key='tomato10')
px_df = df[(df['genre'] != 'NA') & (df['genre'].isin(top_ten_genre))].copy()
px_df['loudness'] = px_df['loudness'].multiply(-100/60)
px_df['user_year'] = px_df['user_date'].astype(str).str[:4].astype(int)
px_df['user_month'] = px_df['user_date'].astype(str).str[5:7].astype(int)
px_df['user_day'] = px_df['user_date'].astype(str).str[-2:].astype(int)
px_df['user_ym'] = px_df['user_year'] + (px_df['user_month'] / 12) + (px_df['user_day'] / 365)
fig = px.scatter_3d(px_df, x=x.lower(), y=y.lower(), z='user_ym', color='user_ym')
st.plotly_chart(fig, use_container_width=True)
def project_randm_page(df):
'''
Random Statistics page for Spotify Analysis Dashboard
Parameters:
|| df (pd.DataFrame) ||
clean dataframe used for analysis
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
    idx = random.randint(0, df.shape[0] - 1)
idx_title = df.loc[idx, 'title']
idx_artist = df.loc[idx, 'artist']
idx_album = df.loc[idx, 'album']
idx_genre = df.loc[idx, 'genre']
idx_track_url = df.loc[idx, 'track_url']
idx_img_url = df.loc[idx, 'img_url']
idx_duration = df.loc[idx, 'duration']
idx_explicit = df.loc[idx, 'explicit']
idx_popularity = df.loc[idx, 'popularity']
idx_artist_date = df.loc[idx, 'artist_date']
idx_user_date = df.loc[idx, 'user_date']
idx_user_time = df.loc[idx, 'user_time']
idx_danceability = (df.loc[idx, 'danceability'] * 100).round(2)
idx_energy = (df.loc[idx, 'energy'] * 100).round(2)
idx_loudness = (df.loc[idx, 'loudness'] * (-100/60)).round(2)
idx_acousticness = (df.loc[idx, 'acousticness'] * 100).round(2)
idx_instrumentalness = (df.loc[idx, 'instrumentalness'] * 100).round(2)
idx_liveness = (df.loc[idx, 'liveness'] * 100).round(2)
idx_valence = (df.loc[idx, 'valence']* 100).round(2)
idx_playlist = df.loc[idx, 'playlist']
rand_group = st.beta_columns((2.3,7,2.3))
with rand_group[0]:
st.image(idx_img_url)
with rand_group[1]:
        spotify_logo = '[![Spotify]({})]({})'.format('https://i.imgur.com/TejSfM3.png', idx_track_url)
        youtube_logo = '[![YouTube]({})](https://www.youtube.com/results?search_query={})'.format('https://i.imgur.com/LkMsDsO.png', '+'.join(str(idx_artist + ' ' + idx_title).split(' ')))
        wikiped_logo = '[![Wikipedia]({})](https://en.wikipedia.org/wiki/{})'.format('https://i.imgur.com/UIllM9Y.png', '_'.join(idx_artist.split(' ')))
        googlee_logo = '[![Google]({})](https://www.google.com/search?q={})'.format('https://i.imgur.com/SbD92XG.png', '+'.join(str(idx_artist + ' ' + idx_title).split(' ')))
        twitter_logo = '[![Twitter]({})](https://twitter.com/search?q={}&src=typed_query)'.format('https://i.imgur.com/XMNpp3p.png', '%20'.join(str(idx_artist + ' ' + idx_title).split(' ')))
        facebok_logo = '[![Facebook]({})](https://www.facebook.com/search/top?q={})'.format('https://i.imgur.com/kvkQy1h.png', '%20'.join(str(idx_artist + ' ' + idx_title).split(' ')))
        redditt_logo = '[![Reddit]({})](https://www.reddit.com/search/?q={})'.format('https://i.imgur.com/idnSbNj.png', '%20'.join(str(idx_artist + ' ' + idx_title).split(' ')))
st.title(idx_title)
st.subheader('Explore the music...')
st.markdown(f"""
{spotify_logo}    
{youtube_logo}    
{wikiped_logo}    
{googlee_logo}    
{twitter_logo}    
{facebok_logo}    
{redditt_logo}
""")
rand_group[2].button('Rerun Randomization')
attributes = ['artist', 'album', 'genre', 'playlist',
'artist_date', 'user_date', 'user_time', 'duration',
'popularity', 'danceability', 'energy', 'loudness',
'acousticness', 'instrumentalness', 'liveness', 'valence',
'explicit']
idx_attributes = [idx_artist, idx_album, idx_genre, idx_playlist,
idx_artist_date, idx_user_date, idx_user_time, idx_duration,
idx_popularity, idx_danceability, idx_energy, idx_loudness,
idx_acousticness, idx_instrumentalness, idx_liveness, idx_valence,
idx_explicit]
labels = ['Artist', 'Album', 'Genre', 'Playlist',
'Released', 'Date Added', 'Time Added', 'Duration',
'Popularity', 'Danceability', 'Energy', 'Loudness',
'Acoustic', 'Instrumental', 'Liveness', 'Valence',
'Explicitness']
for i, attr, idx_attr, label in zip(range(len(attributes)), attributes, idx_attributes, labels):
if i % 4 == 0:
rand_group = st.beta_columns((1.5,2,2,1.5))
# duration cell
if attr == 'duration':
rand_group[i % 4].title(label)
rand_group[i % 4].subheader(project_pretty_time(idx_duration))
rank_df = df[attr].rank(ascending=True)
rand_group[i % 4].write('Rank #{}'.format(int(rank_df.iloc[idx,])))
# first row; filtered data is strictly non-numeric
elif attr in ['artist', 'album', 'genre', 'playlist']:
rand_group[i % 4].title(label)
rand_group[i % 4].subheader(idx_attr)
rand_group[i % 4].write('Count: {}'.format(df[df[attr] == idx_attr].shape[0]))
rank_df = df[attr].value_counts().sort_values(ascending=False).rank(method='max', ascending=False)
rand_group[i % 4].write('Rank #{}'.format(int(rank_df.loc[rank_df.index == idx_attr].item())))
# sort ascending = True
elif attr in ['liveness', 'loudness', 'artist_date', 'user_date', 'user_time']:
rand_group[i % 4].title(label)
rand_group[i % 4].subheader(idx_attr)
rank_df = df[attr].rank(ascending=True)
rand_group[i % 4].write('Rank #{}'.format(int(rank_df.iloc[idx,])))
# sort ascending = False
else:
rand_group[i % 4].title(label)
rand_group[i % 4].subheader(idx_attr)
rank_df = df[attr].rank(ascending=False)
rand_group[i % 4].write('Rank #{}'.format(int(rank_df.iloc[idx,])))
def project_recomm_page(df):
'''
Recommendations page for Spotify Analysis Dashboard
Parameters:
|| df (pd.DataFrame) ||
clean dataframe used for analysis
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
st.title('Randomly Generated Recommendations')
rec_cols = st.beta_columns((1,1,1,1))
auth_manager = SpotifyClientCredentials(client_id = 'db90a7924baf4b38a9cbb37964f71044',
client_secret = '27599d5076e74b29b99d0f3e0f1caa92')
sp = spotipy.Spotify(auth_manager=auth_manager)
with rec_cols[0].beta_expander('Song'):
rand_idx = np.random.randint(df.shape[0])
rec = sp.recommendations(seed_tracks=[df.iloc[rand_idx]['track_url']],
limit=1)
st.image(rec['tracks'][0]['album']['images'][0]['url'], use_column_width=True)
st.subheader(rec['tracks'][0]['name'])
st.write(rec['tracks'][0]['artists'][0]['name'])
with rec_cols[1].beta_expander('Artist'):
rand_idx = np.random.randint(df.shape[0])
rec = sp.recommendations(seed_tracks=[df.iloc[rand_idx]['track_url']],
limit=1)
st.image(rec['tracks'][0]['album']['images'][0]['url'], use_column_width=True)
st.subheader(rec['tracks'][0]['artists'][0]['name'])
with rec_cols[2].beta_expander('Album'):
rand_idx = np.random.randint(df.shape[0])
rec = sp.recommendations(seed_tracks=[df.iloc[rand_idx]['track_url']],
limit=1)
st.image(rec['tracks'][0]['album']['images'][0]['url'], use_column_width=True)
st.subheader(rec['tracks'][0]['album']['name'])
st.write(rec['tracks'][0]['artists'][0]['name'])
with rec_cols[3].beta_expander('Spotify Membership Code'):
st.image('https://s.yimg.com/ny/api/res/1.2/hJL990zXx8DAOm_ao155kw--/YXBwaWQ9aGlnaGxhbmRlcjt3PTIwMDA7aD0xNTAw/https://s.yimg.com/uu/api/res/1.2/bCY1rmzVTO5bG8aTkWUmkw--~B/aD0xNzcyO3c9MjM2MzthcHBpZD15dGFjaHlvbg--/https://media.zenfs.com/en/insider_articles_922/bb5661ee1b1b1323855be3a0f95eb119')
st.title('Create Your Own Recommendations')
with st.beta_expander('How it Works...'):
cols = st.beta_columns((1,0.05,1))
cols[0].subheader('Parameters')
cols[0].write('''
Here you can tweak specific attributes to find music that falls within
the range of the specified minimum and maximum values per parameter.
If you do not adjust the values, recommendations will be generated within
a +/- 10 range of your average value for the specific parameter.
''')
cols[2].subheader('User Input')
cols[2].write('''
Here you can enter a specific track (URL), artist (URL), or genre
(based on Spotify's list of recognized genres) to specify your recommendation
            further.
These parameters do not need to be from any of the playlists you have entered
in the sidebar and will generate a list of recommended songs based on your
desired maximum number of results.
''')
with st.beta_expander('Customize Parameters...'):
rec_df = df.copy()
rec_df['danceability'] = (rec_df['danceability']* 100).round(2)
rec_df['energy'] = (rec_df['energy']* 100).round(2)
rec_df['acousticness'] = (rec_df['acousticness']* 100).round(2)
rec_df['instrumentalness'] = (rec_df['instrumentalness']* 100).round(2)
rec_df['liveness'] = (rec_df['liveness']* 100).round(2)
rec_df['valence'] = (rec_df['valence']* 100).round(2)
rec_df['loudness'] = (rec_df['loudness'] * (-100/60)).round(2)
slid_group = st.beta_columns((1,0.05,1))
slid_group[0].subheader('Attribute Selection')
slid_group[0].write('Control specific variables that define your music.')
option = slid_group[0].selectbox('Attribute', options=['Popularity', 'Danceability', 'Energy', 'Instrumentalness', 'Loudness', 'Acousticness', 'Liveness', 'Valence', 'Tempo'])
if option == 'Tempo':
slid = slid_group[0].slider(label='', min_value=0, max_value=300, value=(int(rec_df[option.lower()].mean() - 10), int(rec_df[option.lower()].mean() + 10)))
else:
slid = slid_group[0].slider(label='', min_value=0, max_value=100, value=(max(0, int(rec_df[option.lower()].mean() - 10)), min(100, int(rec_df[option.lower()].mean() + 10))))
slid_group[2].subheader('User Input')
user_input = slid_group[2].radio(label='', options=['Track URL', 'Artist URL'])
track_url, artist_url = None, None
if user_input == 'Track URL':
track_url = slid_group[2].text_input('Track URL', key='track')
if user_input == 'Artist URL':
artist_url = slid_group[2].text_input('Artist URL', key='artist')
slid_group[2].subheader('Customize Results')
limit = slid_group[2].slider(label='Maximum Number of Results', min_value=1, max_value=100, step=1, value=20)
gather = slid_group[2].button('Gather Results')
if gather:
st.subheader('Because you listened to...')
if track_url != None:
if track_url != '':
image = sp.track(track_url)['album']['images'][0]['url']
track_name = sp.track(track_url)['name']
artist_name = sp.track(track_url)['artists'][0]['name']
genre_name = ''
recs = sp.recommendations(seed_tracks=[track_url],
limit=limit)
rec_cols = st.beta_columns((2,8))
rec_cols[0].image(image)
rec_cols[1].markdown(f'''
Track: {track_name}
Artist: {artist_name}
Genre: {genre_name}
''')
else:
rand_idx = np.random.randint(df.shape[0])
image = df.iloc[rand_idx]['img_url']
track_url = df.iloc[rand_idx]['track_url']
track_name = df.iloc[rand_idx]['title']
artist_name = df.iloc[rand_idx]['artist']
genre_name = df.iloc[rand_idx]['genre'].title()
recs = sp.recommendations(seed_tracks=[track_url],
limit=limit)
rec_cols = st.beta_columns((2,8))
rec_cols[0].image(image)
rec_cols[1].markdown(f'''
Track: {track_name}
Artist: {artist_name}
Genre: {genre_name}
''')
elif artist_url != None:
if artist_url != '':
image = sp.artist(artist_url)['images'][0]['url']
track_name = ''
artist_name = sp.artist(artist_url)['name']
genre_name = sp.artist(artist_url)['genres'][0]
recs = sp.recommendations(seed_artists=[artist_url],
limit=limit)
rec_cols = st.beta_columns((2,8))
rec_cols[0].image(image)
rec_cols[1].markdown(f'''
Artist: {artist_name}
Genre: {genre_name}
''')
else:
rand_idx = np.random.randint(df.shape[0])
image = df.iloc[rand_idx]['img_url']
track_url = df.iloc[rand_idx]['track_url']
track_name = df.iloc[rand_idx]['title']
artist_name = df.iloc[rand_idx]['artist']
genre_name = df.iloc[rand_idx]['genre'].title()
recs = sp.recommendations(seed_tracks=[track_url],
limit=limit)
rec_cols = st.beta_columns((2,8))
rec_cols[0].image(image)
rec_cols[1].markdown(f'''
Track: {track_name}
Artist: {artist_name}
Genre: {genre_name}
''')
rec_lists = []
for track in recs['tracks']:
rec_track = track['name']
rec_artst = track['artists'][0]['name']
rec_trurl = track['external_urls']['spotify']
rec_arurl = track['artists'][0]['external_urls']['spotify']
rec_igurl = track['album']['images'][0]['url']
rec_lists.append([rec_track, rec_artst, rec_trurl, rec_arurl, rec_igurl])
st.subheader('You may also like...')
for i, rec_data in enumerate(rec_lists):
if i % 4 == 0:
finaler_cols = st.beta_columns((1,1,1,1))
finaler_cols[i % 4].image(rec_data[-1])
finaler_cols[i % 4].subheader(rec_data[0])
finaler_cols[i % 4].write(rec_data[1])
def project_search_page(s_df):
'''
Search page for Spotify Analysis Dashboard
Parameters:
|| s_df (pd.DataFrame) ||
search dataframe, similar to clean dataframe
except it returns all observations that match
one song/artist/album
Returns:
|| _ (streamlit) ||
various UI objects for user to interact with
'''
images = [Image.open(requests.get(img_url, stream=True).raw) for img_url in s_df['img_url'].unique()]
while len(images) < 6:
images *= 3
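    # Tile the available covers until there are at least six images for the banner
    # (e.g. a single cover becomes 3, then 9 copies; two covers become 6).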
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths)
max_height = max(heights)
new_im = Image.new('RGB', (total_width, max_height))
x_offset = 0
for im in images:
new_im.paste(im, (x_offset,0))
x_offset += im.size[0]
st.markdown(f"<h1 style=\"text-align:center;vertical-align:middle\">{s_df['artist'].values[0]}</h1>", unsafe_allow_html=True)
st.markdown(f"<h4 style=\"text-align:center;vertical-align:middle\">{s_df['album'].values[0]}</h4>", unsafe_allow_html=True)
st.image(new_im)
### graphs
# songs
px_df = s_df.copy()
attributes = ['popularity', 'danceability', 'energy', 'loudness', 'instrumentalness', 'acousticness', 'liveness', 'valence', 'tempo']
for i, attr in zip(range(len(attributes)), attributes):
if i % 2 == 0:
cols = st.beta_columns((1,1))
cols[i % 2].subheader(attr.title())
with cols[i % 2].beta_expander('Description...'):
st.write(f'''
This gauge represents the {attr} of the requested item. The number in the middle and green gauge bar
represents the average {attr} score across all observations.
''')
st.dataframe(px_df[attr].describe())
if attr not in ['popularity', 'loudness', 'tempo']:
px_df[attr] = px_df[attr].multiply(100)
if attr == 'loudness':
px_df[attr] = px_df[attr].multiply(-100/60)
if attr == 'tempo':
rng = [0,300]
else:
rng = [0, 100]
tfp = px_df[attr].describe()['25%']
sfp = px_df[attr].describe()['75%']
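        # Shade the interquartile range (25th-75th percentile) of this attribute as the
        # light-green band on the gauge; the displayed number is the attribute mean.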
fig = go.Figure(go.Indicator(
value = px_df[attr].mean().round(0),
mode = "gauge+number",
# title = {'text': attr},
# delta = {'reference': },
gauge = {'axis': {'range': rng},
'steps' : [
{'range': [tfp, sfp], 'color': "lightgreen"}]}))
fig.update_layout(autosize=False,
margin=dict(
l=50,
r=50,
b=0,
t=0
)
)
cols[i % 2].plotly_chart(fig, use_container_width=True)
# PAGE MEAT
if ready_button:
try:
data = retrieve_dataframe()
f_data = alter_dataframe(data)
if (not f_data.empty) & (search_filter == 'Filter'):
if 'Brief History' in radio_page:
st.title('Brief History Page')
st.markdown('''
Welcome to the `Brief History Page` -- the page for observing your unique music trends and statistics
over the years.
As the first dashboard view, take time becoming familiar with the layout and functionality of the page,
as it is mirrored throughout the other dashboard views. Graph details can be found in `Description`
boxes and each graph hosts interactive features for you to utilize to develop a greater understanding
of the music you listen to.
''')
project_histry_page(f_data)
if 'Tracks' in radio_page:
st.title('Tracks Page')
st.markdown('''
Welcome to the `Tracks Page` -- the page for discovering the highs and lows of your individual
tracks. The metrics on this page are gathered by Spotify, and a more detailed description of how the
data is gathered for this project can be found in the `GitHub Documentation`.
''')
project_tracks_page(f_data)
if 'Artists + Albums' in radio_page:
st.title('Artists + Albums Page')
st.markdown('''
Welcome to the `Artists + Albums Page` -- the page for discovering which artists have soared
to the top of your listening charts and by how much they stand out compared to the others.
''')
project_artist_page(f_data)
if 'Listening Trends' in radio_page:
st.title('Listening Trends Page')
st.markdown('''
Welcome to the `Listening Trends Page` -- the page for diving deeper into when exactly
and by how much your taste of music has changed over time.
This page offers the most customization of all, so take time exploring the many unique
relations between all possible attributes across every genre and every year. To learn
more about any of the attributes, please visit the `GitHub Documentation` in the sidebar.
''')
project_trends_page(f_data)
if 'Random Statistics' in radio_page: project_randm_page(f_data)
if 'Recommendations [Beta]' in radio_page: project_recomm_page(f_data)
if (not f_data.empty) & (search_filter == 'Search'):
project_search_page(f_data)
except Exception:
project_dataq_page(raw_dataframe())
else:
project_welcm_page()
|
the-stack_0_18402 | # -*- coding: utf-8 -*-
'''
Support for APT (Advanced Packaging Tool)
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
For repository management, the ``python-apt`` package must be installed.
'''
from __future__ import absolute_import, print_function, unicode_literals
# Import python libs
import copy
import os
import re
import logging
import time
# Import third party libs
# pylint: disable=no-name-in-module,import-error,redefined-builtin
from salt.ext import six
from salt.ext.six.moves.urllib.error import HTTPError
from salt.ext.six.moves.urllib.request import Request as _Request, urlopen as _urlopen
# pylint: enable=no-name-in-module,import-error,redefined-builtin
# Import salt libs
import salt.config
import salt.syspaths
from salt.modules.cmdmod import _parse_env
import salt.utils.args
import salt.utils.data
import salt.utils.files
import salt.utils.functools
import salt.utils.itertools
import salt.utils.json
import salt.utils.path
import salt.utils.pkg
import salt.utils.pkg.deb
import salt.utils.stringutils
import salt.utils.systemd
import salt.utils.versions
import salt.utils.yaml
import salt.utils.environment
from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError
)
log = logging.getLogger(__name__)
# pylint: disable=import-error
try:
import apt.cache
import apt.debfile
from aptsources import sourceslist
HAS_APT = True
except ImportError:
HAS_APT = False
try:
import apt_pkg
HAS_APTPKG = True
except ImportError:
HAS_APTPKG = False
try:
import softwareproperties.ppa
HAS_SOFTWAREPROPERTIES = True
except ImportError:
HAS_SOFTWAREPROPERTIES = False
# pylint: enable=import-error
APT_LISTS_PATH = "/var/lib/apt/lists"
PKG_ARCH_SEPARATOR = ':'
# Source format for urllib fallback on PPA handling
LP_SRC_FORMAT = 'deb http://ppa.launchpad.net/{0}/{1}/ubuntu {2} main'
LP_PVT_SRC_FORMAT = 'deb https://{0}private-ppa.launchpad.net/{1}/{2}/ubuntu' \
' {3} main'
_MODIFY_OK = frozenset(['uri', 'comps', 'architectures', 'disabled',
'file', 'dist'])
DPKG_ENV_VARS = {
'APT_LISTBUGS_FRONTEND': 'none',
'APT_LISTCHANGES_FRONTEND': 'none',
'DEBIAN_FRONTEND': 'noninteractive',
'UCF_FORCE_CONFFOLD': '1',
}
if six.PY2:
# Ensure no unicode in env vars on PY2, as it causes problems with
# subprocess.Popen()
DPKG_ENV_VARS = salt.utils.data.encode(DPKG_ENV_VARS)
# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
'''
Confirm this module is on a Debian-based system
'''
# If your minion is running an OS which is Debian-based but does not have
# an "os_family" grain of Debian, then the proper fix is NOT to check for
# the minion's "os_family" grain here in the __virtual__. The correct fix
# is to add the value from the minion's "os" grain to the _OS_FAMILY_MAP
# dict in salt/grains/core.py, so that we assign the correct "os_family"
# grain to the minion.
if __grains__.get('os_family') == 'Debian':
return __virtualname__
return False, 'The pkg module could not be loaded: unsupported OS family'
def __init__(opts):
'''
For Debian and derivative systems, set up
a few env variables to keep apt happy and
non-interactive.
'''
if __virtual__() == __virtualname__:
# Export these puppies so they persist
os.environ.update(DPKG_ENV_VARS)
def _get_ppa_info_from_launchpad(owner_name, ppa_name):
'''
Idea from softwareproperties.ppa.
Uses urllib2 which sacrifices server cert verification.
This is used as fall-back code or for secure PPAs
:param owner_name:
:param ppa_name:
:return:
'''
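    # Illustrative request URL: owner 'saltstack' and PPA 'salt' resolve to
    # https://launchpad.net/api/1.0/~saltstack/+archive/salt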
lp_url = 'https://launchpad.net/api/1.0/~{0}/+archive/{1}'.format(
owner_name, ppa_name)
request = _Request(lp_url, headers={'Accept': 'application/json'})
lp_page = _urlopen(request)
return salt.utils.json.load(lp_page)
def _reconstruct_ppa_name(owner_name, ppa_name):
'''
Stringify PPA name from args.
'''
return 'ppa:{0}/{1}'.format(owner_name, ppa_name)
def _check_apt():
'''
Abort if python-apt is not installed
'''
if not HAS_APT:
raise CommandExecutionError(
'Error: \'python-apt\' package not installed'
)
def _call_apt(args, scope=True, **kwargs):
'''
Call apt* utilities.
'''
cmd = []
if scope and salt.utils.systemd.has_scope(__context__) and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope', '--description', '"{0}"'.format(__name__)])
cmd.extend(args)
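    # When systemd scopes are enabled, the final command is prefixed, e.g.
    #   systemd-run --scope --description "<module name>" apt-get -q update
    # (illustrative; otherwise the args are executed as-is).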
params = {'output_loglevel': 'trace',
'python_shell': False,
'env': salt.utils.environment.get_module_environment(globals())}
params.update(kwargs)
return __salt__['cmd.run_all'](cmd, **params)
def _warn_software_properties(repo):
'''
Warn of missing python-software-properties package.
'''
log.warning('The \'python-software-properties\' package is not installed. '
'For more accurate support of PPA repositories, you should '
'install this package.')
log.warning('Best guess at ppa format: %s', repo)
def normalize_name(name):
'''
Strips the architecture from the specified package name, if necessary.
CLI Example:
.. code-block:: bash
salt '*' pkg.normalize_name zsh:amd64
'''
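    # e.g. 'zsh:amd64' -> 'zsh'; names without an architecture suffix are returned unchanged.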
try:
name, arch = name.rsplit(PKG_ARCH_SEPARATOR, 1)
except ValueError:
return name
return name
def parse_arch_from_name(name):
'''
Parse name and architecture from the specified package name.
CLI Example:
.. code-block:: bash
salt '*' pkg.parse_arch_from_name zsh:amd64
'''
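    # e.g. 'zsh:amd64' -> {'name': 'zsh', 'arch': 'amd64'}; 'zsh' -> {'name': 'zsh', 'arch': None}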
try:
_name, _arch = name.rsplit(PKG_ARCH_SEPARATOR, 1)
except ValueError:
_name, _arch = name, None
return {
'name': _name,
'arch': _arch
}
def latest_version(*names, **kwargs):
'''
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
A specific repo can be requested using the ``fromrepo`` keyword argument.
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package name> fromrepo=unstable
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
refresh = salt.utils.data.is_true(kwargs.pop('refresh', True))
show_installed = salt.utils.data.is_true(kwargs.pop('show_installed', False))
if 'repo' in kwargs:
raise SaltInvocationError(
'The \'repo\' argument is invalid, use \'fromrepo\' instead'
)
fromrepo = kwargs.pop('fromrepo', None)
cache_valid_time = kwargs.pop('cache_valid_time', 0)
if len(names) == 0:
return ''
ret = {}
# Initialize the dict with empty strings
for name in names:
ret[name] = ''
pkgs = list_pkgs(versions_as_list=True)
repo = ['-o', 'APT::Default-Release={0}'.format(fromrepo)] \
if fromrepo else None
# Refresh before looking for the latest version available
if refresh:
refresh_db(cache_valid_time)
for name in names:
cmd = ['apt-cache', '-q', 'policy', name]
if repo is not None:
cmd.extend(repo)
out = _call_apt(cmd, scope=False)
candidate = ''
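        # 'apt-cache policy <pkg>' output contains a line such as
        #   'Candidate: 2.17-1ubuntu1'  (or 'Candidate: (none)');
        # the last field on that line is taken as the install candidate (illustrative).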
for line in salt.utils.itertools.split(out['stdout'], '\n'):
if 'Candidate' in line:
comps = line.split()
if len(comps) >= 2:
candidate = comps[-1]
if candidate.lower() == '(none)':
candidate = ''
break
installed = pkgs.get(name, [])
if not installed:
ret[name] = candidate
elif installed and show_installed:
ret[name] = candidate
elif candidate:
# If there are no installed versions that are greater than or equal
# to the install candidate, then the candidate is an upgrade, so
# add it to the return dict
if not any(
(salt.utils.versions.compare(ver1=x,
oper='>=',
ver2=candidate,
cmp_func=version_cmp)
for x in installed)
):
ret[name] = candidate
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
def version(*names, **kwargs):
'''
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
'''
return __salt__['pkg_resource.version'](*names, **kwargs)
def refresh_db(cache_valid_time=0, failhard=False, **kwargs):
'''
Updates the APT database to latest packages based upon repositories
Returns a dict, with the keys being package databases and the values being
the result of the update attempt. Values can be one of the following:
- ``True``: Database updated successfully
- ``False``: Problem updating database
- ``None``: Database already up-to-date
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
failhard
If False, return results of Err lines as ``False`` for the package database that
encountered the error.
If True, raise an error with a list of the package databases that encountered
errors.
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
failhard = salt.utils.data.is_true(failhard)
ret = {}
error_repos = list()
if cache_valid_time:
try:
latest_update = os.stat(APT_LISTS_PATH).st_mtime
now = time.time()
log.debug("now: %s, last update time: %s, expire after: %s seconds", now, latest_update, cache_valid_time)
if latest_update + cache_valid_time > now:
return ret
except TypeError as exp:
log.warning("expected integer for cache_valid_time parameter, failed with: %s", exp)
except IOError as exp:
log.warning("could not stat cache directory due to: %s", exp)
call = _call_apt(['apt-get', '-q', 'update'], scope=False)
if call['retcode'] != 0:
comment = ''
if 'stderr' in call:
comment += call['stderr']
raise CommandExecutionError(comment)
else:
out = call['stdout']
for line in out.splitlines():
cols = line.split()
if not cols:
continue
ident = ' '.join(cols[1:])
if 'Get' in cols[0]:
# Strip filesize from end of line
ident = re.sub(r' \[.+B\]$', '', ident)
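                # e.g. 'http://archive.ubuntu.com/ubuntu bionic InRelease [242 kB]'
                # becomes 'http://archive.ubuntu.com/ubuntu bionic InRelease' (illustrative).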
ret[ident] = True
elif 'Ign' in cols[0]:
ret[ident] = False
elif 'Hit' in cols[0]:
ret[ident] = None
elif 'Err' in cols[0]:
ret[ident] = False
error_repos.append(ident)
if failhard and error_repos:
raise CommandExecutionError('Error getting repos: {0}'.format(', '.join(error_repos)))
return ret
def install(name=None,
refresh=False,
fromrepo=None,
skip_verify=False,
debconf=None,
pkgs=None,
sources=None,
reinstall=False,
ignore_epoch=False,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package, add refresh=True to update the dpkg database.
name
The name of the package to be installed. Note that this parameter is
ignored if either "pkgs" or "sources" is passed. Additionally, please
note that this option can only be used to install packages from a
software repository. To install a package file manually, use the
"sources" option.
32-bit packages can be installed on 64-bit systems by appending the
architecture designation (``:i386``, etc.) to the end of the package
name.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
Whether or not to refresh the package database before installing.
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
fromrepo
Specify a package repository to install from
(e.g., ``apt-get -t unstable install somepackage``)
skip_verify
Skip the GPG verification check (e.g., ``--allow-unauthenticated``, or
``--force-bad-verify`` for install from package file).
debconf
Provide the path to a debconf answers file, processed before
installation.
version
Install a specific version of the package, e.g. 1.2.3~0ubuntu0. Ignored
if "pkgs" or "sources" is passed.
.. versionchanged:: 2018.3.0
version can now contain comparison operators (e.g. ``>1.2.3``,
``<=2.0``, etc.)
reinstall : False
Specifying reinstall=True will use ``apt-get install --reinstall``
rather than simply ``apt-get install`` for requested packages that are
already installed.
If a version is specified with the requested package, then ``apt-get
install --reinstall`` will only be used if the installed version
matches the requested version.
.. versionadded:: 2015.8.0
ignore_epoch : False
Only used when the version of a package is specified using a comparison
operator (e.g. ``>4.1``). If set to ``True``, then the epoch will be
ignored when comparing the currently-installed version to the desired
version.
.. versionadded:: 2018.3.0
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-0ubuntu0"}]'
sources
A list of DEB packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package. Dependencies are automatically resolved
and marked as auto-installed.
32-bit packages can be installed on 64-bit systems by appending the
architecture designation (``:i386``, etc.) to the end of the package
name.
.. versionchanged:: 2014.7.0
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.deb"},{"bar": "salt://bar.deb"}]'
force_yes
Passes ``--force-yes`` to the apt-get command. Don't use this unless
you know what you're doing.
.. versionadded:: 0.17.4
install_recommends
Whether to install the packages marked as recommended. Default is True.
.. versionadded:: 2015.5.0
only_upgrade
Only upgrade the packages, if they are already installed. Default is False.
.. versionadded:: 2015.5.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
'''
_refresh_db = False
if salt.utils.data.is_true(refresh):
_refresh_db = True
if 'version' in kwargs and kwargs['version']:
_refresh_db = False
_latest_version = latest_version(name,
refresh=False,
show_installed=True)
_version = kwargs.get('version')
# If the versions don't match, refresh is True, otherwise no need
# to refresh
if not _latest_version == _version:
_refresh_db = True
if pkgs:
_refresh_db = False
for pkg in pkgs:
if isinstance(pkg, dict):
_name = next(six.iterkeys(pkg))
_latest_version = latest_version(_name,
refresh=False,
show_installed=True)
_version = pkg[_name]
# If the versions don't match, refresh is True, otherwise
# no need to refresh
if not _latest_version == _version:
_refresh_db = True
else:
# No version specified, so refresh should be True
_refresh_db = True
if debconf:
__salt__['debconf.set_file'](debconf)
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
# Support old "repo" argument
repo = kwargs.get('repo', '')
if not fromrepo and repo:
fromrepo = repo
if pkg_params is None or len(pkg_params) == 0:
return {}
cmd_prefix = []
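    # cmd_prefix is the shared apt-get/dpkg invocation; it is copied below to
    # build the separate install, downgrade and reinstall command lines.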
old = list_pkgs()
targets = []
downgrade = []
to_reinstall = {}
errors = []
if pkg_type == 'repository':
pkg_params_items = list(six.iteritems(pkg_params))
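        # Packages requested with a comparison operator (e.g. '>1.2.3') need
        # the list of candidate versions from the repos so that a concrete
        # version can be matched further down.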
has_comparison = [x for x, y in pkg_params_items
if y is not None
and (y.startswith('<') or y.startswith('>'))]
_available = list_repo_pkgs(*has_comparison, byrepo=False, **kwargs) \
if has_comparison else {}
# Build command prefix
cmd_prefix.extend(['apt-get', '-q', '-y'])
if kwargs.get('force_yes', False):
cmd_prefix.append('--force-yes')
if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
cmd_prefix.extend(['-o', 'DPkg::Options::=--force-confnew'])
else:
cmd_prefix.extend(['-o', 'DPkg::Options::=--force-confold'])
cmd_prefix += ['-o', 'DPkg::Options::=--force-confdef']
if 'install_recommends' in kwargs:
if not kwargs['install_recommends']:
cmd_prefix.append('--no-install-recommends')
else:
cmd_prefix.append('--install-recommends')
if 'only_upgrade' in kwargs and kwargs['only_upgrade']:
cmd_prefix.append('--only-upgrade')
if skip_verify:
cmd_prefix.append('--allow-unauthenticated')
if fromrepo:
cmd_prefix.extend(['-t', fromrepo])
cmd_prefix.append('install')
else:
pkg_params_items = []
for pkg_source in pkg_params:
if 'lowpkg.bin_pkg_info' in __salt__:
deb_info = __salt__['lowpkg.bin_pkg_info'](pkg_source)
else:
deb_info = None
if deb_info is None:
log.error(
'pkg.install: Unable to get deb information for %s. '
'Version comparisons will be unavailable.', pkg_source
)
pkg_params_items.append([pkg_source])
else:
pkg_params_items.append(
[deb_info['name'], pkg_source, deb_info['version']]
)
# Build command prefix
if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
cmd_prefix.extend(['dpkg', '-i', '--force-confnew'])
else:
cmd_prefix.extend(['dpkg', '-i', '--force-confold'])
if skip_verify:
cmd_prefix.append('--force-bad-verify')
if HAS_APT:
_resolve_deps(name, pkg_params, **kwargs)
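    # Classify each requested package as a plain install, a reinstall or a
    # downgrade, building the final target strings (package path or
    # 'name=version') as we go.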
for pkg_item_list in pkg_params_items:
if pkg_type == 'repository':
pkgname, version_num = pkg_item_list
if name \
and pkgs is None \
and kwargs.get('version') \
and len(pkg_params) == 1:
# Only use the 'version' param if 'name' was not specified as a
# comma-separated list
version_num = kwargs['version']
else:
try:
pkgname, pkgpath, version_num = pkg_item_list
except ValueError:
pkgname = None
pkgpath = pkg_item_list[0]
version_num = None
if version_num is None:
if pkg_type == 'repository':
if reinstall and pkgname in old:
to_reinstall[pkgname] = pkgname
else:
targets.append(pkgname)
else:
targets.append(pkgpath)
else:
# If we are installing a package file and not one from the repo,
# and version_num is not None, then we can assume that pkgname is
# not None, since the only way version_num is not None is if DEB
# metadata parsing was successful.
if pkg_type == 'repository':
# Remove leading equals sign(s) to keep from building a pkgstr
# with multiple equals (which would be invalid)
version_num = version_num.lstrip('=')
if pkgname in has_comparison:
candidates = _available.get(pkgname, [])
target = salt.utils.pkg.match_version(
version_num,
candidates,
cmp_func=version_cmp,
ignore_epoch=ignore_epoch,
)
if target is None:
errors.append(
'No version matching \'{0}{1}\' could be found '
'(available: {2})'.format(
pkgname,
version_num,
', '.join(candidates) if candidates else None
)
)
continue
else:
version_num = target
pkgstr = '{0}={1}'.format(pkgname, version_num)
else:
pkgstr = pkgpath
cver = old.get(pkgname, '')
if reinstall and cver \
and salt.utils.versions.compare(ver1=version_num,
oper='==',
ver2=cver,
cmp_func=version_cmp):
to_reinstall[pkgname] = pkgstr
elif not cver or salt.utils.versions.compare(ver1=version_num,
oper='>=',
ver2=cver,
cmp_func=version_cmp):
targets.append(pkgstr)
else:
downgrade.append(pkgstr)
if fromrepo and not sources:
log.info('Targeting repo \'%s\'', fromrepo)
cmds = []
all_pkgs = []
if targets:
all_pkgs.extend(targets)
cmd = copy.deepcopy(cmd_prefix)
cmd.extend(targets)
cmds.append(cmd)
if downgrade:
cmd = copy.deepcopy(cmd_prefix)
if pkg_type == 'repository' and '--force-yes' not in cmd:
# Downgrading requires --force-yes. Insert this before 'install'
cmd.insert(-1, '--force-yes')
cmd.extend(downgrade)
cmds.append(cmd)
if to_reinstall:
all_pkgs.extend(to_reinstall)
cmd = copy.deepcopy(cmd_prefix)
if not sources:
cmd.append('--reinstall')
cmd.extend([x for x in six.itervalues(to_reinstall)])
cmds.append(cmd)
if not cmds:
ret = {}
else:
cache_valid_time = kwargs.pop('cache_valid_time', 0)
if _refresh_db:
refresh_db(cache_valid_time)
env = _parse_env(kwargs.get('env'))
env.update(DPKG_ENV_VARS.copy())
hold_pkgs = get_selections(state='hold').get('hold', [])
# all_pkgs contains the argument to be passed to apt-get install, which
# when a specific version is requested will be in the format
# name=version. Strip off the '=' if present so we can compare the
        # held package names against the packages we are trying to install.
targeted_names = [x.split('=')[0] for x in all_pkgs]
to_unhold = [x for x in hold_pkgs if x in targeted_names]
if to_unhold:
unhold(pkgs=to_unhold)
for cmd in cmds:
out = _call_apt(cmd)
if out['retcode'] != 0 and out['stderr']:
errors.append(out['stderr'])
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
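        # compare_dicts() only reports version changes, so reinstalled
        # packages (same version before and after) are added to the return
        # data explicitly.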
for pkgname in to_reinstall:
if pkgname not in ret or pkgname in old:
ret.update({pkgname: {'old': old.get(pkgname, ''),
'new': new.get(pkgname, '')}})
if to_unhold:
hold(pkgs=to_unhold)
if errors:
raise CommandExecutionError(
'Problem encountered installing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
def _uninstall(action='remove', name=None, pkgs=None, **kwargs):
'''
    remove and purge do identical things but with different apt-get commands;
    this function performs the common logic.
'''
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
old_removed = list_pkgs(removed=True)
targets = [x for x in pkg_params if x in old]
if action == 'purge':
targets.extend([x for x in pkg_params if x in old_removed])
if not targets:
return {}
cmd = ['apt-get', '-q', '-y', action]
cmd.extend(targets)
env = _parse_env(kwargs.get('env'))
env.update(DPKG_ENV_VARS.copy())
out = _call_apt(cmd, env=env)
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
new_removed = list_pkgs(removed=True)
changes = salt.utils.data.compare_dicts(old, new)
if action == 'purge':
ret = {
'removed': salt.utils.data.compare_dicts(old_removed, new_removed),
'installed': changes
}
else:
ret = changes
if errors:
raise CommandExecutionError(
'Problem encountered removing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
def autoremove(list_only=False, purge=False):
'''
.. versionadded:: 2015.5.0
Remove packages not required by another package using ``apt-get
autoremove``.
list_only : False
Only retrieve the list of packages to be auto-removed, do not actually
perform the auto-removal.
purge : False
Also remove package config data when autoremoving packages.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.autoremove
salt '*' pkg.autoremove list_only=True
salt '*' pkg.autoremove purge=True
'''
cmd = []
if list_only:
ret = []
cmd.extend(['apt-get', '--assume-no'])
if purge:
cmd.append('--purge')
cmd.append('autoremove')
out = _call_apt(cmd, ignore_retcode=True)['stdout']
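        # 'apt-get --assume-no' only prints what *would* be removed; collect
        # the indented package names that follow the 'REMOVED' header.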
found = False
for line in out.splitlines():
if found is True:
if line.startswith(' '):
ret.extend(line.split())
else:
found = False
elif 'The following packages will be REMOVED:' in line:
found = True
ret.sort()
return ret
else:
old = list_pkgs()
cmd.extend(['apt-get', '--assume-yes'])
if purge:
cmd.append('--purge')
cmd.append('autoremove')
_call_apt(cmd, ignore_retcode=True)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
return salt.utils.data.compare_dicts(old, new)
def remove(name=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages using ``apt-get remove``.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
return _uninstall(action='remove', name=name, pkgs=pkgs, **kwargs)
def purge(name=None, pkgs=None, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages via ``apt-get purge`` along with all configuration files.
name
The name of the package to be deleted.
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
return _uninstall(action='purge', name=name, pkgs=pkgs, **kwargs)
def upgrade(refresh=True, dist_upgrade=False, **kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any apt-get/dpkg commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Upgrades all packages via ``apt-get upgrade`` or ``apt-get dist-upgrade``
if ``dist_upgrade`` is ``True``.
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
dist_upgrade
Whether to perform the upgrade using dist-upgrade vs upgrade. Default
is to use upgrade.
.. versionadded:: 2014.7.0
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
download_only (or downloadonly)
        Only download the packages; don't unpack or install them. Use
        downloadonly to be in line with the yum and zypper modules.
.. versionadded:: 2018.3.0
force_conf_new
Always install the new version of any configuration files.
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
'''
cache_valid_time = kwargs.pop('cache_valid_time', 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
old = list_pkgs()
if 'force_conf_new' in kwargs and kwargs['force_conf_new']:
force_conf = '--force-confnew'
else:
force_conf = '--force-confold'
cmd = ['apt-get', '-q', '-y', '-o', 'DPkg::Options::={0}'.format(force_conf),
'-o', 'DPkg::Options::=--force-confdef']
if kwargs.get('force_yes', False):
cmd.append('--force-yes')
if kwargs.get('skip_verify', False):
cmd.append('--allow-unauthenticated')
if kwargs.get('download_only', False) or kwargs.get('downloadonly', False):
cmd.append('--download-only')
cmd.append('dist-upgrade' if dist_upgrade else 'upgrade')
result = _call_apt(cmd, env=DPKG_ENV_VARS.copy())
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret
def hold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
'''
.. versionadded:: 2014.7.0
Set package in 'hold' state, meaning it will not be upgraded.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
pkgs
A list of packages to hold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold pkgs='["foo", "bar"]'
'''
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
state = get_selections(pattern=target, state='hold')
if not state:
ret[target]['comment'] = ('Package {0} not currently held.'
.format(target))
elif not salt.utils.data.is_true(state.get('hold', False)):
if 'test' in __opts__ and __opts__['test']:
ret[target].update(result=None)
ret[target]['comment'] = ('Package {0} is set to be held.'
.format(target))
else:
result = set_selections(selection={'hold': [target]})
ret[target].update(changes=result[target], result=True)
ret[target]['comment'] = ('Package {0} is now being held.'
.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is already set to be held.'
.format(target))
return ret
def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
'''
.. versionadded:: 2014.7.0
    Set a package currently in the 'hold' state back to the 'install' state,
    meaning it will be upgradable again.
name
The name of the package, e.g., 'tmux'
CLI Example:
.. code-block:: bash
salt '*' pkg.unhold <package name>
pkgs
        A list of packages to unhold. Must be passed as a python list.
CLI Example:
.. code-block:: bash
salt '*' pkg.unhold pkgs='["foo", "bar"]'
'''
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(iter(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
state = get_selections(pattern=target)
if not state:
ret[target]['comment'] = ('Package {0} does not have a state.'
.format(target))
elif salt.utils.data.is_true(state.get('hold', False)):
if 'test' in __opts__ and __opts__['test']:
ret[target].update(result=None)
ret[target]['comment'] = ('Package {0} is set not to be '
'held.'.format(target))
else:
result = set_selections(selection={'install': [target]})
ret[target].update(changes=result[target], result=True)
ret[target]['comment'] = ('Package {0} is no longer being '
'held.'.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is already set not to be '
'held.'.format(target))
return ret
def list_pkgs(versions_as_list=False,
removed=False,
purge_desired=False,
**kwargs): # pylint: disable=W0613
'''
List the packages currently installed in a dict::
{'<package_name>': '<version>'}
removed
If ``True``, then only packages which have been removed (but not
purged) will be returned.
purge_desired
If ``True``, then only packages which have been marked to be purged,
but can't be purged due to their status as dependencies for other
installed packages, will be returned. Note that these packages will
        appear in the installed output as well.
.. versionchanged:: 2014.1.1
Packages in this state now correctly show up in the output of this
function.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs versions_as_list=True
'''
versions_as_list = salt.utils.data.is_true(versions_as_list)
removed = salt.utils.data.is_true(removed)
purge_desired = salt.utils.data.is_true(purge_desired)
if 'pkg.list_pkgs' in __context__:
if removed:
ret = copy.deepcopy(__context__['pkg.list_pkgs']['removed'])
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs']['purge_desired'])
if not purge_desired:
ret.update(__context__['pkg.list_pkgs']['installed'])
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
ret = {'installed': {}, 'removed': {}, 'purge_desired': {}}
cmd = ['dpkg-query', '--showformat',
'${Status} ${Package} ${Version} ${Architecture}\n', '-W']
out = __salt__['cmd.run_stdout'](
cmd,
output_loglevel='trace',
python_shell=False)
# Typical lines of output:
# install ok installed zsh 4.3.17-1ubuntu1 amd64
# deinstall ok config-files mc 3:4.8.1-2ubuntu1 amd64
for line in out.splitlines():
cols = line.split()
try:
linetype, status, name, version_num, arch = \
[cols[x] for x in (0, 2, 3, 4, 5)]
except (ValueError, IndexError):
continue
if __grains__.get('cpuarch', '') == 'x86_64':
osarch = __grains__.get('osarch', '')
if arch != 'all' and osarch == 'amd64' and osarch != arch:
name += ':{0}'.format(arch)
if len(cols):
if ('install' in linetype or 'hold' in linetype) and \
'installed' in status:
__salt__['pkg_resource.add_pkg'](ret['installed'],
name,
version_num)
elif 'deinstall' in linetype:
__salt__['pkg_resource.add_pkg'](ret['removed'],
name,
version_num)
elif 'purge' in linetype and status == 'installed':
__salt__['pkg_resource.add_pkg'](ret['purge_desired'],
name,
version_num)
for pkglist_type in ('installed', 'removed', 'purge_desired'):
__salt__['pkg_resource.sort_pkglist'](ret[pkglist_type])
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if removed:
ret = ret['removed']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs']['purge_desired'])
if not purge_desired:
ret.update(__context__['pkg.list_pkgs']['installed'])
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret
def _get_upgradable(dist_upgrade=True, **kwargs):
'''
Utility function to get upgradable packages
Sample return data:
{ 'pkgname': '1.2.3-45', ... }
'''
cmd = ['apt-get', '--just-print']
if dist_upgrade:
cmd.append('dist-upgrade')
else:
cmd.append('upgrade')
try:
cmd.extend(['-o', 'APT::Default-Release={0}'.format(kwargs['fromrepo'])])
except KeyError:
pass
call = _call_apt(cmd)
if call['retcode'] != 0:
msg = 'Failed to get upgrades'
for key in ('stderr', 'stdout'):
if call[key]:
msg += ': ' + call[key]
break
raise CommandExecutionError(msg)
else:
out = call['stdout']
# rexp parses lines that look like the following:
# Conf libxfont1 (1:1.4.5-1 Debian:testing [i386])
rexp = re.compile('(?m)^Conf '
'([^ ]+) ' # Package name
r'\(([^ ]+)') # Version
keys = ['name', 'version']
_get = lambda l, k: l[keys.index(k)]
upgrades = rexp.findall(out)
ret = {}
for line in upgrades:
name = _get(line, 'name')
version_num = _get(line, 'version')
ret[name] = version_num
return ret
def list_upgrades(refresh=True, dist_upgrade=True, **kwargs):
'''
List all available package upgrades.
refresh
Whether to refresh the package database before listing upgrades.
Default: True.
cache_valid_time
.. versionadded:: 2016.11.0
Skip refreshing the package database if refresh has already occurred within
<value> seconds
dist_upgrade
Whether to list the upgrades using dist-upgrade vs upgrade. Default is
to use dist-upgrade.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
cache_valid_time = kwargs.pop('cache_valid_time', 0)
if salt.utils.data.is_true(refresh):
refresh_db(cache_valid_time)
return _get_upgradable(dist_upgrade, **kwargs)
def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
'''
return latest_version(name) != ''
def version_cmp(pkg1, pkg2, ignore_epoch=False, **kwargs):
'''
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
ignore_epoch : False
Set to ``True`` to ignore the epoch when comparing versions
.. versionadded:: 2015.8.10,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' pkg.version_cmp '0.2.4-0ubuntu1' '0.2.4.1-0ubuntu1'
'''
normalize = lambda x: six.text_type(x).split(':', 1)[-1] \
if ignore_epoch else six.text_type(x)
# both apt_pkg.version_compare and _cmd_quote need string arguments.
pkg1 = normalize(pkg1)
pkg2 = normalize(pkg2)
    # if we have apt_pkg, this will be quicker and will not rely on the shell.
if HAS_APTPKG:
try:
# the apt_pkg module needs to be manually initialized
apt_pkg.init_system()
# if there is a difference in versions, apt_pkg.version_compare will
# return an int representing the difference in minor versions, or
# 1/-1 if the difference is smaller than minor versions. normalize
# to -1, 0 or 1.
try:
ret = apt_pkg.version_compare(pkg1, pkg2)
except TypeError:
ret = apt_pkg.version_compare(six.text_type(pkg1), six.text_type(pkg2))
return 1 if ret > 0 else -1 if ret < 0 else 0
except Exception:
# Try to use shell version in case of errors w/python bindings
pass
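    # Shell fallback: ask dpkg to test each relation in turn and return the
    # first comparison that succeeds.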
try:
for oper, ret in (('lt', -1), ('eq', 0), ('gt', 1)):
cmd = ['dpkg', '--compare-versions', pkg1, oper, pkg2]
retcode = __salt__['cmd.retcode'](cmd,
output_loglevel='trace',
python_shell=False,
ignore_retcode=True)
if retcode == 0:
return ret
except Exception as exc:
log.error(exc)
return None
def _split_repo_str(repo):
'''
Return APT source entry as a tuple.
'''
split = sourceslist.SourceEntry(repo)
return split.type, split.architectures, split.uri, split.dist, split.comps
def _consolidate_repo_sources(sources):
'''
Consolidate APT sources.
'''
if not isinstance(sources, sourceslist.SourcesList):
raise TypeError(
'\'{0}\' not a \'{1}\''.format(
type(sources),
sourceslist.SourcesList
)
)
consolidated = {}
delete_files = set()
base_file = sourceslist.SourceEntry('').file
repos = [s for s in sources.list if not s.invalid]
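    # Group entries by (architectures, disabled, type, uri, dist) and merge
    # their component lists; source files other than the main sources.list
    # are removed once their entries have been consolidated.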
for repo in repos:
repo.uri = repo.uri.rstrip('/')
# future lint: disable=blacklisted-function
key = str((getattr(repo, 'architectures', []),
repo.disabled, repo.type, repo.uri, repo.dist))
# future lint: enable=blacklisted-function
if key in consolidated:
combined = consolidated[key]
combined_comps = set(repo.comps).union(set(combined.comps))
consolidated[key].comps = list(combined_comps)
else:
consolidated[key] = sourceslist.SourceEntry(salt.utils.pkg.deb.strip_uri(repo.line))
if repo.file != base_file:
delete_files.add(repo.file)
sources.list = list(consolidated.values())
sources.save()
for file_ in delete_files:
try:
os.remove(file_)
except OSError:
pass
return sources
def list_repo_pkgs(*args, **kwargs): # pylint: disable=unused-import
'''
.. versionadded:: 2017.7.0
Returns all available packages. Optionally, package names (and name globs)
can be passed and the results will be filtered to packages matching those
names.
This function can be helpful in discovering the version or repo to specify
in a :mod:`pkg.installed <salt.states.pkg.installed>` state.
The return data will be a dictionary mapping package names to a list of
version numbers, ordered from newest to oldest. For example:
.. code-block:: python
{
'bash': ['4.3-14ubuntu1.1',
'4.3-14ubuntu1'],
'nginx': ['1.10.0-0ubuntu0.16.04.4',
'1.9.15-0ubuntu1']
}
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_repo_pkgs
salt '*' pkg.list_repo_pkgs foo bar baz
'''
if args:
# Get only information about packages in args
cmd = ['apt-cache', 'show'] + [arg for arg in args]
else:
# Get information about all available packages
cmd = ['apt-cache', 'dump']
out = _call_apt(cmd, scope=False, ignore_retcode=True)
ret = {}
pkg_name = None
skip_pkg = False
new_pkg = re.compile('^Package: (.+)')
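    # Walk the apt-cache output, tracking the current 'Package:' stanza and
    # collecting every 'Version:' line that belongs to it.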
for line in salt.utils.itertools.split(out['stdout'], '\n'):
if not line.strip():
continue
try:
cur_pkg = new_pkg.match(line).group(1)
except AttributeError:
pass
else:
if cur_pkg != pkg_name:
pkg_name = cur_pkg
continue
comps = line.strip().split(None, 1)
if comps[0] == 'Version:':
ret.setdefault(pkg_name, []).append(comps[1])
return ret
def _skip_source(source):
'''
    Decide whether to skip a source entry or not.
    :param source: the source entry to evaluate.
    :return: True if the source should be skipped, False otherwise.
'''
if source.invalid:
if source.uri and source.type and source.type in ("deb", "deb-src", "rpm", "rpm-src"):
pieces = source.mysplit(source.line)
if pieces[1].strip()[0] == "[":
options = pieces.pop(1).strip("[]").split()
if len(options) > 0:
log.debug("Source %s will be included although is marked invalid", source.uri)
return False
return True
else:
return True
return False
def list_repos(**kwargs):
'''
Lists all repos in the sources.list (and sources.lists.d) files
CLI Example:
.. code-block:: bash
salt '*' pkg.list_repos
salt '*' pkg.list_repos disabled=True
'''
_check_apt()
repos = {}
sources = sourceslist.SourcesList()
for source in sources.list:
if _skip_source(source):
continue
repo = {}
repo['file'] = source.file
repo['comps'] = getattr(source, 'comps', [])
repo['disabled'] = source.disabled
repo['enabled'] = not repo['disabled'] # This is for compatibility with the other modules
repo['dist'] = source.dist
repo['type'] = source.type
repo['uri'] = source.uri.rstrip('/')
repo['line'] = salt.utils.pkg.deb.strip_uri(source.line.strip())
repo['architectures'] = getattr(source, 'architectures', [])
repos.setdefault(source.uri, []).append(repo)
return repos
def get_repo(repo, **kwargs):
'''
Display a repo from the sources.list / sources.list.d
The repo passed in needs to be a complete repo entry.
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo "myrepo definition"
'''
_check_apt()
ppa_auth = kwargs.get('ppa_auth', None)
# we have to be clever about this since the repo definition formats
# are a bit more "loose" than in some other distributions
if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
# This is a PPA definition meaning special handling is needed
# to derive the name.
dist = __grains__['lsb_distrib_codename']
owner_name, ppa_name = repo[4:].split('/')
if ppa_auth:
auth_info = '{0}@'.format(ppa_auth)
repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name,
ppa_name, dist)
else:
if HAS_SOFTWAREPROPERTIES:
try:
if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
repo = softwareproperties.ppa.PPAShortcutHandler(
repo).expand(dist)[0]
else:
repo = softwareproperties.ppa.expand_ppa_line(
repo,
dist)[0]
except NameError as name_error:
raise CommandExecutionError(
'Could not find ppa {0}: {1}'.format(repo, name_error)
)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
repos = list_repos()
if repos:
try:
repo_type, repo_architectures, repo_uri, repo_dist, repo_comps = _split_repo_str(repo)
if ppa_auth:
uri_match = re.search('(http[s]?://)(.+)', repo_uri)
if uri_match:
if not uri_match.group(2).startswith(ppa_auth):
repo_uri = '{0}{1}@{2}'.format(uri_match.group(1),
ppa_auth,
uri_match.group(2))
except SyntaxError:
raise CommandExecutionError(
'Error: repo \'{0}\' is not a well formatted definition'
.format(repo)
)
for source in six.itervalues(repos):
for sub in source:
if (sub['type'] == repo_type and
# strip trailing '/' from repo_uri, it's valid in definition
# but not valid when compared to persisted source
sub['uri'].rstrip('/') == repo_uri.rstrip('/') and
sub['dist'] == repo_dist):
if not repo_comps:
return sub
for comp in repo_comps:
if comp in sub.get('comps', []):
return sub
return {}
def del_repo(repo, **kwargs):
'''
Delete a repo from the sources.list / sources.list.d
If the .list file is in the sources.list.d directory
and the file that the repo exists in does not contain any other
repo configuration, the file itself will be deleted.
The repo passed in must be a fully formed repository definition
string.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo "myrepo definition"
'''
_check_apt()
is_ppa = False
if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
# This is a PPA definition meaning special handling is needed
# to derive the name.
is_ppa = True
dist = __grains__['lsb_distrib_codename']
if not HAS_SOFTWAREPROPERTIES:
_warn_software_properties(repo)
owner_name, ppa_name = repo[4:].split('/')
if 'ppa_auth' in kwargs:
auth_info = '{0}@'.format(kwargs['ppa_auth'])
repo = LP_PVT_SRC_FORMAT.format(auth_info, dist, owner_name,
ppa_name)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
sources = sourceslist.SourcesList()
repos = [s for s in sources.list if not s.invalid]
if repos:
deleted_from = dict()
try:
repo_type, \
repo_architectures, \
repo_uri, \
repo_dist, \
repo_comps = _split_repo_str(repo)
except SyntaxError:
raise SaltInvocationError(
'Error: repo \'{0}\' not a well formatted definition'
.format(repo)
)
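        # Strip the matching components from each source entry and drop the
        # entry entirely once no components remain.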
for source in repos:
if (source.type == repo_type
and source.architectures == repo_architectures
and source.uri == repo_uri
and source.dist == repo_dist):
s_comps = set(source.comps)
r_comps = set(repo_comps)
if s_comps.intersection(r_comps):
deleted_from[source.file] = 0
source.comps = list(s_comps.difference(r_comps))
if not source.comps:
try:
sources.remove(source)
except ValueError:
pass
# PPAs are special and can add deb-src where expand_ppa_line
            # doesn't always reflect this. Let's just clean up here for good
# measure
if (is_ppa and repo_type == 'deb' and source.type == 'deb-src' and
source.uri == repo_uri and source.dist == repo_dist):
s_comps = set(source.comps)
r_comps = set(repo_comps)
if s_comps.intersection(r_comps):
deleted_from[source.file] = 0
source.comps = list(s_comps.difference(r_comps))
if not source.comps:
try:
sources.remove(source)
except ValueError:
pass
sources.save()
if deleted_from:
ret = ''
for source in sources:
if source.file in deleted_from:
deleted_from[source.file] += 1
for repo_file, count in six.iteritems(deleted_from):
msg = 'Repo \'{0}\' has been removed from {1}.\n'
if count == 0 and 'sources.list.d/' in repo_file:
if os.path.isfile(repo_file):
msg = ('File {1} containing repo \'{0}\' has been '
'removed.')
try:
os.remove(repo_file)
except OSError:
pass
ret += msg.format(repo, repo_file)
# explicit refresh after a repo is deleted
refresh_db()
return ret
raise CommandExecutionError(
'Repo {0} doesn\'t exist in the sources.list(s)'.format(repo)
)
def _convert_if_int(value):
'''
.. versionadded:: 2017.7.0
Convert to an int if necessary.
:param str value: The value to check/convert.
:return: The converted or passed value.
:rtype: bool|int|str
'''
try:
value = int(str(value)) # future lint: disable=blacklisted-function
except ValueError:
pass
return value
def get_repo_keys():
'''
.. versionadded:: 2017.7.0
List known repo key details.
:return: A dictionary containing the repo keys.
:rtype: dict
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo_keys
'''
ret = dict()
repo_keys = list()
# The double usage of '--with-fingerprint' is necessary in order to
# retrieve the fingerprint of the subkey.
cmd = ['apt-key', 'adv', '--batch', '--list-public-keys', '--with-fingerprint',
'--with-fingerprint', '--with-colons', '--fixed-list-mode']
cmd_ret = _call_apt(cmd, scope=False)
if cmd_ret['retcode'] != 0:
log.error(cmd_ret['stderr'])
return ret
lines = [line for line in cmd_ret['stdout'].splitlines() if line.strip()]
# Reference for the meaning of each item in the colon-separated
# record can be found here: https://goo.gl/KIZbvp
for line in lines:
items = [_convert_if_int(item.strip()) if item.strip() else None for item in line.split(':')]
key_props = dict()
if len(items) < 2:
log.debug('Skipping line: %s', line)
continue
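        # 'pub' starts a new key record, 'sub' attaches a subkey to the last
        # record, while 'fpr' and 'uid' lines enrich whichever record came
        # last.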
if items[0] in ('pub', 'sub'):
key_props.update({
'algorithm': items[3],
'bits': items[2],
'capability': items[11],
'date_creation': items[5],
'date_expiration': items[6],
'keyid': items[4],
'validity': items[1]
})
if items[0] == 'pub':
repo_keys.append(key_props)
else:
repo_keys[-1]['subkey'] = key_props
elif items[0] == 'fpr':
if repo_keys[-1].get('subkey', False):
repo_keys[-1]['subkey'].update({'fingerprint': items[9]})
else:
repo_keys[-1].update({'fingerprint': items[9]})
elif items[0] == 'uid':
repo_keys[-1].update({
'uid': items[9],
'uid_hash': items[7]
})
for repo_key in repo_keys:
ret[repo_key['keyid']] = repo_key
return ret
def add_repo_key(path=None, text=None, keyserver=None, keyid=None, saltenv='base'):
'''
.. versionadded:: 2017.7.0
Add a repo key using ``apt-key add``.
:param str path: The path of the key file to import.
:param str text: The key data to import, in string form.
:param str keyserver: The server to download the repo key specified by the keyid.
:param str keyid: The key id of the repo key to add.
:param str saltenv: The environment the key file resides in.
:return: A boolean representing whether the repo key was added.
:rtype: bool
CLI Examples:
.. code-block:: bash
salt '*' pkg.add_repo_key 'salt://apt/sources/test.key'
salt '*' pkg.add_repo_key text="'$KEY1'"
salt '*' pkg.add_repo_key keyserver='keyserver.example' keyid='0000AAAA'
'''
cmd = ['apt-key']
kwargs = {}
current_repo_keys = get_repo_keys()
if path:
cached_source_path = __salt__['cp.cache_file'](path, saltenv)
if not cached_source_path:
log.error('Unable to get cached copy of file: %s', path)
return False
cmd.extend(['add', cached_source_path])
elif text:
log.debug('Received value: %s', text)
cmd.extend(['add', '-'])
kwargs.update({'stdin': text})
elif keyserver:
if not keyid:
error_msg = 'No keyid or keyid too short for keyserver: {0}'.format(keyserver)
raise SaltInvocationError(error_msg)
cmd.extend(['adv', '--batch', '--keyserver', keyserver, '--recv', keyid])
elif keyid:
error_msg = 'No keyserver specified for keyid: {0}'.format(keyid)
raise SaltInvocationError(error_msg)
else:
raise TypeError('{0}() takes at least 1 argument (0 given)'.format(add_repo_key.__name__))
# If the keyid is provided or determined, check it against the existing
# repo key ids to determine whether it needs to be imported.
if keyid:
for current_keyid in current_repo_keys:
if current_keyid[-(len(keyid)):] == keyid:
log.debug("The keyid '%s' already present: %s", keyid, current_keyid)
return True
cmd_ret = _call_apt(cmd, **kwargs)
if cmd_ret['retcode'] == 0:
return True
log.error('Unable to add repo key: %s', cmd_ret['stderr'])
return False
def del_repo_key(name=None, **kwargs):
'''
.. versionadded:: 2015.8.0
Remove a repo key using ``apt-key del``
name
Repo from which to remove the key. Unnecessary if ``keyid`` is passed.
keyid
The KeyID of the GPG key to remove
keyid_ppa : False
If set to ``True``, the repo's GPG key ID will be looked up from
ppa.launchpad.net and removed.
.. note::
Setting this option to ``True`` requires that the ``name`` param
also be passed.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo_key keyid=0123ABCD
salt '*' pkg.del_repo_key name='ppa:foo/bar' keyid_ppa=True
'''
if kwargs.get('keyid_ppa', False):
if isinstance(name, six.string_types) and name.startswith('ppa:'):
owner_name, ppa_name = name[4:].split('/')
ppa_info = _get_ppa_info_from_launchpad(
owner_name, ppa_name)
keyid = ppa_info['signing_key_fingerprint'][-8:]
else:
raise SaltInvocationError(
'keyid_ppa requires that a PPA be passed'
)
else:
if 'keyid' in kwargs:
keyid = kwargs.get('keyid')
else:
raise SaltInvocationError(
'keyid or keyid_ppa and PPA name must be passed'
)
result = _call_apt(['apt-key', 'del', keyid], scope=False)
if result['retcode'] != 0:
msg = 'Failed to remove keyid {0}'
if result['stderr']:
msg += ': {0}'.format(result['stderr'])
raise CommandExecutionError(msg)
return keyid
def mod_repo(repo, saltenv='base', **kwargs):
'''
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as the definition is well formed. For Ubuntu the
``ppa:<project>/repo`` format is acceptable. ``ppa:`` format can only be
used to create a new repository.
The following options are available to modify a repo definition:
architectures
A comma-separated list of supported architectures, e.g. ``amd64`` If
this option is not set, all architectures (configured in the system)
will be used.
comps
A comma separated list of components for the repo, e.g. ``main``
file
A file name to be used
keyserver
Keyserver to get gpg key from
keyid
Key ID or a list of key IDs to load with the ``keyserver`` argument
key_url
URL to a GPG key to add to the APT GPG keyring
key_text
GPG key in string form to add to the APT GPG keyring
.. versionadded:: 2018.3.0
consolidate : False
If ``True``, will attempt to de-duplicate and consolidate sources
comments
Sometimes you want to supply additional information, but not as
enabled configuration. All comments provided here will be joined
into a single string and appended to the repo configuration with a
comment marker (#) before it.
.. versionadded:: 2015.8.9
.. note::
Due to the way keys are stored for APT, there is a known issue where
the key won't be updated unless another change is made at the same
time. Keys should be properly added on initial configuration.
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo 'myrepo definition' uri=http://new/uri
salt '*' pkg.mod_repo 'myrepo definition' comps=main,universe
'''
if 'refresh_db' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'refresh_db\' argument to \'pkg.mod_repo\' has been '
'renamed to \'refresh\'. Support for using \'refresh_db\' will be '
'removed in the Neon release of Salt.'
)
refresh = kwargs['refresh_db']
else:
refresh = kwargs.get('refresh', True)
_check_apt()
    # to ensure no one sets key values that _shouldn't_ be changed on the
    # object itself, this is just a whitelist of properties that are OK to set
if repo.startswith('ppa:'):
if __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
            # secure (private) PPAs cannot be supported via apt-add-repository
            # as of the time of this implementation. The code path for secure
            # PPAs should be the same as the urllib method
if salt.utils.path.which('apt-add-repository') \
and 'ppa_auth' not in kwargs:
repo_info = get_repo(repo)
if repo_info:
return {repo: repo_info}
else:
env = None
http_proxy_url = _get_http_proxy_url()
if http_proxy_url:
env = {'http_proxy': http_proxy_url,
'https_proxy': http_proxy_url}
if float(__grains__['osrelease']) < 12.04:
cmd = ['apt-add-repository', repo]
else:
cmd = ['apt-add-repository', '-y', repo]
out = _call_apt(cmd, env=env, scope=False, **kwargs)
if out['retcode']:
raise CommandExecutionError(
'Unable to add PPA \'{0}\'. \'{1}\' exited with '
'status {2!s}: \'{3}\' '.format(
repo[4:],
cmd,
out['retcode'],
out['stderr']
)
)
# explicit refresh when a repo is modified.
if refresh:
refresh_db()
return {repo: out}
else:
if not HAS_SOFTWAREPROPERTIES:
_warn_software_properties(repo)
else:
log.info('Falling back to urllib method for private PPA')
# fall back to urllib style
try:
owner_name, ppa_name = repo[4:].split('/', 1)
except ValueError:
raise CommandExecutionError(
'Unable to get PPA info from argument. '
'Expected format "<PPA_OWNER>/<PPA_NAME>" '
'(e.g. saltstack/salt) not found. Received '
'\'{0}\' instead.'.format(repo[4:])
)
dist = __grains__['lsb_distrib_codename']
# ppa has a lot of implicit arguments. Make them explicit.
# These will defer to any user-defined variants
kwargs['dist'] = dist
ppa_auth = ''
if 'file' not in kwargs:
filename = '/etc/apt/sources.list.d/{0}-{1}-{2}.list'
kwargs['file'] = filename.format(owner_name, ppa_name,
dist)
try:
launchpad_ppa_info = _get_ppa_info_from_launchpad(
owner_name, ppa_name)
if 'ppa_auth' not in kwargs:
kwargs['keyid'] = launchpad_ppa_info[
'signing_key_fingerprint']
else:
if 'keyid' not in kwargs:
error_str = 'Private PPAs require a ' \
'keyid to be specified: {0}/{1}'
raise CommandExecutionError(
error_str.format(owner_name, ppa_name)
)
except HTTPError as exc:
raise CommandExecutionError(
'Launchpad does not know about {0}/{1}: {2}'.format(
owner_name, ppa_name, exc)
)
except IndexError as exc:
raise CommandExecutionError(
'Launchpad knows about {0}/{1} but did not '
'return a fingerprint. Please set keyid '
'manually: {2}'.format(owner_name, ppa_name, exc)
)
if 'keyserver' not in kwargs:
kwargs['keyserver'] = 'keyserver.ubuntu.com'
if 'ppa_auth' in kwargs:
if not launchpad_ppa_info['private']:
raise CommandExecutionError(
'PPA is not private but auth credentials '
'passed: {0}'.format(repo)
)
# assign the new repo format to the "repo" variable
# so we can fall through to the "normal" mechanism
# here.
if 'ppa_auth' in kwargs:
ppa_auth = '{0}@'.format(kwargs['ppa_auth'])
repo = LP_PVT_SRC_FORMAT.format(ppa_auth, owner_name,
ppa_name, dist)
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
else:
raise CommandExecutionError(
'cannot parse "ppa:" style repo definitions: {0}'
.format(repo)
)
sources = sourceslist.SourcesList()
if kwargs.get('consolidate', False):
# attempt to de-dup and consolidate all sources
# down to entries in sources.list
# this option makes it easier to keep the sources
# list in a "sane" state.
#
# this should remove duplicates, consolidate comps
# for a given source down to one line
# and eliminate "invalid" and comment lines
#
# the second side effect is removal of files
# that are not the main sources.list file
sources = _consolidate_repo_sources(sources)
repos = [s for s in sources if not s.invalid]
mod_source = None
try:
repo_type, \
repo_architectures, \
repo_uri, \
repo_dist, \
repo_comps = _split_repo_str(repo)
except SyntaxError:
raise SyntaxError(
'Error: repo \'{0}\' not a well formatted definition'.format(repo)
)
full_comp_list = set(repo_comps)
no_proxy = __salt__['config.option']('no_proxy')
if 'keyid' in kwargs:
keyid = kwargs.pop('keyid', None)
keyserver = kwargs.pop('keyserver', None)
if not keyid or not keyserver:
error_str = 'both keyserver and keyid options required.'
raise NameError(error_str)
if not isinstance(keyid, list):
keyid = [keyid]
for key in keyid:
if isinstance(key, int): # yaml can make this an int, we need the hex version
key = hex(key)
cmd = ['apt-key', 'export', key]
output = __salt__['cmd.run_stdout'](cmd, python_shell=False, **kwargs)
imported = output.startswith('-----BEGIN PGP')
if keyserver:
if not imported:
http_proxy_url = _get_http_proxy_url()
if http_proxy_url and keyserver not in no_proxy:
cmd = ['apt-key', 'adv', '--batch', '--keyserver-options', 'http-proxy={0}'.format(http_proxy_url),
'--keyserver', keyserver, '--logger-fd', '1', '--recv-keys', key]
else:
cmd = ['apt-key', 'adv', '--batch', '--keyserver', keyserver,
'--logger-fd', '1', '--recv-keys', key]
ret = _call_apt(cmd, scope=False, **kwargs)
if ret['retcode'] != 0:
raise CommandExecutionError(
'Error: key retrieval failed: {0}'.format(ret['stdout'])
)
elif 'key_url' in kwargs:
key_url = kwargs['key_url']
fn_ = __salt__['cp.cache_file'](key_url, saltenv)
if not fn_:
raise CommandExecutionError(
'Error: file not found: {0}'.format(key_url)
)
cmd = ['apt-key', 'add', fn_]
out = __salt__['cmd.run_stdout'](cmd, python_shell=False, **kwargs)
if not out.upper().startswith('OK'):
raise CommandExecutionError(
'Error: failed to add key from {0}'.format(key_url)
)
elif 'key_text' in kwargs:
key_text = kwargs['key_text']
cmd = ['apt-key', 'add', '-']
out = __salt__['cmd.run_stdout'](cmd, stdin=key_text,
python_shell=False, **kwargs)
if not out.upper().startswith('OK'):
raise CommandExecutionError(
'Error: failed to add key:\n{0}'.format(key_text)
)
if 'comps' in kwargs:
kwargs['comps'] = kwargs['comps'].split(',')
full_comp_list |= set(kwargs['comps'])
else:
kwargs['comps'] = list(full_comp_list)
if 'architectures' in kwargs:
kwargs['architectures'] = kwargs['architectures'].split(',')
else:
kwargs['architectures'] = repo_architectures
if 'disabled' in kwargs:
kwargs['disabled'] = salt.utils.data.is_true(kwargs['disabled'])
kw_type = kwargs.get('type')
kw_dist = kwargs.get('dist')
for source in repos:
# This series of checks will identify the starting source line
# and the resulting source line. The idea here is to ensure
# we are not returning bogus data because the source line
# has already been modified on a previous run.
repo_matches = source.type == repo_type and source.uri.rstrip('/') == repo_uri.rstrip('/') and source.dist == repo_dist
kw_matches = source.dist == kw_dist and source.type == kw_type
if repo_matches or kw_matches:
for comp in full_comp_list:
if comp in getattr(source, 'comps', []):
mod_source = source
if not source.comps:
mod_source = source
if kwargs['architectures'] != source.architectures:
mod_source = source
if mod_source:
break
if 'comments' in kwargs:
kwargs['comments'] = \
salt.utils.pkg.deb.combine_comments(kwargs['comments'])
if not mod_source:
mod_source = sourceslist.SourceEntry(repo)
if 'comments' in kwargs:
mod_source.comment = kwargs['comments']
sources.list.append(mod_source)
elif 'comments' in kwargs:
mod_source.comment = kwargs['comments']
for key in kwargs:
if key in _MODIFY_OK and hasattr(mod_source, key):
setattr(mod_source, key, kwargs[key])
sources.save()
# on changes, explicitly refresh
if refresh:
refresh_db()
return {
repo: {
'architectures': getattr(mod_source, 'architectures', []),
'comps': mod_source.comps,
'disabled': mod_source.disabled,
'file': mod_source.file,
'type': mod_source.type,
'uri': mod_source.uri,
'line': mod_source.line
}
}
def file_list(*packages, **kwargs):
'''
List the files that belong to a package. Not specifying any packages will
return a list of _every_ file on the system's package database (not
generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
'''
return __salt__['lowpkg.file_list'](*packages)
def file_dict(*packages, **kwargs):
'''
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of _every_ file on the system's
package database (not generally recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_dict httpd
salt '*' pkg.file_dict httpd postfix
salt '*' pkg.file_dict
'''
return __salt__['lowpkg.file_dict'](*packages)
def expand_repo_def(**kwargs):
'''
Take a repository definition and expand it to the full pkg repository dict
that can be used for comparison. This is a helper function to make
the Debian/Ubuntu apt sources sane for comparison in the pkgrepo states.
This is designed to be called from pkgrepo states and will have little use
being called on the CLI.
'''
if 'repo' not in kwargs:
raise SaltInvocationError('missing \'repo\' argument')
_check_apt()
sanitized = {}
repo = salt.utils.pkg.deb.strip_uri(kwargs['repo'])
if repo.startswith('ppa:') and __grains__['os'] in ('Ubuntu', 'Mint', 'neon'):
dist = __grains__['lsb_distrib_codename']
owner_name, ppa_name = repo[4:].split('/', 1)
if 'ppa_auth' in kwargs:
auth_info = '{0}@'.format(kwargs['ppa_auth'])
repo = LP_PVT_SRC_FORMAT.format(auth_info, owner_name, ppa_name,
dist)
else:
if HAS_SOFTWAREPROPERTIES:
if hasattr(softwareproperties.ppa, 'PPAShortcutHandler'):
repo = softwareproperties.ppa.PPAShortcutHandler(repo).expand(dist)[0]
else:
repo = softwareproperties.ppa.expand_ppa_line(repo, dist)[0]
else:
repo = LP_SRC_FORMAT.format(owner_name, ppa_name, dist)
if 'file' not in kwargs:
filename = '/etc/apt/sources.list.d/{0}-{1}-{2}.list'
kwargs['file'] = filename.format(owner_name, ppa_name, dist)
source_entry = sourceslist.SourceEntry(repo)
for list_args in ('architectures', 'comps'):
if list_args in kwargs:
kwargs[list_args] = kwargs[list_args].split(',')
for kwarg in _MODIFY_OK:
if kwarg in kwargs:
setattr(source_entry, kwarg, kwargs[kwarg])
sanitized['file'] = source_entry.file
sanitized['comps'] = getattr(source_entry, 'comps', [])
sanitized['disabled'] = source_entry.disabled
sanitized['dist'] = source_entry.dist
sanitized['type'] = source_entry.type
sanitized['uri'] = source_entry.uri.rstrip('/')
sanitized['line'] = source_entry.line.strip()
sanitized['architectures'] = getattr(source_entry, 'architectures', [])
return sanitized
def _parse_selections(dpkgselection):
'''
Parses the format from ``dpkg --get-selections`` and return a format that
pkg.get_selections and pkg.set_selections work with.
'''
ret = {}
if isinstance(dpkgselection, six.string_types):
dpkgselection = dpkgselection.split('\n')
for line in dpkgselection:
if line:
_pkg, _state = line.split()
if _state in ret:
ret[_state].append(_pkg)
else:
ret[_state] = [_pkg]
return ret
def get_selections(pattern=None, state=None):
'''
View package state from the dpkg database.
Returns a dict of dicts containing the state, and package names:
.. code-block:: python
{'<host>':
{'<state>': ['pkg1',
...
]
},
...
}
CLI Example:
.. code-block:: bash
salt '*' pkg.get_selections
salt '*' pkg.get_selections 'python-*'
salt '*' pkg.get_selections state=hold
salt '*' pkg.get_selections 'openssh*' state=hold
'''
ret = {}
cmd = ['dpkg', '--get-selections']
cmd.append(pattern if pattern else '*')
stdout = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
ret = _parse_selections(stdout)
if state:
return {state: ret.get(state, [])}
return ret
# TODO: allow state=None to be set, and that *args will be set to that state
# TODO: maybe use something similar to pkg_resources.pack_pkgs to allow a list
# passed to selection, with the default state set to whatever is passed by the
# above, but override that if explicitly specified
# TODO: handle path to selection file from local fs as well as from salt file
# server
def set_selections(path=None, selection=None, clear=False, saltenv='base'):
'''
Change package state in the dpkg database.
    The state can be any one of the following, as documented in ``dpkg(1)``:
- install
- hold
- deinstall
- purge
This command is commonly used to mark specific packages to be held from
being upgraded, that is, to be kept at a certain version. When a state is
changed to anything but being held, then it is typically followed by
``apt-get -u dselect-upgrade``.
Note: Be careful with the ``clear`` argument, since it will start
with setting all packages to deinstall state.
Returns a dict of dicts containing the package names, and the new and old
versions:
.. code-block:: python
{'<host>':
{'<package>': {'new': '<new-state>',
'old': '<old-state>'}
},
...
}
CLI Example:
.. code-block:: bash
salt '*' pkg.set_selections selection='{"install": ["netcat"]}'
salt '*' pkg.set_selections selection='{"hold": ["openssh-server", "openssh-client"]}'
salt '*' pkg.set_selections salt://path/to/file
salt '*' pkg.set_selections salt://path/to/file clear=True
'''
ret = {}
if not path and not selection:
return ret
if path and selection:
err = ('The \'selection\' and \'path\' arguments to '
'pkg.set_selections are mutually exclusive, and cannot be '
'specified together')
raise SaltInvocationError(err)
if isinstance(selection, six.string_types):
try:
selection = salt.utils.yaml.safe_load(selection)
except (salt.utils.yaml.parser.ParserError, salt.utils.yaml.scanner.ScannerError) as exc:
raise SaltInvocationError(
'Improperly-formatted selection: {0}'.format(exc)
)
if path:
path = __salt__['cp.cache_file'](path, saltenv)
with salt.utils.files.fopen(path, 'r') as ifile:
content = [salt.utils.stringutils.to_unicode(x)
for x in ifile.readlines()]
selection = _parse_selections(content)
if selection:
valid_states = ('install', 'hold', 'deinstall', 'purge')
bad_states = [x for x in selection if x not in valid_states]
if bad_states:
raise SaltInvocationError(
'Invalid state(s): {0}'.format(', '.join(bad_states))
)
if clear:
cmd = ['dpkg', '--clear-selections']
if not __opts__['test']:
result = _call_apt(cmd, scope=False)
if result['retcode'] != 0:
err = ('Running dpkg --clear-selections failed: '
'{0}'.format(result['stderr']))
log.error(err)
raise CommandExecutionError(err)
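        # Build a package -> current-state map so that selections which are
        # already in the requested state can be skipped below.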
sel_revmap = {}
for _state, _pkgs in six.iteritems(get_selections()):
sel_revmap.update(dict((_pkg, _state) for _pkg in _pkgs))
for _state, _pkgs in six.iteritems(selection):
for _pkg in _pkgs:
if _state == sel_revmap.get(_pkg):
continue
cmd = ['dpkg', '--set-selections']
cmd_in = '{0} {1}'.format(_pkg, _state)
if not __opts__['test']:
result = _call_apt(cmd, scope=False, stdin=cmd_in)
if result['retcode'] != 0:
log.error(
'failed to set state %s for package %s',
_state, _pkg
)
else:
ret[_pkg] = {'old': sel_revmap.get(_pkg),
'new': _state}
return ret
def _resolve_deps(name, pkgs, **kwargs):
'''
Installs missing dependencies and marks them as auto installed so they
are removed when no more manually installed packages depend on them.
.. versionadded:: 2014.7.0
:depends: - python-apt module
'''
missing_deps = []
for pkg_file in pkgs:
deb = apt.debfile.DebPackage(filename=pkg_file, cache=apt.Cache())
if deb.check():
missing_deps.extend(deb.missing_deps)
if missing_deps:
cmd = ['apt-get', '-q', '-y']
cmd = cmd + ['-o', 'DPkg::Options::=--force-confold']
cmd = cmd + ['-o', 'DPkg::Options::=--force-confdef']
cmd.append('install')
cmd.extend(missing_deps)
ret = __salt__['cmd.retcode'](
cmd,
env=kwargs.get('env'),
python_shell=False
)
if ret != 0:
raise CommandExecutionError(
'Error: unable to resolve dependencies for: {0}'.format(name)
)
else:
try:
cmd = ['apt-mark', 'auto'] + missing_deps
__salt__['cmd.run'](
cmd,
env=kwargs.get('env'),
python_shell=False
)
except MinionError as exc:
raise CommandExecutionError(exc)
return
def owner(*paths, **kwargs):
'''
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
be passed. Like :mod:`pkg.version <salt.modules.aptpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Example:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /usr/bin/basename
'''
if not paths:
return ''
ret = {}
for path in paths:
cmd = ['dpkg', '-S', path]
output = __salt__['cmd.run_stdout'](cmd,
output_loglevel='trace',
python_shell=False)
ret[path] = output.split(':')[0]
if 'no path found' in ret[path].lower():
ret[path] = ''
if len(ret) == 1:
return next(six.itervalues(ret))
return ret
def show(*names, **kwargs):
'''
.. versionadded:: 2019.2.0
Runs an ``apt-cache show`` on the passed package names, and returns the
results in a nested dictionary. The top level of the return data will be
the package name, with each package name mapping to a dictionary of version
numbers to any additional information returned by ``apt-cache show``.
filter
An optional comma-separated list (or quoted Python list) of
case-insensitive keys on which to filter. This allows one to restrict
the information returned for each package to a smaller selection of
pertinent items.
refresh : False
If ``True``, the apt cache will be refreshed first. By default, no
refresh is performed.
CLI Examples:
.. code-block:: bash
salt myminion pkg.show gawk
salt myminion pkg.show 'nginx-*'
salt myminion pkg.show 'nginx-*' filter=description,provides
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
refresh = kwargs.pop('refresh', False)
filter_ = salt.utils.args.split_input(
kwargs.pop('filter', []),
lambda x: six.text_type(x)
if not isinstance(x, six.string_types)
else x.lower()
)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
if refresh:
refresh_db()
if not names:
return {}
result = _call_apt(['apt-cache', 'show'] + list(names), scope=False)
def _add(ret, pkginfo):
name = pkginfo.pop('Package', None)
version = pkginfo.pop('Version', None)
if name is not None and version is not None:
ret.setdefault(name, {}).setdefault(version, {}).update(pkginfo)
def _check_filter(key):
key = key.lower()
return True if key in ('package', 'version') or not filter_ \
else key in filter_
ret = {}
pkginfo = {}
for line in salt.utils.itertools.split(result['stdout'], '\n'):
line = line.strip()
if line:
try:
key, val = [x.strip() for x in line.split(':', 1)]
except ValueError:
pass
else:
if _check_filter(key):
pkginfo[key] = val
else:
# We've reached a blank line, which separates packages
_add(ret, pkginfo)
# Clear out the pkginfo dict for the next package
pkginfo = {}
continue
# Make sure to add whatever was in the pkginfo dict when we reached the end
# of the output.
_add(ret, pkginfo)
return ret
def info_installed(*names, **kwargs):
'''
Return the information of the named package(s) installed on the system.
.. versionadded:: 2015.8.1
names
The names of the packages for which to return information.
failhard
Whether to throw an exception if none of the packages are installed.
Defaults to True.
.. versionadded:: 2016.11.3
attr
Comma-separated package attributes. If no 'attr' is specified, all available attributes returned.
Valid attributes are:
version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.
.. versionadded:: Neon
CLI example:
.. code-block:: bash
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> failhard=false
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
failhard = kwargs.pop('failhard', True)
kwargs.pop('errors', None) # Only for compatibility with RPM
attr = kwargs.pop('attr', None) # Package attributes to return
all_versions = kwargs.pop('all_versions', False) # This is for backward compatible structure only
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
ret = dict()
for pkg_name, pkg_nfo in __salt__['lowpkg.info'](*names, failhard=failhard, attr=attr).items():
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in pkg_nfo.items():
if key == 'package':
t_nfo['name'] = value
elif key == 'origin':
t_nfo['vendor'] = value
elif key == 'section':
t_nfo['group'] = value
elif key == 'maintainer':
t_nfo['packager'] = value
elif key == 'homepage':
t_nfo['url'] = value
else:
t_nfo[key] = value
if all_versions:
ret.setdefault(pkg_name, []).append(t_nfo)
else:
ret[pkg_name] = t_nfo
return ret
def _get_http_proxy_url():
'''
Returns the http_proxy_url if proxy_username, proxy_password, proxy_host, and proxy_port
config values are set.
Returns a string.
'''
http_proxy_url = ''
host = __salt__['config.option']('proxy_host')
port = __salt__['config.option']('proxy_port')
username = __salt__['config.option']('proxy_username')
password = __salt__['config.option']('proxy_password')
# Set http_proxy_url for use in various internet facing actions...eg apt-key adv
if host and port:
if username and password:
http_proxy_url = 'http://{0}:{1}@{2}:{3}'.format(
username,
password,
host,
port
)
else:
http_proxy_url = 'http://{0}:{1}'.format(
host,
port
)
return http_proxy_url
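# Illustrative output (hypothetical values): with proxy_username=jdoe, proxy_password=secret,
# proxy_host=proxy.example.com and proxy_port=3128 configured, this returns
# 'http://jdoe:[email protected]:3128'; without credentials it returns
# 'http://proxy.example.com:3128'.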
|
the-stack_0_18406 | """A setuptools based setup module.
See:
https://packaging.python.org/guides/distributing-packages-using-setuptools/
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import pathlib
here = pathlib.Path(__file__).parent.resolve()
# Get the long description from the README file
long_description = (here / "README.md").read_text(encoding="utf-8")
# Arguments marked as "Required" below must be included for upload to PyPI.
# Fields marked as "Optional" may be commented out.
setup(
# This is the name of your project. The first time you publish this
# package, this name will be registered for you. It will determine how
# users can install this project, e.g.:
#
# $ pip install sampleproject
#
# And where it will live on PyPI: https://pypi.org/project/sampleproject/
#
# There are some restrictions on what makes a valid project name
# specification here:
# https://packaging.python.org/specifications/core-metadata/#name
name="keysightdsox2000", # Required
# Versions should comply with PEP 440:
# https://www.python.org/dev/peps/pep-0440/
#
# For a discussion on single-sourcing the version across setup.py and the
# project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version="0.0.1", # Required
# This is a one-line description or tagline of what your project does. This
# corresponds to the "Summary" metadata field:
# https://packaging.python.org/specifications/core-metadata/#summary
# Optional
description="Python interface to the Keysight DSO-X 2000 scopes",
# This is an optional longer description of your project that represents
# the body of text which users will see when they visit PyPI.
#
# Often, this is the same as your README, so you can just read it in from
# that file directly (as we have already done above)
#
# This field corresponds to the "Description" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-optional
long_description=long_description, # Optional
# Denotes that our long_description is in Markdown; valid values are
# text/plain, text/x-rst, and text/markdown
#
# Optional if long_description is written in reStructuredText (rst) but
# required for plain-text or Markdown; if unspecified, "applications should
# attempt to render [the long_description] as text/x-rst; charset=UTF-8 and
# fall back to text/plain if it is not valid rst" (see link below)
#
# This field corresponds to the "Description-Content-Type" metadata field:
# https://packaging.python.org/specifications/core-metadata/#description-content-type-optional
long_description_content_type="text/markdown", # Optional (see note above)
# This should be a valid link to your project's main homepage.
#
# This field corresponds to the "Home-Page" metadata field:
# https://packaging.python.org/specifications/core-metadata/#home-page-optional
url="https://github.com/quantop-dungeon/KeysightDSOX2000", # Optional
# This should be your name or the name of the organization which owns the
# project.
author="QUANTOP", # Optional
# This should be a valid email address corresponding to the author listed
# above.
author_email="[email protected]", # Optional
# Classifiers help users find your project by categorizing it.
#
# For a list of valid classifiers, see https://pypi.org/classifiers/
classifiers=[ # Optional
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
"Development Status :: 3 - Alpha",
# Indicate who your project is intended for
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
# Pick your license as you wish
"License :: OSI Approved :: MIT License",
# Specify the Python versions you support here. In particular, ensure
# that you indicate you support Python 3. These classifiers are *not*
# checked by 'pip install'. See instead 'python_requires' below.
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3 :: Only",
],
# This field adds keywords for your project which will appear on the
# project page. What does your project relate to?
#
# Note that this is a list of additional keywords, separated
# by commas, to be used to assist searching for the distribution in a
# larger catalog.
keywords="sample, setuptools, development", # Optional
# When your source code is in a subdirectory under the project root, e.g.
# `src/`, it is necessary to specify the `package_dir` argument.
# package_dir={'': '.'}, # Optional
# You can just specify package directories manually here if your project is
# simple. Or you can use find_packages().
#
# Alternatively, if you just want to distribute a single Python file, use
# the `py_modules` argument instead as follows, which will expect a file
# called `my_module.py` to exist:
#
# py_modules=["my_module"],
#
packages=find_packages(where="."), # Required
# Specify which Python versions you support. In contrast to the
# 'Programming Language' classifiers above, 'pip install' will check this
# and refuse to install the project if the version does not match. See
# https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
python_requires=">=3.6, <4",
# This field lists other packages that your project depends on to run.
# Any package you put here will be installed by pip when your project is
# installed, so they must be valid existing projects.
#
# For an analysis of "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=["numpy", "pyvisa"], # Optional
# List additional groups of dependencies here (e.g. development
# dependencies). Users will be able to install these using the "extras"
# syntax, for example:
#
# $ pip install sampleproject[dev]
#
# Similar to `install_requires` above, these must be valid existing
# projects.
# extras_require={ # Optional
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here.
# package_data={ # Optional
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/distutils/setupscript.html#installing-additional-files
#
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])], # Optional
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# `pip` to create the appropriate form of executable for the target
# platform.
#
# For example, the following would provide a command called `sample` which
# executes the function `main` from this package when invoked:
# entry_points={ # Optional
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
# List additional URLs that are relevant to your project as a dict.
#
# This field corresponds to the "Project-URL" metadata fields:
# https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use
#
# Examples listed include a pattern for specifying where the package tracks
# issues, where the source is hosted, where to say thanks to the package
# maintainers, and where to support the project financially. The key is
# what's used to render the link text on PyPI.
# project_urls={ # Optional
# 'Bug Reports': 'https://github.com/pypa/sampleproject/issues',
# 'Funding': 'https://donate.pypi.org',
# 'Say Thanks!': 'http://saythanks.io/to/example',
# 'Source': 'https://github.com/pypa/sampleproject/',
# },
)
|
the-stack_0_18407 | #!/usr/bin/python3
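# Print every combination of two different single-digit numbers (01 through 89),
# separated by ", ", with the final combination (89) followed by a newline.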
b = 0
c = 0
for b in range(0, 10):
c = b + 1
for c in range(c, 10):
if (b != 8 or c != 9):
print("{:0d}{:0d}".format(b, c), end=", ")
else:
print("{}{}".format(b, c))
|
the-stack_0_18409 | from data_structures import Graph, Stack
input = open("day_12_input.txt", "r")
line = input.readline()
cave_graph = Graph()
cave_count = 0
while line != "":
nodes = [x.strip() for x in line.split("-")]
for node in nodes:
if not cave_graph.has_vertex(node):
cave_graph.add_vertex(node)
cave_count += 1
cave_graph.add_undirected_edge(nodes[0], nodes[1])
line = input.readline()
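# Depth-first search that counts every distinct path from `cave` to the "end" cave.
# Only lowercase (small) cave labels are pushed onto `visited`, so uppercase (big)
# caves may be revisited any number of times while each small cave is used at most
# once per path.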
def traverse_caves(cave_graph, visited, cave):
label = cave_graph.Vertices[cave].get_label()
if label == "end":
return 1
if visited.contains(cave):
return 0
if label.islower():
visited.push(cave)
next_caves = cave_graph.get_neighbors(label)
x = 0
for next_cave in next_caves:
x += traverse_caves(cave_graph, visited, next_cave)
if label.islower():
visited.pop()
return x
visited = Stack()
path_count = traverse_caves(cave_graph, visited, cave_graph.get_index("start"))
print(path_count)
|
the-stack_0_18410 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, getdate
def execute(filters=None):
if not filters: filters = {}
validate_filters(filters)
columns = get_columns()
item_map = get_item_details(filters)
iwb_map = get_item_warehouse_map(filters)
data = []
for (company, item, warehouse) in sorted(iwb_map):
qty_dict = iwb_map[(company, item, warehouse)]
data.append([item, item_map[item]["item_name"],
item_map[item]["item_group"],
item_map[item]["brand"],
item_map[item]["colour"],
item_map[item]["size"],
item_map[item]["description"], warehouse,
item_map[item]["stock_uom"], qty_dict.opening_qty,
qty_dict.opening_val, qty_dict.in_qty,
qty_dict.in_val, qty_dict.out_qty,
qty_dict.out_val, qty_dict.bal_qty,
qty_dict.bal_val, qty_dict.val_rate,
company
])
return columns, data
def get_columns():
"""return columns"""
columns = [
_("Item")+":Link/Item:100",
_("Item Name")+"::150",
_("Item Group")+"::100",
_("Brand")+"::90",
_("Colour")+"::100",
_("Size")+"::100",
_("Description")+"::140",
_("Warehouse")+":Link/Warehouse:100",
_("Stock UOM")+":Link/UOM:90",
_("Opening Qty")+":Float:100",
_("Opening Value")+":Float:110",
_("In Qty")+":Float:80",
_("In Value")+":Float:80",
_("Out Qty")+":Float:80",
_("Out Value")+":Float:80",
_("Balance Qty")+":Float:100",
_("Balance Value")+":Float:100",
_("Valuation Rate")+":Float:90",
_("Company")+":Link/Company:100"
]
return columns
def get_conditions(filters):
conditions = ""
if not filters.get("from_date"):
frappe.throw(_("'From Date' is required"))
if filters.get("to_date"):
conditions += " and posting_date <= '%s'" % frappe.db.escape(filters["to_date"])
else:
frappe.throw(_("'To Date' is required"))
if filters.get("item_code"):
conditions += " and item_code = '%s'" % frappe.db.escape(filters.get("item_code"), percent=False)
if filters.get("warehouse"):
warehouse_details = frappe.db.get_value("Warehouse", filters.get("warehouse"), ["lft", "rgt"], as_dict=1)
if warehouse_details:
conditions += " and exists (select name from `tabWarehouse` wh \
where wh.lft >= %s and wh.rgt <= %s and sle.warehouse = wh.name)"%(warehouse_details.lft,
warehouse_details.rgt)
return conditions
def get_stock_ledger_entries(filters):
conditions = get_conditions(filters)
return frappe.db.sql("""select item_code, warehouse, posting_date, actual_qty, valuation_rate,
company, voucher_type, qty_after_transaction, stock_value_difference
from `tabStock Ledger Entry` sle force index (posting_sort_index)
where docstatus < 2 %s order by posting_date, posting_time, name""" %
conditions, as_dict=1)
def get_item_warehouse_map(filters):
iwb_map = {}
from_date = getdate(filters["from_date"])
to_date = getdate(filters["to_date"])
sle = get_stock_ledger_entries(filters)
for d in sle:
key = (d.company, d.item_code, d.warehouse)
if key not in iwb_map:
iwb_map[key] = frappe._dict({
"opening_qty": 0.0, "opening_val": 0.0,
"in_qty": 0.0, "in_val": 0.0,
"out_qty": 0.0, "out_val": 0.0,
"bal_qty": 0.0, "bal_val": 0.0,
"val_rate": 0.0, "uom": None
})
qty_dict = iwb_map[(d.company, d.item_code, d.warehouse)]
if d.voucher_type == "Stock Reconciliation":
qty_diff = flt(d.qty_after_transaction) - qty_dict.bal_qty
else:
qty_diff = flt(d.actual_qty)
value_diff = flt(d.stock_value_difference)
if d.posting_date < from_date:
qty_dict.opening_qty += qty_diff
qty_dict.opening_val += value_diff
elif d.posting_date >= from_date and d.posting_date <= to_date:
if qty_diff > 0:
qty_dict.in_qty += qty_diff
qty_dict.in_val += value_diff
else:
qty_dict.out_qty += abs(qty_diff)
qty_dict.out_val += abs(value_diff)
qty_dict.val_rate = d.valuation_rate
qty_dict.bal_qty += qty_diff
qty_dict.bal_val += value_diff
return iwb_map
def get_item_details(filters):
condition = ''
value = ()
if filters.get("item_code"):
condition = "where item_code=%s"
value = (filters["item_code"],)
items = frappe.db.sql("""select name, item_name, stock_uom, item_group, brand, colour, size, description
from tabItem {condition}""".format(condition=condition), value, as_dict=1)
return dict((d.name, d) for d in items)
def validate_filters(filters):
if not (filters.get("item_code") or filters.get("warehouse")):
sle_count = flt(frappe.db.sql("""select count(name) from `tabStock Ledger Entry`""")[0][0])
if sle_count > 500000:
frappe.throw(_("Please set filter based on Item or Warehouse"))
|
the-stack_0_18411 | import pygame
from pygame.sprite import Sprite
class Alien(Sprite):
"""表示单个外星人的类"""
def __init__(self, ai_settings, screen):
"""初始化外星人并设置其起始位置"""
super(Alien, self).__init__()
self.screen = screen
self.ai_settings = ai_settings
        # Load the alien image and set its rect attribute.
self.image = pygame.image.load('images/alien.bmp')
self.rect = self.image.get_rect()
        # Start each new alien near the top-left corner of the screen.
self.rect.x = self.rect.w
self.rect.y = self.rect.height
        # Store the alien's exact position.
self.x = float(self.rect.x)
def blitme(self):
"""在指定位置绘制外星人"""
self.screen.blit(self.image, self.rect)
def update(self):
"""向右移动外星人"""
self.x += (self.ai_settings.alien_speed_factor * self.ai_settings.fleet_direction)
self.rect.x = self.x
def check_edges(self):
"""如果外星人位于屏幕边缘,就返回True"""
screen_rect = self.screen.get_rect()
if self.rect.right >= screen_rect.right:
return True
elif self.rect.left <= 0:
            return True
|
the-stack_0_18413 | import math, re
import pymel.core as pm
import maya.cmds as cmds
import maya.OpenMaya as OpenMaya
import maya.OpenMayaAnim as OpenMayaAnim
from zMayaTools.menus import Menu
from zMayaTools import maya_logging, maya_helpers
log = maya_logging.get_log()
def scale(x, l1, h1, l2, h2):
return (x - l1) * (h2 - l2) / (h1 - l1) + l2
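# Linearly remaps x from the range [l1, h1] onto the range [l2, h2];
# e.g. scale(0.0, -1.0, 1.0, 0.0, 1.0) == 0.5.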
def _to_vtx_list(p):
return [(x, y, z) for x, y, z in zip(p[0::3], p[1::3], p[2::3])]
def split_blend_shape(base_mesh, target_mesh, right_side=True, fade_distance=2, axis=0, axis_origin=0):
# Read the positions in world space. Although the shapes should be in the same position,
# we want world space units so the distance factor makes sense.
#
# We do this with cmds instead of pm, since it's faster for dealing with lots of vertex
# data.
target_pos = _to_vtx_list(cmds.xform('%s.vtx[*]' % target_mesh, q=True, t=True, ws=True))
base_pos = _to_vtx_list(cmds.xform('%s.vtx[*]' % base_mesh, q=True, t=True, ws=True))
if len(target_pos) != len(base_pos):
        OpenMaya.MGlobal.displayError('Target has %i vertices, but base has %i vertices.' % (len(target_pos), len(base_pos)))
return
result_pos = []
new_target_pos = []
for idx in xrange(len(target_pos)):
dist = target_pos[idx][axis]
dist -= axis_origin
if fade_distance == 0:
p = 0 if dist < 0 else 1
else:
p = scale(dist, -fade_distance/2.0, fade_distance/2.0, 0, 1.0)
# If we're fading in the left side instead of the right, flip the value.
if not right_side:
p = 1-p
p = min(max(p, 0), 1)
# Clean up the percentage. It's easy to end up with lots of values like 0.000001, and clamping
# them to zero or one can give a smaller file.
if p < 0.001: p = 0
if p > .999: p = 1
delta = [target_pos[idx][i] - base_pos[idx][i] for i in range(3)]
new_target_pos.append([base_pos[idx][i] + delta[i]*p for i in range(3)])
def distance_squared(a, b):
p0 = math.pow(a[0]-b[0], 2)
p1 = math.pow(a[1]-b[1], 2)
p2 = math.pow(a[2]-b[2], 2)
        return p0 + p1 + p2
for idx in xrange(len(new_target_pos)):
old = target_pos[idx]
new = new_target_pos[idx]
if distance_squared(old, new) < 0.0001:
continue
cmds.xform('%s.vtx[%i]' % (target_mesh, idx), t=new_target_pos[idx], ws=True)
def get_connected_input_geometry(blend_shape):
"""
Return an array of blend_shape's input plugs that have an input connection.
pm.listConnections should do this, but it has bugs when the input array is sparse.
"""
results = []
blend_shape_plug = _get_plug_from_node('%s.input' % blend_shape)
num_input_elements = blend_shape_plug.evaluateNumElements()
for idx in xrange(num_input_elements):
input = blend_shape_plug.elementByPhysicalIndex(idx)
input_geometry_attr = OpenMaya.MFnDependencyNode(input.node()).attribute('inputGeometry')
input_geometry_plug = input.child(input_geometry_attr)
conns = OpenMaya.MPlugArray()
input_geometry_plug.connectedTo(conns, True, False);
if conns.length():
results.append(input_geometry_plug.info())
return results
def _find_output_mesh(plug):
# pm.listHistory will traverse the graph to find an output mesh, but it only works
# on nodes, not plugs. Go from the plug to the next node. If there's more than one
# output connection, we won't know which one to follow.
connections = pm.listConnections(plug, s=False, d=True) or []
if len(connections) != 1:
raise RuntimeError('Expected one connection out of %s, got: %s' % (plug, connections))
for node in pm.listHistory(connections[0], f=True):
if node.nodeType() != 'mesh':
continue
return node
else:
        OpenMaya.MGlobal.displayError('Couldn\'t find a mesh in the future of %s.' % plug)
def _get_plug_from_node(node):
selection_list = OpenMaya.MSelectionList()
selection_list.add(node)
plug = OpenMaya.MPlug()
selection_list.getPlug(0, plug)
return plug
def _copy_mesh_from_plug(path):
plug = _get_plug_from_node(path)
mesh = OpenMaya.MFnMesh().copy(plug.asMObject())
return pm.ls(OpenMaya.MFnTransform(mesh).partialPathName())[0]
def get_weight_from_alias(blend_shape, alias):
"""
Given a blend shape node and an aliased weight attribute, return the index in .weight to the
alias.
"""
# aliasAttr lets us get the alias from an attribute, but it doesn't let us get the attribute
# from the alias.
existing_indexes = blend_shape.attr('weight').get(mi=True) or []
for idx in existing_indexes:
aliasName = pm.aliasAttr(blend_shape.attr('weight').elementByLogicalIndex(idx), q=True)
if aliasName == alias:
return idx
raise Exception('Couldn\'t find the weight index for blend shape target %s.%s' % (blend_shape, alias))
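# Illustrative call (hypothetical target name): get_weight_from_alias(blend_shape, 'smile')
# returns the logical index i for which blend_shape.weight[i] is aliased to 'smile'.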
def split_all_blend_shape_targets(blend_shape, *args, **kwargs):
blend_targets = pm.listAttr(blend_shape.attr('w'), m=True) or []
for blend_target in blend_targets:
split_blend_shape_from_deformer(blend_shape, blend_target, *args, **kwargs)
def substitute_name(pattern, name, left_side):
"""
Replace substitutions in a name pattern.
<name> will be replaced with the value of name.
Patterns containing a pipe, eg. <ABCD|EFGH>, will be replaced with "ABCD"
if left_side is true or "EFGH" if left_side is false.
"""
def sub(s):
text = s.group(1)
if text == 'name':
return name
if '|' in text:
parts = text.split('|', 2)
if left_side or len(parts) == 1:
return parts[0]
else:
return parts[1]
return s.group(0)
return re.sub(r'<([^>]*)>', sub, pattern)
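# e.g. substitute_name('<name>_<L|R>', 'Smile', left_side=True) returns 'Smile_L',
# and the same call with left_side=False returns 'Smile_R'.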
def split_blend_shape_from_deformer(blend_shape, blendTarget,
outputBlendShapeLeft=None, outputBlendShapeRight=None,
naming_pattern='<Name>',
split_args={}):
"""
outputBlendShapeLeft, outputBlendShapeRight: If not None, the blend_shape deformers
to put the resulting blend shapes. If None, the blend shapes are added to the same
deformer as their source.
If we're adding the new shapes to separate deformers, we'll always add it at the same
target index as the source. This makes it easier to keep track of which target is
which. If there's already a blend shape on that index, we'll try to overwrite it.
Currently this will fail if there's a mesh input for that target, but we normally
delete the target meshes to use a delta target instead.
"""
# XXX: This still doesn't undo correctly. I'm not sure why.
pm.undoInfo(openChunk=True)
try:
if outputBlendShapeLeft is None:
# Get the next free blend shape target indexes, for the new blend shapes we'll create.
existing_indexes = pm.getAttr(blend_shape.attr('weight'), mi=True) or [-1]
output_blend_shape_indexes = {
'L': max(existing_indexes) + 1,
'R': max(existing_indexes) + 2,
}
else:
# If we're adding the blend shapes to separate blendShape deformers rather than the
# same deformer as the source, we'll always use the same index as the source, so that
# srcBlendShape.w[1] for the full blend shape corresponds to leftBlendShape.w[1] for the
# left side blend shape.
weightIndex = get_weight_from_alias(blend_shape, blendTarget)
output_blend_shape_indexes = {
'L': weightIndex,
'R': weightIndex,
}
# Save all weights.
original_weights = {attr.index(): attr.get() for attr in blend_shape.attr('weight')}
# Disconnect all incoming connections into the weights, so we can manipulate them. We'll
# reconnect them when we're done.
existing_connections = pm.listConnections(blend_shape.attr('weight'), s=True, d=False, p=True, c=True) or []
for dst, src in existing_connections:
src.disconnect(dst)
try:
# Reset all weights to 0.
for idx in original_weights.keys():
try:
# Don't try to set weights that are already 0, so we don't print warnings for connected blend
# shape weights that we don't actually need to change.
if blend_shape.attr('weight').elementByLogicalIndex(idx).get() == 0:
continue
blend_shape.attr('weight').elementByLogicalIndex(idx).set(0)
except RuntimeError as e:
log.error('Couldn\'t disable blend shape target: %s' % e)
# Turn on the blend shape that we're splitting.
blend_shape.attr(blendTarget).set(1)
# Get a list of the inputGeometry plugs on the blend shape that are connected.
connected_input_geometry = get_connected_input_geometry(blend_shape)
# Split each mesh.
for input_geom in connected_input_geometry:
# Figure out the outputGeometry for this inputGeometry. Maya knows this
# via passThroughToMany, but I don't know how to access that information here.
# Search and replace input[*].inputGeometry -> outputGeometry[*].
output_geom = input_geom.replace('.inputGeometry', '')
output_geom = output_geom.replace('.input', '.outputGeometry')
# Make a separate copy of the blended mesh for the left and right sides, and a copy of the input
# into the blend shape. We do this directly from the blend shape's plugs, so we're not affected
# by other deformers.
new_mesh_base = _copy_mesh_from_plug(output_geom)
for side in ('L', 'R'):
new_mesh = _copy_mesh_from_plug(input_geom)
# Rename the blended nodes, since the name of this node will become the name of the
# blend shape target.
new_mesh_name = substitute_name(naming_pattern, blendTarget, side == 'L')
new_mesh.rename(new_mesh_name)
# Fade the left and right shapes to their respective sides.
split_blend_shape(new_mesh_base, new_mesh, right_side=side == 'R', **split_args)
# Find the mesh that output_geom is connected to.
output_mesh = _find_output_mesh(output_geom)
# Create the two blend shapes (or add them to the existing blend shape if there
# are multiple meshes attached to the deformer).
if outputBlendShapeLeft:
outputShape = outputBlendShapeLeft if side == 'L' else outputBlendShapeRight
else:
outputShape = blend_shape
pm.blendShape(outputShape, edit=True, t=(output_mesh, output_blend_shape_indexes[side], new_mesh, 1))
# Delete the mesh. It'll be stored in the blendShape.
pm.delete(new_mesh)
pm.delete(new_mesh_base)
finally:
# Reset blend shape weights that we disabled.
for idx, weight in original_weights.items():
try:
attr = blend_shape.attr('weight').elementByLogicalIndex(idx)
if attr.get() == weight:
continue
attr.set(weight)
except RuntimeError as e:
log.error('Couldn\'t disable blend shape target: %s' % e)
# Reconnect any incoming connections to the weights that we disconnected above.
for dst, src in existing_connections:
src.connect(dst)
finally:
pm.undoInfo(closeChunk=True)
class UI(maya_helpers.OptionsBox):
title = 'Split Blend Shape'
def options_box_setup(self):
self.optvars.add('zSplitBlendShapesBlendDistance', 'float', 2)
self.optvars.add('zSplitBlendShapesPlane', 'int', 2)
self.optvars.add('zSplitBlendShapesPlaneOrigin', 'float', 0)
self.optvars.add('zSplitBlendShapesNamingPattern', 'string', '<name>_<L|R>')
parent = pm.columnLayout(adjustableColumn=1)
pm.optionMenuGrp('sbsList', label='Blend shape:', cc=self.fill_blend_target)
self.fill_blend_shapes('sbsList|OptionMenu', False)
pm.optionMenuGrp('sbsLeftOutput', label='Left output:')
self.fill_blend_shapes('sbsLeftOutput|OptionMenu', True)
pm.optionMenuGrp('sbsRightOutput', label='Right output:')
self.fill_blend_shapes('sbsRightOutput|OptionMenu', True)
# If something is selected, try to find a blend shape to select by default.
selection = pm.ls(sl=True)
if selection:
history = pm.listHistory(selection)
blend_shapes = pm.ls(history, type='blendShape')
if blend_shapes:
default_blend_shape = blend_shapes[0]
self.select_blend_shape(default_blend_shape)
pm.optionMenuGrp('sbsTargetList', label='Blend target:')
self.fill_blend_target()
pm.floatSliderGrp('sbsBlendDistance', label='Blend distance', field=True, min=0, max=10, fieldMinValue=0, fieldMaxValue=1000)
pm.radioButtonGrp('sbsPlane', label='Plane:', numberOfRadioButtons=3, labelArray3=('XY', 'YZ', 'XZ'))
pm.floatSliderGrp('sbsPlaneOrigin', label='Plane origin', v=0, min=0, max=1000)
pm.textFieldGrp('sbsNamingPattern', label='Naming pattern')
def fill_blend_target(self, unused=True):
# Clear the existing target list.
for item in pm.optionMenu('sbsTargetList|OptionMenu', q=True, itemListLong=True):
pm.deleteUI(item)
# Prevent a warning from being printed if there aren't any blendShapes.
if pm.optionMenuGrp('sbsList', q=True, ni=True) == 0:
return
# Get the names of the targets in the selected blend shape.
value = pm.optionMenuGrp('sbsList', q=True, v=True)
if not value:
return
nodes = pm.ls(value)
if not nodes:
return
node = nodes[0]
pm.menuItem(label='All', parent='sbsTargetList|OptionMenu')
for item in node.attr('w'):
target_name = pm.aliasAttr(item, q=True)
pm.menuItem(label=target_name, parent='sbsTargetList|OptionMenu')
def select_blend_shape(self, blend_shape):
menu_items = pm.optionMenu('sbsList|OptionMenu', q=True, itemListLong=True)
for idx, menu_item in enumerate(menu_items):
item = pm.menuItem(menu_item, q=True, label=True)
nodes = pm.ls(item)
if not nodes:
continue
node = nodes[0]
if node != blend_shape:
continue;
pm.optionMenuGrp('sbsList', edit=True, select=idx + 1)
def fill_blend_shapes(self, target, includeSame):
for item in pm.optionMenu(target, q=True, itemListLong=True):
pm.deleteUI(item)
if includeSame:
pm.menuItem(parent=target, label='Same deformer as source')
for item in pm.ls(type='blendShape'):
pm.menuItem(parent=target, label=item)
def option_box_apply(self):
kwargs = { }
blend_shape = pm.optionMenuGrp('sbsList', q=True, v=True)
blend_shape = pm.ls(blend_shape)[0]
leftOutput = None
rightOutput = None
if pm.optionMenuGrp('sbsLeftOutput', q=True, sl=True) != 1: # "Same deformer as source"
leftOutput = pm.optionMenuGrp('sbsLeftOutput', q=True, v=True)
leftOutput = pm.ls(leftOutput)[0]
if pm.optionMenuGrp('sbsRightOutput', q=True, sl=True) != 1: # "Same deformer as source"
rightOutput = pm.optionMenuGrp('sbsRightOutput', q=True, v=True)
rightOutput = pm.ls(rightOutput)[0]
blendShapeTarget = ''
if pm.optionMenuGrp('sbsTargetList', q=True, sl=True) != 1: # "All"
blendShapeTarget = pm.optionMenuGrp('sbsTargetList', q=True, v=True)
distance = pm.floatSliderGrp('sbsBlendDistance', q=True, v=True)
origin = pm.floatSliderGrp('sbsPlaneOrigin', q=True, v=True)
plane = pm.radioButtonGrp('sbsPlane', q=True, sl=True)
kwargs['naming_pattern'] = pm.textFieldGrp('sbsNamingPattern', q=True, text=True)
        # Map the selected split plane to the axis perpendicular to it:
        # XY (1) -> Z (2), YZ (2) -> X (0), XZ (3) -> Y (1).
        plane_to_axis = {
            1: 2,
            2: 0,
            3: 1,
        }
axis = plane_to_axis[plane]
if blendShapeTarget != "":
func = split_blend_shape_from_deformer
kwargs['blendTarget'] = blendShapeTarget
else:
func = split_all_blend_shape_targets
kwargs['blend_shape'] = blend_shape
if leftOutput:
kwargs['outputBlendShapeLeft'] = leftOutput
if rightOutput:
kwargs['outputBlendShapeRight'] = rightOutput
split_args = {}
kwargs['split_args'] = split_args
split_args['fade_distance'] = distance
split_args['axis'] = axis
split_args['axis_origin'] = origin
func(**kwargs)
def option_box_load(self):
pm.floatSliderGrp('sbsBlendDistance', edit=True, v=self.optvars['zSplitBlendShapesBlendDistance'])
pm.radioButtonGrp('sbsPlane', edit=True, select=self.optvars['zSplitBlendShapesPlane'])
pm.floatSliderGrp('sbsPlaneOrigin', edit=True, v=self.optvars['zSplitBlendShapesPlaneOrigin'])
pm.textFieldGrp('sbsNamingPattern', edit=True, text=self.optvars['zSplitBlendShapesNamingPattern'])
def option_box_save(self):
self.optvars['zSplitBlendShapesBlendDistance'] = pm.floatSliderGrp('sbsBlendDistance', q=True, v=True)
self.optvars['zSplitBlendShapesPlane'] = pm.radioButtonGrp('sbsPlane', q=True, select=True)
self.optvars['zSplitBlendShapesPlaneOrigin'] = pm.floatSliderGrp('sbsPlaneOrigin', q=True, v=True)
self.optvars['zSplitBlendShapesNamingPattern'] = pm.textFieldGrp('sbsNamingPattern', q=True, text=True)
def run():
ui = UI()
ui.run()
|
the-stack_0_18416 | import os
from zmq.eventloop import ioloop
from distutils.util import strtobool as _bool
from test_util import remove_peers_from_db
BEHAVE_DEBUG_ON_ERROR = _bool(os.environ.get("BEHAVE_DEBUG_ON_ERROR", "no"))
def after_step(context, step):
if BEHAVE_DEBUG_ON_ERROR and step.status == "failed":
# -- ENTER DEBUGGER: Zoom in on failure location.
# NOTE: Use IPython debugger, same for pdb (basic python debugger).
import pdb
pdb.post_mortem(step.exc_traceback)
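# Post-mortem debugging is opt-in: export BEHAVE_DEBUG_ON_ERROR=yes (or any other value
# accepted by distutils' strtobool) before running behave to drop into pdb on a failed step.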
def before_all(context):
# -- SET LOG LEVEL: behave --logging-level=ERROR ...
# on behave command-line or in "behave.ini".
context.config.setup_logging()
def before_scenario(context, scenario):
cur = ioloop.IOLoop.current()
ioloop.IOLoop.clear_current()
cur.close(all_fds=True)
newloop = ioloop.IOLoop()
newloop.make_current()
def after_scenario(context, scenario):
if context.feature.name == 'CryptoTransportLayer':
# reset database peers
for layer in context.layers:
layer.db.deleteEntries('peers')
elif context.feature.name == 'Websocket Client Interface':
# reset database peers
for i in range(len(context.app)):
remove_peers_from_db(i)
|
the-stack_0_18420 | #!/usr/bin/env python
import os
import sys
import pprint
from jira import JIRA, JIRAError
awsRemediationDescriptionMapping = {
}
class awssRemediationJira(object):
def __init__(
self,
server="https://flatironhealth.atlassian.net",
board="SEGTEST", # TODO Change this to a board that is monitored
access_token=None,
access_token_secret=None,
consumer_key=None,
key_cert=None,
api_token=None,
):
options = {"server": server}
self.jira = None
self.board = board
oauth_dict = {}
if access_token and access_token_secret and consumer_key and key_cert:
key_cert_data = (
open(os.path.expanduser(key_cert), "r").read()
if os.path.isfile(os.path.expanduser(key_cert))
else key_cert.replace("\\n", "\n")
)
oauth_dict = {
"access_token": access_token,
"access_token_secret": access_token_secret,
"consumer_key": consumer_key,
"key_cert": key_cert_data,
}
self.jira = JIRA(options, oauth=oauth_dict)
elif api_token:
creds = (
self._get_api_token(api_token)
if os.path.isfile(api_token)
else api_token.split(":")
)
self.jira = JIRA(options, basic_auth=(tuple(creds)))
if not getattr(self, "jira"):
raise JIRAError("JIRA unable to be initialized")
@staticmethod
def _get_api_token(path):
with open(path) as f:
creds = f.readlines()[0].strip().split(":")
return creds
# def view_ticket(self, id):
# dat = self.get_ticket(id)
# def get_ticket(self, id):
# if id.startswith(self.board):
# id = id.replace("{}-".format(self.board), "")
# return self.jira.issue("{}-{}".format(self.board, id))
def create_issue(self, title, metadata, epic=None, board=None):
if not board:
board = self.board
issue_dict = {
"project": board,
"summary": title,
"description": self._create_description(metadata),
"issuetype": {"name": "Bug"},
}
issue = self.jira.create_issue(fields=issue_dict)
issue.update(labels=["AWS Misconfiguration", "Remediation Framework"])
if epic:
issue.update(fields={"customfield_10007": epic})
return issue
@staticmethod
def _create_description(metadata):
description = ""
return description
# def update_issue(self, jiraTicket, status):
# if jiraTicket and self.jira:
# issue_obj = self.jira.issue(jiraTicket)
# to_do = self.jira.find_transitionid_by_name(issue_obj, "To Do")
# done = self.jira.find_transitionid_by_name(issue_obj, "Done")
# _status_map = {
# "Open": {"transition_id": to_do},
# "Resolved": {"transition_id": done,},
# "FalsePositive": {"transition_id": done,},
# "Duplicate": {"transition_id": done,},
# }
# if status not in _status_map.keys():
# ret["msg"] = "Invalid status"
# return ret
# mapped_status = _status_map.get(status)
# self.jira.transition_issue(issue_obj, mapped_status["transition_id"])
# return {
# "status": "success",
# "msg": "%s has been set to %s" % (jiraTicket, mapped_status),
    #     }
|
the-stack_0_18422 | # Copyright 2018 Samuel Payne [email protected]
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
import os
import re
import math
from .fileLoader import FileLoader
class DataFrameLoader:
def __init__(self, fileName):
self.fileName = fileName
def createDataFrame(self):
"""
Parameters
None
Returns
        DataFrame parsed from the data file; the parsing depends on the file type
"""
if bool(re.search(r'\.txt[.|(a-z)]{,7}$', self.fileName)):
#temp fix for reading error on clinical_v2:
file = open(self.fileName, "r", errors="ignore")
df = pd.read_csv(file, sep="\t", index_col=0)
df = df.sort_index()
f = self.fileName.split(os.sep)
f = f[len(f) - 1]
df.name = f.split(".")[0]
return df
elif bool(re.search(r'\.(cct|cbt)[.|(a-z)]{,7}$', self.fileName)):
df = pd.read_csv(self.fileName, sep="\t", index_col=0)
df = df.transpose()
df = df.sort_index()
f = self.fileName.split(os.sep)
f = f[len(f) - 1]
df.name = f.split(".")[0]
return df
elif bool(re.search(r'\.maf[.|(a-z)]{,7}$', self.fileName)):
df = pd.read_csv(self.fileName, sep = "\t")
if "Tumor_Sample_Barcode" in df.columns:
split_barcode = df["Tumor_Sample_Barcode"].str.split("_", n = 1, expand = True)
df["Tumor_Sample_Barcode"] = split_barcode[0]
parsedDf = df[["Tumor_Sample_Barcode","Hugo_Symbol","Variant_Classification","HGVSp_Short"]]
parsedDf = parsedDf.rename({"Tumor_Sample_Barcode":"Patient_Id","Hugo_Symbol":"Gene","Variant_Classification":"Mutation","HGVSp_Short":"Location"}, axis='columns')
f = self.fileName.split(os.sep)
f = f[len(f) - 1]
parsedDf.name = f.split(".")[0] + " MAF"
return parsedDf
else:
print("Error reading", self.fileName)
|
the-stack_0_18424 | import sqlalchemy as db
from sqlalchemy.orm import Session
class DB_alchemy:
db_engine = None
db_con = None
db_session = None
metadata_obj = None
def __init__(self, host: str, port: str, dbName: str, userName: str, pwd: str) -> None:
# This will connect with a static DB
#DB_URL = f'postgresql://{userName}:{pwd}@{host}:{port}/{dbName}'
# This will connect with a postgres container DB (for containers,
# which are connected to the same network)
DB_URL = f'postgresql+psycopg2://{userName}:{pwd}@postgres:{port}/{dbName}'
self.host = host
self.port = port
self.dbName = dbName
self.userName = userName
self.pwd = pwd
self.db_engine = db.create_engine(DB_URL, echo=True)
self.db_con = self.db_engine.connect()
self.db_session = Session(self.db_engine) # , future=True)
def create_schema(self, schema_name):
        # Create the schema if it does not already exist.
        if not self.db_engine.dialect.has_schema(self.db_engine, schema_name):
            self.db_engine.execute(db.schema.CreateSchema(schema_name))
def create_metadata_obj(self, schema_name):
self.metadata_obj = db.MetaData(schema=schema_name)
def create_items_table(self):
return db.Table(
'item', self.metadata_obj,
db.Column('item_id', db.Integer, primary_key=True),
db.Column('item_name', db.String(50), nullable=False),
db.Column('description', db.String(250), nullable=True),
db.Column('price', db.Float(2), nullable=False),
db.Column('tax', db.Float(2), nullable=True),
db.Column('tax_with_price', db.Float(2))
)
def create_all(self):
self.metadata_obj.create_all(self.db_engine, checkfirst=True)
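# Minimal usage sketch (hypothetical credentials; DB_URL above hard-codes the host name
# "postgres", so a reachable Postgres service with that name is assumed):
#   db_obj = DB_alchemy("postgres", "5432", "mydb", "user", "pwd")
#   db_obj.create_metadata_obj("public")
#   item_table = db_obj.create_items_table()
#   db_obj.create_all()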
|
the-stack_0_18425 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# AZURE CLI RBAC TEST DEFINITIONS
import json
import os
import tempfile
import time
import datetime
import mock
import unittest
from knack.util import CLIError
from azure_devtools.scenario_tests import AllowLargeResponse, record_only
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.testsdk import ScenarioTest, LiveScenarioTest, ResourceGroupPreparer, KeyVaultPreparer
class RoleScenarioTest(ScenarioTest):
def run_under_service_principal(self):
account_info = self.cmd('account show').get_output_in_json()
return account_info['user']['type'] == 'servicePrincipal'
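    # The assignment tests below use this to skip scenarios (such as creating and deleting
    # AAD users) that cannot run while the CLI is logged in as a service principal.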
class RbacSPSecretScenarioTest(RoleScenarioTest):
def test_create_for_rbac_with_right_display_name(self):
sp_name = self.create_random_name('cli-test-sp', 15)
self.kwargs['sp'] = 'http://{}'.format(sp_name)
self.kwargs['display_name'] = sp_name
try:
self.cmd('ad sp create-for-rbac -n {display_name} --skip-assignment', checks=self.check('name', '{sp}'))
finally:
self.cmd('ad app delete --id {sp}')
        # verify we can extract the display name from a name which starts with a protocol
sp_name2 = self.create_random_name('cli-test-sp', 15)
self.kwargs['sp2'] = 'http://{}'.format(sp_name2)
self.kwargs['display_name2'] = sp_name2
try:
self.cmd('ad sp create-for-rbac -n {sp2} --skip-assignment', checks=self.check('displayName', '{display_name2}'))
finally:
self.cmd('ad app delete --id {sp2}')
@AllowLargeResponse()
@ResourceGroupPreparer(name_prefix='cli_create_rbac_sp_with_password')
def test_create_for_rbac_with_secret_with_assignment(self, resource_group):
subscription_id = self.get_subscription_id()
self.kwargs.update({
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'sp': 'http://{}'.format(resource_group),
'display_name': resource_group
})
try:
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
self.cmd('ad sp create-for-rbac -n {display_name} --scopes {scope} {scope}/resourceGroups/{rg}',
checks=self.check('name', '{sp}'))
self.cmd('role assignment list --assignee {sp} --scope {scope}',
checks=self.check("length([])", 1))
self.cmd('role assignment list --assignee {sp} -g {rg}',
checks=self.check("length([])", 1))
self.cmd('role assignment delete --assignee {sp} -g {rg}',
checks=self.is_empty())
self.cmd('role assignment delete --assignee {sp}',
checks=self.is_empty())
finally:
self.cmd('ad app delete --id {sp}')
class RbacSPCertScenarioTest(RoleScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_create_rbac_sp_with_cert')
def test_create_for_rbac_with_cert_with_assignment(self, resource_group):
subscription_id = self.get_subscription_id()
self.kwargs.update({
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'sp': 'http://' + resource_group,
'display_name': resource_group
})
try:
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
result = self.cmd('ad sp create-for-rbac -n {display_name} --scopes {scope} {scope}/resourceGroups/{rg} --create-cert',
checks=self.check('name', '{sp}')).get_output_in_json()
self.assertTrue(result['fileWithCertAndPrivateKey'].endswith('.pem'))
os.remove(result['fileWithCertAndPrivateKey'])
result = self.cmd('ad sp credential reset -n {sp} --create-cert',
checks=self.check('name', '{sp}')).get_output_in_json()
self.assertTrue(result['fileWithCertAndPrivateKey'].endswith('.pem'))
os.remove(result['fileWithCertAndPrivateKey'])
finally:
self.cmd('ad app delete --id {sp}',
checks=self.is_empty())
class RbacSPKeyVaultScenarioTest2(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sp_with_kv_new_cert')
@KeyVaultPreparer()
def test_create_for_rbac_with_new_kv_cert(self, resource_group, key_vault):
KeyVaultErrorException = get_sdk(self.cli_ctx, ResourceType.DATA_KEYVAULT, 'models.key_vault_error#KeyVaultErrorException')
subscription_id = self.get_subscription_id()
self.kwargs.update({
'sp': 'http://{}'.format(resource_group),
'display_name': resource_group,
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'cert': 'cert1',
'kv': key_vault
})
time.sleep(5) # to avoid 504(too many requests) on a newly created vault
try:
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
try:
self.cmd('ad sp create-for-rbac --scopes {scope}/resourceGroups/{rg} --create-cert --keyvault {kv} --cert {cert} -n {display_name}')
except KeyVaultErrorException:
if not self.is_live and not self.in_recording:
pass # temporary workaround for keyvault challenge handling was ignored under playback
else:
raise
cer1 = self.cmd('keyvault certificate show --vault-name {kv} -n {cert}').get_output_in_json()['cer']
self.cmd('ad sp credential reset -n {sp} --create-cert --keyvault {kv} --cert {cert}')
cer2 = self.cmd('keyvault certificate show --vault-name {kv} -n {cert}').get_output_in_json()['cer']
self.assertTrue(cer1 != cer2)
finally:
self.cmd('ad app delete --id {sp}')
class RbacSPKeyVaultScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_sp_with_kv_existing_cert')
@KeyVaultPreparer()
def test_create_for_rbac_with_existing_kv_cert(self, resource_group, key_vault):
import time
subscription_id = self.get_subscription_id()
self.kwargs.update({
'sp': 'http://{}'.format(resource_group),
'sp2': 'http://{}2'.format(resource_group),
'display_name': resource_group,
'display_name2': resource_group + '2',
'sub': subscription_id,
'scope': '/subscriptions/{}'.format(subscription_id),
'cert': 'cert1',
'kv': key_vault
})
time.sleep(5) # to avoid 504(too many requests) on a newly created vault
# test with valid length cert
try:
self.kwargs['policy'] = self.cmd('keyvault certificate get-default-policy').get_output_in_json()
self.cmd('keyvault certificate create --vault-name {kv} -n {cert} -p "{policy}" --validity 24')
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
self.cmd('ad sp create-for-rbac -n {display_name} --keyvault {kv} --cert {cert} --scopes {scope}/resourceGroups/{rg}')
self.cmd('ad sp credential reset -n {sp} --keyvault {kv} --cert {cert}')
finally:
self.cmd('ad app delete --id {sp}')
# test with cert that has too short a validity
try:
self.cmd('keyvault certificate create --vault-name {kv} -n {cert} -p "{policy}" --validity 6')
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
self.cmd('ad sp create-for-rbac --scopes {scope}/resourceGroups/{rg} --keyvault {kv} --cert {cert} -n {display_name2}')
self.cmd('ad sp credential reset -n {sp2} --keyvault {kv} --cert {cert}')
finally:
self.cmd('ad app delete --id {sp2}')
class RoleCreateScenarioTest(RoleScenarioTest):
@record_only() # workaround https://github.com/Azure/azure-cli/issues/3187
@AllowLargeResponse()
def test_role_create_scenario(self):
subscription_id = self.get_subscription_id()
role_name = self.create_random_name('cli-test-role', 20)
template = {
"Name": role_name,
"Description": "Can monitor compute, network and storage, and restart virtual machines",
"Actions": ["Microsoft.Compute/*/read",
"Microsoft.Compute/virtualMachines/start/action",
"Microsoft.Compute/virtualMachines/restart/action",
"Microsoft.Network/*/read",
"Microsoft.Storage/*/read",
"Microsoft.Authorization/*/read",
"Microsoft.Resources/subscriptions/resourceGroups/read",
"Microsoft.Resources/subscriptions/resourceGroups/resources/read",
"Microsoft.Insights/alertRules/*"],
"DataActions": [
"Microsoft.Storage/storageAccounts/blobServices/containers/blobs/*"
],
"NotDataActions": [
"Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write"
],
"AssignableScopes": ["/subscriptions/{}".format(subscription_id)]
}
_, temp_file = tempfile.mkstemp()
with open(temp_file, 'w') as f:
json.dump(template, f)
self.kwargs.update({
'sub': subscription_id,
'role': role_name,
'template': temp_file.replace('\\', '\\\\')
})
# a few 'sleep' here to handle server replicate latency. It is no-op under playback
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
self.cmd('role definition create --role-definition {template}', checks=[
self.check('permissions[0].dataActions[0]', 'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/*'),
self.check('permissions[0].notDataActions[0]', 'Microsoft.Storage/storageAccounts/blobServices/containers/blobs/write'),
])
time.sleep(180)
role = self.cmd('role definition list -n {role}',
checks=self.check('[0].roleName', '{role}')).get_output_in_json()
# verify we can update
role[0]['permissions'][0]['actions'].append('Microsoft.Support/*')
with open(temp_file, 'w') as f:
json.dump(role[0], f)
self.cmd('role definition update --role-definition {template}',
checks=self.check('permissions[0].actions[-1]', 'Microsoft.Support/*'))
time.sleep(30)
self.cmd('role definition delete -n {role}',
checks=self.is_empty())
time.sleep(240)
self.cmd('role definition list -n {role}',
checks=self.is_empty())
class RoleAssignmentScenarioTest(RoleScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_role_assign')
@AllowLargeResponse()
def test_role_assignment_e2e(self, resource_group):
if self.run_under_service_principal():
return # this test delete users which are beyond a SP's capacity, so quit...
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
user = self.create_random_name('testuser', 15)
self.kwargs.update({
'upn': user + '@azuresdkteam.onmicrosoft.com',
'nsg': 'nsg1'
})
result = self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}')
time.sleep(15) # By-design, it takes some time for RBAC system propagated with graph object change
try:
self.cmd('network nsg create -n {nsg} -g {rg}')
result = self.cmd('network nsg show -n {nsg} -g {rg}').get_output_in_json()
self.kwargs['nsg_id'] = result['id']
# test role assignments on a resource group
self.cmd('role assignment create --assignee {upn} --role contributor -g {rg}')
# verify role assignment create is idempotent
self.cmd('role assignment create --assignee {upn} --role contributor -g {rg}',
self.check("principalName", self.kwargs["upn"]))
self.cmd('role assignment list -g {rg}', checks=self.check("length([])", 1))
self.cmd('role assignment list --assignee {upn} --role contributor -g {rg}', checks=[
self.check("length([])", 1),
self.check("[0].principalName", self.kwargs["upn"])
])
# test couple of more general filters
result = self.cmd('role assignment list -g {rg} --include-inherited').get_output_in_json()
self.assertTrue(len(result) >= 1)
result = self.cmd('role assignment list --all').get_output_in_json()
self.assertTrue(len(result) >= 1)
self.cmd('role assignment delete --assignee {upn} --role contributor -g {rg}')
self.cmd('role assignment list -g {rg}',
checks=self.is_empty())
# test role assignments on a resource
self.cmd('role assignment create --assignee {upn} --role contributor --scope {nsg_id}')
self.cmd('role assignment list --assignee {upn} --role contributor --scope {nsg_id}',
checks=self.check("length([])", 1))
self.cmd('role assignment delete --assignee {upn} --role contributor --scope {nsg_id}')
self.cmd('role assignment list --scope {nsg_id}',
checks=self.is_empty())
# test role assignment on subscription level
self.cmd('role assignment create --assignee {upn} --role reader')
self.cmd('role assignment list --assignee {upn} --role reader',
checks=self.check("length([])", 1))
self.cmd('role assignment list --assignee {upn}',
checks=self.check("length([])", 1))
self.cmd('role assignment delete --assignee {upn} --role reader')
# test role assignment on empty scope
with self.assertRaisesRegexp(CLIError, 'Invalid scope. Please use --help to view the valid format.'):
self.cmd('role assignment create --assignee {upn} --scope "" --role reader')
self.cmd('role assignment delete --assignee {upn} --scope "" --role reader')
finally:
self.cmd('ad user delete --upn-or-object-id {upn}')
@ResourceGroupPreparer(name_prefix='cli_role_assign')
@AllowLargeResponse()
def test_role_assignment_create_using_principal_type(self, resource_group):
if self.run_under_service_principal():
return # this test delete users which are beyond a SP's capacity, so quit...
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
user = self.create_random_name('testuser', 15)
self.kwargs.update({
'upn': user + '@azuresdkteam.onmicrosoft.com',
'rg': resource_group
})
result = self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}').get_output_in_json()
self.kwargs['object_id'] = result['objectId']
try:
# test role assignment on subscription level
self.cmd('role assignment create --assignee-object-id {object_id} --assignee-principal-type User --role reader -g {rg}')
self.cmd('role assignment list -g {rg}', checks=self.check("length([])", 1))
self.cmd('role assignment delete -g {rg}')
self.cmd('role assignment list -g {rg}', checks=self.check("length([])", 0))
finally:
self.cmd('ad user delete --upn-or-object-id {upn}')
@ResourceGroupPreparer(name_prefix='cli_role_assign')
@AllowLargeResponse()
def test_role_assignment_handle_conflicted_assignments(self, resource_group):
if self.run_under_service_principal():
return # this test delete users which are beyond a SP's capacity, so quit...
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
user = self.create_random_name('testuser', 15)
self.kwargs.update({
'upn': user + '@azuresdkteam.onmicrosoft.com',
'nsg': 'nsg1'
})
self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}')
time.sleep(15) # By-design, it takes some time for RBAC system propagated with graph object change
base_dir = os.curdir
try:
temp_dir = self.create_temp_dir()
os.chdir(temp_dir)
self.cmd('configure --default group={rg} --scope local')
local_defaults_config = self.cmd('configure --list-defaults --scope local', checks=[
self.check('length([])', 1),
self.check('[0].name', 'group'),
self.check('[0].value', '{rg}')
]).get_output_in_json()
self.assertTrue(temp_dir.lower() in local_defaults_config[0]['source'].lower())
# test role assignments on a resource group
rg_id = self.cmd('group show -n {rg}').get_output_in_json()['id']
self.cmd('role assignment create --assignee {upn} --role reader --scope ' + rg_id)
self.cmd('role assignment list --assignee {upn} --role reader --scope ' + rg_id, checks=self.check('length([])', 1))
self.cmd('role assignment delete --assignee {upn} --role reader --scope ' + rg_id)
self.cmd('role assignment list --assignee {upn} --role reader --scope ' + rg_id, checks=self.check('length([])', 0))
finally:
self.cmd('configure --default group="" --scope local')
os.chdir(os.path.basename(base_dir))
self.cmd('ad user delete --upn-or-object-id {upn}')
@ResourceGroupPreparer(name_prefix='cli_role_assign')
@AllowLargeResponse()
def test_role_assignment_mgmt_grp(self, resource_group):
if self.run_under_service_principal():
return # this test delete users which are beyond a SP's capacity, so quit...
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
user = self.create_random_name('testuser', 15)
mgmt_grp = self.create_random_name('mgmt_grp', 15)
self.kwargs.update({
'upn': user + '@azuresdkteam.onmicrosoft.com',
'mgmt_grp': mgmt_grp
})
self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}')
time.sleep(15) # By-design, it takes some time for RBAC system propagated with graph object change
mgmt_grp_created = False
try:
mgmt_grp_id = self.cmd('account management-group create -n {mgmt_grp}').get_output_in_json()['id']
self.kwargs['scope'] = mgmt_grp_id
mgmt_grp_created = True
                time.sleep(15)  # By design, it takes some time for the RBAC system to propagate graph object changes
# test role assignments on a resource group
self.cmd('role assignment create --assignee {upn} --role reader --scope {scope}',
checks=self.check('scope', self.kwargs['scope']))
self.cmd('role assignment list --assignee {upn} --role reader --scope {scope}', checks=[
self.check('length([])', 1),
self.check('[0].scope', self.kwargs['scope'])
])
self.cmd('role assignment delete --assignee {upn} --role reader --scope {scope}')
self.cmd('role assignment list --assignee {upn} --role reader --scope {scope}',
checks=self.check('length([])', 0))
finally:
if mgmt_grp_created:
self.cmd('account management-group delete -n {mgmt_grp}')
self.cmd('ad user delete --upn-or-object-id {upn}')
@ResourceGroupPreparer(name_prefix='cli_role_audit')
@AllowLargeResponse()
def test_role_assignment_audits(self, resource_group):
if self.run_under_service_principal():
            return  # this test deletes users, which is beyond a SP's capacity, so quit...
with mock.patch('azure.cli.command_modules.role.custom._gen_guid', side_effect=self.create_guid):
user = self.create_random_name('testuser', 15)
self.kwargs.update({
'upn': user + '@azuresdkteam.onmicrosoft.com',
})
self.cmd('ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}')
            time.sleep(15)  # By design, it takes some time for the RBAC system to propagate graph object changes
try:
self.cmd('role assignment create --assignee {upn} --role contributor -g {rg}')
if self.is_live or self.in_recording:
now = datetime.datetime.utcnow()
start_time = '{}-{}-{}T{}:{}:{}Z'.format(now.year, now.month, now.day - 1, now.hour,
now.minute, now.second)
time.sleep(120)
result = self.cmd('role assignment list-changelogs --start-time {}'.format(start_time)).get_output_in_json()
else:
# figure out the right time stamps from the recording file
r = next(r for r in self.cassette.requests if r.method == 'GET' and 'providers/microsoft.insights/eventtypes/management/' in r.uri)
try:
from urllib.parse import parse_qs, urlparse
except ImportError:
from urlparse import urlparse, parse_qs
query_parts = parse_qs(urlparse(r.uri).query)['$filter'][0].split()
start_index, end_index = [i + 2 for (i, j) in enumerate(query_parts) if j == 'eventTimestamp']
start_time, end_time = query_parts[start_index], query_parts[end_index]
result = self.cmd('role assignment list-changelogs --start-time {} --end-time {}'.format(
start_time, end_time)).get_output_in_json()
self.assertTrue([x for x in result if (resource_group in x['scope'] and
x['principalName'] == self.kwargs['upn'])])
finally:
self.cmd('ad user delete --upn-or-object-id {upn}')
class RoleAssignmentListScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='cli_test_assignments_for_coadmins')
@AllowLargeResponse()
def test_assignments_for_co_admins(self, resource_group):
result = self.cmd('role assignment list --include-classic-administrator').get_output_in_json()
self.assertTrue([x for x in result if x['roleDefinitionName'] in ['CoAdministrator', 'AccountAdministrator']])
self.cmd('role assignment list -g {}'.format(resource_group), checks=[
self.check("length([])", 0)
])
result = self.cmd('role assignment list -g {} --include-classic-administrator'.format(resource_group)).get_output_in_json()
self.assertTrue([x for x in result if x['roleDefinitionName'] in ['CoAdministrator', 'AccountAdministrator']])
if __name__ == '__main__':
unittest.main()
|
the-stack_0_18426 | import subprocess
import re
import os.path
import sys
from os import mkdir
from importlib import import_module
sys.path.append("..")
from config import *
def get_proto_files():
"""
Extract .proto files from the services
"""
    proto_files = {p for _, _, p in services}
    return proto_files
def get_proto_libs(proto_files):
"""
Dynamically import the compiled protobuf files
"""
libs = dict()
for pf in proto_files:
        proto_lib = re.sub(r'\.proto$', '_pb2', os.path.basename(pf))
libs[pf] = import_module(f"{proto_out}.{proto_lib}")
return libs
def create_vectors(libs):
"""
Create attack vectors - list of dictionaries containing url, request
and protobuf message. Later each entry will be expanded with grammar.
"""
vectors = list()
for url, request, proto in services:
msg = getattr(libs[proto], request)
entry = dict()
entry['url'] = url
entry['request'] = request
entry['msg'] = msg
vectors.append(entry)
return vectors
def pb_compile(files, dest):
"""
Compile the protobuf files running the external 'protoc' compiler
"""
if not os.path.exists(dest):
mkdir(dest)
for file in files:
args = f'protoc -I={os.path.dirname(file)} --python_out={dest} {file}'
print(f"Running '{args}'")
ret = subprocess.call(args, shell=True)
if ret:
            sys.exit(ret)
__all__ = ["create_vectors", "get_proto_files", "get_proto_libs", "pb_compile"]
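# Usage sketch (editor's addition): ties the helpers above together. It assumes
# config.py provides `services` (an iterable of (url, request, proto) tuples) and
# `proto_out` (the package the compiled stubs are written into), as used above.
if __name__ == "__main__":
    files = get_proto_files()
    pb_compile(files, proto_out)
    libs = get_proto_libs(files)
    for vector in create_vectors(libs):
        print(vector['url'], vector['request'], vector['msg'].DESCRIPTOR.full_name)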
|
the-stack_0_18427 | from django.shortcuts import render_to_response
from account.auth import *
'''
@author: Anant Bhardwaj
@date: Mar 21, 2013
Datahub Viz
'''
@login_required
def index(request):
login = get_login(request)
return render_to_response("viz.html", {
'login': login,
'repo_base': login
}) |
the-stack_0_18428 | #============================================================
#
#
# Copyright (c) 2017 NetApp, Inc. All rights reserved.
# Specifications subject to change without notice.
#
# This sample code is provided AS IS, with no support or
# warranties of any kind, including but not limited to
# warranties of merchantability or fitness of any kind,
# expressed or implied.
#
# Min Python Version = python 2.7
#
#============================================================
#!/usr/bin/python
from ansible.module_utils.basic import *
import requests
import warnings
import sys
import json
import time
warnings.filterwarnings("ignore")
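# Illustrative playbook snippet (editor's addition): the task/module name and the
# connection values below are placeholders, not part of the generated module.
EXAMPLES = '''
- name: Fetch storage VM / aggregate relationships (hypothetical example)
  storage_vm_aggregate_relationships:   # placeholder module name
    action: get
    host: "{{ nslm_host }}"
    port: "8443"
    user: "{{ nslm_user }}"
    password: "{{ nslm_password }}"
'''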
def get():
url_path = "/api/2.0/ontap/"
flag=0
url_path+="storage-vm-aggregate-relationships"
flag=0
if key != None:
        if flag == 0:
url_path+="?key="+key
flag=1
else:
url_path+="&key="+key
if storage_vm_key != None:
        if flag == 0:
url_path+="?storage_vm_key="+storage_vm_key
flag=1
else:
url_path+="&storage_vm_key="+storage_vm_key
if aggregate_key != None:
        if flag == 0:
url_path+="?aggregate_key="+aggregate_key
flag=1
else:
url_path+="&aggregate_key="+aggregate_key
if sortBy != None:
        if flag == 0:
url_path+="?sortBy="+sortBy
flag=1
else:
url_path+="&sortBy="+sortBy
if maxRecords != None:
        if flag == 0:
url_path+="?maxRecords="+maxRecords
flag=1
else:
url_path+="&maxRecords="+maxRecords
if nextTag != None:
        if flag == 0:
url_path+="?nextTag="+nextTag
flag=1
else:
url_path+="&nextTag="+nextTag
response=http_request_for_get(url_path)
json_response=response.json()
return json_response
def post():
url_path = "/api/2.0/ontap/"
url_path+="storage-vm-aggregate-relationships"
payload={}
if (key != None) & (key != key):
payload['key']=key
if (storage_vm_key != None) & (storage_vm_key != key):
payload['storage_vm_key']=storage_vm_key
if (aggregate_key != None) & (aggregate_key != key):
payload['aggregate_key']=aggregate_key
if (sortBy != None) & (sortBy != key):
payload['sortBy']=sortBy
if (maxRecords != None) & (maxRecords != key):
payload['maxRecords']=maxRecords
if (nextTag != None) & (nextTag != key):
payload['nextTag']=nextTag
response=http_request_for_post(url_path,**payload)
json_response=response.headers
return json_response
def put():
url_path = "/api/2.0/ontap/"
url_path+="storage-vm-aggregate-relationships/"
payload={}
if (key != None) & (key != key):
payload['key']=key
if (storage_vm_key != None) & (storage_vm_key != key):
payload['storage_vm_key']=storage_vm_key
if (aggregate_key != None) & (aggregate_key != key):
payload['aggregate_key']=aggregate_key
if (sortBy != None) & (sortBy != key):
payload['sortBy']=sortBy
if (maxRecords != None) & (maxRecords != key):
payload['maxRecords']=maxRecords
if (nextTag != None) & (nextTag != key):
payload['nextTag']=nextTag
if key != None:
url_path+=key
response=http_request_for_put(url_path,**payload)
json_response=response.headers
return json_response
else:
return "Provide the object key"
def delete():
url_path = "/api/2.0/ontap/"
url_path+="storage-vm-aggregate-relationships/"
if key != None:
url_path+=key
response=http_request_for_delete(url_path)
json_response=response.headers
return json_response
else:
return "Provide the object key for deletion"
def http_request_for_get(url_path,**payload):
response = requests.get("https://"+api_host+":"+api_port+url_path, auth=(api_user_name,api_user_password), verify=False, data=json.dumps(payload),headers={'content-type': 'application/json'})
return response
def http_request_for_put(url_path,**payload):
response = requests.put("https://"+api_host+":"+api_port+url_path, auth=(api_user_name,api_user_password), verify=False, data=json.dumps(payload),headers={'content-type': 'application/json'})
return response
def http_request_for_post(url_path,**payload):
response = requests.post("https://"+api_host+":"+api_port+url_path, auth=(api_user_name,api_user_password), verify=False, data=json.dumps(payload),headers={'content-type': 'application/json'})
return response
def http_request_for_delete(url_path,**payload):
response = requests.delete("https://"+api_host+":"+api_port+url_path, auth=(api_user_name,api_user_password), verify=False, data=json.dumps(payload),headers={'content-type': 'application/json'})
return response
def main():
fields = {
"action" : {
"required": True,
"choices": ['get', 'put', 'post', 'delete'],
"type": 'str'
},
"host" : {"required": True, "type": "str"},
"port" : {"required": True, "type": "str"},
"user" : {"required": True, "type": "str"},
"password" : {"required": True, "type": "str"},
"key" : {"required": False, "type": "str"},
"storage_vm_key" : {"required": False, "type": "str"},
"aggregate_key" : {"required": False, "type": "str"},
"sortBy" : {"required": False, "type": "str"},
"maxRecords" : {"required": False, "type": "str"},
"nextTag" : {"required": False, "type": "str"},
}
module = AnsibleModule(argument_spec=fields)
# NetApp Service Level Manager details
global api_host
global api_port
global api_user_name
global api_user_password
global lun_key
global nfs_share_key
global cifs_share_key
api_host = module.params["host"]
api_port = module.params["port"]
api_user_name = module.params["user"]
api_user_password = module.params["password"]
# Properties details
global key
key = module.params["key"]
global storage_vm_key
storage_vm_key = module.params["storage_vm_key"]
global aggregate_key
aggregate_key = module.params["aggregate_key"]
global sortBy
sortBy = module.params["sortBy"]
global maxRecords
maxRecords = module.params["maxRecords"]
global nextTag
nextTag = module.params["nextTag"]
global json_response
# Actions
if module.params["action"] == "get":
result=get()
module.exit_json(changed=False,meta=result)
elif module.params["action"] == "put":
result=put()
module.exit_json(changed=True,meta=result['Location'].split("/jobs/")[1])
elif module.params["action"] == "post":
result=post()
module.exit_json(changed=True,meta=result['Location'].split("/jobs/")[1])
elif module.params["action"] == "delete":
result=delete()
module.exit_json(changed=True,meta=result['Location'].split("/jobs/")[1])
if __name__ == '__main__':
main() |
the-stack_0_18429 | import sentry_sdk
from decouple import Csv, config
from dj_database_url import parse as db_url
from sentry_sdk.integrations.django import DjangoIntegration
from .base import * # noqa
DEBUG = False
SECRET_KEY = config("SECRET_KEY")
DATABASES = {
"default": config("DATABASE_URL", cast=db_url),
}
DATABASES["default"]["ATOMIC_REQUESTS"] = True
ALLOWED_HOSTS = config("ALLOWED_HOSTS", cast=Csv())
STATIC_ROOT = base_dir_join("staticfiles")
STATIC_URL = "/static/"
MEDIA_ROOT = base_dir_join("mediafiles")
MEDIA_URL = "/media/"
SERVER_EMAIL = "[email protected]"
EMAIL_HOST = "smtp.sendgrid.net"
EMAIL_HOST_USER = config("SENDGRID_USERNAME")
EMAIL_HOST_PASSWORD = config("SENDGRID_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
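# Example environment for this settings module (editor's sketch; values are
# placeholders). These are the keys read through python-decouple in this file:
#
#   SECRET_KEY=change-me
#   DATABASE_URL=postgres://user:pass@localhost:5432/app
#   ALLOWED_HOSTS=example.com,www.example.com
#   SENDGRID_USERNAME=apikey
#   SENDGRID_PASSWORD=change-me
#   REDIS_URL=redis://localhost:6379/0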
# Security
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = "DENY"
# Webpack
WEBPACK_LOADER["DEFAULT"]["CACHE"] = True
# Celery
CELERY_BROKER_URL = config("REDIS_URL")
CELERY_RESULT_BACKEND = config("REDIS_URL")
CELERY_SEND_TASK_ERROR_EMAILS = True
# Whitenoise
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
MIDDLEWARE.insert( # insert WhiteNoiseMiddleware right after SecurityMiddleware
MIDDLEWARE.index("django.middleware.security.SecurityMiddleware") + 1,
"whitenoise.middleware.WhiteNoiseMiddleware",
)
# django-log-request-id
MIDDLEWARE.insert( # insert RequestIDMiddleware on the top
0, "log_request_id.middleware.RequestIDMiddleware"
)
LOG_REQUEST_ID_HEADER = "HTTP_X_REQUEST_ID"
LOG_REQUESTS = True
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"filters": {
"require_debug_false": {"()": "django.utils.log.RequireDebugFalse"},
"request_id": {"()": "log_request_id.filters.RequestIDFilter"},
},
"formatters": {
"standard": {
"format": "%(levelname)-8s [%(asctime)s] [%(request_id)s] %(name)s: %(message)s"
},
},
"handlers": {
"null": {"class": "logging.NullHandler",},
"mail_admins": {
"level": "ERROR",
"class": "django.utils.log.AdminEmailHandler",
"filters": ["require_debug_false"],
},
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"filters": ["request_id"],
"formatter": "standard",
},
},
"loggers": {
"": {"handlers": ["console"], "level": "INFO"},
"django.security.DisallowedHost": {"handlers": ["null"], "propagate": False,},
"django.request": {"handlers": ["mail_admins"], "level": "ERROR", "propagate": True,},
"log_request_id.middleware": {
"handlers": ["console"],
"level": "DEBUG",
"propagate": False,
},
},
}
JS_REVERSE_EXCLUDE_NAMESPACES = ["admin"]
# Sentry
sentry_sdk.init(dsn=SENTRY_DSN, integrations=[DjangoIntegration()], release=COMMIT_SHA)
|
the-stack_0_18430 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class LocationServiceData(Model):
"""LocationServiceData.
:param access_mappings: Data about the access mappings contained by this location service.
:type access_mappings: list of :class:`AccessMapping <locations.v4_1.models.AccessMapping>`
:param client_cache_fresh: Data that the location service holds.
:type client_cache_fresh: bool
:param client_cache_time_to_live: The time to live on the location service cache.
:type client_cache_time_to_live: int
:param default_access_mapping_moniker: The default access mapping moniker for the server.
:type default_access_mapping_moniker: str
:param last_change_id: The obsolete id for the last change that took place on the server (use LastChangeId64).
:type last_change_id: int
:param last_change_id64: The non-truncated 64-bit id for the last change that took place on the server.
:type last_change_id64: long
:param service_definitions: Data about the service definitions contained by this location service.
:type service_definitions: list of :class:`ServiceDefinition <locations.v4_1.models.ServiceDefinition>`
:param service_owner: The identifier of the deployment which is hosting this location data (e.g. SPS, TFS, ELS, Napa, etc.)
:type service_owner: str
"""
_attribute_map = {
'access_mappings': {'key': 'accessMappings', 'type': '[AccessMapping]'},
'client_cache_fresh': {'key': 'clientCacheFresh', 'type': 'bool'},
'client_cache_time_to_live': {'key': 'clientCacheTimeToLive', 'type': 'int'},
'default_access_mapping_moniker': {'key': 'defaultAccessMappingMoniker', 'type': 'str'},
'last_change_id': {'key': 'lastChangeId', 'type': 'int'},
'last_change_id64': {'key': 'lastChangeId64', 'type': 'long'},
'service_definitions': {'key': 'serviceDefinitions', 'type': '[ServiceDefinition]'},
'service_owner': {'key': 'serviceOwner', 'type': 'str'}
}
def __init__(self, access_mappings=None, client_cache_fresh=None, client_cache_time_to_live=None, default_access_mapping_moniker=None, last_change_id=None, last_change_id64=None, service_definitions=None, service_owner=None):
super(LocationServiceData, self).__init__()
self.access_mappings = access_mappings
self.client_cache_fresh = client_cache_fresh
self.client_cache_time_to_live = client_cache_time_to_live
self.default_access_mapping_moniker = default_access_mapping_moniker
self.last_change_id = last_change_id
self.last_change_id64 = last_change_id64
self.service_definitions = service_definitions
self.service_owner = service_owner
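# Construction sketch (editor's addition; the values are invented for illustration):
#
#   data = LocationServiceData(default_access_mapping_moniker='PublicAccessMapping',
#                              client_cache_time_to_live=3600)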
|
the-stack_0_18431 | # This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
"""
Stores jobs in a database table using SQLAlchemy.
"""
import pickle
import logging
from clacks.common.components.scheduler.jobstores.base import JobStore
from clacks.common.components.scheduler.job import Job, JOB_WAITING, JOB_ERROR
try:
from sqlalchemy import create_engine, Table, MetaData, Column, Integer, Sequence, PickleType, Boolean, BigInteger, select, and_, String, Unicode, DateTime
except ImportError: # pragma: nocover
raise ImportError('SQLAlchemyJobStore requires SQLAlchemy installed')
logger = logging.getLogger(__name__)
class SQLAlchemyJobStore(JobStore):
def __init__(self, url=None, engine=None, tablename='clacks.common.components.scheduler_jobs',
metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL):
self.jobs = []
self.pickle_protocol = pickle_protocol
if engine:
self.engine = engine
elif url:
self.engine = create_engine(url)
else:
raise ValueError('Need either "engine" or "url" defined')
self.jobs_t = Table(tablename, metadata or MetaData(),
Column('id', Integer,
Sequence(tablename + '_id_seq', optional=True),
primary_key=True),
Column('trigger', PickleType(pickle_protocol, mutable=False),
nullable=False),
Column('func_ref', String(1024), nullable=False),
Column('args', PickleType(pickle_protocol, mutable=False),
nullable=False),
Column('kwargs', PickleType(pickle_protocol, mutable=False),
nullable=False),
Column('name', Unicode(1024)),
Column('misfire_grace_time', Integer, nullable=False),
Column('coalesce', Boolean, nullable=False),
Column('origin', String(1024), nullable=False),
Column('owner', String(1024), nullable=True),
Column('tag', String(1024), nullable=True),
Column('description', String(1024), nullable=True),
Column('callback_ref', String(1024), nullable=True),
Column('progress', Integer, nullable=False),
Column('status', Integer, nullable=False),
Column('max_runs', Integer),
Column('max_instances', Integer),
Column('next_run_time', DateTime, nullable=False),
Column('runs', BigInteger))
self.jobs_t.create(self.engine, True)
def add_job(self, job):
job_dict = job.__getstate__()
result = self.engine.execute(self.jobs_t.insert().values(**job_dict))
job.id = result.inserted_primary_key[0]
self.jobs.append(job)
def remove_job(self, job):
delete = self.jobs_t.delete().where(self.jobs_t.c.id == job.id)
self.engine.execute(delete)
self.jobs.remove(job)
def load_jobs(self):
jobs = []
for row in self.engine.execute(select([self.jobs_t])):
try:
job = Job.__new__(Job)
job_dict = dict(row.items())
job.__setstate__(job_dict)
# Set jobs that have not been executed completely to ERROR
if job.status != JOB_WAITING:
job.status = JOB_ERROR
# Treat our local jobs differently
jobs.append(job)
except Exception:
job_name = job_dict.get('name', '(unknown)')
logger.exception('Unable to restore job "%s"', job_name)
self.jobs = jobs
def migrate_jobs(self, job, origin):
        # Migrate the job only if it still has its original origin; otherwise
        # someone else has already migrated it
update = self.jobs_t.update().where(and_(self.jobs_t.c.origin ==
job.origin, self.jobs_t.c.id == job.id)).values(origin=origin)
self.engine.execute(update)
def update_job(self, job):
job_dict = job.__getstate__()
update = self.jobs_t.update().where(self.jobs_t.c.id == job.id).\
values(next_run_time=job_dict['next_run_time'],
runs=job_dict['runs'])
self.engine.execute(update)
def close(self):
self.engine.dispose()
def __repr__(self):
return '<%s (url=%s)>' % (self.__class__.__name__, self.engine.url)
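# Usage sketch (editor's addition; the connection URL is a placeholder):
#
#   store = SQLAlchemyJobStore(url='sqlite:///scheduler-jobs.sqlite')
#   store.load_jobs()   # fills store.jobs, marking interrupted jobs as JOB_ERROR
#   store.close()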
|
the-stack_0_18434 | import cv2
from typing import Tuple, List
import numpy as np
import time
import os
from dataclasses import dataclass
import math
from config import Config
from utils.misc import color_filter, cut_roi
from item import ItemCropper
@dataclass
class Template:
data: np.ndarray = None
hist = None
blacklist: bool = False
@dataclass
class Item:
center: Tuple[float, float] = None # (x, y) in screen coordinates
name: str = None
score: float = -1.0
dist: float = -1.0
roi: List[int] = None
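# Overview (editor's note): ItemCropper isolates clusters of on-screen item text,
# the templates loaded below are pre-filtered with the per-rarity HSV ranges, a
# coarse color-histogram correlation then discards templates whose colors do not
# overlap a cluster, and cv2.matchTemplate scores the remaining candidates.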
class ItemFinder:
def __init__(self, config: Config):
self._item_cropper = ItemCropper()
# color range for each type of item
# hsv ranges in opencv h: [0-180], s: [0-255], v: [0, 255]
self._template_color_ranges = {
"white": [np.array([0, 0, 150]), np.array([0, 0, 245])],
"gray": [np.array([0, 0, 90]), np.array([0, 0, 126])],
"magic": [np.array([120, 120, 190]), np.array([120, 126, 255])],
"set": [np.array([60, 250, 190]), np.array([60, 255, 255])],
"rare": [np.array([30, 128, 190]), np.array([30, 137, 255])],
"unique": [np.array([23, 80, 140]), np.array([23, 89, 216])],
"runes": [np.array([21, 251, 190]), np.array([22, 255, 255])]
}
self._items_to_pick = config.items
self._folder_name = "items"
self._min_score = 0.86
# load all templates
self._templates = {}
for filename in os.listdir(f'assets/{self._folder_name}'):
filename = filename.lower()
if filename.endswith('.png'):
item_name = filename[:-4]
                # assets prefixed with bl__ are blacklisted items and will not be picked up
blacklist_item = item_name.startswith("bl__")
# these items will be searched for regardless of pickit setting (e.g. for runes to avoid mixup)
force_search = item_name.startswith("rune_")
if blacklist_item or ((item_name in config.items and config.items[item_name]) or force_search):
data = cv2.imread(f"assets/{self._folder_name}/" + filename)
filtered_template = np.zeros(data.shape, np.uint8)
for key in self._template_color_ranges:
_, extracted_template = color_filter(data, self._template_color_ranges[key])
filtered_template = cv2.bitwise_or(filtered_template, extracted_template)
grayscale = cv2.cvtColor(filtered_template, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(grayscale, 0, 255, cv2.THRESH_BINARY)
hist = cv2.calcHist([filtered_template], [0, 1, 2], mask, [8, 8, 8], [0, 256, 0, 256, 0, 256])
template = Template()
template.data = filtered_template
template.hist = hist
if blacklist_item:
template.blacklist = True
self._templates[item_name] = template
def update_items_to_pick(self, config: Config):
self._items_to_pick = config.items
def search(self, inp_img: np.ndarray) -> List[Item]:
img = inp_img[:,:,:]
start = time.time()
item_text_clusters = self._item_cropper.crop(img, 7)
item_list = []
for cluster in item_text_clusters:
x, y, w, h = cluster.roi
# cv2.rectangle(inp_img, (x, y), (x+w, y+h), (0, 255, 0), 1)
cropped_input = cluster.data
best_score = None
item = None
for key in self._templates:
template: Template = self._templates[key]
if cropped_input.shape[1] > template.data.shape[1] and cropped_input.shape[0] > template.data.shape[0]:
# sanity check if there is any color overlap of template and cropped_input
grayscale = cv2.cvtColor(cropped_input, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(grayscale, 0, 255, cv2.THRESH_BINARY)
hist = cv2.calcHist([cropped_input], [0, 1, 2], mask, [8, 8, 8], [0, 256, 0, 256, 0, 256])
hist_result = cv2.compareHist(template.hist, hist, cv2.HISTCMP_CORREL)
                    same_type = hist_result > 0.0 and not np.isinf(hist_result)
if same_type:
result = cv2.matchTemplate(cropped_input, template.data, cv2.TM_CCOEFF_NORMED)
_, max_val, _, max_loc = cv2.minMaxLoc(result)
if max_val > self._min_score:
if template.blacklist:
max_val += 0.02
if (best_score is None or max_val > best_score):
best_score = max_val
if template.blacklist:
item = None
else:
# Do another color hist check with the actuall found item template
# TODO: After cropping the "cropped_input" with "cropped_item", check if "cropped_input" might need to be
# checked for other items. This would solve the issue of many items in one line being in one cluster
roi = [max_loc[0], max_loc[1], template.data.shape[1], template.data.shape[0]]
cropped_item = cut_roi(cropped_input, roi)
grayscale = cv2.cvtColor(cropped_item, cv2.COLOR_BGR2GRAY)
_, mask = cv2.threshold(grayscale, 0, 255, cv2.THRESH_BINARY)
hist = cv2.calcHist([cropped_item], [0, 1, 2], mask, [8, 8, 8], [0, 256, 0, 256, 0, 256])
hist_result = cv2.compareHist(template.hist, hist, cv2.HISTCMP_CORREL)
                                    same_type = hist_result > 0.65 and not np.isinf(hist_result)
if same_type:
item = Item()
item.center = (int(max_loc[0] + x + int(template.data.shape[1] * 0.5)), int(max_loc[1] + y + int(template.data.shape[0] * 0.5)))
item.name = key
item.score = max_val
item.roi = [max_loc[0] + x, max_loc[1] + y, template.data.shape[1], template.data.shape[0]]
center_abs = (item.center[0] - (inp_img.shape[1] // 2), item.center[1] - (inp_img.shape[0] // 2))
item.dist = math.dist(center_abs, (0, 0))
if item is not None and self._items_to_pick[item.name]:
item_list.append(item)
elapsed = time.time() - start
# print(f"Item Search: {elapsed}")
return item_list
# Testing: Throw some stuff on the ground see if it is found
if __name__ == "__main__":
from screen import Screen
from config import Config
config = Config()
screen = Screen(config.general["monitor"])
item_finder = ItemFinder(config)
while 1:
# img = cv2.imread("")
img = screen.grab().copy()
item_list = item_finder.search(img)
for item in item_list:
# print(item.name + " " + str(item.score))
cv2.circle(img, item.center, 5, (255, 0, 255), thickness=3)
cv2.rectangle(img, item.roi[:2], (item.roi[0] + item.roi[2], item.roi[1] + item.roi[3]), (0, 0, 255), 1)
# cv2.putText(img, item.name, item.center, cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 1, cv2.LINE_AA)
# img = cv2.resize(img, None, fx=0.5, fy=0.5)
cv2.imshow('test', img)
cv2.waitKey(1)
|
the-stack_0_18438 | """Nox Sessions."""
import tempfile
import nox
from nox_poetry import session
from nox_poetry.sessions import Session
locations = "src", "tests", "noxfile.py"
nox.options.sessions = "lint", "mypy", "pytype", "safety", "tests"
package = "pacioli"
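# Invocation sketch (editor's addition): plain `nox` runs the default sessions set
# above, `nox -s tests-3.9` runs a single parametrized session, and `nox -rs lint`
# reuses an existing virtualenv for faster repeat runs.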
@session(python=["3.7", "3.8", "3.9", "3.10"])
def tests(session: Session) -> None:
"""Run the test suite."""
session.install("pytest", "pytest-cov", ".")
session.run("pytest", "--cov=pacioli")
@session(python=["3.8", "3.9", "3.10"])
def lint(session: Session) -> None:
"""Run the lint session."""
args = session.posargs or locations
session.install(
"flake8",
"flake8-black",
"flake8-docstrings",
"flake8-isort",
)
session.run("flake8", *args)
@session(python=["3.9", "3.10"])
def black(session: Session) -> None:
"""Run black session."""
args = session.posargs or locations
session.install("black")
session.run("black", *args)
@session(python=["3.9", "3.10"])
def safety(session: Session) -> None:
"""Run the safety session."""
with tempfile.NamedTemporaryFile() as requirements:
session.run(
"poetry",
"export",
"--dev",
"--format=requirements.txt",
"--without-hashes",
f"--output={requirements.name}",
external=True,
)
session.install("safety")
session.run("safety", "check", f"--file={requirements.name}", "--full-report")
@session(python=["3.9", "3.10"])
def mypy(session: Session) -> None:
"""Type-check using mypy."""
args = session.posargs or locations
session.install("mypy")
session.run("mypy", *args)
@session(python="3.9")
def pytype(session: Session) -> None:
"""Run the static type checker."""
args = session.posargs or ["--disable=import-error", *locations]
session.install("pytype")
session.run("pytype", *args)
@session(python=["3.9", "3.8"])
def xdoctest(session: Session) -> None:
"""Run examples with xdoctest."""
args = session.posargs or ["all"]
session.run("poetry", "install", "--no-dev", external=True)
session.install("xdoctest")
session.run("python", "-m", "xdoctest", package, *args)
@session(python="3.10")
def docs(session: Session) -> None:
"""Build the documentation."""
session.run("poetry", "install", "--no-dev", external=True)
session.install("sphinx", "sphinx-autodoc-typehints")
session.run("sphinx-build", "docs", "docs/_build")
@session(python=["3.9", "3.10"])
def coverage(session: Session) -> None:
"""Upload coverage data."""
session.install("coverage[toml]", "codecov")
session.run("coverage", "xml", "--fail-under=0")
session.run("codecov", *session.posargs)
|
the-stack_0_18439 | import sys
import random as rand
sys.path.append('..\\')
sys.path.append('core')
import argparse
import torch.utils.data as data
import os
import os.path
from imageio import imread
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torchvision.transforms.functional import rgb_to_grayscale
import glob
from core.utils import utils
from core.utils import warp_utils
from core.raft import RAFT
from core.datasets import FlyingChairs
from mydatasets.flyingchairsdata import flying_chairs
from core.utils.utils import ArrayToTensor
from skimage.segmentation import slic
import time
try:
from torch.cuda.amp import GradScaler
except:
# dummy GradScaler for PyTorch < 1.6
class GradScaler:
def __init__(self):
pass
def scale(self, loss):
return loss
def unscale_(self, optimizer):
pass
def step(self, optimizer):
optimizer.step()
def update(self):
pass
# exclude extremely large displacements
MAX_FLOW = 400
SUM_FREQ = 100
CHECK_FREQ = 2000
EPSILON = 0.0001
Q = 0.5
def sequence_OCCloss(flow_preds_for, flow_preds_bac, flow_preds_for_OCC, flow_preds_bac_OCC, image1, image2_orig, gamma=0.8):
""" Loss function defined over sequence of flow predictions """
n_predictions = len(flow_preds_for)
flow_loss = 0.0
for i in range(n_predictions):
i_weight = gamma**(n_predictions - i - 1)
Loss = []
warpedimg = warp_utils.flow_warp(image2_orig, flow_preds_for[i])
occmap = utils.get_occlusion(flow_preds_for[i], flow_preds_bac[i])
occmap_xy = torch.cat((occmap, occmap), 1)
occmapTILDA = utils.get_occlusion(flow_preds_for_OCC[i], flow_preds_bac_OCC[i])
occmapTILDA_xy = torch.cat((occmapTILDA, occmapTILDA), 1)
OCCmask = torch.clamp(occmapTILDA_xy - occmap_xy, 0, 1)
#Lo loss
Loss += [torch.abs(flow_preds_for_OCC[i] - flow_preds_for[i].detach()) * OCCmask]
# Lp loss
diff = utils.hamming_distance(utils.ternary_transform(image1), utils.ternary_transform(warpedimg))
Loss += [diff.abs() * (1 - occmap)]
Loss = [(l ** 2 + EPSILON) ** Q for l in Loss]
i_loss = sum([l.mean() for l in Loss]) / (1 - occmap).mean()
flow_loss += i_weight * i_loss
return flow_loss
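# Shape sketch (editor's addition; random stand-ins, not real data):
#
#   flows = [torch.randn(2, 2, 368, 496) for _ in range(12)]   # one entry per RAFT iteration
#   imgs = torch.rand(2, 3, 368, 496) * 255
#   loss = sequence_OCCloss(flows, flows, flows, flows, imgs, imgs)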
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def fetch_optimizer(args, model):
""" Create the optimizer and learning rate scheduler """
optimizer = optim.AdamW(model.parameters(), lr=args.lr, weight_decay=args.wdecay, eps=args.epsilon)
scheduler = optim.lr_scheduler.OneCycleLR(optimizer, args.lr, args.num_steps+100,
pct_start=0.05, cycle_momentum=False, anneal_strategy='linear')
return optimizer, scheduler
def fetch_dataloader(args, TRAIN_DS='C+T+K+S+H'):
""" Create the data loader for the corresponding trainign set """
aug_params = {'crop_size': [368, 496], 'min_scale': -0.1, 'max_scale': 1.0, 'do_flip': True}
#train_dataset = FlyingChairs(aug_params, split='training', root='E:\RAFT datasets\FlyingChairs\data')
# Colab:
train_dataset = FlyingChairs(aug_params, split='training', root='/content/FlyingChairs_release/data')
train_loader = data.DataLoader(train_dataset, batch_size=args.batch_size,
pin_memory=False, shuffle=True, num_workers=1, drop_last=True)
print('Training with %d image pairs' % len(train_dataset))
return train_loader
def train(args):
torch.cuda.init()
model = nn.DataParallel(RAFT(args), device_ids=args.gpus)
print("Parameter Count: %d" % count_parameters(model))
if args.restore_ckpt is not None:
model.load_state_dict(torch.load(args.restore_ckpt), strict=False)
model.cuda()
model.train()
train_loader = fetch_dataloader(args)
optimizer, scheduler = fetch_optimizer(args, model)
total_steps = 0
scaler = GradScaler(enabled=args.mixed_precision)
should_keep_training = True
while should_keep_training:
for _, (image1, image2, _, _) in enumerate(train_loader):
optimizer.zero_grad()
image1 = image1.cuda()
image2 = image2.cuda()
#start = time.time()
if args.add_noise:
stdv = np.random.uniform(0.0, 5.0)
image1 = (image1 + stdv * torch.randn(*image1.shape).cuda()).clamp(0.0, 255.0)
image2 = (image2 + stdv * torch.randn(*image2.shape).cuda()).clamp(0.0, 255.0)
flow_for = model(image1, image2, iters=args.iters)
flow_bac = model(image2, image1, iters=args.iters)
## adding occlusions
segments = slic(image2[0].squeeze().permute(1, 2, 0).cpu().detach().numpy(), n_segments=300, compactness=800)
mask = rand.sample(list(np.unique(segments)), k=rand.randint(8, 12))
image2_orig = image2.clone().detach()
for b in image2:
for i in range(b[0].shape[0]):
for j in range(b[0].shape[1]):
if segments[i][j] in mask:
b[0][i][j] = float(rand.randrange(255))
b[1][i][j] = float(rand.randrange(255))
b[2][i][j] = float(rand.randrange(255))
flow_for_OCC = model(image1, image2, iters=args.iters)
flow_bac_OCC = model(image2, image1, iters=args.iters)
loss = sequence_OCCloss(flow_for, flow_bac, flow_for_OCC, flow_bac_OCC, image1, image2_orig, args.gamma)
if torch.isnan(loss) or torch.isinf(loss):
del loss
continue
scaler.scale(loss).backward()
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
scaler.step(optimizer)
scheduler.step()
scaler.update()
#end = time.time()
#print(end - start)
if total_steps % 10 == 0:
print('step %d, loss: %f' % (total_steps, loss))
if total_steps % CHECK_FREQ == CHECK_FREQ - 1:
                PATH = 'checkpoints/%d_%s.pth' % (total_steps + 1, args.name)
torch.save(model.state_dict(), PATH)
print('checkpoint saved !')
total_steps += 1
if total_steps > args.num_steps:
should_keep_training = False
break
    PATH = 'checkpoints/%s.pth' % args.name
torch.save(model.state_dict(), PATH)
return PATH
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--name', default='raft', help="name your experiment")
parser.add_argument('--stage', help="determines which dataset to use for training")
parser.add_argument('--restore_ckpt', help="restore checkpoint")
parser.add_argument('--small', action='store_true', help='use small model')
parser.add_argument('--validation', type=str, nargs='+')
parser.add_argument('--lr', type=float, default=0.00002)
parser.add_argument('--num_steps', type=int, default=100000)
parser.add_argument('--batch_size', type=int, default=6)
parser.add_argument('--image_size', type=int, nargs='+', default=[384, 512])
parser.add_argument('--gpus', type=int, nargs='+', default=[0,1])
parser.add_argument('--mixed_precision', action='store_true', help='use mixed precision')
parser.add_argument('--iters', type=int, default=12)
parser.add_argument('--wdecay', type=float, default=.00005)
parser.add_argument('--epsilon', type=float, default=1e-8)
parser.add_argument('--clip', type=float, default=1.0)
parser.add_argument('--dropout', type=float, default=0.0)
parser.add_argument('--gamma', type=float, default=0.8, help='exponential weighting')
parser.add_argument('--add_noise', action='store_true')
args = parser.parse_args()
torch.manual_seed(1234)
np.random.seed(1234)
if not os.path.isdir('checkpoints'):
os.mkdir('checkpoints')
train(args)
|
the-stack_0_18440 | # uninhm
# https://atcoder.jp/contests/arc108/tasks/arc108_b
# greedy, stack
n = int(input())
s = input()
t = ""
for c in s:
t += c
while len(t) >= 3 and t[-3:] == 'fox':
t = t[:-3]
print(len(t))
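# Worked example (editor's note): for s = 'ffoxox' the stack evolves
# 'f' -> 'ff' -> 'ffo' -> 'ffox' -> 'f' -> 'fo' -> 'fox' -> '', so 0 is printed.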
|
the-stack_0_18441 | # -*- coding: utf-8 -*-
# File : loss.py
# Author : Kai Ao
# Email : [email protected]
# Date : 2020/12/12 10:59
#
# This file is part of Rotation-Decoupled Detector.
# https://github.com/Capino512/pytorch-rotation-decoupled-detector
# Distributed under MIT License.
from collections import OrderedDict
import torch
from torch import nn
from torch.nn.functional import one_hot
from hiector.ssrdd.utils.box.bbox import bbox_iou, bbox_switch, encode
def match(bboxes, anchors, iou_thresh, batch=16):
# Reduce GPU memory usage
ious = torch.cat([bbox_iou(bboxes[i : i + batch], anchors) for i in range(0, bboxes.size(0), batch)])
max_ious, bbox_indexes = torch.max(ious, dim=0)
mask_neg = max_ious < iou_thresh[0]
mask_pos = max_ious > iou_thresh[1]
return mask_pos, mask_neg, bbox_indexes
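# Note (editor's addition): iou_thresh is a (negative, positive) threshold pair,
# e.g. (0.4, 0.5); anchors whose best IoU falls below the first value become
# negatives, above the second become positives, and the band in between is
# ignored by the loss functions below.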
def calc_loss_v1(pred_cls, pred_loc, targets, anchors, iou_thresh, variance, balance):
device = pred_cls.device
num_classes = pred_cls.size(-1)
weight_pos, weight_neg = 2 * balance, 2 * (1 - balance)
anchors_xyxy = bbox_switch(anchors, "xywh", "xyxy")
criterion_cls = nn.BCEWithLogitsLoss(reduction="none")
criterion_loc = nn.SmoothL1Loss(reduction="sum")
loss_cls, loss_loc = torch.zeros([2], dtype=torch.float, device=device, requires_grad=True)
num_pos = 0
for i, target in enumerate(targets):
if target:
bboxes = target["bboxes"].to(device)
labels = target["labels"].to(device)
bboxes_xyxy = bbox_switch(bboxes[:, :4], "xywh", "xyxy")
mask_pos, mask_neg, bbox_indexes = match(bboxes_xyxy, anchors_xyxy, iou_thresh)
labels = labels[bbox_indexes]
indexes_pos = bbox_indexes[mask_pos]
bboxes_matched = bboxes[indexes_pos]
anchors_matched = anchors[mask_pos]
bboxes_pred = pred_loc[i][mask_pos]
gt_bboxes, det_bboxes = encode(bboxes_matched, bboxes_pred, anchors_matched, variance)
labels = one_hot(labels, num_classes=num_classes).float()
labels[mask_neg] = 0
loss_cls_ = criterion_cls(pred_cls[i], labels)
loss_cls = loss_cls + loss_cls_[mask_pos].sum() * weight_pos + loss_cls_[mask_neg].sum() * weight_neg
loss_loc = loss_loc + criterion_loc(gt_bboxes, det_bboxes)
num_pos += mask_pos.sum().item()
else:
loss_cls = loss_cls + criterion_cls(pred_cls[i], torch.zeros_like(pred_cls[i])).sum()
num_pos = max(num_pos, 1)
return OrderedDict([("loss_cls", loss_cls / num_pos), ("loss_loc", loss_loc / num_pos)])
def calc_loss_v2(pred_cls, pred_loc, targets, anchors, iou_thresh, variance, balance):
# Calculate the loss centrally, has only a small acceleration effect
device = pred_cls.device
num_classes = pred_cls.size(-1)
weight_pos, weight_neg = 2 * balance, 2 * (1 - balance)
criterion_cls = nn.BCEWithLogitsLoss(reduction="none")
criterion_loc = nn.SmoothL1Loss(reduction="sum")
num_bboxes = [target["bboxes"].size(0) if target else 0 for target in targets]
bboxes = [target["bboxes"] for target in targets if target]
labels = [target["labels"] for target in targets if target]
if len(bboxes) > 0:
bboxes = torch.cat(bboxes).to(device)
labels = torch.cat(labels).to(device)
else:
loss_cls = criterion_cls(pred_cls, torch.zeros_like(pred_cls)).sum()
return OrderedDict([("loss_cls", loss_cls), ("loss_loc", torch.tensor(0.0, requires_grad=True))])
# Reduce GPU memory usage
batch = 16
iou = torch.cat([bbox_iou(bboxes[i : i + batch, :4], anchors, "xywh") for i in range(0, bboxes.size(0), batch)])
start = 0
max_iou_merged, bbox_indexes_merged = [], []
for i, num in enumerate(num_bboxes):
if num == 0:
max_iou = torch.zeros_like(pred_cls[i, :, 0])
bbox_indexes = torch.zeros_like(pred_cls[i, :, 0], dtype=torch.long)
else:
            max_iou, bbox_indexes = torch.max(iou[start : start + num], dim=0)  # best-matching gt box per anchor
max_iou_merged.append(max_iou)
bbox_indexes_merged.append(bbox_indexes + start)
start += num
max_iou_merged = torch.stack(max_iou_merged)
bbox_indexes_merged = torch.stack(bbox_indexes_merged)
masks_pos = max_iou_merged > iou_thresh[1]
masks_neg = max_iou_merged < iou_thresh[0]
labels_matched = labels[bbox_indexes_merged]
labels_matched = one_hot(labels_matched, num_classes=num_classes)
labels_matched[masks_neg] = 0
bboxes_matched = bboxes[bbox_indexes_merged[masks_pos]]
anchors_matched = anchors[None].repeat(len(targets), 1, 1)[masks_pos]
loss_cls = criterion_cls(pred_cls, labels_matched.float())
loss_cls = loss_cls[masks_pos].sum() * weight_pos + loss_cls[masks_neg].sum() * weight_neg
gt_bboxes, det_bboxes = encode(bboxes_matched, pred_loc[masks_pos], anchors_matched, variance)
loss_loc = criterion_loc(det_bboxes, gt_bboxes)
num_pos = max(masks_pos.sum().item(), 1)
return OrderedDict([("loss_cls", loss_cls / num_pos), ("loss_loc", loss_loc / num_pos)])
calc_loss = calc_loss_v1
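# Input sketch (editor's addition): `targets` is a per-image list where each entry
# is either a dict with 'bboxes' (float tensor of box rows) and 'labels' (integer
# class ids), or a falsy value for an image without objects; both variants return
# an OrderedDict with 'loss_cls' and 'loss_loc'.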
|
the-stack_0_18443 | from flask_mongoengine import Document
from qingmi.utils.encoding import smart_text
def get_model_field(model):
""" get the verbose_name of all fields in the model """
field_dict = dict()
for field in model._fields:
attr = getattr(model, field)
if hasattr(attr, 'verbose_name'):
verbose_name = attr.verbose_name
if verbose_name:
field_dict[field] = verbose_name
return field_dict
def get_fields_in_model(instance):
"""
    Returns the fields defined on the given mongoengine document instance.
    :param instance: The document instance to get the fields for
    :type instance: Document
    :return: The mapping of field names to fields for the given document
    :rtype: dict
"""
assert isinstance(instance, Document)
return instance._fields
def model_instance_diff(old, new):
"""
Calculates the differences between two model instances. One of the instances may be ``None`` (i.e., a newly
created model or deleted model). This will cause all fields with a value to have changed (from ``None``).
:param old: The old state of the model instance.
:type old: Model
:param new: The new state of the model instance.
:type new: Model
:return: A dictionary with the names of the changed fields as keys and a two tuple of the old and new field values
as value.
:rtype: dict
"""
if not(old is None or isinstance(old, Document)):
raise TypeError("The supplied old instance is not a valid model instance.")
if not(new is None or isinstance(new, Document)):
raise TypeError("The supplied new instance is not a valid model instance.")
diff = {}
if old is not None and new is not None:
fields = set(list(old._fields.keys()) + list(new._fields.keys()))
elif old is not None:
fields = set(list(get_fields_in_model(old).keys()))
elif new is not None:
fields = set(list(get_fields_in_model(new).keys()))
else:
fields = set()
for field in fields:
try:
old_value = smart_text(getattr(old, field, None))
except Exception as e:
old_value = None
try:
new_value = smart_text(getattr(new, field, None))
except Exception as e:
new_value = None
if old_value != new_value:
diff[field] = (smart_text(old_value), smart_text(new_value))
if len(diff) == 0:
diff = None
return diff
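# Usage sketch (editor's addition; `Article` is a hypothetical Document subclass):
#
#   before = Article.objects.get(id=some_id)
#   after = edit(before)          # the same document after changes
#   changes = model_instance_diff(before, after)
#   # e.g. {'title': ('Old title', 'New title')}, or None when nothing changed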
|
the-stack_0_18446 | # -*- coding: utf-8 -*-
# FLEDGE_BEGIN
# See: http://fledge.readthedocs.io/
# FLEDGE_END
"""Services Registry class"""
import uuid
import asyncio
from fledge.common import logger
from fledge.common.service_record import ServiceRecord
from fledge.services.core.service_registry import exceptions as service_registry_exceptions
from fledge.services.core.interest_registry.interest_registry import InterestRegistry
__author__ = "Praveen Garg, Amarendra Kumar Sinha"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
class ServiceRegistry:
_registry = list()
# INFO - level 20
_logger = logger.setup(__name__, level=20)
@classmethod
def register(cls, name, s_type, address, port, management_port, protocol='http'):
""" registers the service instance
:param name: name of the service
:param s_type: a valid service type; e.g. Storage, Core, Southbound
:param address: any IP or host address
:param port: a valid positive integer
:param management_port: a valid positive integer for management operations e.g. ping, shutdown
:param protocol: defaults to http
        :return: the registered service's uuid
"""
new_service = True
try:
current_service = cls.get(name=name)
except service_registry_exceptions.DoesNotExist:
pass
else:
# Re: FOGL-1123
if current_service[0]._status in [ServiceRecord.Status.Running, ServiceRecord.Status.Unresponsive]:
raise service_registry_exceptions.AlreadyExistsWithTheSameName
else:
new_service = False
current_service_id = current_service[0]._id
if port is not None and cls.check_address_and_port(address, port):
raise service_registry_exceptions.AlreadyExistsWithTheSameAddressAndPort
if cls.check_address_and_mgt_port(address, management_port):
raise service_registry_exceptions.AlreadyExistsWithTheSameAddressAndManagementPort
if port is not None and (not isinstance(port, int)):
raise service_registry_exceptions.NonNumericPortError
if not isinstance(management_port, int):
raise service_registry_exceptions.NonNumericPortError
if new_service is False:
# Remove current service to enable the service to register with new management port etc
cls.remove_from_registry(current_service_id)
service_id = str(uuid.uuid4()) if new_service is True else current_service_id
registered_service = ServiceRecord(service_id, name, s_type, protocol, address, port, management_port)
cls._registry.append(registered_service)
cls._logger.info("Registered {}".format(str(registered_service)))
return service_id
@classmethod
def _expunge(cls, service_id, service_status):
""" removes the service instance from action
:param service_id: a uuid of registered service
:param service_status: service status to be marked
:return: service_id on successful deregistration
"""
services = cls.get(idx=service_id)
service_name = services[0]._name
services[0]._status = service_status
cls._remove_from_scheduler_records(service_name)
# Remove interest registry records, if any
interest_recs = InterestRegistry().get(microservice_uuid=service_id)
for interest_rec in interest_recs:
InterestRegistry().unregister(interest_rec._registration_id)
return services[0]
@classmethod
def unregister(cls, service_id):
""" deregisters the service instance
:param service_id: a uuid of registered service
:return: service_id on successful deregistration
"""
expunged_service = cls._expunge(service_id, ServiceRecord.Status.Shutdown)
cls._logger.info("Stopped {}".format(str(expunged_service)))
return service_id
@classmethod
def mark_as_failed(cls, service_id):
""" marks the service instance as failed
:param service_id: a uuid of registered service
:return: service_id on successful deregistration
"""
expunged_service = cls._expunge(service_id, ServiceRecord.Status.Failed)
cls._logger.info("Mark as failed {}".format(str(expunged_service)))
return service_id
@classmethod
def remove_from_registry(cls, service_id):
""" remove service_id from service_registry.
:param service_id: a uuid of registered service
"""
services = cls.get(idx=service_id)
cls._registry.remove(services[0])
@classmethod
def _remove_from_scheduler_records(cls, service_name):
""" removes service aka STARTUP from Scheduler internal records
:param service_name
:return:
"""
if service_name in ("Fledge Storage", "Fledge Core"): return
# Require a local import in order to avoid circular import references
from fledge.services.core import server
if server.Server.scheduler is None: return
asyncio.ensure_future(server.Server.scheduler.remove_service_from_task_processes(service_name))
@classmethod
def all(cls):
return cls._registry
@classmethod
def filter(cls, **kwargs):
# OR based filter
services = cls._registry
for k, v in kwargs.items():
if v:
services = [s for s in cls._registry if getattr(s, k, None) == v]
return services
@classmethod
def get(cls, idx=None, name=None, s_type=None):
services = cls.filter(_id=idx, _name=name, _type=s_type)
if len(services) == 0:
raise service_registry_exceptions.DoesNotExist
return services
@classmethod
def check_address_and_port(cls, address, port):
# AND based check
# ugly hack! <Make filter to support AND | OR>
services = [s for s in cls._registry if getattr(s, "_address") == address and getattr(s, "_port") == port and getattr(s, "_status") != ServiceRecord.Status.Failed]
if len(services) == 0:
return False
return True
@classmethod
def check_address_and_mgt_port(cls, address, m_port):
# AND based check
# ugly hack! <Make filter to support AND | OR>
services = [s for s in cls._registry if getattr(s, "_address") == address
and getattr(s, "_management_port") == m_port and getattr(s, "_status") != ServiceRecord.Status.Failed]
if len(services) == 0:
return False
return True
@classmethod
def filter_by_name_and_type(cls, name, s_type):
# AND based check
# ugly hack! <Make filter to support AND | OR>
services = [s for s in cls._registry if getattr(s, "_name") == name and getattr(s, "_type") == s_type]
if len(services) == 0:
raise service_registry_exceptions.DoesNotExist
return services
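# Usage sketch (editor's addition; the service name, address and ports are placeholders):
#
#   sid = ServiceRegistry.register('ExampleSouth', 'Southbound', '127.0.0.1', 6683, 1068)
#   records = ServiceRegistry.get(name='ExampleSouth')
#   ServiceRegistry.unregister(sid)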
|
the-stack_0_18447 | """ Newforms Admin configuration for Photologue
"""
from django.contrib import admin
from models import *
class GalleryAdmin(admin.ModelAdmin):
list_display = ('title', 'date_added', 'photo_count', 'is_public')
list_filter = ['date_added', 'is_public']
date_hierarchy = 'date_added'
prepopulated_fields = {'title_slug': ('title',)}
filter_horizontal = ('photos',)
class PhotoAdmin(admin.ModelAdmin):
list_display = ('title', 'date_taken', 'date_added', 'is_public', 'tags', 'view_count', 'admin_thumbnail')
list_filter = ['date_added', 'is_public']
list_per_page = 10
prepopulated_fields = {'title_slug': ('title',)}
class PhotoEffectAdmin(admin.ModelAdmin):
list_display = ('name', 'description', 'color', 'brightness', 'contrast', 'sharpness', 'filters', 'admin_sample')
fieldsets = (
(None, {
'fields': ('name', 'description')
}),
('Adjustments', {
'fields': ('color', 'brightness', 'contrast', 'sharpness')
}),
('Filters', {
'fields': ('filters',)
}),
('Reflection', {
'fields': ('reflection_size', 'reflection_strength', 'background_color')
}),
('Transpose', {
'fields': ('transpose_method',)
}),
)
class PhotoSizeAdmin(admin.ModelAdmin):
list_display = ('name', 'width', 'height', 'crop', 'pre_cache', 'effect', 'increment_count')
fieldsets = (
(None, {
'fields': ('name', 'width', 'height', 'quality')
}),
('Options', {
'fields': ('upscale', 'crop', 'pre_cache', 'increment_count')
}),
('Enhancements', {
'fields': ('effect', 'watermark',)
}),
)
class WatermarkAdmin(admin.ModelAdmin):
list_display = ('name', 'opacity', 'style')
admin.site.register(Gallery, GalleryAdmin)
admin.site.register(GalleryUpload)
admin.site.register(Photo, PhotoAdmin)
admin.site.register(PhotoEffect, PhotoEffectAdmin)
admin.site.register(PhotoSize, PhotoSizeAdmin)
admin.site.register(Watermark, WatermarkAdmin) |
the-stack_0_18448 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from npu_bridge.npu_init import *
import numpy as np
import tensorflow as tf
import cv2
from tensorflow.contrib import slim
from avod.builders import feature_extractor_builder
from avod.core import anchor_encoder
from avod.core import anchor_filter
from avod.core import anchor_projector
from avod.core import box_3d_encoder
from avod.core import constants
from avod.core import losses
from avod.core import model
from avod.core import summary_utils
from avod.core.anchor_generators import grid_anchor_3d_generator
from avod.datasets.kitti import kitti_aug
class RpnModel(model.DetectionModel):
##############################
# Keys for Placeholders
##############################
PL_BEV_INPUT = 'bev_input_pl'
PL_IMG_INPUT = 'img_input_pl'
PL_ANCHORS = 'anchors_pl'
PL_BEV_ANCHORS = 'bev_anchors_pl'
PL_BEV_ANCHORS_NORM = 'bev_anchors_norm_pl'
PL_IMG_ANCHORS = 'img_anchors_pl'
PL_IMG_ANCHORS_NORM = 'img_anchors_norm_pl'
PL_LABEL_ANCHORS = 'label_anchors_pl'
PL_LABEL_BOXES_3D = 'label_boxes_3d_pl'
PL_LABEL_CLASSES = 'label_classes_pl'
PL_ANCHOR_IOUS = 'anchor_ious_pl'
PL_ANCHOR_OFFSETS = 'anchor_offsets_pl'
PL_ANCHOR_CLASSES = 'anchor_classes_pl'
# Sample info, including keys for projection to image space
# (e.g. camera matrix, image index, etc.)
PL_CALIB_P2 = 'frame_calib_p2'
PL_IMG_IDX = 'current_img_idx'
PL_GROUND_PLANE = 'ground_plane'
# Path Drop MASK
PL_IMG_PATH_DROP_MASK = 'img_path_drop_mask'
PL_BEV_PATH_DROP_MASK = 'bev_path_drop_mask'
##############################
# Keys for Predictions
##############################
PRED_ANCHORS = 'rpn_anchors'
PRED_MB_OBJECTNESS_GT = 'rpn_mb_objectness_gt'
PRED_MB_OFFSETS_GT = 'rpn_mb_offsets_gt'
PRED_MB_MASK = 'rpn_mb_mask'
PRED_MB_OBJECTNESS = 'rpn_mb_objectness'
PRED_MB_OFFSETS = 'rpn_mb_offsets'
PRED_TOP_INDICES = 'rpn_top_indices'
PRED_TOP_ANCHORS = 'rpn_top_anchors'
PRED_TOP_OBJECTNESS_SOFTMAX = 'rpn_top_objectness_softmax'
##############################
# Keys for Loss
##############################
LOSS_RPN_OBJECTNESS = 'rpn_objectness_loss'
LOSS_RPN_REGRESSION = 'rpn_regression_loss'
def __init__(self, model_config, train_val_test, dataset):
"""
Args:
model_config: configuration for the model
train_val_test: "train", "val", or "test"
dataset: the dataset that will provide samples and ground truth
"""
self.label_boxes_3d_shape = None
# Sets model configs (_config)
super(RpnModel, self).__init__(model_config)
if train_val_test not in ["train", "val", "test"]:
raise ValueError('Invalid train_val_test value,'
'should be one of ["train", "val", "test"]')
self._train_val_test = train_val_test
self._is_training = (self._train_val_test == 'train')
# Input config
input_config = self._config.input_config
self._bev_pixel_size = np.asarray([input_config.bev_dims_h+4,
input_config.bev_dims_w])
self._bev_depth = input_config.bev_depth
self._img_pixel_size = np.asarray([input_config.img_dims_h,
input_config.img_dims_w])
self._img_depth = input_config.img_depth
# Rpn config
rpn_config = self._config.rpn_config
self._proposal_roi_crop_size = \
[rpn_config.rpn_proposal_roi_crop_size] * 2
self._fusion_method = rpn_config.rpn_fusion_method
if self._train_val_test in ["train", "val"]:
self._nms_size = rpn_config.rpn_train_nms_size
else:
self._nms_size = rpn_config.rpn_test_nms_size
self._nms_iou_thresh = rpn_config.rpn_nms_iou_thresh
# Feature Extractor Nets
self._bev_feature_extractor = \
feature_extractor_builder.get_extractor(
self._config.layers_config.bev_feature_extractor)
self._img_feature_extractor = \
feature_extractor_builder.get_extractor(
self._config.layers_config.img_feature_extractor)
# Network input placeholders
self.placeholders = dict()
# Inputs to network placeholders
self._placeholder_inputs = dict()
# Information about the current sample
self.sample_info = dict()
# Dataset
self.dataset = dataset
self.dataset.train_val_test = self._train_val_test
self._area_extents = self.dataset.kitti_utils.area_extents
self._bev_extents = self.dataset.kitti_utils.bev_extents
self._cluster_sizes, _ = self.dataset.get_cluster_info()
self._anchor_strides = self.dataset.kitti_utils.anchor_strides
self._anchor_generator = \
grid_anchor_3d_generator.GridAnchor3dGenerator()
self._path_drop_probabilities = self._config.path_drop_probabilities
self._train_on_all_samples = self._config.train_on_all_samples
self._eval_all_samples = self._config.eval_all_samples
# Overwrite the dataset's variable with the config
self.dataset.train_on_all_samples = self._train_on_all_samples
if self._train_val_test in ["val", "test"]:
# Disable path-drop, this should already be disabled inside the
# evaluator, but just in case.
self._path_drop_probabilities[0] = 1.0
self._path_drop_probabilities[1] = 1.0
def _add_placeholder(self, dtype, shape, name):
placeholder = tf.placeholder(dtype, shape, name)
self.placeholders[name] = placeholder
return placeholder
def _set_up_input_pls(self):
"""Sets up input placeholders by adding them to self._placeholders.
Keys are defined as self.PL_*.
"""
# Combine config data
bev_dims = np.append(self._bev_pixel_size, self._bev_depth)
with tf.variable_scope('bev_input'):
# Placeholder for BEV image input, to be filled in with feed_dict
bev_input_placeholder = self._add_placeholder(tf.float32, bev_dims,
self.PL_BEV_INPUT)
self._bev_input_batches = tf.expand_dims(
bev_input_placeholder, axis=0)
self._bev_preprocessed = \
self._bev_feature_extractor.preprocess_input(
self._bev_input_batches, self._bev_pixel_size)
# Summary Images
bev_summary_images = tf.split(
bev_input_placeholder, self._bev_depth, axis=2)
tf.summary.image("bev_maps", bev_summary_images,
max_outputs=self._bev_depth)
with tf.variable_scope('img_input'):
# Take variable size input images
img_input_placeholder = self._add_placeholder(
tf.float32,
[self._img_pixel_size[0], self._img_pixel_size[1], self._img_depth],
self.PL_IMG_INPUT)
self._img_input_batches = tf.expand_dims(
img_input_placeholder, axis=0)
self._img_preprocessed = \
self._img_feature_extractor.preprocess_input(
self._img_input_batches, self._img_pixel_size)
# Summary Image
tf.summary.image("rgb_image", self._img_preprocessed,
max_outputs=2)
with tf.variable_scope('pl_labels'):
self._add_placeholder(tf.float32, [None, 6],
self.PL_LABEL_ANCHORS)
self._add_placeholder(tf.float32, [None, 7],
self.PL_LABEL_BOXES_3D)
self._add_placeholder(tf.float32, [None],
self.PL_LABEL_CLASSES)
# Placeholders for anchors
with tf.variable_scope('pl_anchors'):
self._add_placeholder(tf.float32, [None, 6],
self.PL_ANCHORS)
self._add_placeholder(tf.float32, [None],
self.PL_ANCHOR_IOUS)
self._add_placeholder(tf.float32, [None, 6],
self.PL_ANCHOR_OFFSETS)
self._add_placeholder(tf.float32, [None],
self.PL_ANCHOR_CLASSES)
with tf.variable_scope('bev_anchor_projections'):
self._add_placeholder(tf.float32, [None, 4],
self.PL_BEV_ANCHORS)
self._bev_anchors_norm_pl = self._add_placeholder(
tf.float32, [None, 4], self.PL_BEV_ANCHORS_NORM)
with tf.variable_scope('img_anchor_projections'):
self._add_placeholder(tf.float32, [None, 4],
self.PL_IMG_ANCHORS)
self._img_anchors_norm_pl = self._add_placeholder(
tf.float32, [None, 4], self.PL_IMG_ANCHORS_NORM)
with tf.variable_scope('sample_info'):
# the calib matrix shape is (3 x 4)
self._add_placeholder(
tf.float32, [3, 4], self.PL_CALIB_P2)
self._add_placeholder(tf.int32,
shape=[1],
name=self.PL_IMG_IDX)
self._add_placeholder(tf.float32, [4], self.PL_GROUND_PLANE)
with tf.variable_scope('path_drop_mask'):
self._add_placeholder(tf.float32, [1], self.PL_IMG_PATH_DROP_MASK)
self._add_placeholder(tf.float32, [1], self.PL_BEV_PATH_DROP_MASK)
def _set_up_feature_extractors(self):
"""Sets up feature extractors and stores feature maps and
bottlenecks as member variables.
"""
self.bev_feature_maps, self.bev_end_points = \
self._bev_feature_extractor.build(
self._bev_preprocessed,
self._bev_pixel_size,
self._is_training)
self.img_feature_maps, self.img_end_points = \
self._img_feature_extractor.build(
self._img_preprocessed,
self._img_pixel_size,
self._is_training)
with tf.variable_scope('bev_bottleneck'):
self.bev_bottleneck = slim.conv2d(
self.bev_feature_maps,
1, [1, 1],
scope='bottleneck',
normalizer_fn=slim.batch_norm,
normalizer_params={
'is_training': self._is_training})
with tf.variable_scope('img_bottleneck'):
self.img_bottleneck = slim.conv2d(
self.img_feature_maps,
1, [1, 1],
scope='bottleneck',
normalizer_fn=slim.batch_norm,
normalizer_params={
'is_training': self._is_training})
# # Visualize the end point feature maps being used
# for feature_map in list(self.bev_end_points.items()):
# if 'conv' in feature_map[0]:
# summary_utils.add_feature_maps_from_dict(self.bev_end_points,
# feature_map[0])
#
# for feature_map in list(self.img_end_points.items()):
# if 'conv' in feature_map[0]:
# summary_utils.add_feature_maps_from_dict(self.img_end_points,
# feature_map[0])
def build(self):
# Setup input placeholders
self._set_up_input_pls()
# Setup feature extractors
self._set_up_feature_extractors()
bev_proposal_input = self.bev_bottleneck
img_proposal_input = self.img_bottleneck
fusion_mean_div_factor = 2.0
        # If both img and bev probabilities are set to 1.0, don't do
        # path drop.
if not (self._path_drop_probabilities[0] ==
self._path_drop_probabilities[1] == 1.0):
with tf.variable_scope('rpn_path_drop'):
# random_values = tf.random_uniform(shape=[3],
# minval=0.0,
# maxval=1.0)
# img_mask, bev_mask = self.create_path_drop_masks(
# self._path_drop_probabilities[0],
# self._path_drop_probabilities[1],
# random_values)
self.bev_path_drop_mask = self.placeholders[self.PL_BEV_PATH_DROP_MASK]
self.img_path_drop_mask = self.placeholders[self.PL_IMG_PATH_DROP_MASK]
img_proposal_input = tf.multiply(img_proposal_input,
self.img_path_drop_mask)
bev_proposal_input = tf.multiply(bev_proposal_input,
self.bev_path_drop_mask)
# self.img_path_drop_mask = img_mask
# self.bev_path_drop_mask = bev_mask
# Overwrite the division factor
fusion_mean_div_factor = self.img_path_drop_mask + self.bev_path_drop_mask
with tf.variable_scope('proposal_roi_pooling'):
with tf.variable_scope('box_indices'):
def get_box_indices(boxes):
proposals_shape = boxes.get_shape().as_list()
if any(dim is None for dim in proposals_shape):
proposals_shape = tf.shape(boxes)
ones_mat = tf.ones(proposals_shape[:2], dtype=tf.int32)
multiplier = tf.expand_dims(
tf.range(start=0, limit=proposals_shape[0]), 1)
return tf.reshape(ones_mat * multiplier, [-1])
bev_boxes_norm_batches = tf.expand_dims(
self._bev_anchors_norm_pl, axis=0)
# These should be all 0's since there is only 1 image
tf_box_indices = get_box_indices(bev_boxes_norm_batches)
# Do ROI Pooling on BEV
bev_proposal_rois = tf.image.crop_and_resize(
bev_proposal_input,
self._bev_anchors_norm_pl,
tf_box_indices,
self._proposal_roi_crop_size)
# Do ROI Pooling on image
img_proposal_rois = tf.image.crop_and_resize(
img_proposal_input,
self._img_anchors_norm_pl,
tf_box_indices,
self._proposal_roi_crop_size)
with tf.variable_scope('proposal_roi_fusion'):
rpn_fusion_out = None
if self._fusion_method == 'mean':
tf_features_sum = tf.add(bev_proposal_rois, img_proposal_rois)
rpn_fusion_out = tf.divide(tf_features_sum,
fusion_mean_div_factor)
elif self._fusion_method == 'concat':
rpn_fusion_out = tf.concat(
[bev_proposal_rois, img_proposal_rois], axis=3)
else:
raise ValueError('Invalid fusion method', self._fusion_method)
        # TODO: move this section into a separate AnchorPredictor class
with tf.variable_scope('anchor_predictor', 'ap', [rpn_fusion_out]):
tensor_in = rpn_fusion_out
# Parse rpn layers config
layers_config = self._config.layers_config.rpn_config
l2_weight_decay = layers_config.l2_weight_decay
if l2_weight_decay > 0:
weights_regularizer = slim.l2_regularizer(l2_weight_decay)
else:
weights_regularizer = None
with slim.arg_scope([slim.conv2d],
weights_regularizer=weights_regularizer):
# Use conv2d instead of fully_connected layers.
cls_fc6 = slim.conv2d(tensor_in,
layers_config.cls_fc6,
self._proposal_roi_crop_size,
padding='VALID',
scope='cls_fc6')
cls_fc6_drop = slim.dropout(cls_fc6,
layers_config.keep_prob,
is_training=self._is_training,
scope='cls_fc6_drop')
cls_fc7 = slim.conv2d(cls_fc6_drop,
layers_config.cls_fc7,
[1, 1],
scope='cls_fc7')
cls_fc7_drop = slim.dropout(cls_fc7,
layers_config.keep_prob,
is_training=self._is_training,
scope='cls_fc7_drop')
cls_fc8 = slim.conv2d(cls_fc7_drop,
2,
[1, 1],
activation_fn=None,
scope='cls_fc8')
objectness = tf.squeeze(
cls_fc8, [1, 2],
name='cls_fc8/squeezed')
# Use conv2d instead of fully_connected layers.
reg_fc6 = slim.conv2d(tensor_in,
layers_config.reg_fc6,
self._proposal_roi_crop_size,
padding='VALID',
scope='reg_fc6')
reg_fc6_drop = slim.dropout(reg_fc6,
layers_config.keep_prob,
is_training=self._is_training,
scope='reg_fc6_drop')
reg_fc7 = slim.conv2d(reg_fc6_drop,
layers_config.reg_fc7,
[1, 1],
scope='reg_fc7')
reg_fc7_drop = slim.dropout(reg_fc7,
layers_config.keep_prob,
is_training=self._is_training,
scope='reg_fc7_drop')
reg_fc8 = slim.conv2d(reg_fc7_drop,
6,
[1, 1],
activation_fn=None,
scope='reg_fc8')
offsets = tf.squeeze(
reg_fc8, [1, 2],
name='reg_fc8/squeezed')
# Histogram summaries
with tf.variable_scope('histograms_feature_extractor'):
with tf.variable_scope('bev_vgg'):
for end_point in self.bev_end_points:
tf.summary.histogram(
end_point, self.bev_end_points[end_point])
with tf.variable_scope('img_vgg'):
for end_point in self.img_end_points:
tf.summary.histogram(
end_point, self.img_end_points[end_point])
with tf.variable_scope('histograms_rpn'):
with tf.variable_scope('anchor_predictor'):
fc_layers = [cls_fc6, cls_fc7, cls_fc8, objectness,
reg_fc6, reg_fc7, reg_fc8, offsets]
for fc_layer in fc_layers:
# fix the name to avoid tf warnings
tf.summary.histogram(fc_layer.name.replace(':', '_'),
fc_layer)
# Return the proposals
with tf.variable_scope('proposals'):
anchors = self.placeholders[self.PL_ANCHORS]
# Decode anchor regression offsets
with tf.variable_scope('decoding'):
regressed_anchors = anchor_encoder.offset_to_anchor(
anchors, offsets)
with tf.variable_scope('bev_projection'):
_, bev_proposal_boxes_norm = anchor_projector.project_to_bev(
regressed_anchors, self._bev_extents)
with tf.variable_scope('softmax'):
objectness_softmax = tf.nn.softmax(objectness)
with tf.variable_scope('nms'):
objectness_scores = objectness_softmax[:, 1]
# Do NMS on regressed anchors
top_indices, _ = tf.image.non_max_suppression_padded(
bev_proposal_boxes_norm, objectness_scores,
max_output_size=self._nms_size,
iou_threshold=self._nms_iou_thresh,
pad_to_max_output_size=True)
top_anchors = tf.gather(regressed_anchors, top_indices)
top_anchors = tf.reshape(top_anchors, [self._nms_size, 6])
# For conversion
out_top_anchors = tf.identity(
top_anchors, name="out_top_anchors"
)
top_objectness_softmax = tf.gather(objectness_scores,
top_indices)
top_objectness_softmax = tf.reshape(top_objectness_softmax, [self._nms_size])
# top_offsets = tf.gather(offsets, top_indices)
# top_objectness = tf.gather(objectness, top_indices)
# Get mini batch
all_ious_gt = self.placeholders[self.PL_ANCHOR_IOUS]
all_offsets_gt = self.placeholders[self.PL_ANCHOR_OFFSETS]
all_classes_gt = self.placeholders[self.PL_ANCHOR_CLASSES]
with tf.variable_scope('mini_batch'):
mini_batch_utils = self.dataset.kitti_utils.mini_batch_utils
mini_batch_mask, _ = \
mini_batch_utils.sample_rpn_mini_batch(all_ious_gt)
# ROI summary images
rpn_mini_batch_size = \
self.dataset.kitti_utils.mini_batch_utils.rpn_mini_batch_size
with tf.variable_scope('bev_rpn_rois'):
mb_bev_anchors_norm = tf.boolean_mask(self._bev_anchors_norm_pl,
mini_batch_mask)
mb_bev_box_indices = tf.zeros_like(
tf.boolean_mask(all_classes_gt, mini_batch_mask),
dtype=tf.int32)
# Show the ROIs of the BEV input density map
# for the mini batch anchors
bev_input_rois = tf.image.crop_and_resize(
self._bev_preprocessed,
mb_bev_anchors_norm,
mb_bev_box_indices,
(32, 32))
bev_input_roi_summary_images = tf.split(
bev_input_rois, self._bev_depth, axis=3)
tf.summary.image('bev_rpn_rois',
bev_input_roi_summary_images[-1],
max_outputs=rpn_mini_batch_size)
with tf.variable_scope('img_rpn_rois'):
# ROIs on image input
mb_img_anchors_norm = tf.boolean_mask(self._img_anchors_norm_pl,
mini_batch_mask)
mb_img_box_indices = tf.zeros_like(
tf.boolean_mask(all_classes_gt, mini_batch_mask),
dtype=tf.int32)
# Do test ROI pooling on mini batch
img_input_rois = tf.image.crop_and_resize(
self._img_preprocessed,
mb_img_anchors_norm,
mb_img_box_indices,
(32, 32))
tf.summary.image('img_rpn_rois',
img_input_rois,
max_outputs=rpn_mini_batch_size)
# Ground Truth Tensors
with tf.variable_scope('one_hot_classes'):
# Anchor classification ground truth
# Object / Not Object
min_pos_iou = \
self.dataset.kitti_utils.mini_batch_utils.rpn_pos_iou_range[0]
objectness_classes_gt = tf.cast(
tf.greater_equal(all_ious_gt, min_pos_iou),
dtype=tf.int32)
objectness_gt = tf.one_hot(
objectness_classes_gt, depth=2,
on_value=1.0 - self._config.label_smoothing_epsilon,
off_value=self._config.label_smoothing_epsilon)
# Mask predictions for mini batch
with tf.variable_scope('prediction_mini_batch'):
objectness_masked = tf.boolean_mask(objectness, mini_batch_mask)
offsets_masked = tf.boolean_mask(offsets, mini_batch_mask)
with tf.variable_scope('ground_truth_mini_batch'):
objectness_gt_masked = tf.boolean_mask(
objectness_gt, mini_batch_mask)
offsets_gt_masked = tf.boolean_mask(all_offsets_gt,
mini_batch_mask)
# Specify the tensors to evaluate
predictions = dict()
# Temporary predictions for debugging
# predictions['anchor_ious'] = anchor_ious
# predictions['anchor_offsets'] = all_offsets_gt
if self._train_val_test in ['train', 'val']:
# All anchors
predictions[self.PRED_ANCHORS] = anchors
# Mini-batch masks
predictions[self.PRED_MB_MASK] = mini_batch_mask
# Mini-batch predictions
predictions[self.PRED_MB_OBJECTNESS] = objectness_masked
predictions[self.PRED_MB_OFFSETS] = offsets_masked
# Mini batch ground truth
predictions[self.PRED_MB_OFFSETS_GT] = offsets_gt_masked
predictions[self.PRED_MB_OBJECTNESS_GT] = objectness_gt_masked
# Proposals after nms
predictions[self.PRED_TOP_INDICES] = top_indices
predictions[self.PRED_TOP_ANCHORS] = top_anchors
predictions[
self.PRED_TOP_OBJECTNESS_SOFTMAX] = top_objectness_softmax
else:
# self._train_val_test == 'test'
predictions['out_top_anchors'] = out_top_anchors
predictions[self.PRED_TOP_ANCHORS] = top_anchors
predictions[
self.PRED_TOP_OBJECTNESS_SOFTMAX] = top_objectness_softmax
return predictions
def preprocess_img(self, img):
""" Preprocess image of any size to desired size:
self._img_pixel_size: input_config.img_dims_h,input_config.img_dims_w
Args:
img: input image, nparray of shape [?,?,3]
Returns:
            resized image
"""
resized_image = cv2.resize(img, (self._img_pixel_size[1],self._img_pixel_size[0]))
return resized_image
def create_feed_dict(self, sample_index=None):
""" Fills in the placeholders with the actual input values.
Currently, only a batch size of 1 is supported
Args:
sample_index: optional, only used when train_val_test == 'test',
a particular sample index in the dataset
sample list to build the feed_dict for
Returns:
a feed_dict dictionary that can be used in a tensorflow session
"""
if self._train_val_test in ["train", "val"]:
# sample_index should be None
if sample_index is not None:
raise ValueError('sample_index should be None. Do not load '
'particular samples during train or val')
# During training/validation, we need a valid sample
# with anchor info for loss calculation
sample = None
anchors_info = []
valid_sample = False
while not valid_sample:
if self._train_val_test == "train":
                    # Get a random sample from the remaining epoch
samples = self.dataset.next_batch(batch_size=1)
# samples = self.dataset.load_samples([2795]) # 000003
else: # self._train_val_test == "val"
# Load samples in order for validation
samples = self.dataset.next_batch(batch_size=1,
shuffle=False)
# Only handle one sample at a time for now
sample = samples[0]
anchors_info = sample.get(constants.KEY_ANCHORS_INFO)
                # When training, if the mini batch is empty, go to the next
                # sample. Otherwise, carry on with the valid sample found.
                # For validation, even if 'anchors_info' is empty, keep the
                # sample (this will help penalize false positives).
                # We will substitute the necessary info with zeros later on.
                # Note: Training/validating all samples can be switched off.
train_cond = (self._train_val_test == "train" and
self._train_on_all_samples)
eval_cond = (self._train_val_test == "val" and
self._eval_all_samples)
if anchors_info or train_cond or eval_cond:
valid_sample = True
else:
# For testing, any sample should work
if sample_index is not None:
samples = self.dataset.load_samples([sample_index])
else:
samples = self.dataset.next_batch(batch_size=1, shuffle=False)
# Only handle one sample at a time for now
sample = samples[0]
anchors_info = sample.get(constants.KEY_ANCHORS_INFO)
sample_name = sample.get(constants.KEY_SAMPLE_NAME)
sample_augs = sample.get(constants.KEY_SAMPLE_AUGS)
# Get ground truth data
label_anchors = sample.get(constants.KEY_LABEL_ANCHORS)
label_classes = sample.get(constants.KEY_LABEL_CLASSES)
# We only need orientation from box_3d
label_boxes_3d = sample.get(constants.KEY_LABEL_BOXES_3D)
# Network input data
image_input = self.preprocess_img(sample.get(constants.KEY_IMAGE_INPUT))
# image_input = sample.get(constants.KEY_IMAGE_INPUT)
bev_input = sample.get(constants.KEY_BEV_INPUT)
bev_input = np.pad(bev_input, ((4,0), (0, 0), (0, 0)))
# Image shape (h, w)
image_shape = [image_input.shape[0], image_input.shape[1]]
ground_plane = sample.get(constants.KEY_GROUND_PLANE)
stereo_calib_p2 = sample.get(constants.KEY_STEREO_CALIB_P2)
if anchors_info:
# anchors_info = [np.zeros(1), 0.5 * np.ones(1), -np.ones((1,6)), -np.ones(1)]
anchors_info = (
                np.pad(anchors_info[0], ( 0, 30000 - anchors_info[0].shape[0]) ), # anchor indices: pad 0 (default)
                np.pad(anchors_info[1], ( 0, 30000 - anchors_info[1].shape[0]), constant_values=0.5), # ious: pad 0.5
                np.pad(anchors_info[2], ((0, 30000 - anchors_info[2].shape[0]), (0, 0)), constant_values=-1), # offsets: pad -1
                np.pad(anchors_info[3], ( 0, 30000 - anchors_info[3].shape[0]), constant_values=-1), # classes: pad -1
)
# Fill the placeholders for anchor information
self._fill_anchor_pl_inputs(anchors_info=anchors_info,
ground_plane=ground_plane,
image_shape=image_shape,
stereo_calib_p2=stereo_calib_p2,
sample_name=sample_name,
sample_augs=sample_augs)
# this is a list to match the explicit shape for the placeholder
self._placeholder_inputs[self.PL_IMG_IDX] = [int(sample_name)]
# Fill in the rest
self._placeholder_inputs[self.PL_BEV_INPUT] = bev_input
self._placeholder_inputs[self.PL_IMG_INPUT] = image_input
# label_boxes_3d = np.pad(label_boxes_3d, ((0, self.Max_Labels - len(label_boxes_3d)), (0,0)) )
label_anchors = np.pad(label_anchors, ( (0, 20 - label_anchors.shape[0]), (0, 0) ))
label_boxes_3d = np.pad(label_boxes_3d, ( (0, 20 - label_boxes_3d.shape[0]), (0, 0) ))
label_classes = np.pad(label_classes, (0, 20 - label_classes.shape[0]))
self._placeholder_inputs[self.PL_LABEL_ANCHORS] = label_anchors
self._placeholder_inputs[self.PL_LABEL_BOXES_3D] = label_boxes_3d
self._placeholder_inputs[self.PL_LABEL_CLASSES] = label_classes
# Sample Info
# img_idx is a list to match the placeholder shape
self._placeholder_inputs[self.PL_IMG_IDX] = [int(sample_name)]
self._placeholder_inputs[self.PL_CALIB_P2] = stereo_calib_p2
self._placeholder_inputs[self.PL_GROUND_PLANE] = ground_plane
# Temporary sample info for debugging
self.sample_info.clear()
self.sample_info['sample_name'] = sample_name
self.sample_info['rpn_mini_batch'] = anchors_info
# Path Drop Mask
random_values = np.random.uniform(0.0,1.0,3)
img_mask, bev_mask = self.create_path_drop_masks_np(
self._path_drop_probabilities[0],
self._path_drop_probabilities[1],
random_values)
self._placeholder_inputs[self.PL_BEV_PATH_DROP_MASK] = [float(bev_mask)]
self._placeholder_inputs[self.PL_IMG_PATH_DROP_MASK] = [float(img_mask)]
# Create a feed_dict and fill it with input values
feed_dict = dict()
for key, value in self.placeholders.items():
feed_dict[value] = self._placeholder_inputs[key]
# print('{}: {}'.format(value,self._placeholder_inputs[key]))
return feed_dict
def _fill_anchor_pl_inputs(self,
anchors_info,
ground_plane,
image_shape,
stereo_calib_p2,
sample_name,
sample_augs):
"""
Fills anchor placeholder inputs with corresponding data
Args:
anchors_info: anchor info from mini_batch_utils
ground_plane: ground plane coefficients
            image_shape: image shape (h, w), used for projecting anchors
            stereo_calib_p2: stereo camera calibration matrix P2 (3 x 4)
            sample_name: name of the sample, e.g. "000001"
sample_augs: list of sample augmentations
"""
# Lists for merging anchors info
all_anchor_boxes_3d = []
anchors_ious = []
anchor_offsets = []
anchor_classes = []
# Create anchors for each class
if len(self.dataset.classes) > 1:
for class_idx in range(len(self.dataset.classes)):
# Generate anchors for all classes
grid_anchor_boxes_3d = self._anchor_generator.generate(
area_3d=self._area_extents,
anchor_3d_sizes=self._cluster_sizes[class_idx],
anchor_stride=self._anchor_strides[class_idx],
ground_plane=ground_plane)
all_anchor_boxes_3d.append(grid_anchor_boxes_3d)
all_anchor_boxes_3d = np.concatenate(all_anchor_boxes_3d)
else:
# Don't loop for a single class
class_idx = 0
grid_anchor_boxes_3d = self._anchor_generator.generate(
area_3d=self._area_extents,
anchor_3d_sizes=self._cluster_sizes[class_idx],
anchor_stride=self._anchor_strides[class_idx],
ground_plane=ground_plane)
all_anchor_boxes_3d = grid_anchor_boxes_3d
# Filter empty anchors
# Skip if anchors_info is []
sample_has_labels = True
if self._train_val_test in ['train', 'val']:
# Read in anchor info during training / validation
if anchors_info:
anchor_indices, anchors_ious, anchor_offsets, \
anchor_classes = anchors_info
anchor_boxes_3d_to_use = all_anchor_boxes_3d[anchor_indices]
else:
train_cond = (self._train_val_test == "train" and
self._train_on_all_samples)
eval_cond = (self._train_val_test == "val" and
self._eval_all_samples)
if train_cond or eval_cond:
sample_has_labels = False
else:
# sample_has_labels = False
sample_has_labels = True
anchor_boxes_3d_to_use = all_anchor_boxes_3d
if not sample_has_labels:
# During testing, or validation with no anchor info, manually
# filter empty anchors
# TODO: share voxel_grid_2d with BEV generation if possible
voxel_grid_2d = \
self.dataset.kitti_utils.create_sliced_voxel_grid_2d(
sample_name, self.dataset.bev_source,
image_shape=image_shape)
# Convert to anchors and filter
anchors_to_use = box_3d_encoder.box_3d_to_anchor(
all_anchor_boxes_3d)
empty_filter = anchor_filter.get_empty_anchor_filter_2d(
anchors_to_use, voxel_grid_2d, density_threshold=1)
anchor_boxes_3d_to_use = all_anchor_boxes_3d[empty_filter]
# Convert lists to ndarrays
anchor_boxes_3d_to_use = np.asarray(anchor_boxes_3d_to_use)
anchors_ious = np.asarray(anchors_ious)
anchor_offsets = np.asarray(anchor_offsets)
anchor_classes = np.asarray(anchor_classes)
# Flip anchors and centroid x offsets for augmented samples
if kitti_aug.AUG_FLIPPING in sample_augs:
anchor_boxes_3d_to_use = kitti_aug.flip_boxes_3d(
anchor_boxes_3d_to_use, flip_ry=False)
if anchors_info:
anchor_offsets[:, 0] = -anchor_offsets[:, 0]
# Convert to anchors
anchors_to_use = box_3d_encoder.box_3d_to_anchor(
anchor_boxes_3d_to_use)
num_anchors = len(anchors_to_use)
# Project anchors into bev
bev_anchors, bev_anchors_norm = anchor_projector.project_to_bev(
anchors_to_use, self._bev_extents)
# Project box_3d anchors into image space
img_anchors, img_anchors_norm = \
anchor_projector.project_to_image_space(
anchors_to_use, stereo_calib_p2, image_shape)
# Reorder into [y1, x1, y2, x2] for tf.crop_and_resize op
self._bev_anchors_norm = bev_anchors_norm[:, [1, 0, 3, 2]]
self._img_anchors_norm = img_anchors_norm[:, [1, 0, 3, 2]]
self._placeholder_inputs[self.PL_ANCHORS] = anchors_to_use # (16215, 6)
# If we are in train/validation mode, and the anchor infos
# are not empty, store them. Checking for just anchors_ious
# to be non-empty should be enough.
if self._train_val_test in ['train', 'val'] and \
len(anchors_ious) > 0:
self._placeholder_inputs[self.PL_ANCHOR_IOUS] = anchors_ious
self._placeholder_inputs[self.PL_ANCHOR_OFFSETS] = anchor_offsets
self._placeholder_inputs[self.PL_ANCHOR_CLASSES] = anchor_classes
# During test, or val when there is no anchor info
elif self._train_val_test in ['test'] or \
len(anchors_ious) == 0:
# During testing, or validation with no gt, fill these in with 0s
self._placeholder_inputs[self.PL_ANCHOR_IOUS] = \
np.zeros(num_anchors)
self._placeholder_inputs[self.PL_ANCHOR_OFFSETS] = \
np.zeros([num_anchors, 6])
self._placeholder_inputs[self.PL_ANCHOR_CLASSES] = \
np.zeros(num_anchors)
else:
raise ValueError('Got run mode {}, and non-empty anchor info'.
format(self._train_val_test))
self._placeholder_inputs[self.PL_BEV_ANCHORS] = bev_anchors
self._placeholder_inputs[self.PL_BEV_ANCHORS_NORM] = \
self._bev_anchors_norm
self._placeholder_inputs[self.PL_IMG_ANCHORS] = img_anchors
self._placeholder_inputs[self.PL_IMG_ANCHORS_NORM] = \
self._img_anchors_norm
def loss(self, prediction_dict):
# these should include mini-batch values only
objectness_gt = prediction_dict[self.PRED_MB_OBJECTNESS_GT]
offsets_gt = prediction_dict[self.PRED_MB_OFFSETS_GT]
# Predictions
with tf.variable_scope('rpn_prediction_mini_batch'):
objectness = prediction_dict[self.PRED_MB_OBJECTNESS]
offsets = prediction_dict[self.PRED_MB_OFFSETS]
with tf.variable_scope('rpn_losses'):
with tf.variable_scope('objectness'):
cls_loss = losses.WeightedSoftmaxLoss()
cls_loss_weight = self._config.loss_config.cls_loss_weight
objectness_loss = cls_loss(objectness,
objectness_gt,
weight=cls_loss_weight)
with tf.variable_scope('obj_norm'):
# normalize by the number of anchor mini-batches
objectness_loss = objectness_loss / tf.cast(
tf.shape(objectness_gt)[0], dtype=tf.float32)
tf.summary.scalar('objectness', objectness_loss)
with tf.variable_scope('regression'):
reg_loss = losses.WeightedSmoothL1Loss()
reg_loss_weight = self._config.loss_config.reg_loss_weight
anchorwise_localization_loss = reg_loss(offsets,
offsets_gt,
weight=reg_loss_weight)
masked_localization_loss = \
anchorwise_localization_loss * objectness_gt[:, 1]
localization_loss = tf.reduce_sum(masked_localization_loss)
with tf.variable_scope('reg_norm'):
# normalize by the number of positive objects
num_positives = tf.reduce_sum(objectness_gt[:, 1])
# Assert the condition `num_positives > 0`
with tf.control_dependencies(
[tf.assert_positive(num_positives)]):
localization_loss = localization_loss / num_positives
tf.summary.scalar('regression', localization_loss)
with tf.variable_scope('total_loss'):
total_loss = objectness_loss + localization_loss
loss_dict = {
self.LOSS_RPN_OBJECTNESS: objectness_loss,
self.LOSS_RPN_REGRESSION: localization_loss,
}
return loss_dict, total_loss
def create_path_drop_masks(self,
p_img,
p_bev,
random_values):
"""Determines global path drop decision based on given probabilities.
Args:
p_img: A tensor of float32, probability of keeping image branch
p_bev: A tensor of float32, probability of keeping bev branch
random_values: A tensor of float32 of shape [3], the results
of coin flips, values should range from 0.0 - 1.0.
Returns:
final_img_mask: A constant tensor mask containing either one or zero
depending on the final coin flip probability.
final_bev_mask: A constant tensor mask containing either one or zero
depending on the final coin flip probability.
"""
def keep_branch(): return tf.constant(1.0)
def kill_branch(): return tf.constant(0.0)
# The logic works as follows:
# We have flipped 3 coins, first determines the chance of keeping
# the image branch, second determines keeping bev branch, the third
# makes the final decision in the case where both branches were killed
# off, otherwise the initial img and bev chances are kept.
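        # Worked example (illustrative numbers): with p_img=0.9, p_bev=0.85 and
        # random_values=[0.95, 0.97, 0.3], both first flips fail
        # (img_chances=0, bev_chances=0), so the third flip decides:
        # 0.3 <= 0.5 keeps bev, giving final_img_mask=0 and final_bev_mask=1.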
img_chances = tf.case([(tf.less(random_values[0], p_img),
keep_branch)], default=kill_branch)
bev_chances = tf.case([(tf.less(random_values[1], p_bev),
keep_branch)], default=kill_branch)
# Decision to determine whether both branches were killed off
third_flip = tf.logical_or(tf.cast(img_chances, dtype=tf.bool),
tf.cast(bev_chances, dtype=tf.bool))
third_flip = tf.cast(third_flip, dtype=tf.float32)
# Make a second choice, for the third case
# Here we use a 50/50 chance to keep either image or bev
        # If it's greater than 0.5, keep the image
img_second_flip = tf.case([(tf.greater(random_values[2], 0.5),
keep_branch)],
default=kill_branch)
        # If it's less than or equal to 0.5, keep bev
bev_second_flip = tf.case([(tf.less_equal(random_values[2], 0.5),
keep_branch)],
default=kill_branch)
# Use lambda since this returns another condition and it needs to
# be callable
final_img_mask = tf.case([(tf.equal(third_flip, 1),
lambda: img_chances)],
default=lambda: img_second_flip)
final_bev_mask = tf.case([(tf.equal(third_flip, 1),
lambda: bev_chances)],
default=lambda: bev_second_flip)
return final_img_mask, final_bev_mask
def create_path_drop_masks_np(self,
p_img,
p_bev,
random_values):
"""Determines global path drop decision based on given probabilities.
Args:
p_img: A tensor of float32, probability of keeping image branch
p_bev: A tensor of float32, probability of keeping bev branch
random_values: A tensor of float32 of shape [3], the results
of coin flips, values should range from 0.0 - 1.0.
Returns:
final_img_mask: A constant tensor mask containing either one or zero
depending on the final coin flip probability.
final_bev_mask: A constant tensor mask containing either one or zero
depending on the final coin flip probability.
"""
keep_branch = 1
kill_branch = 0
# The logic works as follows:
# We have flipped 3 coins, first determines the chance of keeping
# the image branch, second determines keeping bev branch, the third
# makes the final decision in the case where both branches were killed
# off, otherwise the initial img and bev chances are kept.
img_chances = keep_branch if random_values[0] < p_img else kill_branch
bev_chances = keep_branch if random_values[1] < p_bev else kill_branch
# Decision to determine whether both branches were killed off
third_flip = img_chances + bev_chances
# Make a second choice, for the third case
# Here we use a 50/50 chance to keep either image or bev
        # If it's greater than 0.5, keep the image
img_second_flip = keep_branch if random_values[2] > 0.5 else kill_branch
        # If it's less than or equal to 0.5, keep bev
bev_second_flip = keep_branch if random_values[2] <= 0.5 else kill_branch
        # If at least one branch survived the first two flips, keep those
        # choices; otherwise fall back to the 50/50 second flip.
final_img_mask = img_chances if third_flip >= 1 else img_second_flip
final_bev_mask = bev_chances if third_flip >= 1 else bev_second_flip
return final_img_mask, final_bev_mask |
the-stack_0_18450 | import tkinter as tk
class Scrollbar(tk.Frame):
def __init__(self, master, textw, *args, **kwargs):
super().__init__(master, *args, **kwargs)
self.master = master
self.tw = textw
self.font = ("Arial", 1, "bold")
self.config(bg="#252526", highlightthickness=0, padx=1)
self.cw = tk.Canvas(self, bg="#1e1e1e", width=15, highlightthickness=0)
self.cw.pack(fill=tk.BOTH, expand=True, side=tk.LEFT)
self.slider_image = tk.PhotoImage(data="""iVBORw0KGgoAAAANSUhEUgAAAG4AAABFCAYAAACrMNMO
AAAACXBIWXMAAABfAAAAXwEqnu0dAAAAGXRFWHRTb2Z0d2FyZQB3d3cuaW5rc2NhcGUub3Jnm+48GgAAAMBJRE
FUeJzt0UENwCAAwMAxLajjhwOkz8M+pMmdgiYda5/5kPPeDuAf46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo
46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46
KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIuyrgo46KMizIu6gNeAwIJ
26ERewAAAABJRU5ErkJggg==""")
self.cw.create_image(0, 0, image=self.slider_image, anchor=tk.NW, tag="slider")
self.extra_y = 10
self.y_top_lim = 0
self.drag_data = {"y": 0, "item": None}
self.yvalue = 0
self.cw.tag_bind("slider", "<ButtonPress-1>", self.drag_start)
self.cw.tag_bind("slider", "<ButtonRelease-1>", self.drag_stop)
self.cw.tag_bind("slider", "<B1-Motion>", self.drag)
if textw:
self.redraw()
def attach(self, textw):
self.tw = textw
def redraw(self):
self.y_bottom_lim = int(self.tw.textw.index(tk.END).split(".")[0]) * 2 + self.extra_y
def drag_start(self, event):
self.drag_data["item"] = self.cw.find_closest(event.x, event.y)[0]
self.drag_data["y"] = event.y
def drag_stop(self, event):
self.drag_data["item"] = None
self.drag_data["y"] = 0
def drag(self, event):
item = self.drag_data["item"]
if item != 1:
return
delta_y = event.y - self.drag_data["y"]
self.cw.move(item, 0, delta_y)
self.drag_data["y"] = event.y
self.yvalue = y = self.cw.coords(item)[1]
if y <= self.y_top_lim:
self.cw.move("slider", 0, -(y - self.y_top_lim))
elif y >= self.y_bottom_lim:
self.cw.move("slider", 0, -(y - self.y_bottom_lim))
self.tw.textw.yview(int(y / self.cw.winfo_height() * 100))
self.tw.master.redraw_ln()
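
# Usage sketch (hypothetical names): this widget expects a text-widget wrapper
# exposing `.textw` (a tk.Text) and whose master implements `redraw_ln()`.
#   root = tk.Tk()
#   editor = EditorPane(root)        # hypothetical wrapper providing .textw
#   sb = Scrollbar(root, editor)
#   sb.pack(side=tk.RIGHT, fill=tk.Y)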
|
the-stack_0_18453 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import sys
import os
from rlpytorch import *
if __name__ == '__main__':
trainer = Trainer()
runner = SingleProcessRun()
env, all_args = load_env(os.environ, trainer=trainer, runner=runner)
GC = env["game"].initialize()
model = env["model_loaders"][0].load_model(GC.params)
env["mi"].add_model("model", model, opt=True)
env["mi"].add_model("actor", model, copy=True, cuda=all_args.gpu is not None, gpu_id=all_args.gpu)
trainer.setup(sampler=env["sampler"], mi=env["mi"], rl_method=env["method"])
GC.reg_callback("train", trainer.train)
GC.reg_callback("actor", trainer.actor)
runner.setup(GC, episode_summary=trainer.episode_summary,
episode_start=trainer.episode_start)
runner.run()
|
the-stack_0_18454 |
"""
##############################################
Recommendations (``examples.recommendations``)
##############################################
In this examples of collaborative filtering we consider movie recommendation using common MovieLens data set. It
represents typical cold start problem. A recommender system compares the user's profile to reference
characteristics from the user's social environment. In the collaborative filtering approach, the recommender
system identify users who share the same preference with the active user and propose items which the like-minded
users favoured (and the active user has not yet seen).
We used the MovieLens 100k data set in this example. This data set consists of 100 000 ratings (1-5) from 943
users on 1682 movies. Each user has rated at least 20 movies. Simple demographic info for the users is included.
Factorization is performed on a split data set as provided by the collector of the data. The data is split into
two disjoint sets each consisting of training set and a test set with exactly 10 ratings per user.
It is common that matrices in the field of recommendation systems are very sparse (ordinary user rates only a small
fraction of items from the large items' set), therefore ``scipy.sparse`` matrix formats are used in this example.
The configuration of this example is SNMF/R factorization method using Random Vcol algorithm for initialization.
.. note:: MovieLens movies' rating data set used in this example is not included in the `datasets` and need to be
downloaded. Download links are listed in the ``datasets``. Download compressed version of the MovieLens 100k.
To run the example, the extracted data set must exist in the ``MovieLens`` directory under ``datasets``.
.. note:: No additional knowledge in terms of ratings' timestamps, information about items and their
genres or demographic information about users is used in this example.
To run the example simply type::
python recommendations.py
or call the module's function::
import nimfa.examples
nimfa.examples.recommendations.run()
.. note:: This example uses ``matplotlib`` library for producing visual interpretation of the RMSE error measure.
"""
from os.path import dirname, abspath
from os.path import join
from warnings import warn
import numpy as np
import nimfa
try:
import matplotlib.pylab as plb
except ImportError as exc:
warn("Matplotlib must be installed to run Recommendations example.")
def run():
"""
Run SNMF/R on the MovieLens data set.
    Factorization is run on the `ua.base`, `ua.test` and `ub.base`, `ub.test` data sets. These are MovieLens's splits
    of the data into training and test sets. Both test sets are disjoint, with exactly 10 ratings per user
    in the test set.
"""
for data_set in ['ua', 'ub']:
V = read(data_set)
W, H = factorize(V)
rmse(W, H, data_set)
def factorize(V):
"""
Perform SNMF/R factorization on the sparse MovieLens data matrix.
Return basis and mixture matrices of the fitted factorization model.
:param V: The MovieLens data matrix.
:type V: `numpy.matrix`
"""
snmf = nimfa.Snmf(V, seed="random_vcol", rank=30, max_iter=30, version='r', eta=1.,
beta=1e-4, i_conv=10, w_min_change=0)
print("Algorithm: %s\nInitialization: %s\nRank: %d" % (snmf, snmf.seed, snmf.rank))
fit = snmf()
sparse_w, sparse_h = fit.fit.sparseness()
print("""Stats:
- iterations: %d
- Euclidean distance: %5.3f
- Sparseness basis: %5.3f, mixture: %5.3f""" % (fit.fit.n_iter,
fit.distance(metric='euclidean'),
sparse_w, sparse_h))
return fit.basis(), fit.coef()
def read(data_set):
"""
Read movies' ratings data from MovieLens data set.
:param data_set: Name of the split data set to be read.
:type data_set: `str`
"""
print("Read MovieLens data set")
fname = join(dirname(dirname(abspath(__file__))), "datasets", "MovieLens", "%s.base" % data_set)
V = np.ones((943, 1682)) * 2.5
for line in open(fname):
u, i, r, _ = list(map(int, line.split()))
V[u - 1, i - 1] = r
return V
def rmse(W, H, data_set):
"""
Compute the RMSE error rate on MovieLens data set.
:param W: Basis matrix of the fitted factorization model.
:type W: `numpy.matrix`
:param H: Mixture matrix of the fitted factorization model.
:type H: `numpy.matrix`
:param data_set: Name of the split data set to be read.
:type data_set: `str`
"""
fname = join(dirname(dirname(abspath(__file__))), "datasets", "MovieLens", "%s.test" % data_set)
rmse = []
for line in open(fname):
u, i, r, _ = list(map(int, line.split()))
sc = max(min((W[u - 1, :] * H[:, i - 1])[0, 0], 5), 1)
rmse.append((sc - r) ** 2)
print("RMSE: %5.3f" % np.sqrt(np.mean(rmse)))
if __name__ == "__main__":
"""Run the Recommendations example."""
run()
|
the-stack_0_18455 | # ARGUMENTS:
# rds_db_name: {0}
SQL = """
DO
$do$
BEGIN
IF NOT EXISTS (SELECT FROM pg_database WHERE datname = '{0}') THEN
CREATE DATABASE {0} LC_COLLATE 'C' TEMPLATE template0 ENCODING 'UTF8';
END IF;
END
$do$;
"""
def define_query(rds_db_name: str) -> str:
print('Defining SQL query')
try:
return SQL.format(rds_db_name)
except Exception:
print('Error during SQL query definition')
raise
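

# Quick manual check of the rendered SQL; "example_db" is a hypothetical
# database name used purely for illustration.
if __name__ == "__main__":
    print(define_query("example_db"))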
|
the-stack_0_18456 | import numpy as np
import gym
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy
from rl.memory import SequentialMemory
ENV_NAME = 'CartPole-v0'
# Get the environment and extract the number of actions.
env = gym.make(ENV_NAME)
np.random.seed(123)
env.seed(123)
nb_actions = env.action_space.n
# Next, we build a very simple model.
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.shape))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(16))
model.add(Activation('relu'))
model.add(Dense(nb_actions))
model.add(Activation('linear'))
print(model.summary())
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=50000, window_length=1)
policy = BoltzmannQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=10,
target_model_update=1e-2, policy=policy)
dqn.compile(Adam(lr=1e-3), metrics=['mae'])
# Okay, now it's time to learn something! We visualize the training here for show, but this
# slows down training quite a lot. You can always safely abort the training prematurely using
# Ctrl + C.
dqn.fit(env, nb_steps=50000, visualize=True, verbose=2)
# After training is done, we save the final weights.
dqn.save_weights('dqn_{}_weights.h5f'.format(ENV_NAME), overwrite=True)
# Finally, evaluate our algorithm for 5 episodes.
dqn.test(env, nb_episodes=5, visualize=True)
print('yep') |
the-stack_0_18457 | import os
import pytest
import bitjws
rawkey = bitjws.gen_privatekey()
def test_too_big():
key = bitjws.PrivateKey(rawkey)
try:
ser = bitjws.sign_serialize(key, test='a' * 4294967295)
h, p = bitjws.validate_deserialize(ser)
assert h and p
except MemoryError:
pytest.skip('Not enough memory to run this test')
return
|
the-stack_0_18458 | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
project = u"pycounts"
copyright = u"2021, Jonna"
author = u"Jonna"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"myst_nb",
"autoapi.extension",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
autoapi_dirs = ["../src"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
|
the-stack_0_18460 | import numpy as np
import scipy.sparse as sp
import torch
import torch.nn.functional as F
import torch.nn as nn
import sys
import pickle as pkl
import networkx as nx
import random
def encode_onehot(labels):
classes = set(labels)
classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
enumerate(classes)}
labels_onehot = np.array(list(map(classes_dict.get, labels)),
dtype=np.int32)
return labels_onehot
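# Worked example: encode_onehot(['cat', 'dog', 'cat']) returns a (3, 2) one-hot
# array such as [[1, 0], [0, 1], [1, 0]]; note the class-to-column mapping
# comes from iterating a set, so the column order is not stable across runs.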
def to_sparse(x):
""" converts dense tensor x to sparse format """
x_typename = torch.typename(x).split('.')[-1]
sparse_tensortype = getattr(torch.sparse, x_typename)
indices = torch.nonzero(x)
if len(indices.shape) == 0: # if all elements are zeros
return sparse_tensortype(*x.shape)
indices = indices.t()
values = x[tuple(indices[i] for i in range(indices.shape[0]))]
return torch.sparse.FloatTensor(indices, values, x.size())
def load_data(path="./data/cora/", dataset="cora"):
"""Load citation network dataset (cora only for now)"""
print('Loading {} dataset...'.format(dataset))
idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
dtype=np.dtype(str))
features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
labels = encode_onehot(idx_features_labels[:, -1])
# build graph
idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
idx_map = {j: i for i, j in enumerate(idx)}
edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
dtype=np.int32)
edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
dtype=np.int32).reshape(edges_unordered.shape)
adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
shape=(labels.shape[0], labels.shape[0]),
dtype=np.float32)
# build symmetric adjacency matrix
adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
features = normalize(features)
adj = normalize(adj + sp.eye(adj.shape[0]))
idx_train = range(140)
idx_val = range(200, 500)
idx_test = range(500, 1500)
features = torch.FloatTensor(np.array(features.todense()))
labels = torch.LongTensor(np.where(labels)[1])
adj = sparse_mx_to_torch_sparse_tensor(adj)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, labels, idx_train, idx_val, idx_test
def normalize(mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
mx = r_mat_inv.dot(mx)
return mx
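# Worked example: normalize(sp.csr_matrix([[1., 1., 0.], [2., 0., 2.]]))
# scales each row by 1/rowsum, giving [[0.5, 0.5, 0.], [0.5, 0., 0.5]];
# all-zero rows stay zero because their infinite 1/rowsum is replaced by 0.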
def accuracy(output, labels):
preds = output.max(1)[1].type_as(labels)
correct = preds.eq(labels).double()
correct = correct.sum()
return correct / len(labels)
def sparse_mx_to_torch_sparse_tensor(sparse_mx):
"""Convert a scipy sparse matrix to a torch sparse tensor."""
sparse_mx = sparse_mx.tocoo().astype(np.float32)
indices = torch.from_numpy(
np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
values = torch.from_numpy(sparse_mx.data)
shape = torch.Size(sparse_mx.shape)
return torch.sparse.FloatTensor(indices, values, shape)
def IterGCNLoss(weights, preds, K, K_min):
values, indices = preds.topk(K+1, dim=1)
secondary_values = values[:, 1:1+K]
secondary_values = torch.mean(secondary_values, dim=1)
min_values, _ = preds.topk(K_min, dim=1, largest=False)
min_values = torch.mean(min_values, dim=1)
max_value = values[:, 0]
return torch.mean((max_value - secondary_values)/(max_value - min_values) * weights)
def DelTensorElem(arr, index):
arr1 = arr[0:index]
arr2 = arr[index+1:]
return torch.cat((arr1,arr2),dim=0)
def RemovFromVec(vec1_, vec2_):
vec1 = vec1_.detach().clone()
vec2 = vec2_.copy()
vec2_num = vec2.shape[0]
for vec2_idx in range(vec2_num):
elem = vec2[vec2_idx]
elem_idx = torch.where(vec1 == elem)[0][0]
vec1 = DelTensorElem(vec1, elem_idx)
return vec1
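# Worked example: RemovFromVec(torch.tensor([0, 1, 2, 3, 4]), np.array([1, 3]))
# returns tensor([0, 2, 4]), i.e. vec1 with every entry of vec2 removed.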
def SelectPseudoForPoi(GCN_output, ori_idx, ori_labels, selected_num):
selected_num = selected_num
GCN_output = GCN_output.detach().clone()
[confidence_values, col_idx_max] = GCN_output.max(axis=1)
values_rank, idx_rank = confidence_values.sort(dim = 0, descending = True)
idx_rank = RemovFromVec(idx_rank, ori_idx.numpy())
selected_idx = idx_rank[0:selected_num]
confident_u_res = torch.zeros([GCN_output.shape[0], GCN_output.shape[1]]).cuda()
confident_u_res[ori_idx, ori_labels] = 1
confident_u_res[selected_idx, :] = GCN_output[selected_idx, :]
return confident_u_res
def KLDivLoss_1(y, x):
x = F.log_softmax(x)
y = F.softmax(y, dim=1)
criterion = nn.KLDivLoss()
return criterion(x, y)
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
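# Worked example: sample_mask([1, 3], 5) returns
# array([False,  True, False,  True, False]).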
def load_data_1(dataset_str, tr_num, val_num):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("./data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("./data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
features = normalize(features)
features = torch.FloatTensor(np.array(features.todense()))
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
labels_vec = labels.argmax(1)
labels_num = labels_vec.max()+1
idx_train = []
idx_val = []
idx_test = torch.tensor(range(labels.shape[0]))
for label_idx in range(labels_num):
pos0 = np.argwhere(labels_vec == label_idx).flatten()
tr_val_pos = random.sample(pos0.tolist(), tr_num+val_num)
idx_train.append(tr_val_pos[0:tr_num])
idx_val.append(tr_val_pos[-val_num:])
idx_train = np.array(idx_train).flatten()
idx_val = np.array(idx_val).flatten()
idx_test = RemovFromVec(idx_test, idx_train)
idx_test = RemovFromVec(idx_test, idx_val)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, torch.LongTensor(labels_vec), idx_train, idx_val, idx_test
def load_data_std(dataset_str, tr_num, val_num):
"""
Loads input data from gcn/data directory
ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
(a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict
object;
ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.
All objects above must be saved using python pickle module.
:param dataset_str: Dataset name
:return: All data input files loaded (as well the training/test data).
"""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("./data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("./data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
features = normalize(features)
features = torch.FloatTensor(np.array(features.todense()))
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_test_std = torch.LongTensor(idx_test)
idx_train = range(len(y))
idx_train_std = torch.LongTensor(idx_train)
idx_val = range(len(y), len(y)+500)
idx_val_std = torch.LongTensor(idx_val)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
labels_vec = labels.argmax(1)
labels_num = labels_vec.max()+1
idx_train = []
idx_val = []
idx_test = torch.tensor(range(labels.shape[0]))
for label_idx in range(labels_num):
pos0 = np.argwhere(labels_vec == label_idx).flatten()
tr_val_pos = random.sample(pos0.tolist(), tr_num+val_num)
idx_train.append(tr_val_pos[0:tr_num])
idx_val.append(tr_val_pos[-val_num:])
idx_train = np.array(idx_train).flatten()
idx_val = np.array(idx_val).flatten()
idx_test = RemovFromVec(idx_test, idx_train)
idx_test = RemovFromVec(idx_test, idx_val)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, torch.LongTensor(labels_vec), idx_train_std, idx_val_std, idx_test_std
def load_new_dataset_random_1(data_name, tr_num, val_num):
[adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask] = np.load(data_name, allow_pickle=True)
pos_adj = np.argwhere(adj==1)
adj[pos_adj[:,1],pos_adj[:,0]]=1
adj = normalize(adj + sp.eye(adj.shape[0]))
adj = sparse_mx_to_torch_sparse_tensor(adj)
features = normalize(features)
features = torch.FloatTensor(np.array(features.todense()))
labels_1 = torch.LongTensor(np.argmax(y_train+ y_val+ y_test, 1))
labels_vec = np.argmax(y_train+ y_val+ y_test, 1)
labels_num = labels_1.max()+1
idx_train = []
idx_val = []
idx_test = torch.tensor(range(labels_1.shape[0]))
independent_idx = []
for i in range(adj.shape[0]):
if adj[i,i]==1:
independent_idx.append(i)
for label_idx in range(labels_num):
pos0 = np.argwhere(labels_vec == label_idx).flatten()
        while(1):
            tr_val_pos = random.sample(pos0.tolist(), tr_num+val_num)
            list1 = tr_val_pos[0:tr_num]
            # Resample if any chosen training node is isolated (its only
            # adjacency entry is its own self-loop); otherwise accept the draw.
            if any(j in independent_idx for j in list1):
                continue
            break
idx_train.append(tr_val_pos[0:tr_num])
idx_val.append(tr_val_pos[-val_num:])
idx_train = np.array(idx_train).flatten()
idx_val = np.array(idx_val).flatten()
idx_test = RemovFromVec(idx_test, idx_train)
idx_test = RemovFromVec(idx_test, idx_val)
idx_train = torch.LongTensor(idx_train)
idx_val = torch.LongTensor(idx_val)
idx_test = torch.LongTensor(idx_test)
return adj, features, torch.LongTensor(labels_vec), idx_train, idx_val, idx_test
def DelDiagEdgeIndex(edge_index):
vec1 = edge_index[0,:] - edge_index[1,:]
pos1 = torch.where(vec1!=0)
return edge_index[:, pos1[0]]
def kl_categorical(p_logit, q_logit):
p = F.softmax(p_logit, dim=-1)
_kl = torch.sum(p * (F.log_softmax(p_logit, dim=-1)
- F.log_softmax(q_logit, dim=-1)), 1)
return torch.mean(_kl)
def sim(z1: torch.Tensor, z2: torch.Tensor):
z1 = F.normalize(z1)
z2 = F.normalize(z2)
return torch.mm(z1, z2.t())
def semi_loss(z1: torch.Tensor, z2: torch.Tensor, tau):
f = lambda x: torch.exp(x / tau)
refl_sim = f(sim(z1, z1))
between_sim = f(sim(z1, z2))
return -torch.log(
between_sim.diag()
/ (refl_sim.sum(1) + between_sim.sum(1) - refl_sim.diag()))
def contrastive_loss(z1: torch.Tensor, z2: torch.Tensor,
mean: bool = True):
h1 = z1
h2 = z2
l1 = semi_loss(h1, h2, 0.5)
l2 = semi_loss(h2, h1, 0.5)
ret = (l1 + l2) * 0.5
ret = ret.mean() if mean else ret.sum()
return ret |
the-stack_0_18461 | _base_ = [
'../../config/_base_/datasets/nus-mono3d.py', '../../config/_base_/models/fcos3d.py',
'../../config/_base_/schedules/mmdet_schedule_1x.py', '../../config/_base_/default_runtime.py'
]
# model settings
model = dict(
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
# frozen_stages=1,
frozen_stages=1,
norm_cfg=dict(type='SyncBN', requires_grad=True),
# norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=False,
# norm_eval=True,
style='pytorch',
# style='caffe'
dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
stage_with_dcn=(False, False, True, True)
))
class_names = [
'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]
img_norm_cfg = dict(
mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFileMono3D'),
dict(
type='LoadAnnotations3D',
with_bbox=True,
with_label=True,
with_attr_label=True,
with_bbox_3d=True,
with_label_3d=True,
with_bbox_depth=True),
dict(type='Resize', img_scale=(1600, 900), keep_ratio=True),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(
type='Collect3D',
keys=[
'img', 'gt_bboxes', 'gt_labels', 'attr_labels', 'gt_bboxes_3d',
'gt_labels_3d', 'centers2d', 'depths'
]),
]
test_pipeline = [
dict(type='LoadImageFromFileMono3D'),
dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
flip=False,
transforms=[
dict(type='RandomFlip3D'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['img']),
])
]
data = dict(
samples_per_gpu=8,
workers_per_gpu=8,
train=dict(pipeline=train_pipeline),
val=dict(pipeline=test_pipeline),
test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
lr=0.008, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
# warmup_iters=1500, # for moco
warmup_ratio=1.0 / 3,
step=[8, 11])
total_epochs = 12
evaluation = dict(interval=2)
# load_from=None
# load_from='checkpoints/waymo_ep50_with_backbone.pth'
# load_from='checkpoints/imgsup_finetune_waymo_ep1_with_backbone.pth'
# load_from='checkpoints/resnet50-19c8e357_convert_mono3d.pth'
# load_from='checkpoints/imgsup_finetune_waymo_ep5_with_backbone_repro.pth'
# load_from='checkpoints/imgsup_finetune_waymo_ep5_with_backbone_moco.pth'
# load_from=None
# load_from='checkpoints/mono3d_waymo_half.pth'
# load_from='checkpoints/mono3d_waymo_oneten.pth'
load_from='checkpoints/mono3d_waymo_full.pth'
# load_from='checkpoints/mono3d_waymo_onefive.pth'
|
the-stack_0_18464 | # Python stdlib
from datetime import datetime, timezone
import logging
import os
# PyPi packages
import azure.functions as func
# local packages
from shared_code.fetcher import fetch_results
# Set up logger
LOG_LEVEL = os.getenv("LOGLEVEL", "INFO")
LOG_LEVEL = LOG_LEVEL.upper()
logger = logging.getLogger()
logger.setLevel(logging.getLevelName(LOG_LEVEL))
# Getting required data from environment variable, raising an error when not configured
fn_env = {
"tenant_name": os.environ["NETSKOPE_FQDN"],
"token": os.environ["NETSKOPE_TOKEN"],
"security_results_access_key": os.environ["DLP_SCAN_RESULT_STORAGE"],
"timestamp_container": os.environ["TIMESTAMP_CONTAINER"],
"actions": [],
}
def _update_env(fn_env):
actions = ("delete", "encrypt", "label", "quarantine", "restrict")
enabled = []
for action in actions:
if os.environ.get(f"{action.upper()}_ACTION"):
enabled.append(
{
"action": action,
"policies": os.environ.get(f"{action.upper()}_MATCH_POLICIES", ""),
"profiles": os.environ.get(f"{action.upper()}_MATCH_PROFILES", ""),
"rules": os.environ.get(f"{action.upper()}_MATCH_RULES", ""),
}
)
fn_env["actions"] = enabled
_update_env(fn_env)
def main(mytimer: func.TimerRequest):
utc_timestamp = datetime.utcnow().replace(tzinfo=timezone.utc).isoformat()
if mytimer.past_due:
logger.info("The timer is past due!")
logger.info(f"Executing Scheduled DLP-Scan alerts gathering function trigger at {utc_timestamp}")
for act in fn_env["actions"]:
try:
action_env = fn_env.copy()
action_env.update(act)
message = fetch_results(action_env)
logger.info(message)
except Exception as err:
logger.error(f"Error fetching DLP Scan results {err}")
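# Illustrative local smoke test (not part of the original function app): a
# minimal stand-in for func.TimerRequest so main() can be exercised outside the
# Azure Functions host. Only the past_due attribute is accessed by main().
def _local_smoke_test():
    class _FakeTimer:
        past_due = False
    main(_FakeTimer())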
|
the-stack_0_18467 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from executors.tool_executor import *
class TracediffRiscVExecutor(ToolExecutor):
def __init__(self):
super().__init__()
self.trace_cmd = None
self.log = None
self.elog = None
def load(self, arg_ctrl_item):
super().load(arg_ctrl_item)
def skip(self):
return False
def execute(self):
if not PathUtils.check_file("sim.log"):
Msg.info(
"[TracediffRiscVExecutor::skip] skipping since no sim.log "
"found"
)
return True
if not PathUtils.check_file("fpix_sim.log"):
Msg.info(
"[TracediffRiscVExecutor::skip] skipping since fpx_sim.log "
"not found"
)
return True
my_cmd = (
'diff -y fpix_sim.log sim.log | grep -ve "---" | grep -vie '
'"exi" | grep -vie "exe" | grep -ve "_t"'
)
self.log = "tracediff_result.log"
self.elog = "tracediff_result.err"
my_result = SysUtils.exec_process(
my_cmd, self.log, self.elog, self.ctrl_item.timeout, True
)
my_use_result = None
vbar_symbol_count = 0
exception_count = 0
success = False
with open(self.log, "r") as results_file:
for line in results_file:
vbar_symbol_count += line.count("|")
if "Excpt ID 0x2" in line:
exception_count = exception_count + 1
if vbar_symbol_count == 0 and exception_count == 0:
success = True
my_use_result = list(my_result)
my_use_result[0] = int(not success)
# This inversion is necessary because int 0 means success to the
# Summary class.
with open(self.log, "a") as out_file:
if not success:
out_file.write(
"tracediff_riscv.log fail, look for | symbols or "
"'Excpt ID 0x2'; "
+ str(vbar_symbol_count)
+ " mismatches, and up to "
+ str(exception_count)
+ " suspicious exceptions."
)
else:
out_file.write(
"tracediff_riscv.log success, only bootcode difference "
"between standalone and interactive as expected."
)
Msg.info("CMPCommand = " + str({"trace-cmp-cmd": my_cmd}))
my_extract_results = self.extract_results(
my_use_result, "./" + self.log, None
)
Msg.info("CMPResult = " + str(my_extract_results))
Msg.flush()
# return SysUtils.success(0) #[0,1,2] #Doesn't seem to really matter
# what this is, the Summary system needs fixing.
return SysUtils.success(int(my_result[ToolResult.process_retcode]))
def query_result_log(self, arg_hfile):
my_msg = ""
        # only the last line of the log is of interest here
        for line in arg_hfile:
            my_msg = line
        return my_msg
|
the-stack_0_18468 | import copy
import cvxpy
import numpy as np
from scripts.utils import yaml_paser as yaml
from scripts.utils.utils import Utils as utils
import logging
import os
import time
from collections import OrderedDict
'''
minimize
(1/2) * x.T * P * x + q.T * x
subject to
lbG <= G * x <= ubG
lbD <= D * x <= ubD
A * x == b
lb <= x <= ub
D - dynamic constraint
'''
class SQPsolver:
def __init__(self, main_logger_name=__name__, verbose=False, log_file=False):
self.P = []
self.G = []
self.A = []
self.q = []
self.lb = []
self.ub = []
self.lbG = []
self.ubG = []
self.b = []
self.D = None
self.lbD = None
self.ubD = None
self.initial_guess = []
self.status = "-1"
self.norm_ = 1
self.rho_k = 0
self.is_converged = False
self.is_initialized = False
self.solver_config = OrderedDict()
self.solver = []
self.penalty_norm = 1
self.trust_region_norm = np.inf
self.num_qp_iterations = 0
self.num_sqp_iterations = 0
self.solving_time = 0
self.predicted_reductions = []
self.actual_reductions = []
self.model_costs = []
self.actual_costs = []
self.actual_reduction_improve = 0
self.predicted_reduction_improve = 0
self.actual_cost_improve = 0
self.model_cost_improve = 0
self.logger = logging.getLogger(main_logger_name + __name__)
utils.setup_logger(self.logger, main_logger_name, verbose, log_file)
# initializing SQP solver variables and solver config
def init(self, **kwargs):
self.D = None
self.lbD = None
self.ubD = None
self.P = utils.get_var_from_kwargs("P", **kwargs)
self.q = utils.get_var_from_kwargs("q", **kwargs)
self.G = utils.get_var_from_kwargs("G", optional=False, **kwargs)
self.lbG = utils.get_var_from_kwargs("lbG", optional=True, **kwargs)
self.ubG = utils.get_var_from_kwargs("ubG", optional=True, **kwargs)
self.A = utils.get_var_from_kwargs("A", optional=False, **kwargs)
self.b = utils.get_var_from_kwargs("b", optional=False, **kwargs)
self.initial_guess = utils.get_var_from_kwargs("initial_guess", optional=True,
default=np.zeros((self.P.shape[0], 1)).flatten(), **kwargs)
solver_config = utils.get_var_from_kwargs("solver_config", optional=True, **kwargs)
solver = utils.get_var_from_kwargs("solver", optional=True, **kwargs)
if solver_config is not None:
self.solver_config = solver_config
else:
file_path_prefix = os.path.join(os.path.dirname(__file__), '../../config/')
sqp_config_file = file_path_prefix + 'sqp_config.yaml'
sqp_yaml = yaml.ConfigParser(sqp_config_file)
self.solver_config = sqp_yaml.get_by_key("sqp")
self.penalty_norm = self.solver_config["penalty_norm"]
self.trust_region_norm = self.solver_config["trust_region_norm"]
self.analyse_inputs()
if solver is not None:
self.solver = solver
else:
self.solver = self.solver_config["solver"][0]
# to print problem data
def display_problem(self):
print ("P")
print (self.P)
print ("q")
print (self.q)
print ("G")
print (self.G)
print ("lbG")
print (self.lbG)
print ("ubG")
print (self.ubG)
print ("A")
print (self.A)
print ("b")
print (self.b)
print ("lb")
print (self.lb)
print ("ub")
print (self.ub)
print ("initial guess")
print (self.initial_guess)
# updating solver variables on each call of callback function
def update_prob(self, G=None, lbG=None, ubG=None, A=None, b=None):
if G is not None:
self.G = G
if lbG is not None:
self.lbG = lbG
if ubG is not None:
self.ubG = ubG
if A is not None:
self.A = A
if b is not None:
self.b = b
self.analyse_inputs()
# to analyze input and accordingly to adjust constraints and limits
def analyse_inputs(self):
# replacing lower limit none constraints with very lower value
if self.lbG is not None:
self.lbG = np.array([utils.replace_none(lb, float(self.solver_config["replace_none_with"]), negate=True)
for lb in self.lbG])
# replacing upper limit none constraints with very lower value
if self.ubG is not None:
self.ubG = np.array([utils.replace_none(ub, float(self.solver_config["replace_none_with"]))
for ub in self.ubG])
if self.G is not None and self.lbG is None and self.ubG is not None:
self.lbG = -self.ubG
if self.G is not None and self.ubG is None and self.lbG is not None:
self.G = np.vstack([-self.G, -self.G])
self.lbG = np.hstack([-self.ubG, -self.ubG])
self.ubG = np.hstack([self.ubG, -self.ubG])
# method to check if the solver state variable has converged
def is_x_converged(self, x_k, p_k, tolerance=1e-3):
return abs((np.linalg.norm(x_k - (x_k + p_k), np.inf))) <= tolerance
# method to check if the objective function has converged
def is_objective_function_converged(self, objective, tolerance=1e-3):
return abs(objective) <= tolerance
# method to check if the given state variable respects the given constraints
def is_constraints_satisfied(self, x_k, p, tolerance=1e-3):
cons1_cond = np.isclose(np.matmul(self.G, x_k) <= self.ubG, 1, rtol=tolerance, atol=tolerance)
cons2_cond = np.isclose(np.matmul(self.G, x_k) >= self.lbG, 1, rtol=tolerance, atol=tolerance)
cons3_cond = np.isclose(np.matmul(self.A, x_k), self.b, rtol=tolerance, atol=tolerance)
cons4_cond = True
if self.D is not None:
            p_k = np.hstack([x_k] * (self.D.shape[1] // p.shape[0]))
cons4_cond = np.isclose(np.matmul(self.D, p_k) >= self.lbD, 1, rtol=tolerance, atol=tolerance).all()
return cons1_cond.all() and cons2_cond.all() and cons3_cond.all() and cons4_cond
# evaluating constraints for a given solver state
def evaluate_constraints(self, x_k, p):
cons1 = np.subtract(np.matmul(self.G, x_k), self.ubG)
cons2 = np.add(np.matmul(-self.G, x_k), self.lbG)
cons3 = np.subtract(np.matmul(self.A, x_k), self.b)
cons4 = 0
if self.D is not None:
            p_k = np.hstack([x_k] * (self.D.shape[1] // p.shape[0]))
cons4 = self.lbD - cvxpy.matmul(self.D, p_k)
return cons1.flatten(), cons2.flatten(), cons3.flatten(), cons4
# gradient of solver constraint matrices
def get_constraints_gradients(self):
cons1_grad = self.G
cons2_grad = -self.G
cons3_grad = self.A
cons4_grad = 0
if self.D is not None:
cons4_grad = -self.D
return cons1_grad, cons2_grad, cons3_grad, cons4_grad
# gradient and hessian of the solver objective function
def get_objective_gradient_and_hessian(self, x_k):
model_grad = 0.5 * np.matmul((self.P + self.P.T), x_k)
model_hess = 0.5 * (self.P + self.P.T)
return model_grad, model_hess
# formulating objective function with l1 times constraint norm
def get_model_objective(self, x_k, p, penalty):
cons1_at_xk, cons2_at_xk, cons3_at_xk, cons4_at_xk = self.evaluate_constraints(x_k, p)
cons1_grad_at_xk, cons2_grad_at_xk, cons3_grad_at_xk, cons4_grad_at_xk = self.get_constraints_gradients()
cons1_model = cons1_at_xk + cons1_grad_at_xk * p
cons2_model = cons2_at_xk + cons2_grad_at_xk * p
cons3_model = cons3_at_xk + cons3_grad_at_xk * p
cons4_model = 0
if self.D is not None:
            p1 = cvxpy.hstack([p] * (self.D.shape[1] // p.shape[0]))
cons4_model = cons4_at_xk + cvxpy.matmul(cons4_grad_at_xk, p1)
objective_grad_at_xk, objective_hess_at_xk = self.get_objective_gradient_and_hessian(x_k)
objective_at_xk = self.get_actual_objective(x_k, p, penalty)
model = objective_at_xk.value + objective_grad_at_xk * p + 0.5 * cvxpy.quad_form(p, objective_hess_at_xk)
model += penalty * (cvxpy.norm(cons1_model, self.penalty_norm) + cvxpy.norm(cons2_model, self.penalty_norm)
+ cvxpy.norm(cons3_model, self.penalty_norm)
+ cvxpy.norm(cons4_model, self.penalty_norm))
return model, objective_at_xk
# to get the value of the original objective cost
def get_actual_objective(self, xk, p, penalty):
x = cvxpy.Variable(self.P.shape[0])
x.value = copy.copy(xk)
objective = 0.5 * cvxpy.quad_form(x, self.P) + self.q * x
constraints1 = cvxpy.norm(self.G * x - self.ubG.flatten(), self.penalty_norm)
constraints2 = cvxpy.norm(-self.G * x + self.lbG.flatten(), self.penalty_norm)
constraints3 = cvxpy.norm(self.A * x - self.b.flatten(), self.penalty_norm)
constraints4 = 0
if self.D is not None:
            p1 = np.hstack([xk] * (self.D.shape[1] // p.shape[0]))
constraints4 = cvxpy.norm(self.lbD - cvxpy.matmul(self.D, p1), self.penalty_norm)
objective += penalty * (constraints1 + constraints2 + constraints3 + constraints4)
return objective
# solving given SQP sub-problem
def solve_problem(self, x_k, penalizer, p, delta, constraints=None, lower_limit=None, upper_limit=None):
model_objective, actual_objective = self.get_model_objective(x_k, p, penalizer)
# if self.D is not None:
# print self.D.shape, p.shape, delta, penalizer.value
constraints = [cvxpy.norm(p, self.trust_region_norm) <= delta]
problem = cvxpy.Problem(cvxpy.Minimize(model_objective), constraints)
if self.solver == "CVXOPT":
start = time.time()
result = problem.solve(solver=self.solver, warm_start=True, kktsolver=cvxpy.ROBUST_KKTSOLVER, verbose=False)
end = time.time()
else:
start = time.time()
result = problem.solve(solver=self.solver, warm_start=True, verbose=False, max_iters=100)
end = time.time()
self.solving_time += end - start
return p.value, model_objective, actual_objective, problem.status, problem.value
# to check if two quatities are approximately equal
def approx_equal(self, x, y, tolerance=0.001):
return abs(x - y) <= 0.5 * tolerance * (x + y)
# calculating the constraint norm
    def get_constraints_norm(self, x_k, p):
        con1, con2, con3, con4 = self.evaluate_constraints(x_k, p)
        max_con1 = (np.linalg.norm(con1, np.inf))
        max_con2 = (np.linalg.norm(con2, np.inf))
        max_con3 = (np.linalg.norm(con3, np.inf))
        max_con4 = (np.linalg.norm(con3, np.inf))
        return max_con1, max_con2, max_con3, max_con4
# calculating cost improve from initial to final solution of the given problem
def calc_cost_improve(self):
act_redutcion = self.actual_reductions
pred_reduction = self.predicted_reductions
actual_costs = self.actual_costs
model_costs = self.model_costs
if len(act_redutcion):
self.actual_reduction_improve = act_redutcion[0] - act_redutcion[-1]
self.actual_reduction_improve /= (act_redutcion[0] + 0.000000001)
self.actual_reduction_improve *= 100
if len(pred_reduction):
self.predicted_reduction_improve = pred_reduction[0] - pred_reduction[-1]
self.predicted_reduction_improve /= (pred_reduction[0] + 0.000000001)
self.predicted_reduction_improve *= 100
if len(actual_costs):
self.actual_cost_improve = actual_costs[0] - actual_costs[-1]
self.actual_cost_improve /= (actual_costs[0] + 0.000000001)
self.actual_cost_improve *= 100
if len(actual_costs):
self.model_cost_improve = model_costs[0] - model_costs[-1]
self.model_cost_improve /= (model_costs[0] + 0.000000001)
self.model_cost_improve *= 100
# solving SQP problem
def solve(self, initial_guess=None, callback_function=None):
self.logger.info("Starting SQP solver . . . . . . .")
x = cvxpy.Variable(self.P.shape[0])
p = cvxpy.Variable(x.shape[0])
penalty = cvxpy.Parameter(nonneg=True)
if initial_guess is None:
x_0 = self.initial_guess
else:
x_0 = initial_guess
p_0 = cvxpy.Variable(x.shape[0])
p_0.value = np.zeros(p.shape[0])
penalty.value = float(self.solver_config["initial_penalty"])
trust_box_size = float(self.solver_config["trust_region_size"])
max_penalty = float(self.solver_config["max_penalty"])
min_trust_box_size = float(self.solver_config["min_trust_region_size"])
max_trust_box_size = float(self.solver_config["max_trust_region_size"])
trust_shrink_ratio = float(self.solver_config["trust_shrink_ratio"])
trust_expand_ratio = float(self.solver_config["trust_expand_ratio"])
trust_good_region_ratio = float(self.solver_config["trust_good_region_ratio"])
trust_bad_region_ratio = float(self.solver_config["trust_bad_region_ratio"])
max_iteration = float(self.solver_config["max_iteration"])
min_actual_redution = float(self.solver_config["min_actual_redution"])
min_x_redution = float(self.solver_config["min_x_redution"])
const_violation_tolerance = float(self.solver_config["const_violation_tolerance"])
x_k = copy.copy(x_0)
iteration_count = 0
check_for_constraints = False
is_adjust_penalty = False
dynamic_constraints_satisfied = False
actual_reduction = 1000
self.rho_k = 0
self.is_converged = False
inter_status = "-1"
p_k = [0] * len(x_0)
last_p_k = [0] * len(x_0)
p.value = copy.deepcopy(p_0.value)
while penalty.value <= max_penalty:
self.logger.debug("penalty " + str(penalty.value))
self.num_qp_iterations += 1
self.num_sqp_iterations += 1
while iteration_count < max_iteration:
iteration_count += 1
self.num_qp_iterations += 1
self.logger.debug("iteration_count " + str(iteration_count))
if callback_function is not None:
self.D, self.lbD, self.ubD = callback_function(x_k, p_k)
while trust_box_size >= min_trust_box_size:
self.num_qp_iterations += 1
if callback_function is not None or not self.is_initialized:
if self.D is not None or not self.is_initialized:
self.is_initialized = True
p_k, model_objective_at_p_k, \
actual_objective_at_x_k, solver_status, prob_value = self.solve_problem(x_k, penalty, p,
trust_box_size,
self.D, self.lbD,
self.ubD)
else:
dynamic_constraints_satisfied = True
inter_status = "dynamic constrained satisfied "
self.logger.info(inter_status)
self.status = "Solved"
break
else:
p_k, model_objective_at_p_k, actual_objective_at_x_k, solver_status, prob_value = self.solve_problem(
x_k,
penalty,
p,
trust_box_size)
if p_k is None:
x_k -= last_p_k
p.value = last_p_k
p_k = copy.deepcopy(last_p_k)
if p_k is not None:
actual_objective_at_x_plus_p_k = self.get_actual_objective(x_k + p_k, p, penalty)
model_objective_at_p_0 = self.get_model_objective(x_k, p_0, penalty)[0]
actual_reduction = actual_objective_at_x_k.value - actual_objective_at_x_plus_p_k.value
predicted_reduction = model_objective_at_p_0.value - model_objective_at_p_k.value
if predicted_reduction == 0:
predicted_reduction = 0.0000001
self.rho_k = actual_reduction / predicted_reduction
self.predicted_reductions.append(predicted_reduction)
self.actual_reductions.append(actual_reduction)
self.model_costs.append(prob_value)
self.actual_costs.append(actual_objective_at_x_k.value)
self.logger.debug("\n x_k " + str(x_k))
self.logger.debug("rho_k " + str(self.rho_k))
if solver_status == cvxpy.INFEASIBLE or solver_status == cvxpy.INFEASIBLE_INACCURATE or solver_status == cvxpy.UNBOUNDED or solver_status == cvxpy.UNBOUNDED_INACCURATE:
penalty.value *= trust_expand_ratio
break
if self.rho_k >= trust_good_region_ratio:
trust_box_size = np.fmin(trust_box_size * trust_expand_ratio, max_trust_box_size)
self.logger.debug("expanding trust region" + str(trust_box_size))
x_k += p_k
break
elif self.rho_k <= trust_bad_region_ratio:
trust_box_size *= trust_shrink_ratio
self.logger.debug("shrinking trust region " + str(trust_box_size))
x_k -= p_k
if trust_box_size < min_x_redution:
check_for_constraints = True
break
last_p_k = p_k
trust_box_size = np.fmax(trust_box_size, min_trust_box_size / (trust_shrink_ratio * 0.5))
if is_adjust_penalty or dynamic_constraints_satisfied:
break
if check_for_constraints:
break
if self.is_objective_function_converged(actual_reduction, min_actual_redution):
self.is_converged = True
inter_status = "actual reduction is very small"
self.logger.info(inter_status)
self.status = "Solved"
break
# else:
# self.is_converged = False
if self.is_x_converged(x_k, p_k, min_x_redution):
self.is_converged = True
inter_status = "reduction in x is very small"
self.logger.info(inter_status)
self.status = "Solved"
break
if self.is_constraints_satisfied(x_k, p, const_violation_tolerance):
if callback_function is not None:
if dynamic_constraints_satisfied:
self.is_converged = True
if inter_status != "-1":
inter_status += " and"
else:
inter_status = ""
self.logger.info(inter_status + " constraints violations are satisfied")
self.status = "Solved"
break
else:
self.is_converged = False
else:
self.is_converged = True
if inter_status != "-1":
inter_status += " and"
else:
inter_status = ""
self.logger.info(inter_status + " constraints violations are satisfied")
self.status = "Solved"
break
else:
self.is_converged = False
check_for_constraints = False
dynamic_constraints_satisfied = False
if self.is_converged or dynamic_constraints_satisfied:
break
penalty.value *= 10
iteration_count = 0
is_adjust_penalty = False
trust_box_size = float(self.solver_config["trust_region_size"])
self.logger.debug("\n initial x_0 " + str(x_0))
self.logger.debug("\n final x_k " + str(x_k))
self.logger.debug("solver status: " + self.status)
self.calc_cost_improve()
return self.status, x_k
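# Illustrative usage sketch (not part of the original module): a tiny
# two-variable QP with made-up data, relying on the default solver settings
# read from config/sqp_config.yaml.
def _demo_sqp_solver():
    P = np.eye(2)
    q = np.array([-1.0, -1.0])
    G = np.eye(2)
    lbG = np.array([-5.0, -5.0])
    ubG = np.array([5.0, 5.0])
    A = np.array([[1.0, 1.0]])
    b = np.array([1.0])
    solver = SQPsolver()
    solver.init(P=P, q=q, G=G, lbG=lbG, ubG=ubG, A=A, b=b)
    status, x_opt = solver.solve()
    return status, x_opt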
|
the-stack_0_18476 | import pickle
import csv
import json
import dicttoxml, xmltodict
class Serializable:
def dump(self, filename):
self._write_attributes(filename)
def load(self, filename):
obj = self._read_attributes(filename)
self.__dict__.clear()
self.__dict__.update(obj)
def _write_attributes(self, filename):
with open(filename, 'wb') as pickle_file:
pickle.dump(self.__dict__, pickle_file, pickle.HIGHEST_PROTOCOL)
def _read_attributes(self, filename):
with open(filename, 'rb') as pickle_file:
data = pickle.load(pickle_file)
return data
class CSVMixin:
def _write_attributes(self, filename):
with open(filename, 'w') as csv_file:
writer = csv.DictWriter(csv_file, self.__dict__.keys())
writer.writeheader()
writer.writerow(self.__dict__)
def _read_attributes(self, filename):
with open(filename, 'r') as csv_file:
data = next(csv.DictReader(csv_file))
return data
class JSONMixin:
def _write_attributes(self, filename):
with open(filename, 'w') as json_file:
json.dump(self.__dict__, json_file)
def _read_attributes(self, filename):
with open(filename, 'r') as json_file:
data = json.load(json_file)
return data
class XMLMixin:
def _write_attributes(self, filename):
xml = dicttoxml.dicttoxml(self.__dict__)
with open(filename, 'w') as xml_file:
xml_file.write(xml.decode())
def _read_attributes(self, filename):
with open(filename, 'r') as xml_file:
obj = xmltodict.parse(xml_file.read())
data = {key: eval(obj['root'][key]['@type'])(obj['root'][key]['#text'])
for key in obj['root'].keys()}
return data
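# Illustrative extension (assumes the PyYAML package is available; not part of
# the original exercise): any additional format only needs to override the two
# hook methods used by Serializable.dump()/load().
class YAMLMixin:
    def _write_attributes(self, filename):
        import yaml
        with open(filename, 'w') as yaml_file:
            yaml.safe_dump(self.__dict__, yaml_file)
    def _read_attributes(self, filename):
        import yaml
        with open(filename, 'r') as yaml_file:
            return yaml.safe_load(yaml_file)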
# --- tests ---
class Book1(Serializable):
def __init__(self, title, author, price):
self.title = title
self.author = author
self.price = price
def __str__(self):
return str(self.__dict__)
print('Pickle test')
b1 = Book1('Example title - pickle', 'Example author', 25.75)
print(b1)
b1.dump('w33.pickle')
b2 = Book1('a', 'b', 0)
print(b2)
b2.load('w33.pickle')
print(b2)
class Book2(CSVMixin, Serializable):
def __init__(self, title, author, price):
self.title = title
self.author = author
self.price = price
def __str__(self):
return str(self.__dict__)
print('CSV test')
b1 = Book2('Example title - CVS', 'Example author', 25.75)
print(b1)
b1.dump('w33.csv')
b2 = Book2('c', 'd', 0)
print(b2)
b2.load('w33.csv')
print(b2)
class Book3(JSONMixin, Serializable):
def __init__(self, title, author, price):
self.title = title
self.author = author
self.price = price
def __str__(self):
return str(self.__dict__)
print('JSON test')
b1 = Book3('Example title - JSON', 'Example author', 25.75)
print(b1)
b1.dump('w33.json')
b2 = Book3('e', 'f', 0)
print(b2)
b2.load('w33.json')
print(b2)
class Book4(XMLMixin, Serializable):
def __init__(self, title, author, price):
self.title = title
self.author = author
self.price = price
def __str__(self):
return str(self.__dict__)
print('XML test')
b1 = Book4('Example title - XML', 'Example author', 25.75)
print(b1)
b1.dump('w33.xml')
b2 = Book4('g', 'h', 0)
print(b2)
b2.load('w33.xml')
print(b2)
|
the-stack_0_18477 | ################################################################################
#
# New Zealand Geographic Board gazetteer application,
# Crown copyright (c) 2020, Land Information New Zealand on behalf of
# the New Zealand Government.
#
# This file is released under the MIT licence. See the LICENCE file found
# in the top-level directory of this distribution for more information.
#
################################################################################
from past.builtins import cmp
from builtins import str
def populateCombo(widget, rows, display=""):
"""
Populate a QComboWidget with a list of items defined by in iterator
returning either single values, or [value, string] pairs.
Optionally can have a display element, which is either a function to
get the display value from each object in the list, or the name of an
attribute to use for displaying each item.
"""
if isinstance(rows, dict):
r = [(k, str(rows[k])) for k in list(rows.keys())]
        r.sort(key=lambda a: a[1])
rows = r
widget.clear()
first = True
addItem = None
if callable(display):
addItem = lambda r: widget.addItem(str(display(r)), r)
elif display:
addItem = lambda r: widget.addItem(str(r.__getattribute__(display)), r)
for r in rows:
if addItem == None:
if isinstance(r, list) or isinstance(r, tuple):
if len(r) > 1:
addItem = lambda r: widget.addItem(str(r[1]), r[0])
else:
addItem = lambda r: widget.addItem(str(r[0]), r[0])
else:
                addItem = lambda r: widget.addItem(str(r), r)
addItem(r)
widget.setCurrentIndex(-1)
def comboValue(widget):
"""
Retrieve the value of a combo box as a python object
"""
index = widget.currentIndex()
if index == -1:
return None
return widget.itemData(index)
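# Illustrative usage (assumes a QComboBox-like widget from the Qt bindings used
# elsewhere in this application):
def _demo_populate(combo):
    populateCombo(combo, [(1, "North Island"), (2, "South Island")])
    return comboValue(combo)  # None until the user picks an entry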
|
the-stack_0_18478 | import json, requests
from math import pi
from geopy.distance import geodesic
r = requests.get("https://opensky-network.org/api/states/all")
response = r.json()
# geopy's geodesic() expects latitude/longitude in degrees, so keep both the
# user input and the API state vectors in degrees
lat = float(input("Enter latitude in degrees (positive for N, negative for S):"))
lon = float(input("Enter longitude in degrees (positive for E, negative for W):"))
coord = (lat, lon)
closest_index = 0
closest_distance = 6378 * 2 * pi  # roughly Earth's circumference in km, as an upper bound
for i in range(0, len(response['states'])):
    flight = response['states'][i]
    f_lon, f_lat = flight[5], flight[6]
    if f_lon is not None and f_lat is not None:
        f_coord = (f_lat, f_lon)
        dist = geodesic(coord, f_coord).km
        if dist < closest_distance:
            closest_distance = dist
            closest_index = i
flight = response['states'][closest_index]
icao, callsign, country, f_lon, f_lat, f_alt = flight[0], flight[1], flight[2], flight[5], flight[6], flight[13]
print("Closest flight is:")
print("ICAO24 ID:", icao)
print("Callsign:", callsign)
print("Country of origin:", country)
print("Latitude:", f_lat, "Longitude:", f_lon, "Altitude:", f_alt)
print("Distance from given location:", closest_distance)
|
the-stack_0_18481 | import re
import data_reader
from collections import Counter
import conversation
re_art = re.compile(r'\b(a|an|the)\b')
re_punc = re.compile(r'[!"#$%&()*+,-./:;<=>?@\[\]\\^`{|}~_\']')
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re_art.sub(' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
return re_punc.sub(' ', text) # convert punctuation to spaces
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def _exact_match(guess, answers):
"""Check if guess is a (normalized) exact match with any answer."""
if guess is None or answers is None:
return False
guess = normalize_answer(guess)
for a in answers:
if guess == normalize_answer(a):
return True
return False
def _prec_recall_f1_score(pred_items, gold_items):
"""
Computes precision, recall and f1 given a set of gold and prediction items.
:param pred_items: iterable of predicted values
:param gold_items: iterable of gold values
:return: tuple (p, r, f1) for precision, recall, f1
"""
common = Counter(gold_items) & Counter(pred_items)
num_same = sum(common.values())
if num_same == 0:
return 0, 0, 0
precision = 1.0 * num_same / len(pred_items)
recall = 1.0 * num_same / len(gold_items)
f1 = (2 * precision * recall) / (precision + recall)
return precision, recall, f1
def _f1_score(guess, answers):
"""Return the max F1 score between the guess and *any* answer."""
if guess is None or answers is None:
return 0
g_tokens = normalize_answer(guess).split()
scores = [
_prec_recall_f1_score(g_tokens, normalize_answer(a).split())for a in answers
]
return max(f1 for p, r, f1 in scores)
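def _demo_f1_score():
    # Illustrative check of the token-level F1 above: "my cat sat" vs gold
    # "my cat" shares 2 tokens -> precision 2/3, recall 1.0, F1 = 0.8.
    return _f1_score("my cat sat", ["my cat"])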
def eval_f1_for_valid(cand_num, convers):
total_f1 = 0.0
count = 0
for conver in convers:
profile = conver['profile']
questions = conver['question']
answers = conver['answer']
for index in range(len(questions)):
question = questions[index]
answer = answers[index]
response = conversation.get_response(profile, question, cand_num)
#print ('response = ', response)
#print ('answer = ', answer)
f1 = _f1_score(response, [' '.join(answer)])
total_f1 += f1
if count % 10 == 1:
print ('cand_num = %d, count = %d' % (cand_num, count))
count += 1
return total_f1
def find_optimal_cand_num():
cand_nums = [20, 50, 80, 100, 120, 150, 180, 200]
convers, _ = data_reader.read_training_data('data/valid_self_original.txt', True, 0)
    print ('number of conversations: %d' % len(convers))
save_f = open('cand_report.txt', 'w')
for cand_num in cand_nums:
result = eval_f1_for_valid(cand_num, convers[: 100])
print ('cand_num = %d has fscore = %f' % (cand_num, result))
save_f.write('%d: %f\n' % (cand_num, result))
save_f.close()
if __name__ == '__main__':
find_optimal_cand_num()
|
the-stack_0_18483 | # Copyright (c) 2018 Georgia Tech Research Corporation
# Distributed under the terms of the BSD-3-Clause License
# Derived from robotkernel
# Copyright (c) 2018, Asko Soukka
# Distributed under the terms of the BSD-3-Clause License
""" An interactive Robot Framework runner
"""
from collections import defaultdict
from io import StringIO
from pathlib import Path
from tempfile import TemporaryDirectory
from uuid import uuid4
import importnb
from robot.running import TestSuiteBuilder
from traitlets.config import LoggingConfigurable
from . import irobot
VDOM_MIME = "application/vdom.v1+json"
ICONS = {"INIT": "▷", "PASS": "⬜", "FAIL": "❌"}
COLORS = {"INIT": "#333", "PASS": "#999", "FAIL": "red"}
class InteractiveRunner(LoggingConfigurable):
""" An interactive Robot Framework runner
"""
def __init__(self, silent=False):
super().__init__()
self.silent = silent
self._tmpdir = TemporaryDirectory()
self.path = Path(self._tmpdir.name)
self.suite = None
self.results = None
self.stdout = None
self._handlers = defaultdict(list)
self.test_data = irobot.TestCaseString()
def __del__(self):
self._tmpdir.cleanup()
@property
def failed(self):
""" wrapper for crazy-long path
"""
try:
return self.results.statistics.total.critical.failed
except AttributeError:
return 0
def populate(self, *code):
""" Populate with some code lines
"""
list(map(self.test_data.populate, code))
return self
def clear_tests(self):
""" Clear the tests table
"""
self.test_data.testcase_table.tests.clear()
return self
def build(self, name="Untitled Test Suite"):
""" Build a test suite
"""
# pylint: disable=W0212
self.suite = TestSuiteBuilder()._build_suite(self.test_data)
self.suite._name = name
return self
def on_suite(self, handler):
""" Set a listener for start events
"""
self._handlers[irobot.SuiteEventListener].append(handler)
def on_status(self, handler):
""" Set a listener for status events
"""
self._handlers[irobot.StatusEventListener].append(handler)
def on_return_value(self, handler):
""" Set a listener for return values
"""
self._handlers[irobot.ReturnValueListener].append(handler)
def on_import(self, handler):
""" Set a listener for imports
"""
self._handlers[irobot.ImportListener].append(handler)
def run(self):
""" Run the built suite
"""
with importnb.Notebook():
with StringIO() as stdout:
self.results = self.suite.run(
outputdir=str(self.path),
stdout=stdout,
listener=sum(
[
list(map(klass, handlers))
for klass, handlers in self._handlers.items()
],
[],
),
)
self.stdout = stdout.getvalue().strip().splitlines()
return self
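# Illustrative standalone usage (the Robot Framework source below is made up
# for the sketch; TestCaseString.populate is assumed to accept plain suite
# text, as in the populate() helper above).
def _demo_interactive_runner():
    runner = InteractiveRunner(silent=True)
    runner.populate(
        "*** Test Cases ***\n"
        "Trivial Case\n"
        "    Log    hello\n"
    )
    runner.build(name="Demo Suite").run()
    return runner.failed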
class KernelRunner(InteractiveRunner):
""" A kernel-aware runner
"""
def __init__(self, kernel, code, silent=False, history=None):
super().__init__(silent=silent)
self.return_values = []
self.kernel = kernel
self.populate(*(history or []))
self.clear_tests()
self.populate(code)
self.total_tests = 0
self.tests_completed = 0
self.imports = {}
def run(self):
""" Actually execute the test cases, and show a progress bar
"""
display_id = str(uuid4())
progress = dict(
tagName="div",
attributes=dict(style=dict(display="flex", flexWrap="wrap")),
children=[],
)
if not self.silent:
@self.on_suite
def on_suite(attributes):
""" handle the start of a suite
"""
self.total_tests = attributes["totaltests"]
progress["children"] = [
dict(
tagName="div",
children=[],
attributes=dict(
style=dict(
borderBottom="solid 1px #eee",
flex="1",
margin="0.25em",
padding="0.25em",
minHeight="1.5em",
)
),
)
for i in range(self.total_tests)
]
self.send_display_data({VDOM_MIME: progress}, display_id=display_id)
@self.on_status
def on_status(attributes):
""" handle status responses
"""
parent = progress["children"][self.tests_completed]
if "status" in attributes:
child = parent["children"][-1]
status = attributes["status"]
icon = ICONS.get(status, ICONS["FAIL"])
child["children"] = icon
else:
status = "INIT"
icon = ICONS["INIT"]
child = dict(
tagName="span",
children=icon,
attributes=dict(
style=dict(marginLeft="0.25em", transition="all 0.2s")
),
)
parent["children"].append(child)
child["attributes"]["title"] = f"""{attributes["kwname"]}: {status}"""
child["attributes"]["style"].update(color=COLORS[status])
self.send_update_display_data(
{VDOM_MIME: progress}, display_id=display_id
)
@self.on_return_value
def on_return_value(name, attributes, return_value=None):
""" handle a return value
"""
if "endtime" in attributes:
self.return_values.append(return_value)
self.tests_completed += 1
else:
progress["children"][self.tests_completed]["attributes"].update(
title=name
)
@self.on_import
def on_import(name, attributes):
self.imports[name] = attributes
super().run()
def send_display_data(self, data=None, metadata=None, display_id=None):
""" Send some display data to the frontend
"""
if isinstance(data, str):
self.kernel.send_response(
self.kernel.iopub_socket, "display_data", {"data": {"text/plain": data}}
)
else:
self.kernel.send_response(
self.kernel.iopub_socket,
"display_data",
{
"data": data or {},
"metadata": metadata or {},
"transient": {"display_id": display_id},
},
)
def send_update_display_data(self, data=None, metadata=None, display_id=None):
""" Update a display
"""
# noqa: E501
self.kernel.send_response(
self.kernel.iopub_socket,
"update_display_data",
{
"data": data or {},
"metadata": metadata or {},
"transient": {"display_id": display_id},
},
)
def send_execute_result(self, data=None, metadata=None, display_id=None):
""" Send an execute_response message
"""
self.kernel.send_response(
self.kernel.iopub_socket,
"execute_result",
{
"data": data or {},
"metadata": metadata or {},
"transient": {"display_id": display_id},
"execution_count": self.kernel.execution_count,
},
)
|
the-stack_0_18485 | import urllib.request
from io import BytesIO
from functools import reduce
from PIL import Image
src = urllib.request.urlopen('http://huge:[email protected]/pc/return/wire.png').read()
im = Image.open(BytesIO(src))
ans = Image.new(im.mode, (100, 100))
step = [[1, 0], [0, 1], [-1, 0], [0, -1]]
maxLen = [[i, i - 1, i - 1, i - 2] for i in range(100, 1, -2)]
maxLen = reduce(lambda x, y: x + y, maxLen)
idx = 0
stepIdx = 0
pos = (-1, 0)
for l in maxLen:
for i in range(l):
pos = tuple(map(lambda x, y: x + y, pos, step[stepIdx]))
ans.putpixel(pos, im.getpixel((idx, 0)))
idx += 1
stepIdx = (stepIdx + 1) % 4
ans.show() |
the-stack_0_18487 | import numpy as np
import time
import copy
############################################
############################################
def calculate_mean_prediction_error(env, action_sequence, models, data_statistics):
model = models[0]
# true
true_states = perform_actions(env, action_sequence)['observation']
# predicted
ob = np.expand_dims(true_states[0],0)
pred_states = []
for ac in action_sequence:
pred_states.append(ob)
action = np.expand_dims(ac,0)
ob = model.get_prediction(ob, action, data_statistics)
pred_states = np.squeeze(pred_states)
# mpe
mpe = mean_squared_error(pred_states, true_states)
return mpe, true_states, pred_states
def perform_actions(env, actions):
ob = env.reset()
obs, acs, rewards, next_obs, terminals, image_obs = [], [], [], [], [], []
steps = 0
for ac in actions:
obs.append(ob)
acs.append(ac)
ob, rew, done, _ = env.step(ac)
# add the observation after taking a step to next_obs
next_obs.append(ob)
rewards.append(rew)
steps += 1
# If the episode ended, the corresponding terminal value is 1
# otherwise, it is 0
if done:
terminals.append(1)
break
else:
terminals.append(0)
return Path(obs, image_obs, acs, rewards, next_obs, terminals)
def mean_squared_error(a, b):
return np.mean((a-b)**2)
############################################
############################################
def sample_trajectory(env, policy, max_path_length, render=False, render_mode=('rgb_array')):
# initialize env for the beginning of a new rollout
ob = env.reset()
# init vars
obs, acs, rewards, next_obs, terminals, image_obs = [], [], [], [], [], []
steps = 0
while True:
# render image of the simulated env
if render:
if 'rgb_array' in render_mode:
if hasattr(env, 'sim'):
image_obs.append(env.sim.render(camera_name='track', height=500, width=500)[::-1])
else:
image_obs.append(env.render(mode=render_mode))
if 'human' in render_mode:
env.render(mode=render_mode)
time.sleep(env.model.opt.timestep)
# use the most recent ob to decide what to do
obs.append(ob)
ac = policy.get_action(ob) # HINT: query the policy's get_action function
ac = ac[0]
acs.append(ac)
# take that action and record doc
ob, rew, done, _ = env.step(ac)
# record result of taking that action
steps += 1
next_obs.append(ob)
rewards.append(rew)
# HINT: rollout can end due to done, or due to max_path_length
rollout_done = 1 if done or max_path_length == steps else 0
terminals.append(rollout_done)
if rollout_done:
break
return Path(obs, image_obs, acs, rewards, next_obs, terminals)
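def _demo_random_rollout(env_name='CartPole-v0'):
    # Illustrative sketch: collect one rollout with a random policy stub. The
    # gym environment name and the old-style gym reset()/step() API are
    # assumptions for the example.
    import gym
    class _RandomPolicy:
        def __init__(self, env):
            self.env = env
        def get_action(self, ob):
            # sample_trajectory indexes the returned action with ac[0]
            return [self.env.action_space.sample()]
    env = gym.make(env_name)
    path = sample_trajectory(env, _RandomPolicy(env), max_path_length=200)
    return get_pathlength(path)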
def sample_trajectories(env, policy, min_timesteps_per_batch, max_path_length, render=False, render_mode=('rgb_array')):
timesteps_this_batch = 0
paths = []
while timesteps_this_batch < min_timesteps_per_batch:
path = sample_trajectory(env=env, policy=policy, max_path_length=max_path_length, render=render,
render_mode=render_mode)
timesteps_this_batch += get_pathlength(path)
paths.append(path)
return paths, timesteps_this_batch
def sample_n_trajectories(env, policy, ntraj, max_path_length, render=False, render_mode=('rgb_array')):
"""
Collect ntraj rollouts.
Hint1: use sample_trajectory to get each path (i.e. rollout) that goes into paths
"""
paths = []
for i in range(ntraj):
path = sample_trajectory(env=env, policy=policy, max_path_length=max_path_length, render=render,
render_mode=render_mode)
paths.append(path)
return paths
############################################
############################################
def Path(obs, image_obs, acs, rewards, next_obs, terminals):
"""
Take info (separate arrays) from a single rollout
and return it in a single dictionary
"""
if image_obs != []:
image_obs = np.stack(image_obs, axis=0)
return {"observation" : np.array(obs, dtype=np.float32),
"image_obs" : np.array(image_obs, dtype=np.uint8),
"reward" : np.array(rewards, dtype=np.float32),
"action" : np.array(acs, dtype=np.float32),
"next_observation": np.array(next_obs, dtype=np.float32),
"terminal": np.array(terminals, dtype=np.float32)}
def convert_listofrollouts(paths):
"""
Take a list of rollout dictionaries
and return separate arrays,
where each array is a concatenation of that array from across the rollouts
"""
observations = np.concatenate([path["observation"] for path in paths])
actions = np.concatenate([path["action"] for path in paths])
next_observations = np.concatenate([path["next_observation"] for path in paths])
terminals = np.concatenate([path["terminal"] for path in paths])
concatenated_rewards = np.concatenate([path["reward"] for path in paths])
unconcatenated_rewards = [path["reward"] for path in paths]
return observations, actions, next_observations, terminals, concatenated_rewards, unconcatenated_rewards
############################################
############################################
def get_pathlength(path):
return len(path["reward"])
def normalize(data, mean, std, eps=1e-8):
return (data-mean)/(std+eps)
def unnormalize(data, mean, std):
return data*std+mean
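def _demo_normalize_roundtrip():
    # Quick illustrative sanity check of the normalize/unnormalize pair above
    # (the eps term makes the round trip approximate, not exact).
    data = np.array([1.0, 2.0, 3.0])
    mean, std = data.mean(), data.std()
    return unnormalize(normalize(data, mean, std), mean, std)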
def add_noise(data_inp, noiseToSignal=0.01):
data = copy.deepcopy(data_inp) #(num data points, dim)
#mean of data
mean_data = np.mean(data, axis=0)
#if mean is 0,
#make it 0.001 to avoid 0 issues later for dividing by std
mean_data[mean_data == 0] = 0.000001
#width of normal distribution to sample noise from
#larger magnitude number = could have larger magnitude noise
std_of_noise = mean_data * noiseToSignal
for j in range(mean_data.shape[0]):
data[:, j] = np.copy(data[:, j] + np.random.normal(
0, np.absolute(std_of_noise[j]), (data.shape[0],)))
return data |
the-stack_0_18488 | from project.category import Category
from project.document import Document
from project.storage import Storage
from project.topic import Topic
c1 = Category(1, "work")
t1 = Topic(1, "daily tasks", "C:\\work_documents")
d1 = Document(1, 1, 1, "finilize project")
d1.add_tag("urgent")
d1.add_tag("work")
storage = Storage()
storage.add_category(c1)
storage.add_topic(t1)
storage.add_document(d1)
print(c1)
print(t1)
print(storage.get_document(1))
print(storage)
|
the-stack_0_18489 | import pytest
from unittest.mock import (
Mock,
)
from vns_utils import (
decode_hex,
)
from web3.contract import (
CONCISE_NORMALIZERS,
ConciseContract,
ConciseMethod,
)
def deploy(web3, Contract, args=None):
args = args or []
deploy_txn = Contract.constructor(*args).transact()
deploy_receipt = web3.vns.waitForTransactionReceipt(deploy_txn)
assert deploy_receipt is not None
contract = Contract(address=deploy_receipt['contractAddress'])
assert len(web3.vns.getCode(contract.address)) > 0
return contract
@pytest.fixture()
def EMPTY_ADDR(address_conversion_func):
addr = '0x' + '00' * 20
return address_conversion_func(addr)
@pytest.fixture()
def zero_address_contract(web3, WithConstructorAddressArgumentsContract, EMPTY_ADDR):
deploy_txn = WithConstructorAddressArgumentsContract.constructor(
EMPTY_ADDR,
).transact()
deploy_receipt = web3.vns.waitForTransactionReceipt(deploy_txn)
assert deploy_receipt is not None
_address_contract = WithConstructorAddressArgumentsContract(
address=deploy_receipt['contractAddress'],
)
return ConciseContract(_address_contract)
def test_concisecontract_call_default():
mock = Mock()
sweet_method = ConciseMethod(mock.functions.grail)
sweet_method(1, 2)
mock.functions.grail.assert_called_once_with(1, 2)
# Checking in return_value, ie the function instance
mock.functions.grail.return_value.call.assert_called_once_with({})
def test_concisecontract_custom_transact():
mock = Mock()
sweet_method = ConciseMethod(mock.functions.grail)
sweet_method(1, 2, transact={'holy': 3})
mock.functions.grail.assert_called_once_with(1, 2)
# Checking in return_value, ie the function instance
mock.functions.grail.return_value.transact.assert_called_once_with({'holy': 3})
def test_concisecontract_two_keywords_fail():
mock = Mock()
sweet_method = ConciseMethod(mock)
with pytest.raises(TypeError):
sweet_method(1, 2, transact={'holy': 3}, call={'count_to': 4})
def test_concisecontract_unknown_keyword_fails():
contract = Mock()
sweet_method = ConciseMethod(contract.functions.grail)
with pytest.raises(TypeError):
sweet_method(1, 2, count={'to': 5})
def test_concisecontract_returns_none_for_0addr(zero_address_contract):
result = zero_address_contract.testAddr()
assert result is None
def test_class_construction_sets_class_vars(web3,
MATH_ABI,
MATH_CODE,
MATH_RUNTIME,
some_address,
):
MathContract = web3.vns.contract(
abi=MATH_ABI,
bytecode=MATH_CODE,
bytecode_runtime=MATH_RUNTIME,
)
classic = MathContract(some_address)
assert classic.web3 == web3
assert classic.bytecode == decode_hex(MATH_CODE)
assert classic.bytecode_runtime == decode_hex(MATH_RUNTIME)
def test_conciscecontract_keeps_custom_normalizers_on_base(web3, MATH_ABI):
base_contract = web3.vns.contract(abi=MATH_ABI)
# give different normalizers to this base instance
base_contract._return_data_normalizers = base_contract._return_data_normalizers + tuple([None])
# create concisce contract with custom contract
new_normalizers_size = len(base_contract._return_data_normalizers)
concise = ConciseContract(base_contract)
# check that concise contract includes the new normalizers
concise_normalizers_size = len(concise._classic_contract._return_data_normalizers)
assert concise_normalizers_size == new_normalizers_size + len(CONCISE_NORMALIZERS)
assert concise._classic_contract._return_data_normalizers[0] is None
def test_conciscecontract_function_collision(
web3,
StringContract):
contract = deploy(web3, StringContract, args=["blarg"])
def getValue():
assert 'getValue' in [
item['name'] for item
in StringContract.abi
if 'name' in item]
setattr(ConciseContract, 'getValue', getValue)
concise_contract = ConciseContract(contract)
assert isinstance(concise_contract, ConciseContract)
with pytest.raises(AttributeError, match=r'Namespace collision .* with ConciseContract API.'):
concise_contract.getValue()
def test_concisecontract_deprecation_warning(web3, StringContract):
contract = deploy(web3, StringContract, args=["blarg"])
with pytest.warns(DeprecationWarning):
ConciseContract(contract)
|
the-stack_0_18491 | # ----------------------------------------------------------------------
# MIB JSON-RPC API endpoint
# ----------------------------------------------------------------------
# Copyright (C) 2007-2021 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Third-party modules
from fastapi import APIRouter
# NOC modules
from noc.core.debug import error_report
from noc.core.error import NOCError
from noc.core.service.loader import get_service
from noc.core.service.models.jsonrpc import JSONRemoteProcedureCall
from noc.services.mib.api.mib import MIBAPI
router = APIRouter()
@router.post("/api/mib/")
@router.post("/api/mib")
def api_mib(req: JSONRemoteProcedureCall):
if req.method not in MIBAPI.get_methods():
return {"error": f"Invalid method: '{req.method}'", "id": req.id}
service = get_service()
api = MIBAPI(service, None, None)
api_method = getattr(api, req.method)
result = None
error = None
try:
result = api_method(*req.params)
except NOCError as e:
error = f"Failed: {e}"
except Exception as e:
error_report()
error = f"Failed: {e}"
return {"result": result, "error": error, "id": req.id}
|
the-stack_0_18492 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : server-side tools
Case Name : when stopping the database on a specified server, set the stop mode and a timeout
Description :
    1. check the database status
    2. stop the database with -h for the given host, a timeout, and stop mode "fast"; verify the stop succeeds
    3. start the database
    4. check the database status
    5. stop the database with -h for the given host, a timeout, and stop mode "immediate"; verify the stop succeeds
    6. start the database
    7. check the database status
Expect :
    1. status is normal
    2. database stopped
    3. started successfully
    4. status is normal
    5. database stopped
    6. started successfully
    7. status is normal
History :
"""
import os
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from testcase.utils.CommonSH import CommonSH
Primary_SH = CommonSH('PrimaryDbUser')
class Tools(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info('---Opengauss_Function_Tools_gs_om_Case0057start---')
self.dbusernode = Node('PrimaryDbUser')
self.constant = Constant()
def test_server_tools1(self):
        self.log.info('-----------------Step 1: check the database status-------------------')
status_cmd1 = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t status --detail;'
self.log.info(status_cmd1)
status_msg1 = self.dbusernode.sh(status_cmd1).result()
self.log.info(status_msg1)
self.assertTrue("Degraded" in status_msg1 or "Normal" in status_msg1)
        self.log.info('-----check the host name-----')
check_cmd = f'hostname'
self.log.info(check_cmd)
hostname = self.dbusernode.sh(check_cmd).result()
self.log.info(hostname)
        self.log.info('---Step 2: stop the database on the specified server with a timeout and stop mode fast---')
certificate_path1 = os.path.join(macro.DB_INSTANCE_PATH, 'server.*')
ls_cmd1 = f'ls -l {certificate_path1};'
self.log.info(ls_cmd1)
stop_cmd1 = f'source {macro.DB_ENV_PATH};' \
f'gs_om ' \
f'-t stop ' \
f'-h {hostname} ' \
f'--time-out=268 ' \
f'--mode=fast ;'
self.log.info(stop_cmd1)
stop_msg1 = self.dbusernode.sh(stop_cmd1).result()
self.log.info(stop_msg1)
self.assertIn(self.constant.STOP_NODE_SUC_MSG, stop_msg1)
        self.log.info('----------Step 3: start the database-----------')
start_cmd1 = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t start -h {hostname} ;'
self.log.info(start_cmd1)
start_msg1 = self.dbusernode.sh(start_cmd1).result()
self.log.info(start_msg1)
self.assertIn(self.constant.GS_OM_START_SUCCESS_MSG, start_msg1)
        self.log.info('----rebuild the standby nodes----')
build_msg_list = Primary_SH.get_standby_and_build()
for msg in build_msg_list:
self.assertIn(self.constant.BUILD_SUCCESS_MSG, msg)
        self.log.info('-----------------Step 4: check the database status-------------------')
status_cmd1 = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t status --detail;'
self.log.info(status_cmd1)
status_msg1 = self.dbusernode.sh(status_cmd1).result()
self.log.info(status_msg1)
self.assertTrue("Degraded" in status_msg1 or "Normal" in status_msg1)
        self.log.info('---Step 5: stop the database on the specified server with a timeout and stop mode immediate---')
certificate_path2 = os.path.join(macro.DB_INSTANCE_PATH, 'server.*')
ls_cmd2 = f'ls -l {certificate_path2};'
self.log.info(ls_cmd2)
stop_cmd2 = f'source {macro.DB_ENV_PATH};' \
f'gs_om ' \
f'-t stop ' \
f'-h {hostname} ' \
f'--time-out=268 ' \
f'--mode=immediate ;'
self.log.info(stop_cmd2)
stop_msg2 = self.dbusernode.sh(stop_cmd2).result()
self.log.info(stop_msg2)
self.assertIn(self.constant.STOP_NODE_SUC_MSG, stop_msg2)
        self.log.info('----------Step 6: start the database-----------')
start_cmd2 = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t start -h {hostname} ;'
self.log.info(start_cmd2)
start_msg2 = self.dbusernode.sh(start_cmd2).result()
self.log.info(start_msg2)
self.assertIn(self.constant.GS_OM_START_SUCCESS_MSG, start_msg2)
        self.log.info('----rebuild the standby nodes----')
build_msg_list = Primary_SH.get_standby_and_build()
for msg in build_msg_list:
self.assertIn(self.constant.BUILD_SUCCESS_MSG, msg)
        self.log.info('-----------------Step 7: check the database status-------------------')
status_cmd2 = f'source {macro.DB_ENV_PATH};' \
f'gs_om -t status --detail;'
self.log.info(status_cmd2)
status_msg2 = self.dbusernode.sh(status_cmd2).result()
self.log.info(status_msg2)
        self.assertTrue("Degraded" in status_msg2 or "Normal" in status_msg2)
def tearDown(self):
        self.log.info('------------restore the environment-------------')
start_cmd3 = f'source {macro.DB_ENV_PATH};gs_om -t start ;'
self.log.info(start_cmd3)
start_msg3 = self.dbusernode.sh(start_cmd3).result()
self.log.info(start_msg3)
self.log.info('--Opengauss_Function_Tools_gs_om_Case0057finish--')
|
the-stack_0_18493 | from collections import defaultdict
from collections import deque
import torch
class SmoothedValue(object):
def __init__(self, window_size=20):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def update(self, value):
self.deque.append(value)
self.series.append(value)
self.count += 1
self.total += value
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque))
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {:.8f} ({:.8f})".format(name, meter.median, meter.global_avg)
)
return self.delimiter.join(loss_str)
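def _demo_metric_logger():
    # Illustrative usage of the meters above with made-up loss values.
    logger = MetricLogger(delimiter="  ")
    for step in range(5):
        logger.update(loss=1.0 / (step + 1), lr=0.01)
    return str(logger)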
|
the-stack_0_18494 | from django.contrib.auth.models import Group, Permission, User
from django.core.management import BaseCommand
from website import models
GROUPS_PERMISSIONS = {
'PaperAdmins': {
models.Paper: ['add', 'change', 'delete', 'view'],
},
'UserAdmins': {
User: ['add', 'change', 'view'],
models.Profile: ['add', 'change', 'view'],
},
'PAClabAdmins': {
models.Dataset: ['view'],
models.Project: ['add', 'change', 'view'],
models.ProjectSelector: ['add', 'change', 'view', 'website:view_disabled_selectors'],
models.Filter: ['view'],
models.FilterDetail: ['add', 'change', 'view'],
models.TransformedProject: ['add', 'change', 'view'],
models.ProjectTransformer: ['add', 'change', 'view', 'website:view_disabled_transforms'],
models.Transform: ['view'],
models.TransformOption: ['add', 'change', 'view'],
},
}
'''Make groups
Usage: manage.py makegroups
Create default PAClab groups/permissions
'''
class Command(BaseCommand):
help = 'Create default PAClab groups/permissions'
def handle(self, *args, **options):
for group_name in GROUPS_PERMISSIONS:
group, created = Group.objects.get_or_create(name=group_name)
for model_cls in GROUPS_PERMISSIONS[group_name]:
for _, perm_name in enumerate(GROUPS_PERMISSIONS[group_name][model_cls]):
if ':' in perm_name:
codename = perm_name[perm_name.find(':') + 1:]
else:
codename = perm_name + '_' + model_cls._meta.model_name
try:
group.permissions.add(Permission.objects.get(codename=codename))
self.stdout.write('Adding ' + codename + ' to group ' + group.__str__())
except Permission.DoesNotExist:
self.stderr.write(codename + ' not found')
group.save()
|
the-stack_0_18495 | from stix_shifter.stix_translation.src.patterns.pattern_objects import ObservationExpression, ComparisonExpression, \
ComparisonExpressionOperators, ComparisonComparators, Pattern, \
CombinedComparisonExpression, CombinedObservationExpression, ObservationOperators
from stix_shifter.stix_translation.src.utils.transformers import TimestampToMilliseconds, DateTimeToUnixTimestamp, EpochSecondsToTimestamp
from stix_shifter.stix_translation.src.json_to_stix import observable
import datetime
import logging
import re
logger = logging.getLogger(__name__)
class QueryStringPatternTranslator:
# Change comparator values to match with supported data source operators
comparator_lookup = {
ComparisonExpressionOperators.And: "AND",
ComparisonExpressionOperators.Or: "OR",
ComparisonComparators.GreaterThan: ":>",
ComparisonComparators.GreaterThanOrEqual: ":>=",
ComparisonComparators.LessThan: ":<",
ComparisonComparators.LessThanOrEqual: ":<=",
ComparisonComparators.Equal: ":",
ComparisonComparators.NotEqual: "NOT",
ComparisonComparators.Like: ":",
ComparisonComparators.In: ":",
        ComparisonComparators.Matches: ':',  # Elasticsearch does not support PCRE.
ComparisonComparators.IsSubSet: ':',
ComparisonComparators.IsSuperSet: ':',
ObservationOperators.Or: 'OR',
ObservationOperators.And: 'OR' # Treat AND's as OR's -- Unsure how two ObsExps wouldn't cancel each other out.
}
def __init__(self, pattern: Pattern, data_model_mapper):
self.dmm = data_model_mapper
self.pattern = pattern
# List for any queries that are split due to START STOP qualifier
self.qualified_queries = []
# Translated query string without any qualifiers
self.translated = self.parse_expression(pattern)
self.qualified_queries.append(self.translated)
self.qualified_queries = _format_translated_queries(self.qualified_queries)
@staticmethod
def _format_set(values) -> str:
gen = values.element_iterator()
return "({})".format(' OR '.join(['"{}"'.format(value) for value in gen]))
@staticmethod
def _format_equality(value) -> str:
return '"{}"'.format(value)
@staticmethod
def _format_like(value) -> str:
# Replacing value with % to * and _ to ? for to support Like comparator
if isinstance(value, str):
return '{}'.format(value.replace('%', '*').replace('_', '?'))
else:
return value
@staticmethod
def _escape_value(value, comparator=None) -> str:
if isinstance(value, str):
return '{}'.format(value.replace('\\', '\\\\').replace('\"', '\\"').replace('(', '\\(').replace(')', '\\)'))
else:
return value
@staticmethod
def _negate_comparison(comparison_string):
return "(NOT ({}))".format(comparison_string)
    def _parse_mapped_fields(self, expression, value, comparator, stix_field, mapped_fields_array):
comparison_string = ""
mapped_fields_count = len(mapped_fields_array)
for mapped_field in mapped_fields_array:
if expression.comparator == ComparisonComparators.NotEqual or \
expression.comparator == ComparisonComparators.IsSuperSet:
comparator = ':'
comparison_string += "(NOT {mapped_field} {comparator} {value} AND {mapped_field}:*)".format(mapped_field=mapped_field, comparator=comparator, value=value)
elif expression.comparator == ComparisonComparators.GreaterThan or \
expression.comparator == ComparisonComparators.LessThan or \
expression.comparator == ComparisonComparators.GreaterThanOrEqual or \
expression.comparator == ComparisonComparators.LessThanOrEqual:
# Check whether value is in datetime format, Ex: process.created
                pattern = r"^\d{4}(-\d{2}){2}T\d{2}(:\d{2}){2}(\.\d+)?Z$"
try:
match = bool(re.search(pattern, value))
                except Exception:
match = False
if match:
# IF value is in datetime format then do conversion of datetime into
# proper Range query of timestamps supported by elastic_ecs for comparators like :<,:>,:<=,:>=
comparison_string += _get_timestamp(mapped_field, comparator, value)
else:
comparison_string += "{mapped_field}{comparator}{value}".format(mapped_field=mapped_field,
comparator=comparator,
value=value)
elif expression.comparator == ComparisonComparators.IsSubSet:
comparison_string += "({mapped_field} {comparator} {value} AND {mapped_field}:*)".format(
mapped_field=mapped_field, comparator=comparator, value=value)
else:
comparison_string += "{mapped_field} {comparator} {value}".format(mapped_field=mapped_field,
comparator=comparator,
value=value)
if (mapped_fields_count > 1):
comparison_string += " OR "
mapped_fields_count -= 1
return comparison_string
def _parse_expression(self, expression, qualifier=None) -> str:
if isinstance(expression, ComparisonExpression): # Base Case
# Resolve STIX Object Path to a field in the target Data Model
stix_object, stix_field = expression.object_path.split(':')
# Multiple data source fields may map to the same STIX Object
mapped_fields_array = self.dmm.map_field(stix_object, stix_field)
# Resolve the comparison symbol to use in the query string (usually just ':')
comparator = self.comparator_lookup[expression.comparator]
if stix_field == 'start' or stix_field == 'end':
transformer = TimestampToMilliseconds()
expression.value = transformer.transform(expression.value)
# Some values are formatted differently based on how they're being compared
# if expression.comparator == ComparisonComparators.Matches: # needs forward slashes
# value = self._format_match(expression.value)
# should be (x, y, z, ...)
elif expression.comparator == ComparisonComparators.In:
value = self._format_set(expression.value)
elif expression.comparator == ComparisonComparators.Equal or \
expression.comparator == ComparisonComparators.NotEqual or \
expression.comparator == ComparisonComparators.IsSubSet or \
expression.comparator == ComparisonComparators.IsSuperSet:
value = self._format_equality(expression.value)
# '%' -> '*' wildcard, '_' -> '?' single wildcard
elif expression.comparator == ComparisonComparators.Like:
value = self._format_like(expression.value)
else:
value = self._escape_value(expression.value)
            comparison_string = self._parse_mapped_fields(expression, value, comparator, stix_field, mapped_fields_array)
if(len(mapped_fields_array) > 1):
# More than one data source field maps to the STIX attribute, so group comparisons together.
grouped_comparison_string = "(" + comparison_string + ")"
comparison_string = grouped_comparison_string
if expression.negated:
comparison_string = self._negate_comparison(comparison_string)
if qualifier is not None:
self.qualified_queries.append("{} {}".format(comparison_string, qualifier))
return ''
else:
return "{}".format(comparison_string)
elif isinstance(expression, CombinedComparisonExpression):
operator = self.comparator_lookup[expression.operator]
expression_01 = self._parse_expression(expression.expr1)
expression_02 = self._parse_expression(expression.expr2)
if not expression_01 or not expression_02:
return ''
if isinstance(expression.expr1, CombinedComparisonExpression):
expression_01 = "({})".format(expression_01)
if isinstance(expression.expr2, CombinedComparisonExpression):
expression_02 = "({})".format(expression_02)
query_string = "{} {} {}".format(expression_01, operator, expression_02)
if qualifier is not None:
self.qualified_queries.append("{} {}".format(query_string, qualifier))
return ''
else:
return "{}".format(query_string)
elif isinstance(expression, ObservationExpression):
return self._parse_expression(expression.comparison_expression, qualifier)
elif hasattr(expression, 'qualifier') and hasattr(expression, 'observation_expression'):
if isinstance(expression.observation_expression, CombinedObservationExpression):
operator = self.comparator_lookup[expression.observation_expression.operator]
# qualifier only needs to be passed into the parse expression once since it will be the same for both expressions
expression_01 = self._parse_expression(expression.observation_expression.expr1)
expression_02 = self._parse_expression(expression.observation_expression.expr2, expression.qualifier)
if expression_01:
return "{expr1}".format(expr1=expression_01)
else:
return self._parse_expression(expression.observation_expression.comparison_expression, expression.qualifier)
elif isinstance(expression, CombinedObservationExpression):
operator = self.comparator_lookup[expression.operator]
expression_01 = self._parse_expression(expression.expr1)
expression_02 = self._parse_expression(expression.expr2)
if expression_01 and expression_02:
return "({}) {} ({})".format(expression_01, operator, expression_02)
elif expression_01:
return "{}".format(expression_01)
elif expression_02:
return "{}".format(expression_02)
else:
return ''
elif isinstance(expression, Pattern):
return "{expr}".format(expr=self._parse_expression(expression.expression))
else:
raise RuntimeError("Unknown Recursion Case for expression={}, type(expression)={}".format(
expression, type(expression)))
def parse_expression(self, pattern: Pattern):
return self._parse_expression(pattern)
def _get_timestamp(mapped_field, comparator, value):
converted_value = None
time_pattern = '%Y-%m-%dT%H:%M:%S.%fZ'
epoch = datetime.datetime(1970, 1, 1)
# convert date value from UTC timestamp to epoch seconds
converted_epoch_seconds = int((datetime.datetime.strptime(value, time_pattern) - epoch).total_seconds())
if converted_epoch_seconds and comparator == ':>':
converted_epoch_seconds = converted_epoch_seconds + 1
# convert epoch seconds to UTC Timestamp format
value_in_timestamp = EpochSecondsToTimestamp.transform(converted_epoch_seconds)
# Form RANGE Query [UTC TIMESTAMP TO *]
converted_value = ':["{}" TO *]'.format(value_in_timestamp)
elif converted_epoch_seconds and comparator == ':<':
converted_epoch_seconds = converted_epoch_seconds - 1
# convert epoch seconds to UTC Timestamp format
value_in_timestamp = EpochSecondsToTimestamp.transform(converted_epoch_seconds)
# Form RANGE Query [* TO UTC TIMESTAMP]
converted_value = ':[* TO "{}"]'.format(value_in_timestamp)
elif comparator == ':<=':
# Form RANGE Query [* TO UTC TIMESTAMP]
converted_value = ':[* TO "{}"]'.format(value)
elif comparator == ':>=':
# Form RANGE Query [UTC TIMESTAMP TO *]
converted_value = ':["{}" TO *]'.format(value)
if converted_value:
return "({mapped_field}{value})".format(mapped_field=mapped_field,
value=converted_value)
def _test_or_add_milliseconds(timestamp) -> str:
if not _test_timestamp(timestamp):
raise ValueError("Invalid timestamp")
# remove single quotes around timestamp
timestamp = re.sub("'", "", timestamp)
# check for 3-decimal milliseconds
    pattern = r"\.\d+Z$"
if not bool(re.search(pattern, timestamp)):
timestamp = re.sub('Z$', '.000Z', timestamp)
return timestamp
def _test_START_STOP_format(query_string) -> bool:
# Matches STARTt'1234-56-78T00:00:00.123Z'STOPt'1234-56-78T00:00:00.123Z'
    pattern = r"START((t'\d{4}(-\d{2}){2}T\d{2}(:\d{2}){2}(\.\d+)?Z')|(\s\d{13}\s))STOP"
match = re.search(pattern, query_string)
return bool(match)
def _test_timerange_format(query_string) -> bool:
# Matches @timestamp:["2019-01-28T12:24:01.009Z" TO "2019-01-28T12:54:01.009Z]"
pattern = r'\@timestamp:\["\d{4}(-\d{2}){2}T\d{2}(:\d{2}){2}(\.\d+)?Z"\s*TO'
match = re.search(pattern, query_string)
return bool(match)
def _test_timestamp(timestamp) -> bool:
    pattern = r"^'\d{4}(-\d{2}){2}T\d{2}(:\d{2}){2}(\.\d+)?Z'$"
match = re.search(pattern, timestamp)
return bool(match)
def _convert_timestamps_to_milliseconds(query_parts):
# grab time stamps from array
start_time = _test_or_add_milliseconds(query_parts[2])
stop_time = _test_or_add_milliseconds(query_parts[4])
return query_parts[0] + ' AND (@timestamp:["' + str(start_time) + '" TO "' + str(stop_time) + '"])'
def _format_translated_queries(query_array):
# remove empty strings in the array
query_array = list(map(lambda x: x.strip(), list(filter(None, query_array))))
formatted_queries = []
for query in query_array:
if _test_START_STOP_format(query):
# Remove leading 't' before timestamps
query = re.sub("(?<=START)t|(?<=STOP)t", "", query)
# Split individual query to isolate timestamps
query_parts = re.split("(START)|(STOP)", query)
# Remove None array entries
query_parts = list(map(lambda x: x.strip(), list(filter(None, query_parts))))
if len(query_parts) == 5:
formatted_queries.append(_convert_timestamps_to_milliseconds(query_parts))
else:
logger.info("Omitting query due to bad format for START STOP qualifier timestamp")
continue
else:
formatted_queries.append(query)
return formatted_queries
def translate_pattern(pattern: Pattern, data_model_mapping, options):
# Added size parameter in tranmission module
#result_limit = options['result_limit']
timerange = options['timerange']
translated_query_strings = QueryStringPatternTranslator(pattern, data_model_mapping)
queries = []
translated_queries = translated_query_strings.qualified_queries
for query_string in translated_queries:
has_start_stop = _test_timerange_format(query_string)
if(has_start_stop):
queries.append("{}".format(query_string))
else:
# Set times based on default timerange or what is in the options
stop_time = datetime.datetime.utcnow()
go_back_in_minutes = datetime.timedelta(minutes=timerange)
start_time = stop_time - go_back_in_minutes
# converting from UTC timestamp 2019-04-13 23:13:06.130401 to
# string format 2019-04-13 23:13:06.130Z
converted_starttime = start_time.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
converted_stoptime = stop_time.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z'
timerange_str = 'AND (@timestamp:["' + str(converted_starttime) + '" TO "' + str(
converted_stoptime) + '"])'
queries.append("{} {}".format(query_string, timerange_str))
return queries
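# Illustrative sketch (not part of the original module): assuming a data model
# mapping in which ipv4-addr:value maps to the fields 'source.ip' and
# 'destination.ip', the STIX pattern
#     [ipv4-addr:value = '198.51.100.1']
# would be translated to roughly
#     (source.ip : "198.51.100.1" OR destination.ip : "198.51.100.1") AND (@timestamp:["<start>" TO "<stop>"])
# where the timestamp range comes from a START/STOP qualifier in the pattern
# or, failing that, from the 'timerange' option handled in translate_pattern.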
|
the-stack_0_18498 | import gtfs_data.loader
import unittest
TEST_FILE = 'gtfs_data/testdata/agency.txt'
TEST_FILE_BROKEN_CHARACTER = 'gtfs_data/testdata/calendar.txt'
FIRST_ROW = {
'agency_id': '03C',
'agency_name': 'GoAhead Commuter',
'agency_url': 'https://www.transportforireland.ie',
'agency_timezone': 'Europe/Dublin',
'agency_lang': 'EN'
}
class TestLoader(unittest.TestCase):
def testLoadAll(self):
result = gtfs_data.loader.Load(TEST_FILE)
self.assertEqual(len(result), 4)
self.assertEqual(result[0], FIRST_ROW)
def testLoadWithFilter(self):
result = gtfs_data.loader.Load(TEST_FILE, {'agency_id': set(['03C'])})
self.assertEqual(len(result), 1)
self.assertEqual(result[0], FIRST_ROW)
def testLoadWithMultiFilter(self):
result = gtfs_data.loader.Load(TEST_FILE, {
'agency_id': set(['03C', '978']),
'agency_lang': set(['EN'])})
self.assertEqual(len(result), 2)
self.assertEqual(result[0]['agency_id'], '03C')
self.assertEqual(result[1]['agency_id'], '978')
def testBrokenCharacterRemoval(self):
result = gtfs_data.loader.Load(TEST_FILE_BROKEN_CHARACTER)
self.assertIn('service_id', result[0].keys())
if __name__ == '__main__':
unittest.main()
|
the-stack_0_18499 | # Copyright 2018-present University of Tuebingen, Chair of Communication Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Marco Haeberle ([email protected])
# Joshua Hartmann
#
#
import sys
import grpc
try:
    import cPickle
except ImportError:
    # cPickle was merged into the standard pickle module in Python 3
    import pickle as cPickle
import topo_pb2
import topo_pb2_grpc
ca_path = '../tools/certstrap/out/p4sec-ca.crt'
cert_path = '../tools/certstrap/out/localhost.crt'
key_path = '../tools/certstrap/out/localhost.key'
class TopoClient:
def __init__(self, address):
# prepare tls creds
try:
with open(ca_path, 'rb') as ca_file:
ca = ca_file.read()
except (FileNotFoundError, PermissionError, IsADirectoryError) as e:
print(e)
sys.exit("[E] Error opening CA file")
try:
with open(cert_path, 'rb') as cert_file:
cert = cert_file.read()
except (FileNotFoundError, PermissionError, IsADirectoryError) as e:
print(e)
sys.exit("[E] Error opening cert file")
try:
with open(key_path, 'rb') as key_file:
key = key_file.read()
except (FileNotFoundError, PermissionError, IsADirectoryError) as e:
print(e)
sys.exit("[E] Error opening key file")
client_creds = grpc.ssl_channel_credentials(ca, key, cert)
self.channel = grpc.secure_channel(address, client_creds)
self.stub = topo_pb2_grpc.TopoServiceStub(self.channel)
self.key_bddp = None
def updateTopo(self, switch, topo):
request = topo_pb2.topo()
request.switch = switch
request.topo = cPickle.dumps(topo, cPickle.HIGHEST_PROTOCOL)
response = self.stub.updateTopo(request)
if not response.success:
print('ERROR topo client: ' + response.error)
else:
print('Topology update at controller successfull')
def registerController(self, address_local, switch, mac_switch):
request = topo_pb2.controller()
request.address = address_local
request.switch = switch
request.mac = mac_switch
response = self.stub.registerController(request)
if not response.status.success:
print('ERROR topo client when registering at central controller: ' + response.error)
else:
self.key_bddp = response.key
print('Controller successfully registered at central controller')
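# Illustrative usage sketch (not part of the original module). The controller
# addresses, switch name and MAC below are placeholders, and the calls only
# work where the certificate paths defined above and the generated gRPC stubs
# (topo_pb2 / topo_pb2_grpc) are available.
if __name__ == '__main__':
    client = TopoClient('central-controller.example.org:50051')
    client.registerController('local-controller.example.org:50052', 'switch-1', '00:11:22:33:44:55')
    client.updateTopo('switch-1', {'switch-1': []})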
|
the-stack_0_18506 | class UserSchema:
id: str
username: str
name: str
flags: int
icon: str
header: str
bio: str
website: str
location: str
application: str
class User:
def __init__(self, user: UserSchema, client):
self.id = user.get("id")
self.username = user.get("username")
self.name = user.get("name")
self.flags = user.get("flags")
self.icon = user.get("icon")
self.header = user.get("header")
self.bio = user.get("bio")
self.website = user.get("website")
self.location = user.get("location")
self.application = user.get("application")
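# Illustrative sketch (not part of the original module): User is built from an
# API payload dict whose keys follow the UserSchema annotations above; the
# `client` argument is accepted but not stored by __init__. The payload values
# below are placeholders.
if __name__ == "__main__":
    payload = {"id": "1", "username": "alice", "name": "Alice", "flags": 0}
    user = User(payload, client=None)
    print(user.username, user.bio)  # keys missing from the payload come back as None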
|
the-stack_0_18510 | # model settings
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://resnext101_32x4d',
backbone=dict(
type='ResNeXt',
depth=101,
groups=32,
base_width=4,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
style='pytorch'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.05, nms=dict(type='nms', iou_thr=0.5), max_per_img=100)
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(save_every_n_steps=2500, max_to_keep=1)
# yapf:disable
log_config = dict(interval=100)
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'faster_rcnn_x101_32x4d_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
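# Illustrative note (not part of the original config): in MMDetection 1.x a
# config like this is consumed by the standard entry points, e.g.
#     python tools/train.py <path/to/this/config>.py
# or loaded programmatically with mmcv (the path below is an assumption about
# where the file is saved):
#     from mmcv import Config
#     cfg = Config.fromfile('configs/faster_rcnn_x101_32x4d_fpn_1x.py')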
|
the-stack_0_18513 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of windows_tools module
"""
Windows registry simple API
Versioning semantics:
Major version: backward compatibility breaking changes
Minor version: New functionality
Patch version: Backwards compatible bug fixes
"""
__intname__ = "windows_tools.registry"
__author__ = "Orsiris de Jong"
__copyright__ = "Copyright (C) 2019-2021 Orsiris de Jong"
__description__ = "Windows registry 32 and 64 bits simple API"
__licence__ = "BSD 3 Clause"
__version__ = "1.0.1"
__build__ = "202110501"
from typing import List, NoReturn, Optional, Union
# that import is needed so we get CONSTANTS from winreg (eg HKEY_LOCAL_MACHINE etc) for direct use in module
from winreg import * # noqa ignore=F405
# The following lines make lint tools happy
from winreg import (
ConnectRegistry,
OpenKey,
EnumKey,
EnumValue,
QueryInfoKey,
QueryValueEx,
DeleteKey,
)
from winreg import KEY_WOW64_32KEY, KEY_WOW64_64KEY, KEY_READ, KEY_ALL_ACCESS, HKEYType
from windows_tools.misc import windows_ticks_to_date
def get_value(
hive: int,
key: str,
value: Optional[str],
arch: int = 0,
last_modified: bool = False,
) -> Union[str, dict]:
"""
Returns a value from a given registry path
:param hive: registry hive (windows.registry.HKEY_LOCAL_MACHINE...)
:param key: which registry key we're searching for
:param value: which value we query, may be None if unnamed value is searched
:param arch: which registry architecture we seek (0 = default, windows.registry.KEY_WOW64_64KEY, windows.registry.KEY_WOW64_32KEY)
Giving multiple arches here will return first result
:return: value
"""
def _get_value(hive: int, key: str, value: Optional[str], arch: int) -> str:
try:
open_reg = ConnectRegistry(None, hive)
open_key = OpenKey(open_reg, key, 0, KEY_READ | arch)
if last_modified:
output = {}
output["value"], key_type = QueryValueEx(open_key, value)
timestamp = windows_ticks_to_date(QueryInfoKey(open_key)[2])
output["last_modified"] = timestamp
else:
output, key_type = QueryValueEx(open_key, value)
# Return the first match
return output
except (FileNotFoundError, TypeError, OSError) as exc:
raise FileNotFoundError(
"Registry key [%s] with value [%s] not found. %s" % (key, value, exc)
)
# 768 = 0 | KEY_WOW64_64KEY | KEY_WOW64_32KEY (where 0 = default)
if arch == 768:
for _arch in [KEY_WOW64_64KEY, KEY_WOW64_32KEY]:
try:
return _get_value(hive, key, value, _arch)
except FileNotFoundError:
pass
raise FileNotFoundError
else:
return _get_value(hive, key, value, arch)
def get_values(
hive: int,
key: str,
names: List[str],
arch: int = 0,
combine: bool = False,
last_modified: bool = False,
) -> list:
"""
Returns a dictionnary of values in names from registry key
:param hive: registry hive (windows.registry.HKEY_LOCAL_MACHINE...)
:param key: which registry key we're searching for
:param names: which value names we query for
:param arch: which registry architecture we seek (0 = default, windows.registry.KEY_WOW64_64KEY, windows.registry.KEY_WOW64_32KEY)
:param combine: shall we combine multiple arch results or return first match
:return: list of strings
"""
def _get_values(hive: int, key: str, names: List[str], arch: int) -> list:
try:
open_reg = ConnectRegistry(None, hive)
open_key = OpenKey(open_reg, key, 0, KEY_READ | arch)
subkey_count, value_count, _ = QueryInfoKey(open_key)
output = []
for index in range(subkey_count):
values = {}
subkey_name = EnumKey(open_key, index)
subkey_handle = OpenKey(open_key, subkey_name)
for name in names:
try:
if last_modified:
values[name] = {}
values[name]["value"] = QueryValueEx(subkey_handle, name)[0]
timestamp = windows_ticks_to_date(
QueryInfoKey(subkey_handle)[2]
)
values[name]["last_modified"] = timestamp
else:
values[name] = QueryValueEx(subkey_handle, name)[0]
except (FileNotFoundError, TypeError):
pass
output.append(values)
return output
except (FileNotFoundError, TypeError, OSError) as exc:
raise FileNotFoundError("Cannot query registry key [%s]. %s" % (key, exc))
# 768 = 0 | KEY_WOW64_64KEY | KEY_WOW64_32KEY (where 0 = default)
if arch == 768:
result = []
for _arch in [KEY_WOW64_64KEY, KEY_WOW64_32KEY]:
try:
if combine:
result = result + (_get_values(hive, key, names, _arch))
else:
return _get_values(hive, key, names, _arch)
except FileNotFoundError:
pass
return result
else:
return _get_values(hive, key, names, arch)
OPEN_REGISTRY_HANDLE = None
def get_keys(
hive: int,
key: str,
arch: int = 0,
recursion_level: int = 1,
filter_on_names: List[str] = None,
combine: bool = False,
last_modified: bool = False,
) -> dict:
"""
:param hive: registry hive (windows.registry.HKEY_LOCAL_MACHINE...)
:param key: which registry key we're searching for
:param arch: which registry architecture we seek (0 = default, windows.registry.KEY_WOW64_64KEY, windows.registry.KEY_WOW64_32KEY)
:param recursion_level: recursivity level
:param filter_on_names: list of strings we search, if none given, all value names are returned
:param combine: shall we combine multiple arch results or return first match
:return: list of strings
"""
global OPEN_REGISTRY_HANDLE
def _get_keys(
hive: int, key: str, arch: int, recursion_level: int, filter_on_names: List[str]
):
global OPEN_REGISTRY_HANDLE
try:
if not OPEN_REGISTRY_HANDLE:
OPEN_REGISTRY_HANDLE = ConnectRegistry(None, hive)
open_key = OpenKey(OPEN_REGISTRY_HANDLE, key, 0, KEY_READ | arch)
subkey_count, value_count, _ = QueryInfoKey(open_key)
output = {}
values = []
for index in range(value_count):
name, value, type = EnumValue(open_key, index)
if isinstance(filter_on_names, list) and name not in filter_on_names:
pass
else:
if last_modified:
last_modified_date = windows_ticks_to_date(
QueryInfoKey(open_key)[2]
)
data = {
"name": name,
"value": value,
"type": type,
"last_modified": last_modified_date,
}
else:
data = {"name": name, "value": value, "type": type}
values.append(data)
if not values == []:
output[""] = values
if recursion_level > 0:
for subkey_index in range(subkey_count):
try:
subkey_name = EnumKey(open_key, subkey_index)
sub_values = get_keys(
hive=0,
key=key + "\\" + subkey_name,
arch=arch,
recursion_level=recursion_level - 1,
filter_on_names=filter_on_names,
last_modified=last_modified,
)
output[subkey_name] = sub_values
except FileNotFoundError:
pass
return output
except (FileNotFoundError, TypeError, OSError) as exc:
raise FileNotFoundError("Cannot query registry key [%s]. %s" % (key, exc))
# 768 = 0 | KEY_WOW64_64KEY | KEY_WOW64_32KEY (where 0 = default)
if arch == 768:
result = {}
for _arch in [KEY_WOW64_64KEY, KEY_WOW64_32KEY]:
try:
if combine:
result.update(
_get_keys(hive, key, _arch, recursion_level, filter_on_names)
)
else:
return _get_keys(hive, key, _arch, recursion_level, filter_on_names)
except FileNotFoundError:
pass
return result
else:
return _get_keys(hive, key, arch, recursion_level, filter_on_names)
def delete_sub_key(root_key: int, current_key: str, arch: int = 0) -> None:
"""
:param root_key: winreg registry root key constant
:param current_key:
:param arch:
:return:
"""
def _delete_sub_key(root_key: int, current_key: str, arch: int) -> NoReturn:
open_key = OpenKey(root_key, current_key, 0, KEY_ALL_ACCESS | arch)
info_key = QueryInfoKey(open_key)
for _ in range(0, info_key[0]):
# NOTE:: This code is to delete the key and all sub_keys.
# If you just want to walk through them, then
# you should pass x to EnumKey. sub_key = EnumKey(open_key, x)
# Deleting the sub_key will change the sub_key count used by EnumKey.
# We must always pass 0 to EnumKey so we
# always get back the new first sub_key.
sub_key = EnumKey(open_key, 0)
try:
DeleteKey(open_key, sub_key)
except OSError:
_delete_sub_key(root_key, "\\".join([current_key, sub_key]), arch)
# No extra delete here since each call
# to delete_sub_key will try to delete itself when its empty.
DeleteKey(open_key, "")
open_key.Close()
return
# 768 = 0 | KEY_WOW64_64KEY | KEY_WOW64_32KEY (where 0 = default)
if arch == 768:
for _arch in [KEY_WOW64_64KEY, KEY_WOW64_32KEY]:
_delete_sub_key(root_key, current_key, _arch)
else:
_delete_sub_key(root_key, current_key, arch)
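# Illustrative usage sketch (not part of the original module). It is only
# meaningful on Windows; the key queried below is a standard one present on
# current Windows installations.
if __name__ == "__main__":
    product_name = get_value(
        HKEY_LOCAL_MACHINE,
        r"SOFTWARE\Microsoft\Windows NT\CurrentVersion",
        "ProductName",
        arch=KEY_WOW64_64KEY | KEY_WOW64_32KEY,
    )
    print(product_name)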
|
the-stack_0_18514 | import collections
import numbers
import numpy as np
from brian2.core.variables import (Variables, Subexpression, get_dtype)
from brian2.groups.group import Group, CodeRunner
from brian2.utils.logger import get_logger
from brian2.units.fundamentalunits import Unit, Quantity
from brian2.units.allunits import second
__all__ = ['StateMonitor']
logger = get_logger(__name__)
class StateMonitorView(object):
def __init__(self, monitor, item):
self.monitor = monitor
self.item = item
self.indices = self._calc_indices(item)
self._group_attribute_access_active = True
def __getattr__(self, item):
# We do this because __setattr__ and __getattr__ are not active until
# _group_attribute_access_active attribute is set, and if it is set,
# then __getattr__ will not be called. Therefore, if getattr is called
# with this name, it is because it hasn't been set yet and so this
# method should raise an AttributeError to agree that it hasn't been
# called yet.
if item == '_group_attribute_access_active':
raise AttributeError
if not hasattr(self, '_group_attribute_access_active'):
raise AttributeError
mon = self.monitor
if item == 't':
return Quantity(mon.variables['t'].get_value(), dim=second.dim)
elif item == 't_':
return mon.variables['t'].get_value()
elif item in mon.record_variables:
unit = mon.variables[item].unit
return Quantity(mon.variables[item].get_value().T[self.indices],
dim=unit.dim, copy=True)
elif item.endswith('_') and item[:-1] in mon.record_variables:
return mon.variables[item[:-1]].get_value().T[self.indices].copy()
else:
raise AttributeError('Unknown attribute %s' % item)
def _calc_indices(self, item):
'''
Convert the neuron indices to indices into the stored values. For example, if neurons [0, 5, 10] have been
recorded, [5, 10] is converted to [1, 2].
'''
dtype = get_dtype(item)
# scalar value
if np.issubdtype(dtype, np.int) and not isinstance(item, np.ndarray):
indices = np.nonzero(self.monitor.record == item)[0]
if len(indices) == 0:
raise IndexError('Index number %d has not been recorded' % item)
return indices[0]
if self.monitor.record_all:
return item
indices = []
for index in item:
if index in self.monitor.record:
indices.append(np.nonzero(self.monitor.record == index)[0][0])
else:
raise IndexError('Index number %d has not been recorded' % index)
return np.array(indices)
def __repr__(self):
description = '<{classname}, giving access to elements {elements} recorded by {monitor}>'
return description.format(classname=self.__class__.__name__,
elements=repr(self.item),
monitor=self.monitor.name)
class StateMonitor(Group, CodeRunner):
'''
Record values of state variables during a run
To extract recorded values after a run, use the ``t`` attribute for the
array of times at which values were recorded, and variable name attribute
for the values. The values will have shape ``(len(indices), len(t))``,
where ``indices`` are the array indices which were recorded. When indexing
the `StateMonitor` directly, the returned object can be used to get the
recorded values for the specified indices, i.e. the indexing semantic
refers to the indices in ``source``, not to the relative indices of the
recorded values. For example, when recording only neurons with even numbers,
`mon[[0, 2]].v` will return the values for neurons 0 and 2, whereas
`mon.v[[0, 2]]` will return the values for the first and third *recorded*
neurons, i.e. for neurons 0 and 4.
Parameters
----------
source : `Group`
Which object to record values from.
variables : str, sequence of str, True
Which variables to record, or ``True`` to record all variables
(note that this may use a great deal of memory).
record : None, False, True, sequence of ints, optional
Which indices to record, nothing is recorded for ``None`` or ``False``,
everything is recorded for ``True`` (warning: may use a great deal of
memory), or a specified subset of indices. Defaults to ``None``.
dt : `Quantity`, optional
The time step to be used for the monitor. Cannot be combined with
the `clock` argument.
clock : `Clock`, optional
The update clock to be used. If neither a clock, nor the ``dt`` argument
is specified, the clock of the `source` will be used.
when : str, optional
At which point during a time step the values should be recorded.
Defaults to ``'start'``.
order : int, optional
The priority of of this group for operations occurring at the same time
step and in the same scheduling slot. Defaults to 0.
name : str, optional
A unique name for the object, otherwise will use
``source.name+'statemonitor_0'``, etc.
codeobj_class : `CodeObject`, optional
The `CodeObject` class to create.
Examples
--------
Record all variables, first 5 indices::
eqs = """
dV/dt = (2-V)/(10*ms) : 1
"""
threshold = 'V>1'
reset = 'V = 0'
G = NeuronGroup(100, eqs, threshold=threshold, reset=reset)
G.V = rand(len(G))
M = StateMonitor(G, True, record=range(5))
run(100*ms)
plot(M.t, M.V.T)
show()
Notes
-----
Since this monitor by default records in the ``'start'`` time slot,
recordings of the membrane potential in integrate-and-fire models may look
unexpected: the recorded membrane potential trace will never be above
threshold in an integrate-and-fire model, because the reset statement will
have been applied already. Set the ``when`` keyword to a different value if
this is not what you want.
Note that ``record=True`` only works in runtime mode for synaptic variables.
This is because the actual array of indices has to be calculated and this is
not possible in standalone mode, where the synapses have not been created
yet at this stage. Consider using an explicit array of indices instead,
i.e. something like ``record=np.arange(n_synapses)``.
'''
invalidates_magic_network = False
add_to_magic_network = True
def __init__(self, source, variables, record=None, dt=None, clock=None,
when='start', order=0, name='statemonitor*', codeobj_class=None):
self.source = source
# Make the monitor use the explicitly defined namespace of its source
# group (if it exists)
self.namespace = getattr(source, 'namespace', None)
self.codeobj_class = codeobj_class
# run by default on source clock at the end
if dt is None and clock is None:
clock = source.clock
# variables should always be a list of strings
if variables is True:
variables = source.equations.names
elif isinstance(variables, str):
variables = [variables]
#: The variables to record
self.record_variables = variables
# record should always be an array of ints
self.record_all = False
if hasattr(record, '_indices'):
# The ._indices method always returns absolute indices
# If the source is already a subgroup of another group, we therefore
# have to shift the indices to become relative to the subgroup
record = record._indices() - getattr(source, '_offset', 0)
if record is True:
self.record_all = True
try:
record = np.arange(len(source), dtype=np.int32)
except NotImplementedError:
# In standalone mode, this is not possible for synaptic
# variables because the number of synapses is not defined yet
raise NotImplementedError(('Cannot determine the actual '
'indices to record for record=True. '
'This can occur for example in '
'standalone mode when trying to '
'record a synaptic variable. '
'Consider providing an explicit '
'array of indices for the record '
'argument.'))
elif record is None or record is False:
logger.warn(('The StateMonitor set up to record the variable(s) '
'{vars} of "{source}" is not recording any value. '
'Did you forget to provide the record '
'argument?').format(vars=', '.join('"%s"' % var
for var in variables),
source=self.source.name),
once=True)
record = np.array([], dtype=np.int32)
elif isinstance(record, numbers.Number):
record = np.array([record], dtype=np.int32)
else:
record = np.asarray(record, dtype=np.int32)
#: The array of recorded indices
self.record = record
self.n_indices = len(record)
# Some dummy code so that code generation takes care of the indexing
# and subexpressions
code = ['_to_record_%s = _source_%s' % (v, v)
for v in variables]
code = '\n'.join(code)
CodeRunner.__init__(self, group=self, template='statemonitor',
code=code, name=name,
clock=clock,
dt=dt,
when=when,
order=order,
check_units=False)
self.add_dependency(source)
# Setup variables
self.variables = Variables(self)
self.variables.add_dynamic_array('t', size=0, unit=second,
constant=False, constant_size=False)
self.variables.add_array('N', unit=Unit(1), dtype=np.int32,
size=1, scalar=True, read_only=True)
self.variables.add_array('_indices', size=len(self.record),
unit=Unit(1), dtype=self.record.dtype,
constant=True, read_only=True,
values=self.record)
self.variables.create_clock_variables(self._clock,
prefix='_clock_')
for varname in variables:
var = source.variables[varname]
if var.scalar and len(self.record) > 1:
logger.warn(('Variable %s is a shared variable but it will be '
'recorded once for every target.' % varname),
once=True)
index = source.variables.indices[varname]
self.variables.add_reference('_source_%s' % varname,
source, varname, index=index)
            if index not in ('_idx', '0') and index not in variables:
self.variables.add_reference(index, source)
self.variables.add_dynamic_array(varname,
size=(0, len(self.record)),
resize_along_first=True,
unit=var.unit,
dtype=var.dtype,
constant=False,
constant_size=False)
for varname in variables:
var = self.source.variables[varname]
self.variables.add_auxiliary_variable('_to_record_' + varname,
unit=var.unit,
dtype=var.dtype,
scalar=var.scalar)
self.recorded_variables = dict([(varname, self.variables[varname])
for varname in variables])
recorded_names = [varname for varname in variables]
self.needed_variables = recorded_names
self.template_kwds = {'_recorded_variables': self.recorded_variables}
self._enable_group_attributes()
def resize(self, new_size):
self.variables['N'].set_value(new_size)
self.variables['t'].resize(new_size)
for var in self.recorded_variables.values():
var.resize((new_size, self.n_indices))
def reinit(self):
raise NotImplementedError()
def __getitem__(self, item):
dtype = get_dtype(item)
if np.issubdtype(dtype, np.int):
return StateMonitorView(self, item)
elif isinstance(item, collections.Sequence):
index_array = np.array(item)
if not np.issubdtype(index_array.dtype, np.int):
raise TypeError('Index has to be an integer or a sequence '
'of integers')
return StateMonitorView(self, item)
elif hasattr(item, '_indices'):
# objects that support the indexing interface will return absolute
# indices but here we need relative ones
# TODO: How to we prevent the use of completely unrelated objects here?
source_offset = getattr(self.source, '_offset', 0)
return StateMonitorView(self, item._indices() - source_offset)
else:
raise TypeError('Cannot use object of type %s as an index'
% type(item))
def __getattr__(self, item):
# We do this because __setattr__ and __getattr__ are not active until
# _group_attribute_access_active attribute is set, and if it is set,
# then __getattr__ will not be called. Therefore, if getattr is called
# with this name, it is because it hasn't been set yet and so this
# method should raise an AttributeError to agree that it hasn't been
# called yet.
if item == '_group_attribute_access_active':
raise AttributeError
if not hasattr(self, '_group_attribute_access_active'):
raise AttributeError
if item in self.record_variables:
unit = self.variables[item].unit
return Quantity(self.variables[item].get_value().T,
dim=unit.dim, copy=True)
elif item.endswith('_') and item[:-1] in self.record_variables:
return self.variables[item[:-1]].get_value().T
else:
return Group.__getattr__(self, item)
def __repr__(self):
description = '<{classname}, recording {variables} from {source}>'
return description.format(classname=self.__class__.__name__,
variables=repr(self.record_variables),
source=self.source.name)
def record_single_timestep(self):
'''
Records a single time step. Useful for recording the values at the end
of the simulation -- otherwise a `StateMonitor` will not record the
last simulated values since its ``when`` attribute defaults to
``'start'``, i.e. the last recording is at the *beginning* of the last
time step.
Notes
-----
This function will only work if the `StateMonitor` has been already run,
but a run with a length of ``0*ms`` does suffice.
Examples
--------
>>> from brian2 import *
>>> G = NeuronGroup(1, 'dv/dt = -v/(5*ms) : 1')
>>> G.v = 1
>>> mon = StateMonitor(G, 'v', record=True)
>>> run(0.5*ms)
>>> mon.v
array([[ 1. , 0.98019867, 0.96078944, 0.94176453, 0.92311635]])
>>> mon.t[:]
array([ 0., 100., 200., 300., 400.]) * usecond
>>> G.v[:] # last value had not been recorded
array([ 0.90483742])
>>> mon.record_single_timestep()
>>> mon.t[:]
array([ 0., 100., 200., 300., 400., 500.]) * usecond
>>> mon.v[:]
array([[ 1. , 0.98019867, 0.96078944, 0.94176453, 0.92311635,
0.90483742]])
'''
if self.codeobj is None:
raise TypeError('Can only record a single time step after the '
'network has been run once.')
self.codeobj()
|
the-stack_0_18515 | import sys
from collections import Counter
import lxml.html
from . import constants
from . import t
messages: t.Set[str]
messages = set()
messageCounts: t.Dict[str, int]
messageCounts = Counter()
def p(msg, sep=None, end=None):
if constants.quiet == float("infinity"):
return
if isinstance(msg, tuple):
msg, ascii = msg
else:
ascii = msg.encode("ascii", "replace").decode()
if constants.asciiOnly:
msg = ascii
try:
print(msg, sep=sep, end=end)
except UnicodeEncodeError:
if ascii is not None:
print(ascii, sep=sep, end=end)
else:
warning = formatMessage(
"warning",
"Your console does not understand Unicode.\n Messages may be slightly corrupted.",
)
if warning not in messages:
print(warning)
messages.add(warning)
print(msg.encode("ascii", "xmlcharrefreplace"), sep=sep, end=end)
def die(msg, el=None, lineNum=None):
if lineNum is None and el is not None and el.get("line-number"):
lineNum = el.get("line-number")
msg = formatMessage("fatal", msg, lineNum=lineNum)
if msg not in messages:
messageCounts["fatal"] += 1
messages.add(msg)
if constants.quiet < 3:
p(msg)
if constants.errorLevelAt("fatal"):
errorAndExit()
def linkerror(msg, el=None, lineNum=None):
if lineNum is None and el is not None and el.get("line-number"):
lineNum = el.get("line-number")
suffix = ""
if el is not None:
if el.get("bs-autolink-syntax"):
suffix = "\n" + el.get("bs-autolink-syntax")
else:
suffix = "\n" + lxml.html.tostring(el, with_tail=False, encoding="unicode")
msg = formatMessage("link", msg + suffix, lineNum=lineNum)
if msg not in messages:
messageCounts["linkerror"] += 1
messages.add(msg)
if constants.quiet < 2:
p(msg)
if constants.errorLevelAt("link-error"):
errorAndExit()
def warn(msg, el=None, lineNum=None):
if lineNum is None and el is not None and el.get("line-number"):
lineNum = el.get("line-number")
msg = formatMessage("warning", msg, lineNum=lineNum)
if msg not in messages:
messageCounts["warning"] += 1
messages.add(msg)
if constants.quiet < 1:
p(msg)
if constants.errorLevelAt("warning"):
errorAndExit()
def say(msg):
if constants.quiet < 1:
p(formatMessage("message", msg))
def success(msg):
if constants.quiet < 4:
p(formatMessage("success", msg))
def failure(msg):
if constants.quiet < 4:
p(formatMessage("failure", msg))
def resetSeenMessages():
global messages
messages = set()
global messageCounts
messageCounts = Counter()
def printColor(text, color="white", *styles):
if constants.printMode == "console":
colorsConverter = {
"black": 30,
"red": 31,
"green": 32,
"yellow": 33,
"blue": 34,
"magenta": 35,
"cyan": 36,
"light gray": 37,
"dark gray": 90,
"light red": 91,
"light green": 92,
"light yellow": 93,
"light blue": 94,
"light magenta": 95,
"light cyan": 96,
"white": 97,
}
stylesConverter = {
"normal": 0,
"bold": 1,
"bright": 1,
"dim": 2,
"underline": 4,
"underlined": 4,
"blink": 5,
"reverse": 7,
"invert": 7,
"hidden": 8,
}
colorNum = colorsConverter[color.lower()]
styleNum = ";".join(str(stylesConverter[style.lower()]) for style in styles)
return f"\033[{styleNum};{colorNum}m{text}\033[0m"
return text
def formatMessage(type, text, lineNum=None):
if constants.printMode == "markup":
text = text.replace("<", "<")
if type == "fatal":
return f"<fatal>{text}</fatal>"
if type == "link":
return f"<linkerror>{text}</linkerror>"
if type == "warning":
return f"<warning>{text}</warning>"
if type == "message":
return f"<message>{text}</message>"
if type == "success":
return f"<final-success>{text}</final-success>"
if type == "failure":
return f"<final-failure>{text}</final-failure>"
else:
if type == "message":
return text
if type == "success":
return (
printColor(" ✔ ", "green", "invert") + " " + text,
printColor("YAY", "green", "invert") + " " + text,
)
if type == "failure":
return (
printColor(" ✘ ", "red", "invert") + " " + text,
printColor("ERR", "red", "invert") + " " + text,
)
if type == "fatal":
headingText = "FATAL ERROR"
color = "red"
elif type == "link":
headingText = "LINK ERROR"
color = "yellow"
elif type == "warning":
headingText = "WARNING"
color = "light cyan"
if lineNum is not None:
headingText = f"LINE {lineNum}"
return printColor(headingText + ":", color, "bold") + " " + text
def errorAndExit():
failure("Did not generate, due to fatal errors")
sys.exit(2)
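# Illustrative sketch (not part of the original module): with
# constants.printMode == "console", a warning that carries a line number is
# rendered with a bold light-cyan heading, e.g.
#     formatMessage("warning", "missing anchor", lineNum=12)
#     -> "\x1b[1;96mLINE 12:\x1b[0m missing anchor"
# while printMode == "markup" yields "<warning>missing anchor</warning>".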
|
the-stack_0_18516 | """List hardware servers."""
# :license: MIT, see LICENSE for more details.
import click
import SoftLayer
from SoftLayer.CLI import columns as column_helper
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
# pylint: disable=unnecessary-lambda
COLUMNS = [
column_helper.Column('guid', ('globalIdentifier',)),
column_helper.Column('primary_ip', ('primaryIpAddress',)),
column_helper.Column('backend_ip', ('primaryBackendIpAddress',)),
column_helper.Column('datacenter', ('datacenter', 'name')),
column_helper.Column(
'action',
lambda server: formatting.active_txn(server),
mask='activeTransaction[id, transactionStatus[name, friendlyName]]'),
column_helper.Column('power_state', ('powerState', 'name')),
column_helper.Column(
'created_by',
('billingItem', 'orderItem', 'order', 'userRecord', 'username')),
column_helper.Column(
'tags',
lambda server: formatting.tags(server.get('tagReferences')),
mask="tagReferences.tag.name"),
]
DEFAULT_COLUMNS = [
'id',
'hostname',
'primary_ip',
'backend_ip',
'datacenter',
'action',
]
@click.command()
@click.option('--cpu', '-c', help='Filter by number of CPU cores')
@click.option('--domain', '-D', help='Filter by domain')
@click.option('--datacenter', '-d', help='Filter by datacenter')
@click.option('--hostname', '-H', help='Filter by hostname')
@click.option('--memory', '-m', help='Filter by memory in gigabytes')
@click.option('--network', '-n', help='Filter by network port speed in Mbps')
@helpers.multi_option('--tag', help='Filter by tags')
@click.option('--sortby', help='Column to sort by', default='hostname', show_default=True)
@click.option('--columns',
callback=column_helper.get_formatter(COLUMNS),
help='Columns to display. [options: %s]' % ', '.join(column.name for column in COLUMNS),
default=','.join(DEFAULT_COLUMNS),
show_default=True)
@click.option('--limit', '-l',
help='How many results to get in one api call, default is 100',
default=100,
show_default=True)
@environment.pass_env
def cli(env, sortby, cpu, domain, datacenter, hostname, memory, network, tag, columns, limit):
"""List hardware servers."""
manager = SoftLayer.HardwareManager(env.client)
servers = manager.list_hardware(hostname=hostname,
domain=domain,
cpus=cpu,
memory=memory,
datacenter=datacenter,
nic_speed=network,
tags=tag,
mask="mask(SoftLayer_Hardware_Server)[%s]" % columns.mask(),
limit=limit)
table = formatting.Table(columns.columns)
table.sortby = sortby
for server in servers:
table.add_row([value or formatting.blank()
for value in columns.row(server)])
env.fout(table)
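# Illustrative usage (not part of the original module): this command is exposed
# through the `slcli` entry point, e.g.
#     slcli hardware list --datacenter dal13 --columns id,hostname,primary_ip
# where the --columns values must come from the COLUMNS definitions above.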
|
the-stack_0_18517 | from typing import List, Optional
from collections import OrderedDict
from .bgg import Bgg
class Item(Bgg):
id: int
objecttype: str
subtype: str
objectid: int
objectname: str
username: str
postdate: str
editdate: str
thumbs: int
imageid: int
body: Optional[str]
@classmethod
def create(cls, item: OrderedDict):
_item = Item(
id=Bgg.parse_int(item.get("@id")),
objecttype=item.get("@objecttype"),
subtype=item.get("@subtype"),
objectid=item.get("@objectid"),
objectname=item.get("@objectname"),
username=item.get("@username"),
postdate=item.get("@postdate"),
editdate=item.get("@editdate"),
thumbs=Bgg.parse_int(item.get("@thumbs")),
imageid=Bgg.parse_int(item.get("@imageid")),
body=item.get("body"),
)
return _item
class GeekList(Bgg):
id: int
postdate: str
postdate_timestamp: int
editdate: str
editdate_timestamp: int
thumbs: str
numitems: str
username: str
title: str
description: str
item: List[Item]
@classmethod
def create(cls, geekList: OrderedDict):
        if isinstance(geekList.get("item"), OrderedDict):
items = [geekList.get("item")]
else:
items = geekList.get("item")
_items = [Item.create(item) for item in items]
return GeekList(
id=Bgg.parse_int(geekList.get("@id")),
postdate=geekList.get("postdate"),
postdate_timestamp=Bgg.parse_int(geekList.get("postdate_timestamp")),
editdate=geekList.get("editdate"),
editdate_timestamp=Bgg.parse_int(geekList.get("editdate_timestamp")),
thumbs=Bgg.parse_int(geekList.get("thumbs")),
numitems=Bgg.parse_int(geekList.get("numitems")),
username=geekList.get("username"),
title=geekList.get("title"),
description=geekList.get("description"),
item=_items,
)
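# Illustrative sketch (not part of the original module): the OrderedDicts these
# factory methods expect come from parsing a BGG geeklist XML API response with
# xmltodict; the "geeklist" root key below mirrors the XML root element and is
# an assumption about the caller's code.
#     import xmltodict
#     parsed = xmltodict.parse(xml_text)  # xml_text fetched from the geeklist endpoint
#     geeklist = GeekList.create(parsed["geeklist"])
#     print(geeklist.title, len(geeklist.item))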
|
the-stack_0_18524 | # -*- coding: utf-8 -*-
"""Cisco DNA Center CountOfNotifications data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidatorFd269Fe156E4B5Ad3F4210B7B168(object):
"""CountOfNotifications request schema definition."""
def __init__(self):
super(JSONSchemaValidatorFd269Fe156E4B5Ad3F4210B7B168, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"$schema": "http://json-schema.org/draft-04/schema#",
"properties": {
"response": {
"type": "string"
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
|
the-stack_0_18525 | #
# Copyright 2019, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import itertools
import json
import os
import os.path
import platform
import re
import sys
import warnings
from distutils.command.install_headers import install_headers as install_headers_orig
from shutil import copyfile, copymode
from setuptools.command.build_ext import build_ext
import pathlib
import gen_config
curdir = pathlib.Path(__file__).parent
def get_json_build_cfg():
with open(str(curdir.joinpath("cbuild_cfg.json"))) as JSONFILE:
return json.load(JSONFILE)
BUILD_CFG = get_json_build_cfg()
PYCBC_LCB_API = os.getenv("PYCBC_LCB_API", BUILD_CFG.get('comp_options', {}).get('PYCBC_LCB_API'))
def get_all_sources():
return BUILD_CFG.get('source', []) + BUILD_CFG.get('apis', {}).get(PYCBC_LCB_API, {}).get('sources', [])
def get_sources():
sources_ext={}
all_sources = get_all_sources()
SOURCEMODS = list(filter(re.compile(r'^.*\.c$').match, all_sources))
SOURCEMODS_CPP = list(filter(re.compile(r'^.*\.(cpp|cxx|cc)$').match, all_sources))
sources_ext['sources'] = list(map(str, SOURCEMODS+SOURCEMODS_CPP))
return sources_ext
couchbase_core = BUILD_CFG.get("comp_options",{}).get("PYCBC_CORE","couchbase")
def get_cbuild_options():
extoptions={}
extoptions['extra_compile_args'] = []
extoptions['extra_link_args'] = []
def boolean_option(flag):
return ["-D{}={}".format(flag, os.environ.get(flag))]
def string_option(flag):
return ["-D{}={}".format(flag, os.environ.get(flag))]
COMP_OPTION_PREFIX = "PYCBC_COMP_OPT_"
def comp_option(flag):
return ["-{}={}".format(flag.replace(COMP_OPTION_PREFIX, ""), os.environ.get(flag))]
COMP_OPTION_BOOL_PREFIX = "PYCBC_COMP_OPT_BOOL_"
def comp_option_bool(flag):
return ["-{}".format(flag.replace(COMP_OPTION_BOOL_PREFIX, ""))]
CLANG_SAN_OPTIONS = {"address": "lsan", "undefined": "ubsan"}
CLANG_SAN_PREFIX = "PYCBC_SAN_OPT_"
def comp_clang_san_option(flag):
san_option = flag.replace(CLANG_SAN_PREFIX, "")
fsanitize_statements = ["-fsanitize={}".format(san_option), "-fno-omit-frame-pointer"]
extoptions['extra_link_args'] += fsanitize_statements + ['-Llibclang_rt.asan_osx_dynamic']
return fsanitize_statements
def comp_option_pattern(prefix):
return re.escape(prefix) + ".*"
comp_flags = {"PYCBC_STRICT": boolean_option,
"PYCBC_TABBED_CONTEXTS_ENABLE": boolean_option,
"PYCBC_LCB_API": string_option,
"PYCBC_REF_ACCOUNTING": boolean_option,
"PYCBC_TRACING_DISABLE": boolean_option, "PYCBC_DEBUG": boolean_option,
"PYCBC_GEN_PYTHON": boolean_option,
"PYCBC_CRYPTO_VERSION": boolean_option, comp_option_pattern(COMP_OPTION_PREFIX): comp_option,
comp_option_pattern(COMP_OPTION_BOOL_PREFIX): comp_option_bool,
comp_option_pattern(CLANG_SAN_PREFIX): comp_clang_san_option}
debug_symbols = len(set(os.environ.keys()) & {"PYCBC_DEBUG", "PYCBC_DEBUG_SYMBOLS"}) > 0
comp_arg_additions = list(itertools.chain.from_iterable(
action(actual_flag) for flag, action in comp_flags.items() for actual_flag in os.environ.keys() if
re.match(flag, actual_flag)))
print(comp_arg_additions)
extoptions['include_dirs'] = []
extoptions['extra_compile_args'] += list(comp_arg_additions)
return extoptions, debug_symbols
def get_ext_options():
extoptions, debug_symbols = get_cbuild_options()
pkgdata = {}
if sys.platform != 'win32':
extoptions['extra_compile_args'] += ['-Wno-strict-prototypes', '-fPIC','-std=c11']
extoptions['libraries'] = ['couchbase']
if debug_symbols:
extoptions['extra_compile_args'] += ['-O0', '-g3']
extoptions['extra_link_args'] += ['-O0', '-g3']
if sys.platform == 'darwin':
extoptions['library_dirs'] = ['/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/10.0.0/lib/darwin/']
extoptions['extra_compile_args']+=['-Wsometimes-uninitialized','-Wconditional-uninitialized']
extoptions['extra_compile_args']+=['-Wuninitialized',
'-Wswitch','-Werror','-Wno-missing-braces']
print(pkgdata)
else:
if sys.version_info < (3, 0, 0):
raise RuntimeError("Windows on Python earlier than v3 unsupported.")
warnings.warn("I'm detecting you're running windows."
"You might want to modify "
"the 'setup.py' script to use appropriate paths")
# The layout i have here is an ..\lcb-winbuild, in which there are subdirs
# called 'x86' and 'x64', for x86 and x64 architectures. The default
# 'nmake install' on libcouchbase will install them to 'deps'
bit_type = platform.architecture()[0]
lcb_root = os.path.join(os.path.pardir, 'lcb-winbuild')
if bit_type.startswith('32'):
lcb_root = os.path.join(lcb_root, 'x86')
else:
lcb_root = os.path.join(lcb_root, 'x64')
lcb_root = os.path.join(lcb_root, 'deps')
extoptions['libraries'] = ['libcouchbase']
if debug_symbols:
extoptions['extra_compile_args'] += ['/Zi', '/DEBUG', '/O0']
extoptions['extra_link_args'] += ['/DEBUG', '-debug']
extoptions['library_dirs'] = [os.path.join(lcb_root, 'lib')]
extoptions['include_dirs'] = [os.path.join(lcb_root, 'include')]
extoptions['define_macros'] = [('_CRT_SECURE_NO_WARNINGS', 1)]
pkgdata[couchbase_core] = ['libcouchbase.dll']
extoptions['extra_compile_args']+=['-DPYCBC_LCB_API={}'.format(PYCBC_LCB_API)]
extoptions.update(get_sources())
return extoptions, pkgdata
class CBuildInfo:
def __init__(self, cmake_base=None):
self.setbase(cmake_base)
self.cfg="Release"
self.pkg_data_dir=os.path.join(couchbase_core)
@property
def base(self):
print("self.base is {}".format(self._cmake_base))
return self._cmake_base
def setbase(self, path):
self._cmake_base=(path if isinstance(path,list) else list(os.path.split(path))) if path else None
print("set base as {}".format(self._cmake_base))
@base.setter
def base(self, path):
self.setbase(path)
def entries(self):
plat = get_plat_code()
print("Got platform {}".format(plat))
default = ['libcouchbase.so.6']
return {'darwin': ['libcouchbase.2.dylib', 'libcouchbase.dylib'], 'linux': default,
'win': ['libcouchbase_d.dll','libcouchbase.dll']}.get(get_plat_code(), default)
def lcb_build_base(self):
print("self.base is {}".format(self.base))
return self._cmake_base + ['install', 'lib']
def lcb_pkgs_srcs(self):
return {'Debug':self.lcb_build_base() + ['Debug'],'Release':self.lcb_build_base() + ['Release']}
def lcb_pkgs(self, cfg):
return map(lambda x: self.lcb_pkgs_srcs()[cfg] + [x], self.entries())
def lcb_pkgs_strlist(self):
print("got pkgs {}".format(self.entries()))
for x in self.entries():
print("yielding binary {} : {}".format(x, os.path.join(self.pkg_data_dir,x)))
yield os.path.join(self.pkg_data_dir, x)
def get_rpaths(self, cfg):
result= [{'Darwin': '@loader_path', 'Linux': '$ORIGIN'}.get(platform.system(), "$ORIGIN"),
os.path.join(*self.lcb_pkgs_srcs()[cfg])]
print("got rpaths {}".format(result))
return result
def get_lcb_dirs(self):
lcb_dbg_build = os.path.join(*(self.base + ["install", "lib", "Debug"]))
lcb_build = os.path.join(*(self.base + ["install", "lib", "Release"]))
lib_dirs = [lcb_dbg_build, lcb_build]
return lib_dirs
class LazyCommandClass(dict):
"""
    Lazy command class that defers operations requiring the given cmdclass until
    the packages it depends on have actually been downloaded and installed by setup_requires.
"""
def __init__(self, cmdclass_real):
super(LazyCommandClass, self).__init__()
self.cmdclass_real=cmdclass_real
def __contains__(self, key):
return (
key == 'build_ext'
or super(LazyCommandClass, self).__contains__(key)
)
def __setitem__(self, key, value):
if key == 'build_ext':
raise AssertionError("build_ext overridden!")
super(LazyCommandClass, self).__setitem__(key, value)
def __getitem__(self, key):
if key != 'build_ext':
return super(LazyCommandClass, self).__getitem__(key)
return self.cmdclass_real
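# Usage sketch (an assumption, not taken from this file's actual setup() call):
# LazyCommandClass is meant to be passed as the cmdclass mapping so the real
# build_ext implementation is only resolved when setuptools asks for it, e.g.:
#
#   setup(
#       ...,
#       cmdclass=LazyCommandClass(CBuildCommon),  # hypothetical choice of build class
#   )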
class CBuildCommon(build_ext):
@classmethod
def setup_build_info(cls, extoptions, pkgdata):
cls.info = CBuildInfo()
cls.info.pkgdata = pkgdata
cls.info.pkg_data_dir = os.path.join(os.path.abspath("."), couchbase_core)
pkgdata['couchbase'] = list(cls.info.lcb_pkgs_strlist())
extoptions['library_dirs'] = [cls.info.pkg_data_dir] + extoptions.get('library_dirs', [])
def build_extension(self, ext):
self.init_info_and_rpaths(ext)
self.prep_build(ext)
self.add_inc_and_lib_bundled(ext, self.get_lcb_api_flags())
build_ext.build_extension(self, ext)
def prep_build(self, ext):
pass
def init_info_and_rpaths(self, ext):
self.ssl_config = gen_config.gen_config(self.build_temp, couchbase_core=couchbase_core)
self.info.setbase(self.build_temp)
self.info.cfg = self.cfg_type()
self.compiler.add_include_dir(os.path.join(*self.info.base+["install","include"]))
self.compiler.add_library_dir(os.path.join(*self.info.base+["install","lib",self.cfg_type()]))
if sys.platform == 'darwin':
warnings.warn('Adding /usr/local to lib search path for OS X')
self.compiler.add_library_dir('/usr/local/lib')
self.compiler.add_include_dir('/usr/local/include')
self.add_rpaths(ext)
def add_rpaths(self, ext=None, extoptions=None):
rpaths=self.info.get_rpaths(self.cfg_type())
if platform.system() != 'Windows':
if self.compiler:
try:
existing_rpaths = self.compiler.runtime_library_dirs
self.compiler.set_runtime_library_dirs(rpaths + existing_rpaths)
except:
pass
for rpath in rpaths:
if self.compiler:
self.compiler.add_runtime_library_dir(rpath)
linker_arg='-Wl,-rpath,' + rpath
ext.runtime_library_dirs=(ext.runtime_library_dirs if ext.runtime_library_dirs else [])+[rpath]
ext.extra_link_args+=[linker_arg]
(extoptions['extra_link_args'] if extoptions else ext.extra_link_args if ext else []).insert(0,linker_arg)
def cfg_type(self):
return 'Debug' if self.debug else 'Release'
def copy_binary_to(self, cfg, dest_dir, lib_paths, name):
try:
os.makedirs(dest_dir)
except:
pass
dest = os.path.join(dest_dir, name)
failures = {}
lib_paths_prioritized = [(k, v) for k, v in lib_paths.items() if k == cfg]
lib_paths_prioritized += [(k, v) for k, v in lib_paths.items() if k != cfg]
for rel_type, binary_path in lib_paths_prioritized:
src = os.path.join(*(binary_path + [name]))
try:
if os.path.exists(src):
print("copying {} to {}".format(src, dest))
copyfile(src, dest)
print("success")
except Exception as e:
failures[rel_type] = "copying {} to {}, got {}".format(src, dest, repr(e))
if len(failures) == len(lib_paths):
raise Exception("Failed to copy binary: {}".format(failures))
def copy_test_file(self, src_file):
'''
        Copy ``src_file`` into the ``tests/bin`` directory, ensuring the parent directory exists.
        By default, messages like `creating directory /path/to/package` and
        `copying directory /src/path/to/package -> path/to/package` are displayed on standard output. Adapted from scikit-build.
'''
# Create directory if needed
dest_dir = os.path.join(os.path.dirname(
os.path.abspath(__file__)), 'tests', 'bin')
if dest_dir != "" and not os.path.exists(dest_dir):
print("creating directory {}".format(dest_dir))
os.makedirs(dest_dir)
# Copy file
dest_file = os.path.join(dest_dir, os.path.basename(src_file))
print("copying {} -> {}".format(src_file, dest_file))
copyfile(src_file, dest_file)
copymode(src_file, dest_file)
def add_inc_and_lib_bundled(self, ext, lcb_api_flags):
from distutils.ccompiler import CCompiler
ext.extra_compile_args += lcb_api_flags
compiler = self.compiler # type: CCompiler
lcb_include = os.path.join(self.build_temp, "install", "include")
try:
compiler.set_include_dirs([lcb_include]+compiler.include_dirs)
except:
compiler.add_include_dirs([lcb_include])
lib_dirs = [self.info.pkg_data_dir] + self.info.get_lcb_dirs()
try:
existing_lib_dirs = compiler.library_dirs
compiler.set_library_dirs(lib_dirs + existing_lib_dirs)
except:
compiler.add_library_dirs(lib_dirs)
def get_pycbc_lcb_api(self):
return os.getenv("PYCBC_LCB_API",
BUILD_CFG.get('comp_options', {}).get('PYCBC_LCB_API', None))
def get_lcb_api_flags(self):
pycbc_lcb_api=self.get_pycbc_lcb_api()
return ['-DPYCBC_LCB_API={}'.format(pycbc_lcb_api)] if pycbc_lcb_api else []
class install_headers(install_headers_orig):
def run(self):
headers = self.distribution.headers or []
for header in headers:
dst = os.path.join(self.install_dir, os.path.dirname(header))
self.mkpath(dst)
(out, _) = self.copy_file(header, dst)
self.outfiles.append(out)
def get_plat_code():
plat = sys.platform.lower()
substitutions = {'win': r'^win.*$'}
for target, pattern in substitutions.items():
plat = re.compile(pattern).sub(target, plat)
return plat
build_type = os.getenv("PYCBC_BUILD",
{"Windows": "CMAKE_HYBRID", "Darwin": "CMAKE_HYBRID", "Linux": "CMAKE_HYBRID"}.get(platform.system(),
"CMAKE_HYBRID"))
|
the-stack_0_18526 | #!/usr/bin/env python2
from __future__ import print_function
"""Iterative version of Fibonacci."""
i = 0
n = 10
a = 0
b = 1
while 1: # Slightly easier to compile than 'while True:'
  # Artificial change to test 'continue'
if i == 0:
i = i + 1
continue
print(b)
# NOTE: This would generate BUILD_TUPLE and UNPACK_SEQUENCE bytecodes.
#a, b = b, a+b
tmp = a
a = b
b = tmp + b
i = i + 1 # Don't use augmented assignment
if i == n:
break
print('Done fib_iterative.py') # To make sure we implemented 'break' properly
|
the-stack_0_18527 | # Copyright 2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the :class:`.Parametrized` class which acts as
an abstract base class for all parametrized objects
"""
from mrmustard.types import Tensor, Dict, List, Trainable
from mrmustard.math import Math
math = Math()
class Parametrized:
r"""Abstract base class for all parametrized objects (gates, detectors, etc.)
    For each trainable parameter ``xxx``, keyword arguments must be passed for the initial value ``xxx``
    (tensor), the numerical bounds ``xxx_bounds`` ((float, float)) and whether the parameter will
    be optimized ``xxx_trainable`` (bool), along with any other parameters.
"""
def __init__(self, **kwargs): # NOTE: only kwargs so that we can use the arg names
self._trainable_parameters = []
self._constant_parameters = []
self._param_names = []
owner = f"{self.__class__.__qualname__}"
for name, value in kwargs.items():
if math.from_backend(value):
if math.is_trainable(value):
self._trainable_parameters.append(value)
elif name + "_trainable" in kwargs and kwargs[name + "_trainable"]:
value = math.new_variable(value, kwargs[name + "_bounds"], owner + ":" + name)
self._trainable_parameters.append(value)
else:
self._constant_parameters.append(value)
elif name + "_trainable" in kwargs and kwargs[name + "_trainable"]:
value = math.new_variable(value, kwargs[name + "_bounds"], owner + ":" + name)
self._trainable_parameters.append(value)
elif name + "_trainable" in kwargs and not kwargs[name + "_trainable"]:
value = math.new_constant(value, owner + ":" + name)
self._constant_parameters.append(value)
else:
name = "_" + name
self.__dict__[name] = value
self._param_names += [] if name.startswith("_") else [name]
@property
def trainable_parameters(self) -> Dict[str, List[Trainable]]:
r"""Returns the dictionary of trainable parameters, searching recursively in the object tree (for example, when in a Circuit)."""
if hasattr(self, "_ops"):
return {
"symplectic": math.unique_tensors(
[p for item in self._ops for p in item.trainable_parameters["symplectic"]]
),
"orthogonal": math.unique_tensors(
[p for item in self._ops for p in item.trainable_parameters["orthogonal"]]
),
"euclidean": math.unique_tensors(
[p for item in self._ops for p in item.trainable_parameters["euclidean"]]
),
}
return {
"symplectic": [],
"orthogonal": [],
"euclidean": self._trainable_parameters,
} # default
@property
def constant_parameters(self) -> Dict[str, List[Tensor]]:
r"""Returns the dictionary of constant parameters, searching recursively in the object tree (for example, when in a Circuit)."""
if hasattr(self, "_ops"):
return {
"symplectic": math.unique_tensors(
[p for item in self._ops for p in item.constant_parameters["symplectic"]]
),
"orthogonal": math.unique_tensors(
[p for item in self._ops for p in item.constant_parameters["orthogonal"]]
),
"euclidean": math.unique_tensors(
[p for item in self._ops for p in item.constant_parameters["euclidean"]]
),
}
return {
"symplectic": [],
"orthogonal": [],
"euclidean": self._constant_parameters,
} # default
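# Minimal usage sketch (an illustration only, not taken from the library docs):
# a hypothetical subclass following the ``xxx`` / ``xxx_bounds`` / ``xxx_trainable``
# keyword convention described in the class docstring.
#
#   class MyGate(Parametrized):
#       def __init__(self, theta=0.0, theta_trainable=True, theta_bounds=(None, None)):
#           super().__init__(theta=theta,
#                            theta_trainable=theta_trainable,
#                            theta_bounds=theta_bounds)
#
#   g = MyGate(theta=0.1)
#   g.trainable_parameters["euclidean"]   # contains the backend variable built for theta
#   g.constant_parameters["euclidean"]    # empty here, since theta is trainable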
|
the-stack_0_18528 | # Copyright (c) 2020, Huawei Technologies.All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np
import sys
import copy
from common_utils import TestCase, run_tests
from common_device_type import dtypes, instantiate_device_type_tests
from util_test import create_common_tensor
class TestDiag(TestCase):
def cpu_op_exec(self, input, diagonal):
output = torch.diag(input, diagonal=diagonal)
output = output.numpy()
return output
def npu_op_exec(self, input, diagonal):
output = torch.diag(input, diagonal=diagonal)
output = output.to("cpu")
output = output.numpy()
return output
def cpu_op_exec_out(self, input, diagonal, out):
torch.diag(input, diagonal=diagonal, out=out)
output = out.numpy()
return output
def npu_op_exec_out(self, input, diagonal, out):
torch.diag(input, diagonal=diagonal, out=out)
output = out.to("cpu")
output = output.numpy()
return output
def generate_npu_data(self, min_d, max_d, shape, dtype):
input1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
output1 = np.random.uniform(min_d, max_d, shape).astype(dtype)
npu_input1 = torch.from_numpy(input1)
npu_output1 = torch.from_numpy(output1)
return npu_input1, npu_output1
def test_diag_common_shape_format(self, device):
shape_format = [
[[np.float32, -1, [16]], 0], # test the condition of 1-dimension
[[np.float32, -1, [1024]], 0],
[[np.float32, -1, [5, 5]], 0], # test the condition of 2-dimension
[[np.float32, -1, [256, 256]], 0],
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[0], 0, 100)
cpu_output = self.cpu_op_exec(cpu_input, item[1])
npu_output = self.npu_op_exec(npu_input, item[1])
self.assertRtolEqual(cpu_output, npu_output)
def test_diag_float32_out(self, device):
shape_format = [
[[np.float32, -1, [16]], [np.float32, -1, [20]], 0], # test the condition of 1-dimension
[[np.float32, -1, [1024]], [np.float32, -1, [20, 20]], 0],
[[np.float32, -1, [5, 5]], [np.float32, -1, [5, 5, 5]], 0], # test the condition of 2-dimension
[[np.float32, -1, [256, 256]], [np.float32, -1, [256]], 0],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 100)
cpu_input2, npu_input2 = create_common_tensor(item[1], 0, 100)
cpu_output = self.cpu_op_exec(cpu_input1, item[2])
npu_output = self.npu_op_exec_out(npu_input1, item[2], npu_input2)
self.assertRtolEqual(cpu_output, npu_output)
def test_diag_float16_out(self, device):
shape_format = [
[[np.float16, -1, [16]], [np.float16, -1, [20]], 0], # test the condition of 1-dimension
[[np.float16, -1, [1024]], [np.float16, -1, [20, 20]], 0],
[[np.float16, -1, [5, 5]], [np.float16, -1, [5, 5, 5]], 0], # test the condition of 2-dimension
[[np.float16, -1, [256, 256]], [np.float16, -1, [256]], 0],
]
for item in shape_format:
cpu_input1, npu_input1 = create_common_tensor(item[0], 0, 100)
cpu_input2, npu_input2 = create_common_tensor(item[1], 0, 100)
if cpu_input1.dtype == torch.float16:
cpu_input1 = cpu_input1.to(torch.float32)
cpu_output = self.cpu_op_exec(cpu_input1, item[2])
npu_output = self.npu_op_exec_out(npu_input1, item[2], npu_input2)
cpu_output = cpu_output.astype(npu_output.dtype)
self.assertRtolEqual(cpu_output, npu_output)
def test_diag_float16_shape_format(self, device):
def cpu_op_exec_fp16(input, diagonal):
input = input.to(torch.float32)
output = torch.diag(input, diagonal)
output = output.numpy()
output = output.astype(np.float16)
return output
shape_format = [
[[np.float16, -1, [4]], 0], # test the condition of 1-dimension
[[np.float16, -1, [512]], 0],
[[np.float16, -1, [4, 4]], 0], # test the condition of 2-dimension
[[np.float16, -1, [256, 256]], 0],
]
for item in shape_format:
cpu_input, npu_input = create_common_tensor(item[0], 1, 100)
cpu_output = cpu_op_exec_fp16(cpu_input, item[1])
npu_output = self.npu_op_exec(npu_input, item[1])
self.assertRtolEqual(cpu_output, npu_output)
instantiate_device_type_tests(TestDiag, globals(), except_for="cpu")
if __name__ == "__main__":
run_tests()
|
the-stack_0_18529 | import sys
import importlib
COMPILER_ASM_MODULE = "Compiler.ASM.Codegen"
COMPILER_VM_MODULE = "Compiler.VM.Codegen"
INTERPRETER_MODULE = "Interpreter.Eval"
class AST:
def __init__(self, _class, _name):
self._class = _class
self._name = _name
def compile_asm(self, compiler):
module_name = "%s.%s" % (COMPILER_ASM_MODULE, self._class)
if module_name not in sys.modules:
importlib.import_module(module_name)
return getattr(sys.modules[module_name], self._name)(compiler, self)
def compile_vm(self, commands, data):
module_name = "%s.%s" % (COMPILER_VM_MODULE, self._class)
if module_name not in sys.modules:
importlib.import_module(module_name)
return getattr(sys.modules[module_name], self._name)(commands, data, self)
def interpret(self, env):
module_name = "%s.%s" % (INTERPRETER_MODULE, self._class)
if module_name not in sys.modules:
importlib.import_module(module_name)
return getattr(sys.modules[module_name], self._name)(env, self)
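# Usage sketch (illustrative; the class and handler names below are assumptions):
# an AST node records which codegen/eval module handles it (_class) and which
# function inside that module to call (_name), so dispatch is a dynamic import
# followed by a getattr.
#
#   node = AST("Statement", "assign")  # expects Compiler.VM.Codegen.Statement.assign,
#                                      # Compiler.ASM.Codegen.Statement.assign and
#                                      # Interpreter.Eval.Statement.assign to exist
#   node.compile_vm(commands, data)    # -> Compiler.VM.Codegen.Statement.assign(commands, data, node)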
|
the-stack_0_18530 | """
Simple example that clips the highway network in an OSM file to a bounding box.
Shows how to write handlers that collect the relevant nodes and ways and write them out.
"""
import osmium as o
import argparse
class NodeRetrievingHandler(o.SimpleHandler):
def __init__(self, min_lat, min_lng, max_lat, max_lng,):
super(NodeRetrievingHandler, self).__init__()
self.nodes = set()
self.min_lat = min_lat
self.min_lng = min_lng
self.max_lat = max_lat
self.max_lng = max_lng
def way(self, w):
if 'highway' in w.tags:
need_add = False
for n in w.nodes:
if contains(self.min_lat, self.min_lng, self.max_lat, self.max_lng, n.lat, n.lon):
need_add = True
break
if need_add:
for n in w.nodes:
self.nodes.add(n.ref)
class HighwayRetrievingHandler(o.SimpleHandler):
def __init__(self, min_lat, min_lng, max_lat, max_lng, nodes, writer):
super(HighwayRetrievingHandler, self).__init__()
self.min_lat = min_lat
self.min_lng = min_lng
self.max_lat = max_lat
self.max_lng = max_lng
self.nodes = nodes
self.writer = writer
def node(self, n):
if n.id in self.nodes:
self.writer.add_node(n)
def way(self, w):
need_add = False
try:
if 'highway' in w.tags:
for n in w.nodes:
if contains(self.min_lat, self.min_lng, self.max_lat, self.max_lng, n.lat, n.lon):
need_add = True
break
if need_add:
self.writer.add_way(w)
except o.InvalidLocationError:
# A location error might occur if the osm file is an extract
# where nodes of ways near the boundary are missing.
print("WARNING: way %d incomplete. Ignoring." % w.id)
def contains(min_lat, min_lng, max_lat, max_lng, lat, lng):
return min_lat <= lat < max_lat and min_lng <= lng < max_lng
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--min_lat', type=float, default=39.8451, help='the min lat of the interested region')
parser.add_argument('--min_lng', type=float, default=116.2810, help='the min lng of the interested region')
parser.add_argument('--max_lat', type=float, default=39.9890, help='the max lat of the interested region')
parser.add_argument('--max_lng', type=float, default=116.4684, help='the max lng of the interested region')
parser.add_argument('--input_path', help='the input path of the original osm data')
parser.add_argument('--output_path', help='the output path of the clipped osm data')
opt = parser.parse_args()
print(opt)
# go through the ways to find all relevant nodes
nh = NodeRetrievingHandler(opt.min_lat, opt.min_lng, opt.max_lat, opt.max_lng)
nh.apply_file(opt.input_path, locations=True)
# go through the file again and write out the data
writer = o.SimpleWriter(opt.output_path)
hh = HighwayRetrievingHandler(opt.min_lat, opt.min_lng, opt.max_lat, opt.max_lng, nh.nodes, writer)
hh.apply_file(opt.input_path, locations=True)
writer.close()
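# Example invocation (a sketch; the script and file names are placeholders):
#
#   python clip_osm.py --input_path beijing.osm.pbf --output_path beijing_clip.osm \
#       --min_lat 39.8451 --min_lng 116.2810 --max_lat 39.9890 --max_lng 116.4684
#
# The first pass collects the ids of nodes belonging to highways that touch the
# bounding box; the second pass writes those nodes and ways to the output file.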
|
the-stack_0_18532 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import io
import json
import os
import six
import unicodedata
from shutil import copyfile
from typing import Iterable, Iterator, Optional, List, Any, Callable, Union
from paddle.utils import try_import
from paddlenlp.utils.downloader import get_path_from_url, COMMUNITY_MODEL_PREFIX
from paddlenlp.utils.env import MODEL_HOME
from paddlenlp.utils.log import logger
from dataclasses import dataclass, field
try:
from functools import lru_cache
except ImportError:
from backports.functools_lru_cache import lru_cache
from ..data.vocab import Vocab
from .utils import InitTrackerMeta, fn_args_to_dict
from collections import OrderedDict
__all__ = [
'PretrainedTokenizer', 'BPETokenizer', 'tokenize_chinese_chars',
'is_chinese_char'
]
def convert_to_unicode(text):
"""
Converts `text` to Unicode (if it's not already), assuming utf-8 input.
Args:
text (str|bytes): Text to be converted to unicode.
Returns:
str: converted text.
"""
if isinstance(text, str):
return text
elif isinstance(text, bytes):
return text.decode("utf-8", "ignore")
else:
raise ValueError("Unsupported string type: %s" % (type(text)))
def whitespace_tokenize(text):
"""
    Runs basic whitespace cleaning and splitting on a piece of text.
Args:
text (str): Text to be tokened.
Returns:
list(str): Token list.
"""
text = text.strip()
if not text:
return []
tokens = text.split()
return tokens
def _is_whitespace(char):
"""
Checks whether `chars` is a whitespace character.
"""
    # \t, \n, and \r are technically control characters but we treat them
    # as whitespace since they are generally considered as such.
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
cat = unicodedata.category(char)
if cat == "Zs":
return True
return False
def _is_control(char):
"""Checks whether `chars` is a control character."""
# These are technically control characters but we count them as whitespace
# characters.
if char == "\t" or char == "\n" or char == "\r":
return False
cat = unicodedata.category(char)
if cat.startswith("C"):
return True
return False
def _is_punctuation(char):
"""Checks whether `chars` is a punctuation character."""
cp = ord(char)
# We treat all non-letter/number ASCII as punctuation.
# Characters such as "^", "$", and "`" are not in the Unicode
# Punctuation class but we treat them as punctuation anyways, for
# consistency.
if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or
(cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
def is_chinese_char(cp):
"""Checks whether CP is the codepoint of a CJK character."""
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and are handled
    # like all of the other languages.
if ((cp >= 0x4E00 and cp <= 0x9FFF) or #
(cp >= 0x3400 and cp <= 0x4DBF) or #
(cp >= 0x20000 and cp <= 0x2A6DF) or #
(cp >= 0x2A700 and cp <= 0x2B73F) or #
(cp >= 0x2B740 and cp <= 0x2B81F) or #
(cp >= 0x2B820 and cp <= 0x2CEAF) or
(cp >= 0xF900 and cp <= 0xFAFF) or #
(cp >= 0x2F800 and cp <= 0x2FA1F)): #
return True
return False
@dataclass(frozen=True, eq=True)
class AddedToken:
"""
    AddedToken represents a token to be added to a Tokenizer. An AddedToken can have special options defining the
    way it should behave.
"""
content: str = field(default_factory=str)
single_word: bool = False
lstrip: bool = False
rstrip: bool = False
normalized: bool = True
def __getstate__(self):
return self.__dict__
class Trie:
"""
    Trie in Python. Creates a Trie out of a list of words. The trie is used to split on `added_tokens` in one pass.
    Loose reference: https://en.wikipedia.org/wiki/Trie
"""
def __init__(self):
self.data = {}
def add(self, word: str):
"""
Passes over every char (utf-8 char) on word and recursively adds it to the internal `data` trie representation.
The special key `""` is used to represent termination.
This function is idempotent, adding twice the same word will leave the trie unchanged
Example::
>>> trie = Trie()
>>> trie.add("Hello 友達")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}}
>>> trie.add("Hello")
>>> trie.data
{"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}}
"""
if not word:
return
ref = self.data
for char in word:
            ref = ref.setdefault(char, {})
ref[""] = 1
def split(self, text: str) -> List[str]:
"""
        Will look for the words added to the trie within `text`. Output is the original string split along the
        boundaries of the words found.
        This trie will match the longest possible word first!
Example::
>>> trie = Trie()
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS] This is a extra_id_100"]
>>> trie.add("[CLS]")
>>> trie.add("extra_id_1")
>>> trie.add("extra_id_100")
>>> trie.split("[CLS] This is a extra_id_100")
["[CLS]", " This is a ", "extra_id_100"]
"""
# indexes are counted left of the chars index.
# "hello", index 0, is left of h, index 1 is between h and e.
# index 5 is right of the "o".
# States are going to capture every possible start (indexes as above)
# as keys, and have as values, a pointer to the position in the trie
# where we're at. This is a partial match for now.
# This enables to keep track of multiple matches while we're iterating
# the string
# If the trie contains, "blowing", and "lower" and we encounter the
# string "blower", we need to split into ["b", "lower"].
# This is where we need to keep track of multiple possible starts.
states = OrderedDict()
# This will contain every indices where we need
# to cut.
# We force to cut at offset 0 and len(text) (added later)
offsets = [0]
# This is used by the lookahead which needs to skip over
# some text where the full match exceeded the place in the initial
# for loop
skip = None
# Main loop, Giving this algorithm O(n) complexity
for current, current_char in enumerate(text):
if skip and current < skip:
# Prevents the lookahead for matching twice
# like extra_id_100 and id_100
continue
# This will track every state
# that stop matching, we need to stop tracking them.
# If we look at "lowball", we're going to match "l" (add it to states), "o", "w", then
# fail on "b", we need to remove 0 from the valid states.
to_remove = set()
# Whenever we found a match, we need to drop everything
# this is a greedy algorithm, it will match on the first found token
reset = False
# In this case, we already have partial matches (But unfinished)
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
# Lookahead to match longest first
# Important in case of extra_id_1 vs extra_id_100
# Here we are also actively looking for other earlier partial
# matches
# "[CLS]", "L", we need to match CLS even if L is special
for lookstart, looktrie_pointer in states.items():
if lookstart > start:
# This partial match is later, we can stop looking
break
elif lookstart < start:
# This partial match is earlier, the trie pointer
# was already updated, so index is + 1
lookahead_index = current + 1
end = current + 1
else:
# Here lookstart == start and
# looktrie_pointer == trie_pointer
# It wasn't updated yet so indices are current ones
lookahead_index = current
end = current
next_char = text[
lookahead_index] if lookahead_index < len(
text) else None
while next_char in looktrie_pointer:
looktrie_pointer = looktrie_pointer[next_char]
lookahead_index += 1
if "" in looktrie_pointer:
start = lookstart
end = lookahead_index
skip = lookahead_index
if lookahead_index == len(text):
# End of string
break
next_char = text[lookahead_index]
# End lookahead
# Storing and resetting
offsets.append(start)
offsets.append(end)
reset = True
break
elif current_char in trie_pointer:
# The current character being looked at has a match within the trie
# update the pointer (it will be stored back into states later).
trie_pointer = trie_pointer[current_char]
# Storing back the new pointer into the states.
# Partial matches got longer by one.
states[start] = trie_pointer
else:
# The new character has not match in the trie, we need
# to stop keeping track of this partial match.
# We can't do it directly within the loop because of how
# python iteration works
to_remove.add(start)
# Either clearing the full start (we found a real match)
# Or clearing only the partial matches that didn't work.
if reset:
states = {}
else:
for start in to_remove:
del states[start]
# If this character is a starting character within the trie
# start keeping track of this partial match.
if current_char in self.data:
states[current] = self.data[current_char]
# We have a cut at the end with states.
for start, trie_pointer in states.items():
if "" in trie_pointer:
# This is a final match, we need to reset and
# store the results in `offsets`.
end = len(text)
offsets.append(start)
offsets.append(end)
# Longest cut is always the one with lower start so the first
# item so we need to break.
break
return self.cut_text(text, offsets)
def cut_text(self, text, offsets):
# We have all the offsets now, we just need to do the actual splitting.
# We need to eventually add the first part of the string and the eventual
# last part.
offsets.append(len(text))
tokens = []
start = 0
for end in offsets:
if start > end:
logger.error(
"There was a bug in Trie algorithm in tokenization. Attempting to recover. Please report it anyway."
)
continue
elif start == end:
# This might happen if there's a match at index 0
# we're also preventing zero-width cuts in case of two
# consecutive matches
continue
tokens.append(text[start:end])
start = end
return tokens
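# Quick illustration (not part of the class; consistent with the docstring
# examples above) of the longest-match-first behaviour of split():
#
#   trie = Trie()
#   trie.add("extra_id_1")
#   trie.add("extra_id_100")
#   trie.split("x extra_id_100 y")   # -> ["x ", "extra_id_100", " y"]
#                                    # "extra_id_100" wins over "extra_id_1"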
def tokenize_chinese_chars(text):
"""Adds whitespace around any CJK character."""
output = []
buff = ""
for char in text:
cp = ord(char)
if is_chinese_char(cp):
if buff != "":
output.append(buff)
buff = ""
output.append(char)
else:
buff += char
if buff != "":
output.append(buff)
return output
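# Illustration (a sketch, consistent with the implementation above): CJK
# characters become single-element chunks while non-CJK runs stay together.
#
#   tokenize_chinese_chars("今天weather不错")
#   # -> ["今", "天", "weather", "不", "错"]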
@six.add_metaclass(InitTrackerMeta)
class PretrainedTokenizer(object):
"""
The base class for all pretrained tokenizers. It mainly provides common methods
for loading (construction and loading) and saving pretrained tokenizers. Loading
and saving also rely on the following class attributes which should be overridden
by derived classes accordingly:
- **tokenizer_config_file** (str): Represents the file name of tokenizer
configuration for configuration saving and loading in local file system.
The value is `tokenizer_config.json`.
- **resource_files_names** (dict): Represents resources to specific file
names mapping for resource saving and loading in local file system. The
keys of dict representing resource items should be argument names in
tokenizer's `__init__` method, and the values are file names for saving
and loading corresponding resources. The mostly used resources here are
vocabulary file and sentence-piece model file.
- **pretrained_init_configuration** (dict): Provides the tokenizer configurations
of built-in pretrained tokenizers (contrasts to tokenizers in local file
system). It has pretrained tokenizer names as keys (the same as pretrained
model names, such as `bert-base-uncased`), and the values are dict preserving
corresponding configuration for tokenizer initialization.
- **pretrained_resource_files_map** (dict): Provides resource URLs of built-in
pretrained tokenizers (contrasts to tokenizers in local file system). It
has the same keys as `resource_files_names`, and the values are also `dict`
mapping specific pretrained tokenizer names (such as `bert-base-uncased`)
to corresponding resource URLs.
Moreover, methods common to tokenizers for tokenization, token/id conversion
and encoding as model inputs are also provided here.
Besides, metaclass `InitTrackerMeta` is used to create `PretrainedTokenizer`,
by which subclasses can track arguments for initialization automatically
and expose special tokens initialization used as attributes.
"""
tokenizer_config_file = "tokenizer_config.json"
pretrained_init_configuration = {}
resource_files_names = {} # keys are arguments of __init__
pretrained_resource_files_map = {}
padding_side = 'right'
pad_token_type_id = 0
special_tokens_map_extended = {}
_additional_special_tokens = []
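    # Sketch of how a derived tokenizer is expected to override the class
    # attributes above (the names and URLs below are purely illustrative):
    #
    #   class MyTokenizer(PretrainedTokenizer):
    #       resource_files_names = {"vocab_file": "vocab.txt"}
    #       pretrained_resource_files_map = {
    #           "vocab_file": {"my-model-base": "https://example.com/vocab.txt"}
    #       }
    #       pretrained_init_configuration = {"my-model-base": {"do_lower_case": True}}
    #
    #       def __init__(self, vocab_file, do_lower_case=True, unk_token="[UNK]"):
    #           self.vocab = self.load_vocabulary(vocab_file, unk_token=unk_token)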
def _wrap_init(self, original_init, *args, **kwargs):
"""
It would be hooked after `__init__` to add specials tokens (arguments of
`__init__` whose name ends with `_token`) as attributes of the tokenizer
instance.
"""
# expose tokens as attributes
self.padding_side = kwargs.pop("padding_side", self.padding_side)
assert self.padding_side in [
"right", "left"
], "Padding side must be either left or right"
init_dict = fn_args_to_dict(original_init, *args, **kwargs)
self.added_tokens_encoder = {}
self.added_tokens_decoder = {}
# TODO(guosheng): Use OrderedDict, otherwise `all_special_tokens` returns
# a list without order.
self.tokens_trie = Trie()
self.special_tokens_map = {}
for identifier, value in init_dict.items():
if identifier.endswith('_token'):
self.special_tokens_map[identifier] = value
if identifier == "additional_special_tokens":
assert isinstance(value, (
list, tuple)), f"Value {value} is not a list or tuple"
self._additional_special_tokens += value
assert all(
isinstance(t, (str, AddedToken)) for t in
value), "One of the tokens is not a string or an AddedToken"
self.special_tokens_map[
identifier] = self._additional_special_tokens
self.add_tokens(self.all_special_tokens, special_tokens=True)
additional_special_tokens = []
for token in self.all_special_tokens:
if isinstance(token, AddedToken):
token = token.content
if token not in self.special_tokens_map.values():
additional_special_tokens.append(token)
self.special_tokens_map[
"additional_special_tokens"] = additional_special_tokens
def _build_special_tokens_map_extended(self, **kwargs):
for identifier, token in kwargs.items():
if identifier.endswith('_token') and isinstance(token, AddedToken):
self.special_tokens_map_extended[identifier] = token
def __call__(self,
text,
text_pair=None,
max_seq_len: Optional[int]=None,
stride=0,
is_split_into_words=False,
pad_to_max_seq_len=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=True,
return_attention_mask=False,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False):
"""
Performs tokenization and uses the tokenized tokens to prepare model
inputs. It supports sequence or sequence pair as input, and batch input
is allowed. `self.encode()` or `self.batch_encode()` would be called
separately for single or batch input depending on input format and
`is_split_into_words` argument.
Args:
text (str, List[str] or List[List[str]]):
The sequence or batch of sequences to be processed. One sequence
is a string or a list of strings depending on whether it has been
pretokenized. If each sequence is provided as a list of strings
(pretokenized), you must set `is_split_into_words` as `True` to
disambiguate with a batch of sequences.
text_pair (str, List[str] or List[List[str]], optional):
Same as `text` argument, while it represents for the latter
sequence of the sequence pair.
max_seq_len (int, optional):
If set to a number, will limit the total sequence returned so
that it has a maximum length. If there are overflowing tokens,
those overflowing tokens will be added to the returned dictionary
when `return_overflowing_tokens` is `True`. Defaults to `None`.
stride (int, optional):
Only available for batch input of sequence pair and mainly for
                question answering usage. When used for QA, `text` represents questions
and `text_pair` represents contexts. If `stride` is set to a
positive number, the context will be split into multiple spans
where `stride` defines the number of (tokenized) tokens to skip
from the start of one span to get the next span, thus will produce
a bigger batch than inputs to include all spans. Moreover, 'overflow_to_sample'
and 'offset_mapping' preserving the original example and position
information will be added to the returned dictionary. Defaults to 0.
pad_to_max_seq_len (bool, optional):
If set to `True`, the returned sequences would be padded up to
`max_seq_len` specified length according to padding side
(`self.padding_side`) and padding token id. Defaults to `False`.
truncation_strategy (str, optional):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence
until the input is under `max_seq_len` starting from the longest
one at each token (when there is a pair of input sequences).
- 'only_first': Only truncate the first sequence.
- 'only_second': Only truncate the second sequence.
- 'do_not_truncate': Do not truncate (raise an error if the input
sequence is longer than `max_seq_len`).
Defaults to 'longest_first'.
return_position_ids (bool, optional):
Whether to include tokens position ids in the returned dictionary.
Defaults to `False`.
return_token_type_ids (bool, optional):
Whether to include token type ids in the returned dictionary.
Defaults to `True`.
return_attention_mask (bool, optional):
Whether to include the attention mask in the returned dictionary.
Defaults to `False`.
return_length (bool, optional):
Whether to include the length of each encoded inputs in the
returned dictionary. Defaults to `False`.
return_overflowing_tokens (bool, optional):
Whether to include overflowing token information in the returned
dictionary. Defaults to `False`.
return_special_tokens_mask (bool, optional):
Whether to include special tokens mask information in the returned
dictionary. Defaults to `False`.
Returns:
dict or list[dict] (for batch input):
The dict has the following optional items:
- **input_ids** (list[int]): List of token ids to be fed to a model.
- **position_ids** (list[int], optional): List of token position ids to be
fed to a model. Included when `return_position_ids` is `True`
- **token_type_ids** (list[int], optional): List of token type ids to be
fed to a model. Included when `return_token_type_ids` is `True`.
- **attention_mask** (list[int], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `return_attention_mask` is `True`.
- **seq_len** (int, optional): The input_ids length. Included when `return_length`
is `True`.
- **overflowing_tokens** (list[int], optional): List of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **num_truncated_tokens** (int, optional): The number of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **special_tokens_mask** (list[int], optional): List of integers valued 0 or 1,
with 0 specifying special added tokens and 1 specifying sequence tokens.
Included when `return_special_tokens_mask` is `True`.
- **offset_mapping** (list[int], optional): list of pair preserving the
index of start and end char in original input for each token.
For a special token, the index pair is `(0, 0)`. Included when
`stride` works.
- **overflow_to_sample** (int, optional): Index of example from which this
feature is generated. Included when `stride` works.
"""
# Input type checking for clearer error
assert isinstance(text, str) or (
isinstance(text, (list, tuple)) and (len(text) == 0 or (
isinstance(text[0], str) or
(isinstance(text[0], (list, tuple)) and
(len(text[0]) == 0 or isinstance(text[0][0], str)))))
), ("text input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples).")
assert (text_pair is None or isinstance(text_pair, str) or (
isinstance(text_pair, (list, tuple)) and (len(text_pair) == 0 or (
isinstance(text_pair[0], str) or
(isinstance(text_pair[0], (list, tuple)) and
(len(text_pair[0]) == 0 or isinstance(text_pair[0][0], str)))))
)), (
"text_pair input must of type `str` (single example), `List[str]` (batch or single pretokenized example) "
"or `List[List[str]]` (batch of pretokenized examples).")
is_batched = bool(
(not is_split_into_words and isinstance(text, (list, tuple))) or
(is_split_into_words and isinstance(text, (list, tuple)) and
text and isinstance(text[0], (list, tuple))))
if is_batched:
batch_text_or_text_pairs = list(zip(
text, text_pair)) if text_pair is not None else text
return self.batch_encode(
batch_text_or_text_pairs=batch_text_or_text_pairs,
max_seq_len=max_seq_len,
stride=stride,
is_split_into_words=is_split_into_words,
pad_to_max_seq_len=pad_to_max_seq_len,
truncation_strategy=truncation_strategy,
return_position_ids=return_position_ids,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_length=return_length,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask)
else:
return self.encode(
text=text,
text_pair=text_pair,
max_seq_len=max_seq_len,
pad_to_max_seq_len=pad_to_max_seq_len,
truncation_strategy=truncation_strategy,
return_position_ids=return_position_ids,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_length=return_length,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask)
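    # Minimal usage sketch (illustrative; the concrete subclass, model name and
    # inputs are assumptions, see the `from_pretrained` docstring below):
    #
    #   tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    #   single = tokenizer("He was a puppeteer")
    #   # single["input_ids"], single["token_type_ids"], ...
    #   batch = tokenizer(["He was a puppeteer", "It is nice"], max_seq_len=16,
    #                     pad_to_max_seq_len=True, return_attention_mask=True)
    #   # batch is a list of dicts, one per example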
@property
def all_special_tokens(self):
"""
list: All the special tokens ('<unk>', '<cls>'...) corresponding to
        special token arguments in `__init__` (arguments ending with '_token').
"""
all_toks = []
set_attr = self.special_tokens_map
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (
list, tuple)) else [attr_value])
all_toks = list(OrderedDict.fromkeys(all_toks))
return all_toks
@property
def all_special_tokens_extended(self):
"""
list: All the special tokens ('<unk>', '<cls>'...) corresponding to
        special token arguments in `__init__` (arguments ending with '_token').
"""
all_toks = []
set_attr = self.special_tokens_map_extended
for attr_value in set_attr.values():
all_toks = all_toks + (list(attr_value) if isinstance(attr_value, (
list, tuple)) else [attr_value])
all_toks = list(set(all_toks))
return all_toks
@property
def all_special_ids(self):
"""
list: All the token ids corresponding to all the special tokens.
"""
all_toks = self.all_special_tokens
all_ids = self.convert_tokens_to_ids(all_toks)
return all_ids
def __len__(self):
"""
Size of the full vocabulary with the added tokens.
"""
return self.vocab_size + len(self.added_tokens_encoder)
def _add_tokens(self, new_tokens, special_tokens=True):
if special_tokens:
add_special_tokens = []
add_special_tokens_extended = []
for token in new_tokens:
if isinstance(token, AddedToken):
if token.content not in add_special_tokens:
self.tokens_trie.add(token.content)
add_special_tokens_extended.append(token)
add_special_tokens.append(token.content)
if token.content != self.unk_token and self.convert_tokens_to_ids(
token.content) == self.convert_tokens_to_ids(
self.unk_token):
self.added_tokens_encoder[token.content] = len(self)
self.added_tokens_decoder[len(self) -
1] = token.content
else:
if token not in add_special_tokens:
self.tokens_trie.add(token)
add_special_tokens.append(token)
if token != self.unk_token and self.convert_tokens_to_ids(
token) == self.convert_tokens_to_ids(
self.unk_token):
self.added_tokens_encoder[token] = len(self)
self.added_tokens_decoder[len(self) - 1] = token
self.special_tokens_map_extended[
"additional_special_tokens"] = add_special_tokens_extended
else:
for token in new_tokens:
if not isinstance(token, str):
raise TypeError(
f"Token {token} is not a string but a {type(token)}.")
if hasattr(self, "do_lower_case") and self.do_lower_case:
token = token.lower()
if token not in self.added_tokens_encoder and token != self.unk_token and self.convert_tokens_to_ids(
token) == self.convert_tokens_to_ids(self.unk_token):
self.added_tokens_encoder[token] = len(self)
self.added_tokens_decoder[len(self) - 1] = token
return len(self.added_tokens_encoder)
def add_tokens(self, new_tokens, special_tokens=True):
if not new_tokens:
return 0
if not isinstance(new_tokens, (list, tuple)):
new_tokens = [new_tokens]
return self._add_tokens(new_tokens, special_tokens=special_tokens)
def prepare_for_tokenization(self, text, **kwargs):
return text
def tokenize(self, text, **kwargs):
all_special_tokens_extended = dict(
(t.content, t) for t in self.all_special_tokens_extended
if isinstance(t, AddedToken))
no_split_token = set(self.all_special_tokens)
text = self.prepare_for_tokenization(text, **kwargs)
tokens = self.tokens_trie.split(text)
for i, token in enumerate(tokens):
if token in no_split_token:
tok_extended = all_special_tokens_extended.get(token, None)
left = tokens[i - 1] if i > 0 else None
right = tokens[i + 1] if i < len(tokens) - 1 else None
if isinstance(tok_extended, AddedToken):
if tok_extended.rstrip and right:
# A bit counter-intuitive but we strip the left of the string
# since tok_extended.rstrip means the special token is eating all white spaces on its right
tokens[i + 1] = right.lstrip()
# Strip white spaces on the left
if tok_extended.lstrip and left:
tokens[i - 1] = left.rstrip() # Opposite here
else:
# We strip left and right by default
if right:
tokens[i + 1] = right.lstrip()
if left:
tokens[i - 1] = left.rstrip()
tokenized_text = []
for token in tokens:
if not token:
continue
if token in no_split_token:
tokenized_text.append(token)
else:
tokenized_text.extend(self._tokenize(token, **kwargs))
return tokenized_text
def convert_tokens_to_ids(self, tokens):
if tokens is None:
return None
if isinstance(tokens, str):
if tokens in self.added_tokens_encoder:
return self.added_tokens_encoder[tokens]
else:
return self._convert_token_to_id(tokens)
ids = []
for token in tokens:
if token in self.added_tokens_encoder:
ids.append(self.added_tokens_encoder[token])
else:
ids.append(self._convert_token_to_id(token))
return ids
def _convert_token_to_id(self, token):
return self.vocab.to_indices(token)
def convert_tokens_to_string(self, tokens):
"""
Converts a sequence of tokens (list of string) to a single string by
using ``' '.join(tokens)`` .
Args:
tokens (list[str]): A sequence of tokens.
Returns:
str: Converted string.
"""
return " ".join(tokens)
def convert_ids_to_tokens(self, ids, skip_special_tokens=False):
if isinstance(ids, int):
if ids in self.added_tokens_decoder:
return self.added_tokens_decoder[ids]
else:
return self._convert_id_to_token(ids)
tokens = []
for index in ids:
if skip_special_tokens and index in self.all_special_ids:
continue
if index in self.added_tokens_decoder:
tokens.append(self.added_tokens_decoder[index])
else:
tokens.append(self._convert_id_to_token(index))
return tokens
def _convert_id_to_token(self, index):
return self.vocab.to_tokens(index)
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
"""
Creates an instance of `PretrainedTokenizer`. Related resources are loaded
by specifying name of a built-in pretrained model, or a community-contributed
pretrained model, or a local file directory path.
Args:
pretrained_model_name_or_path (str): Name of pretrained model or dir path
to load from. The string can be:
- Name of built-in pretrained model
- Name of a community-contributed pretrained model.
- Local directory path which contains tokenizer related resources
and tokenizer config file ("tokenizer_config.json").
*args (tuple): position arguments for model `__init__`. If provided,
use these as position argument values for tokenizer initialization.
**kwargs (dict): keyword arguments for model `__init__`. If provided,
use these to update pre-defined keyword argument values for tokenizer
initialization.
Returns:
PretrainedTokenizer: An instance of `PretrainedTokenizer`.
Example:
.. code-block::
from paddlenlp.transformers import BertTokenizer
# Name of built-in pretrained model
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Name of community-contributed pretrained model
tokenizer = BertTokenizer.from_pretrained('yingyibiao/bert-base-uncased-sst-2-finetuned')
# Load from local directory path
tokenizer = BertTokenizer.from_pretrained('./my_bert/')
"""
pretrained_models = list(cls.pretrained_init_configuration.keys())
vocab_files = {}
init_configuration = {}
# From built-in pretrained models
if pretrained_model_name_or_path in pretrained_models:
for file_id, map_list in cls.pretrained_resource_files_map.items():
vocab_files[file_id] = map_list[pretrained_model_name_or_path]
init_configuration = copy.deepcopy(
cls.pretrained_init_configuration[
pretrained_model_name_or_path])
# From local dir path
elif os.path.isdir(pretrained_model_name_or_path):
for file_id, file_name in cls.resource_files_names.items():
full_file_name = os.path.join(pretrained_model_name_or_path,
file_name)
if os.path.isfile(full_file_name):
vocab_files[file_id] = full_file_name
vocab_files["tokenizer_config_file"] = os.path.join(
pretrained_model_name_or_path, cls.tokenizer_config_file)
else:
# Assuming from community-contributed pretrained models
for file_id, file_name in cls.resource_files_names.items():
full_file_name = os.path.join(COMMUNITY_MODEL_PREFIX,
pretrained_model_name_or_path,
file_name)
vocab_files[file_id] = full_file_name
vocab_files["tokenizer_config_file"] = os.path.join(
COMMUNITY_MODEL_PREFIX, pretrained_model_name_or_path,
cls.tokenizer_config_file)
default_root = os.path.join(MODEL_HOME, pretrained_model_name_or_path)
resolved_vocab_files = {}
for file_id, file_path in vocab_files.items():
if file_path is None or os.path.isfile(file_path):
resolved_vocab_files[file_id] = file_path
continue
path = os.path.join(default_root, file_path.split('/')[-1])
if os.path.exists(path):
logger.info("Already cached %s" % path)
resolved_vocab_files[file_id] = path
else:
logger.info("Downloading %s and saved to %s" %
(file_path, default_root))
try:
resolved_vocab_files[file_id] = get_path_from_url(
file_path, default_root)
except RuntimeError as err:
logger.error(err)
raise RuntimeError(
f"Can't load tokenizer for '{pretrained_model_name_or_path}'.\n"
f"Please make sure that '{pretrained_model_name_or_path}' is:\n"
"- a correct model-identifier of built-in pretrained models,\n"
"- or a correct model-identifier of community-contributed pretrained models,\n"
"- or the correct path to a directory containing relevant tokenizer files.\n"
)
# Prepare tokenizer initialization kwargs
# Did we saved some inputs and kwargs to reload ?
tokenizer_config_file = resolved_vocab_files.pop(
"tokenizer_config_file", None)
if tokenizer_config_file is not None:
with io.open(tokenizer_config_file, encoding="utf-8") as f:
init_kwargs = json.load(f)
else:
init_kwargs = init_configuration
# position args are stored in kwargs, maybe better not include
init_args = init_kwargs.pop("init_args", ())
init_kwargs.pop("init_class", None)
# Update with newly provided args and kwargs
init_args = init_args if not args else args
init_kwargs.update(kwargs)
# Merge resolved_vocab_files arguments in init_kwargs if not including.
# Maybe need more ways to load resources.
for args_name, file_path in resolved_vocab_files.items():
# when `pretrained_model_name_or_path` is a pretrained model name,
# use pretrained_init_configuration as `init_kwargs` to init which
# does not include the vocab file in it, thus add vocab file into
# args.
if args_name not in init_kwargs:
init_kwargs[args_name] = file_path
# when `pretrained_model_name_or_path` is a pretrained model dir,
# use tokenizer_config_file.json as `init_kwargs` to init which
# does include a vocab file path in it. However, if the vocab file
# path included in json does not exist, such as was deleted, to make
# it still work, use the vocab file under this dir.
elif not os.path.isfile(init_kwargs[args_name]) and os.path.isfile(
file_path):
init_kwargs[args_name] = file_path
# TODO(guosheng): avoid reduplication of position args and key word args
tokenizer = cls(*init_args, **init_kwargs)
return tokenizer
def save_pretrained(self, save_directory):
"""
Save tokenizer configuration and related resources to files under
`save_directory`. The tokenizer configuration would be saved into
`tokenizer_config_file` indicating file (thus `tokenizer_config.json`),
and resources would be saved into `resource_files_names` indicating files
by using `self.save_resources(save_directory)`.
The `save_directory` can be used in `from_pretrained` as argument value
of `pretrained_model_name_or_path` to re-load the tokenizer.
Args:
save_directory (str): Directory to save files into.
Example:
.. code-block::
from paddlenlp.transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
tokenizer.save_pretrained('trained_model')
# reload from save_directory
tokenizer = BertTokenizer.from_pretrained('trained_model')
"""
assert not os.path.isfile(
save_directory
), "Saving directory ({}) should be a directory, not a file".format(
save_directory)
os.makedirs(save_directory, exist_ok=True)
tokenizer_config_file = os.path.join(save_directory,
self.tokenizer_config_file)
# init_config is set in metaclass created `__init__`,
tokenizer_config = self.init_config
with io.open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))
self.save_resources(save_directory)
def save_resources(self, save_directory):
"""
Save tokenizer related resources to `resource_files_names` indicating
files under `save_directory` by copying directly. Override it if necessary.
Args:
save_directory (str): Directory to save files into.
"""
for name, file_name in self.resource_files_names.items():
src_path = self.init_config[name]
dst_path = os.path.join(save_directory, file_name)
if os.path.abspath(src_path) != os.path.abspath(dst_path):
copyfile(src_path, dst_path)
@staticmethod
def load_vocabulary(filepath,
unk_token=None,
pad_token=None,
bos_token=None,
eos_token=None,
**kwargs):
"""
Instantiate an instance of `Vocab` from a file reserving all tokens
by using `Vocab.from_dict`. The file contains a token per line, and the
line number would be the index of corresponding token.
Args:
filepath (str): path of file to construct vocabulary.
unk_token (str): special token for unknown token. If no need, it also
could be `None`. Defaults to `None`.
pad_token (str): special token for padding token. If no need, it also
could be `None`. Defaults to `None`.
bos_token (str): special token for bos token. If no need, it also
could be `None`. Defaults to `None`.
eos_token (str): special token for eos token. If no need, it also
could be `None`. Defaults to `None`.
**kwargs (dict): keyword arguments for `Vocab.from_dict`.
Returns:
Vocab: An instance of `Vocab`.
"""
token_to_idx = {}
with io.open(filepath, 'r', encoding='utf-8') as f:
for index, line in enumerate(f):
token = line.rstrip('\n')
token_to_idx[token] = int(index)
vocab = Vocab.from_dict(
token_to_idx,
unk_token=unk_token,
pad_token=pad_token,
bos_token=bos_token,
eos_token=eos_token,
**kwargs)
return vocab
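    # Illustration (a sketch; the file name is hypothetical). Given a vocab file
    # with one token per line:
    #
    #   [PAD]
    #   [UNK]
    #   the
    #
    # load_vocabulary("vocab.txt", unk_token="[UNK]", pad_token="[PAD]") builds a
    # Vocab where "[PAD]" -> 0, "[UNK]" -> 1 and "the" -> 2.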
@staticmethod
def save_vocabulary(filepath, vocab):
"""
Save all tokens to a vocabulary file. The file contains a token per line,
and the line number would be the index of corresponding token.
Args:
filepath (str): File path to be saved to.
vocab (Vocab|dict): The `Vocab` or `dict` instance to be saved.
"""
if isinstance(vocab, Vocab):
tokens = vocab.idx_to_token
else:
tokens = sorted(vocab.keys(), key=lambda token: vocab[token])
with io.open(filepath, 'w', encoding='utf-8') as f:
for token in tokens:
f.write(token + '\n')
def __getattr__(self, name):
if name.endswith('_token'):
return self.special_tokens_map[name]
elif name.endswith('_token_id'):
return self._convert_token_to_id(self.special_tokens_map[name[:-3]])
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, name))
def truncate_sequences(self,
ids,
pair_ids=None,
num_tokens_to_remove=0,
truncation_strategy='longest_first',
stride=0):
"""
Truncates a sequence pair in place to the maximum length.
Args:
ids: list of tokenized input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
pair_ids: Optional second list of input ids. Can be obtained from a string by chaining the
`tokenize` and `convert_tokens_to_ids` methods.
num_tokens_to_remove (:obj:`int`, `optional`, defaults to ``0``):
number of tokens to remove using the truncation strategy
truncation_strategy: string selected in the following options:
- 'longest_first' (default): iteratively reduce the inputs until they fit under max_seq_len,
removing one token at a time from the longer sequence (when there is a pair of input sequences).
Overflowing tokens only contain the overflow from the first sequence.
- 'only_first': only truncate the first sequence. Raises an error if the first sequence is
shorter than or equal to num_tokens_to_remove.
- 'only_second': only truncate the second sequence.
- 'do_not_truncate': do not truncate (raises an error if the input sequence is longer than max_seq_len).
stride (:obj:`int`, `optional`, defaults to ``0``):
If set to a number along with max_seq_len, the overflowing tokens returned will contain some tokens
from the main sequence returned. The value of this argument defines the number of additional tokens.
"""
if num_tokens_to_remove <= 0:
return ids, pair_ids, []
if truncation_strategy == 'longest_first':
overflowing_tokens = []
for _ in range(num_tokens_to_remove):
if pair_ids is None or len(ids) > len(pair_ids):
overflowing_tokens = [ids[-1]] + overflowing_tokens
ids = ids[:-1]
else:
pair_ids = pair_ids[:-1]
window_len = min(len(ids), stride)
if window_len > 0:
overflowing_tokens = ids[-window_len:] + overflowing_tokens
elif truncation_strategy == 'only_first':
assert len(ids) > num_tokens_to_remove
window_len = min(len(ids), stride + num_tokens_to_remove)
overflowing_tokens = ids[-window_len:]
ids = ids[:-num_tokens_to_remove]
elif truncation_strategy == 'only_second':
assert pair_ids is not None and len(pair_ids) > num_tokens_to_remove
window_len = min(len(pair_ids), stride + num_tokens_to_remove)
overflowing_tokens = pair_ids[-window_len:]
pair_ids = pair_ids[:-num_tokens_to_remove]
elif truncation_strategy == 'do_not_truncate':
raise ValueError(
"Input sequences are too long for max_seq_len. Please select a truncation strategy."
)
else:
raise ValueError(
"Truncation_strategy should be selected in ['longest_first', 'only_first', 'only_second', 'do_not_truncate']"
)
return (ids, pair_ids, overflowing_tokens)
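# Illustrative trace of the default 'longest_first' strategy (token ids are
# hypothetical, stride=0): with ids=[1, 2, 3, 4], pair_ids=[5, 6] and
# num_tokens_to_remove=2, one token is popped from the longer list on each pass,
# giving ids=[1, 2], pair_ids=[5, 6] and overflowing_tokens=[3, 4].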
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens.
Should be overridden in a subclass if the model has a special way of building those.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
List[int]: List of input_id with the appropriate special tokens.
"""
if token_ids_1 is None:
return token_ids_0
return token_ids_0 + token_ids_1
def build_offset_mapping_with_special_tokens(self,
offset_mapping_0,
offset_mapping_1=None):
"""
Build offset map from a pair of offset map by concatenating and adding offsets of special tokens.
Should be overridden in a subclass if the model has a special way of building those.
Args:
offset_mapping_0 (List[tuple]):
List of char offsets to which the special tokens will be added.
offset_mapping_1 (List[tuple], optional):
Optional second list of char offsets for offset mapping pairs.
Returns:
List[tuple]: List of char offsets with the appropriate offsets of special tokens.
"""
if offset_mapping_1 is None:
return offset_mapping_0
return offset_mapping_0 + offset_mapping_1
def get_special_tokens_mask(self,
token_ids_0,
token_ids_1=None,
already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``encode`` methods.
Args:
token_ids_0 (List[int]): List of ids of the first sequence.
token_ids_1 (List[int], optional): List of ids of the second sequence.
already_has_special_tokens (bool, optional): Whether or not the token list is already
formatted with special tokens for the model. Defaults to `False`.
Returns:
results (List[int]): The list of integers in the range [0, 1]:
1 for a special token, 0 for a sequence token.
"""
return [0] * ((len(token_ids_1)
if token_ids_1 else 0) + len(token_ids_0))
def create_token_type_ids_from_sequences(self,
token_ids_0,
token_ids_1=None):
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task.
Should be overridden in a subclass if the model has a special way of building those.
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (List[int]):
List of IDs.
token_ids_1 (List[int], optional):
Optional second list of IDs for sequence pairs.
Returns:
List[int]: List of token_type_id according to the given sequence(s).
"""
if token_ids_1 is None:
return len(token_ids_0) * [0]
return [0] * len(token_ids_0) + [1] * len(token_ids_1)
def num_special_tokens_to_add(self, pair):
"""
Returns the number of added tokens when encoding a sequence with special tokens.
Args:
pair (bool, optional):
Whether the number of added tokens should be computed in the case of a sequence pair or a single
sequence. Defaults to `False`.
Returns:
int: Number of special tokens added to sequences.
"""
token_ids_0 = []
token_ids_1 = []
return len(
self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
if pair else None))
def encode(self,
text,
text_pair=None,
max_seq_len=512,
pad_to_max_seq_len=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=True,
return_attention_mask=False,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False):
"""
Performs tokenization and uses the tokenized tokens to prepare model
inputs. It supports sequence or sequence pair as input, and batch input
is not allowed.
Args:
text (str, List[str] or List[int]):
The sequence to be processed. One sequence is a string, a list
of strings, or a list of integers depending on whether it has
been pretokenized and converted to ids.
text_pair (str, List[str] or List[List[str]]):
Same as the `text` argument, but representing the second sequence of
the sequence pair.
max_seq_len (int, optional):
If set to a number, will limit the total sequence returned so
that it has a maximum length. If there are overflowing tokens,
those overflowing tokens will be added to the returned dictionary
when `return_overflowing_tokens` is `True`. Defaults to 512.
stride (int, optional):
Only available for batch input of sequence pair and mainly for
question answering usage. When for QA, `text` represents questions
and `text_pair` represents contexts. If `stride` is set to a
positive number, the context will be split into multiple spans
where `stride` defines the number of (tokenized) tokens to skip
from the start of one span to get the next span, thus will produce
a bigger batch than inputs to include all spans. Moreover, 'overflow_to_sample'
and 'offset_mapping' preserving the original example and position
information will be added to the returned dictionary. Defaults to 0.
pad_to_max_seq_len (bool, optional):
If set to `True`, the returned sequences would be padded up to
`max_seq_len` specified length according to padding side
(`self.padding_side`) and padding token id. Defaults to `False`.
truncation_strategy (str, optional):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence
until the input is under `max_seq_len` starting from the longest
one at each token (when there is a pair of input sequences).
- 'only_first': Only truncate the first sequence.
- 'only_second': Only truncate the second sequence.
- 'do_not_truncate': Do not truncate (raise an error if the input
sequence is longer than `max_seq_len`).
Defaults to 'longest_first'.
return_position_ids (bool, optional):
Whether to include tokens position ids in the returned dictionary.
Defaults to `False`.
return_token_type_ids (bool, optional):
Whether to include token type ids in the returned dictionary.
Defaults to `True`.
return_attention_mask (bool, optional):
Whether to include the attention mask in the returned dictionary.
Defaults to `False`.
return_length (bool, optional):
Whether to include the length of each encoded inputs in the
returned dictionary. Defaults to `False`.
return_overflowing_tokens (bool, optional):
Whether to include overflowing token information in the returned
dictionary. Defaults to `False`.
return_special_tokens_mask (bool, optional):
Whether to include special tokens mask information in the returned
dictionary. Defaults to `False`.
Returns:
dict:
The dict has the following optional items:
- **input_ids** (list[int]): List of token ids to be fed to a model.
- **position_ids** (list[int], optional): List of token position ids to be
fed to a model. Included when `return_position_ids` is `True`
- **token_type_ids** (list[int], optional): List of token type ids to be
fed to a model. Included when `return_token_type_ids` is `True`.
- **attention_mask** (list[int], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `return_attention_mask` is `True`.
- **seq_len** (int, optional): The input_ids length. Included when `return_length`
is `True`.
- **overflowing_tokens** (list[int], optional): List of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **num_truncated_tokens** (int, optional): The number of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **special_tokens_mask** (list[int], optional): List of integers valued 0 or 1,
with 0 specifying special added tokens and 1 specifying sequence tokens.
Included when `return_special_tokens_mask` is `True`.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
ids = get_input_ids(text)
pair_ids = get_input_ids(text_pair) if text_pair is not None else None
pair = bool(pair_ids is not None)
len_ids = len(ids)
len_pair_ids = len(pair_ids) if pair else 0
encoded_inputs = {}
# Truncation: Handle max sequence length
total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(
pair=pair))
if max_seq_len and total_len > max_seq_len:
ids, pair_ids, overflowing_tokens = self.truncate_sequences(
ids,
pair_ids=pair_ids,
num_tokens_to_remove=total_len - max_seq_len,
truncation_strategy=truncation_strategy, )
if return_overflowing_tokens:
encoded_inputs["overflowing_tokens"] = overflowing_tokens
encoded_inputs["num_truncated_tokens"] = total_len - max_seq_len
# Add special tokens
sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(ids,
pair_ids)
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
encoded_inputs[
"special_tokens_mask"] = self.get_special_tokens_mask(ids,
pair_ids)
if return_length:
encoded_inputs["seq_len"] = len(encoded_inputs["input_ids"])
# Check lengths
assert max_seq_len is None or len(encoded_inputs[
"input_ids"]) <= max_seq_len
# Padding
needs_to_be_padded = pad_to_max_seq_len and \
max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len
if needs_to_be_padded:
difference = max_seq_len - len(encoded_inputs["input_ids"])
if self.padding_side == 'right':
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
"input_ids"]) + [0] * difference
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] +
[self.pad_token_type_id] * difference)
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = encoded_inputs[
"special_tokens_mask"] + [1] * difference
encoded_inputs["input_ids"] = encoded_inputs[
"input_ids"] + [self.pad_token_id] * difference
elif self.padding_side == 'left':
if return_attention_mask:
encoded_inputs["attention_mask"] = [0] * difference + [
1
] * len(encoded_inputs["input_ids"])
if return_token_type_ids:
encoded_inputs["token_type_ids"] = (
[self.pad_token_type_id] * difference +
encoded_inputs["token_type_ids"])
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [
1
] * difference + encoded_inputs["special_tokens_mask"]
encoded_inputs["input_ids"] = [
self.pad_token_id
] * difference + encoded_inputs["input_ids"]
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(encoded_inputs[
"input_ids"])
if return_position_ids:
encoded_inputs["position_ids"] = list(
range(len(encoded_inputs["input_ids"])))
return encoded_inputs
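# Usage sketch (assumes a concrete subclass such as BertTokenizer, as in the
# save_pretrained example above); the keys shown are the ones built by encode():
#     tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#     encoded = tokenizer.encode("hello world", max_seq_len=8, return_length=True)
#     encoded["input_ids"]       # token ids with special tokens added
#     encoded["token_type_ids"]  # all zeros for a single sequence
#     encoded["seq_len"]         # length of input_ids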
def batch_encode(self,
batch_text_or_text_pairs,
max_seq_len=512,
pad_to_max_seq_len=False,
stride=0,
is_split_into_words=False,
truncation_strategy="longest_first",
return_position_ids=False,
return_token_type_ids=True,
return_attention_mask=False,
return_length=False,
return_overflowing_tokens=False,
return_special_tokens_mask=False):
"""
Performs tokenization and uses the tokenized tokens to prepare model
inputs. It supports batch inputs of sequence or sequence pair.
Args:
batch_text_or_text_pairs (list):
Each element of the list can be a sequence or a sequence pair, and a
sequence is a string or a list of strings depending on whether it has
been pretokenized. If each sequence is provided as a list of strings
(pretokenized), you must set `is_split_into_words` to `True` to
disambiguate it from a sequence pair.
max_seq_len (int, optional):
If set to a number, will limit the total sequence returned so
that it has a maximum length. If there are overflowing tokens,
those overflowing tokens will be added to the returned dictionary
when `return_overflowing_tokens` is `True`. Defaults to 512.
stride (int, optional):
Only available for batch input of sequence pair and mainly for
question answering usage. When for QA, `text` represents questions
and `text_pair` represents contexts. If `stride` is set to a
positive number, the context will be split into multiple spans
where `stride` defines the number of (tokenized) tokens to skip
from the start of one span to get the next span, thus will produce
a bigger batch than inputs to include all spans. Moreover, 'overflow_to_sample'
and 'offset_mapping' preserving the original example and position
information will be added to the returned dictionary. Defaults to 0.
pad_to_max_seq_len (bool, optional):
If set to `True`, the returned sequences would be padded up to
`max_seq_len` specified length according to padding side
(`self.padding_side`) and padding token id. Defaults to `False`.
truncation_strategy (str, optional):
String selected in the following options:
- 'longest_first' (default) Iteratively reduce the inputs sequence
until the input is under `max_seq_len` starting from the longest
one at each token (when there is a pair of input sequences).
- 'only_first': Only truncate the first sequence.
- 'only_second': Only truncate the second sequence.
- 'do_not_truncate': Do not truncate (raise an error if the input
sequence is longer than `max_seq_len`).
Defaults to 'longest_first'.
return_position_ids (bool, optional):
Whether to include tokens position ids in the returned dictionary.
Defaults to `False`.
return_token_type_ids (bool, optional):
Whether to include token type ids in the returned dictionary.
Defaults to `True`.
return_attention_mask (bool, optional):
Whether to include the attention mask in the returned dictionary.
Defaults to `False`.
return_length (bool, optional):
Whether to include the length of each encoded inputs in the
returned dictionary. Defaults to `False`.
return_overflowing_tokens (bool, optional):
Whether to include overflowing token information in the returned
dictionary. Defaults to `False`.
return_special_tokens_mask (bool, optional):
Whether to include special tokens mask information in the returned
dictionary. Defaults to `False`.
Returns:
list[dict]:
The dict has the following optional items:
- **input_ids** (list[int]): List of token ids to be fed to a model.
- **position_ids** (list[int], optional): List of token position ids to be
fed to a model. Included when `return_position_ids` is `True`
- **token_type_ids** (list[int], optional): List of token type ids to be
fed to a model. Included when `return_token_type_ids` is `True`.
- **attention_mask** (list[int], optional): List of integers valued 0 or 1,
where 0 specifies paddings and should not be attended to by the
model. Included when `return_attention_mask` is `True`.
- **seq_len** (int, optional): The input_ids length. Included when `return_length`
is `True`.
- **overflowing_tokens** (list[int], optional): List of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **num_truncated_tokens** (int, optional): The number of overflowing tokens.
Included when if `max_seq_len` is specified and `return_overflowing_tokens`
is True.
- **special_tokens_mask** (list[int], optional): List of integers valued 0 or 1,
with 0 specifying special added tokens and 1 specifying sequence tokens.
Included when `return_special_tokens_mask` is `True`.
- **offset_mapping** (list[tuple], optional): list of pairs preserving the
index of the start and end char in the original input for each token.
For a special token, the index pair is `(0, 0)`. Included when
`stride` is used.
- **overflow_to_sample** (int, optional): Index of the example from which this
feature is generated. Included when `stride` is used.
"""
def get_input_ids(text):
if isinstance(text, str):
tokens = self.tokenize(text)
return self.convert_tokens_to_ids(tokens)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], str):
return self.convert_tokens_to_ids(text)
elif isinstance(text,
(list, tuple)) and len(text) > 0 and isinstance(
text[0], int):
return text
else:
raise ValueError(
"Input is not valid. Should be a string, a list/tuple of strings or a list/tuple of integers."
)
batch_outputs = {}
batch_encode_inputs = []
for example_id, tokens_or_pair_tokens in enumerate(
batch_text_or_text_pairs):
if not isinstance(tokens_or_pair_tokens, (list, tuple)):
text, text_pair = tokens_or_pair_tokens, None
elif is_split_into_words and not isinstance(
tokens_or_pair_tokens[0], (list, tuple)):
text, text_pair = tokens_or_pair_tokens, None
else:
text, text_pair = tokens_or_pair_tokens
first_ids = get_input_ids(text)
second_ids = get_input_ids(
text_pair) if text_pair is not None else None
if stride > 0 and second_ids is not None:
max_len_for_pair = max_seq_len - len(
first_ids) - self.num_special_tokens_to_add(pair=True)
token_offset_mapping = self.get_offset_mapping(text)
token_pair_offset_mapping = self.get_offset_mapping(text_pair)
offset = 0
while offset < len(second_ids):
encoded_inputs = {}
length = len(second_ids) - offset
if length > max_len_for_pair:
length = max_len_for_pair
ids = first_ids
pair_ids = second_ids[offset:offset + length]
mapping = token_offset_mapping
pair_mapping = token_pair_offset_mapping[offset:offset +
length]
offset_mapping = self.build_offset_mapping_with_special_tokens(
mapping, pair_mapping)
sequence = self.build_inputs_with_special_tokens(ids,
pair_ids)
token_type_ids = self.create_token_type_ids_from_sequences(
ids, pair_ids)
# Build output dictionary
encoded_inputs["input_ids"] = sequence
if return_token_type_ids:
encoded_inputs["token_type_ids"] = token_type_ids
if return_special_tokens_mask:
encoded_inputs[
"special_tokens_mask"] = self.get_special_tokens_mask(
ids, pair_ids)
if return_length:
encoded_inputs["seq_len"] = len(encoded_inputs[
"input_ids"])
# Check lengths
assert max_seq_len is None or len(encoded_inputs[
"input_ids"]) <= max_seq_len
# Padding
needs_to_be_padded = pad_to_max_seq_len and \
max_seq_len and len(encoded_inputs["input_ids"]) < max_seq_len
encoded_inputs['offset_mapping'] = offset_mapping
if needs_to_be_padded:
difference = max_seq_len - len(encoded_inputs[
"input_ids"])
if self.padding_side == 'right':
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(
encoded_inputs[
"input_ids"]) + [0] * difference
if return_token_type_ids:
# 0 for padding token mask
encoded_inputs["token_type_ids"] = (
encoded_inputs["token_type_ids"] +
[self.pad_token_type_id] * difference)
if return_special_tokens_mask:
encoded_inputs[
"special_tokens_mask"] = encoded_inputs[
"special_tokens_mask"] + [1
] * difference
encoded_inputs["input_ids"] = encoded_inputs[
"input_ids"] + [self.pad_token_id] * difference
encoded_inputs['offset_mapping'] = encoded_inputs[
'offset_mapping'] + [(0, 0)] * difference
elif self.padding_side == 'left':
if return_attention_mask:
encoded_inputs["attention_mask"] = [
0
] * difference + [1] * len(encoded_inputs[
"input_ids"])
if return_token_type_ids:
# 0 for padding token mask
encoded_inputs["token_type_ids"] = (
[self.pad_token_type_id] * difference +
encoded_inputs["token_type_ids"])
if return_special_tokens_mask:
encoded_inputs["special_tokens_mask"] = [
1
] * difference + encoded_inputs[
"special_tokens_mask"]
encoded_inputs["input_ids"] = [
self.pad_token_id
] * difference + encoded_inputs["input_ids"]
encoded_inputs['offset_mapping'] = [
(0, 0)
] * difference + encoded_inputs['offset_mapping']
else:
if return_attention_mask:
encoded_inputs["attention_mask"] = [1] * len(
encoded_inputs["input_ids"])
if return_position_ids:
encoded_inputs["position_ids"] = list(
range(len(encoded_inputs["input_ids"])))
encoded_inputs['overflow_to_sample'] = example_id
for key, value in encoded_inputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
if offset + length == len(second_ids):
break
offset += min(length, stride)
else:
encoded_inputs = self.encode(
first_ids,
second_ids,
max_seq_len=max_seq_len,
pad_to_max_seq_len=pad_to_max_seq_len,
truncation_strategy=truncation_strategy,
return_position_ids=return_position_ids,
return_token_type_ids=return_token_type_ids,
return_attention_mask=return_attention_mask,
return_length=return_length,
return_overflowing_tokens=return_overflowing_tokens,
return_special_tokens_mask=return_special_tokens_mask)
for key, value in encoded_inputs.items():
if key not in batch_outputs:
batch_outputs[key] = []
batch_outputs[key].append(value)
return batch_outputs
def get_offset_mapping(self, text):
"""
Returns the map from tokens to the start and end character indices in the original text.
Modified from https://github.com/bojone/bert4keras/blob/master/bert4keras/tokenizers.py#L372
Args:
text (str):
Input text.
Returns:
list: The offset map of input text.
"""
split_tokens = []
for token in self.basic_tokenizer.tokenize(text):
for sub_token in self.wordpiece_tokenizer.tokenize(token):
split_tokens.append(sub_token
if sub_token != self.unk_token else token)
normalized_text, char_mapping = '', []
for i, ch in enumerate(text):
if self.basic_tokenizer.do_lower_case:
ch = ch.lower()
ch = unicodedata.normalize('NFD', ch)
ch = ''.join([c for c in ch if unicodedata.category(c) != 'Mn'])
ch = ''.join([
c for c in ch
if not (ord(c) == 0 or ord(c) == 0xfffd or _is_control(c))
])
normalized_text += ch
char_mapping.extend([i] * len(ch))
text, token_mapping, offset = normalized_text, [], 0
for token in split_tokens:
if token[:2] == '##':
token = token[2:]
start = text[offset:].index(token) + offset
end = start + len(token)
token_mapping.append(
(char_mapping[start], char_mapping[end - 1] + 1))
offset = end
return token_mapping
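# Offset-mapping sketch (the split below is illustrative and vocab-dependent):
# for text = "unaffable" tokenized as ["una", "##ffa", "##ble"], get_offset_mapping
# returns [(0, 3), (3, 6), (6, 9)], i.e. the (start_char, end_char) of each token
# in the normalized original text.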
class BPETokenizer(PretrainedTokenizer):
"""
The base class for all BPE tokenizers. It mainly provides common tokenization
methods for BPE-style tokenizers.
Args:
vocab_file (str):
file path of the vocabulary.
encoder_json_path (str, optional):
file path of the id-to-token mapping (encoder JSON file).
vocab_bpe_path (str, optional):
file path of the BPE merges file.
unk_token (str, optional):
The special token for unknown words.
Defaults to "[UNK]".
sep_token (str, optional):
The special token for separator token.
Defaults to "[SEP]".
pad_token (str, optional):
The special token for padding.
Defaults to "[PAD]".
cls_token (str, optional):
The special token for cls.
Defaults to "[CLS]".
mask_token (str, optional):
The special token for mask.
Defaults to "[MASK]".
"""
class Encoder(object):
def __init__(self,
encoder,
bpe_merges,
errors='replace',
special_tokens=["[SEP]", "[p]", "[q]", "[/q]"]):
self.encoder = encoder
self.decoder = {v: k for k, v in self.encoder.items()}
self.errors = errors # how to handle errors in decoding
self.byte_encoder = self._bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
self.cache = {}
self.re = try_import("regex")
self.special_tokens = special_tokens
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
self.pat = self.re.compile(
r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
)
@lru_cache()
def _bytes_to_unicode(self):
"""
Returns a dict mapping utf-8 bytes to unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = (list(range(ord("!"), ord("~") + 1)) +
list(range(ord("¡"), ord("¬") + 1)) +
list(range(ord("®"), ord("ÿ") + 1)))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8 + n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def _get_pairs(self, word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token)
pairs = self._get_pairs(word)
if not pairs:
return token
while True:
bigram = min(
pairs,
key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word) - 1 and word[
i + 1] == second:
new_word.append(first + second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = self._get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
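# BPE sketch (merge ranks are hypothetical): the token "lower" starts as the word
# ('l', 'o', 'w', 'e', 'r'); if ('e', 'r') is the best-ranked pair it merges into
# ('l', 'o', 'w', 'er'), and merging repeats until no ranked pair remains, so the
# space-joined result, e.g. "low er", is cached and returned.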
def tokenize(self, text):
tokens = text.split(' ')
sub_tokens = []
for token_i, token in enumerate(tokens):
if self.is_special_token(token):
if token_i == 0:
sub_tokens.extend([token])
else:
sub_tokens.extend([" " + token])
else:
if token_i == 0:
sub_tokens.extend(self.re.findall(self.pat, token))
else:
sub_tokens.extend(
self.re.findall(self.pat, " " + token))
return sub_tokens
def tokenize_old(self, text):
return self.re.findall(self.pat, text)
def is_special_token(self, tok):
if isinstance(tok, int):
return False
res = False
for t in self.special_tokens:
# if tok.find(t) != -1:
if tok.strip() == t:
res = True
break
return res
def tokenize_bpe(self, token):
if self.is_special_token(token):
return [token.strip()] # remove space for convert_to_ids
else:
token = ''.join(self.byte_encoder[b]
for b in token.encode('utf-8'))
return [
self.encoder[bpe_token]
for bpe_token in self.bpe(token).split(' ')
]
def encode(self, text):
bpe_tokens = []
for token in self.tokenize(text):
bpe_tokens.extend(self.tokenize_bpe(token))
return bpe_tokens
def decode(self, tokens):
pre_token_i = 0
texts = []
for token_i, token in enumerate(tokens):
if self.is_special_token(token):
# process tokens before token_i
if token_i - pre_token_i > 0:
text = ''.join([
self.decoder[int(tok)]
for tok in tokens[pre_token_i:token_i]
])
text = bytearray(
[self.byte_decoder[c] for c in text]).decode(
'utf-8', errors=self.errors)
texts.append(text)
# texts.append(token)
if token_i == 0:
texts.append(
token
) # in the beginning, there is no space before special tokens
else:
texts.extend(
[" ", token]
) # in middle sentence, there must be a space before special tokens
pre_token_i = token_i + 1
if pre_token_i < len(tokens):
text = ''.join(
[self.decoder[int(tok)] for tok in tokens[pre_token_i:]])
text = bytearray([self.byte_decoder[c] for c in text]).decode(
'utf-8', errors=self.errors)
texts.append(text)
return ''.join(texts)
def __init__(self,
vocab_file,
encoder_json_path="./configs/encoder.json",
vocab_bpe_path="./configs/vocab.bpe",
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]"):
self.vocab = self.load_vocabulary(
vocab_file,
unk_token=unk_token,
sep_token=sep_token,
cls_token=cls_token,
mask_token=mask_token)
self.encoder_json_path = encoder_json_path
self.vocab_bpe_path = vocab_bpe_path
self.encoder = self._get_encoder(encoder_json_path, vocab_bpe_path)
self.nltk = try_import('nltk')
def _tokenize(self, text, is_sentencepiece=True):
text = convert_to_unicode(text)
text = " ".join(text.split()) # remove duplicate whitespace
if is_sentencepiece:
sents = self.nltk.tokenize.sent_tokenize(text)
bpe_ids = sum([self.encoder.encode(sent) for sent in sents], [])
else:
bpe_ids = self.encoder.encode(text)
tokens = [str(bpe_id) for bpe_id in bpe_ids]
return tokens
def _get_encoder(self, encoder_json_path, vocab_bpe_path):
with open(encoder_json_path, 'r') as f:
encoder = json.load(f)
with open(vocab_bpe_path, 'r') as f:
bpe_data = f.read()
bpe_merges = [
tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]
]
return self.Encoder(
encoder=encoder,
bpe_merges=bpe_merges, )
|
the-stack_0_18535 | import pronouncing
def get_alliterations(word, phone_count=2):
pronunciations = pronouncing.phones_for_word(word)
alliterations = set()
for pronunciation in pronunciations:
first_phones = pronunciation.split()[:phone_count]
curr_alliterations = pronouncing.search('^' + ' '.join(first_phones))
alliterations.update(curr_alliterations)
return alliterations
def get_rhymes(word):
pronunciations = pronouncing.phones_for_word(word)
rhymes = set()
for pronunciation in pronunciations:
rhyming_part = pronouncing.rhyming_part(pronunciation)
curr_rhymes = pronouncing.search(rhyming_part + "$")
rhymes.update(curr_rhymes)
return rhymes
if __name__ == '__main__':
# preposterous rhinoceros?
print(get_rhymes('rhinoceros'))
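# Additional illustrative example (assumes "python" is present in the CMU
# pronouncing dictionary used by the pronouncing package): words that share
# their first two phones with the query word.
print(get_alliterations('python'))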
|
the-stack_0_18536 | import csv
import math
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import numpy as np
import os
from PIL import Image
from skimage import transform
import torch
import torch.nn.functional as F
from utils import imsmooth, normalize, str2bool
data_dirs = {}
data_dirs['gradient'] = '/scratch/shared/slow/mandela/gradient'
data_dirs['guided_backprop'] ='/scratch/shared/slow/mandela/guided_backprop'
data_dirs['rise'] = '/scratch/shared/slow/mandela/rise'
data_dirs['grad_cam'] = '/scratch/shared/slow/mandela/grad_cam'
data_dirs['perturbations'] = '/scratch/shared/slow/vedaldi/vis/exp20-sal-im12val-vgg16'
alphas = {}
alphas['gradient'] = {'mean': 1.5, 'min_max_diff': 0.2, 'energy': 0.45}
alphas['guided_backprop'] = {'mean': 2.0, 'min_max_diff': 0.05, 'energy': 0.6}
alphas['rise'] = {'mean': 1.0, 'min_max_diff': 0.65, 'energy': 0.1}
alphas['grad_cam'] = {'mean': 2.0, 'min_max_diff': 0.3, 'energy': 0.4}
alphas['perturbations'] = {'mean': 1.0, 'min_max_diff': 0.5, 'energy': 0.55}
def generate_bbox(data_dir, image_path, synset, image_name,
method='mean', alpha=0.5, smooth=True):
mask_path = os.path.join(data_dir, synset, image_name + '.pth')
# Load results from torch file.
if not os.path.exists(mask_path):
print('Mask file does not exist: {}'.format(mask_path))
return [synset, -2, -2, -2, -2], None, None
res = torch.load(mask_path)
# Get original image dimensions.
img = Image.open(image_path)
(img_w, img_h) = img.size
# Load and verify 2D mask.
mask = res['mask']
# if list of masks, find mean mask
if len(mask.shape) == 4:
mask = torch.mean(mask, dim=0, keepdim=True)
if smooth:
mask = imsmooth(mask, sigma=20)
mask = mask.squeeze()
assert(len(mask.shape) == 2)
mask = mask.cpu().data.squeeze().numpy()
if (not np.max(mask) <= 1) or (not np.min(mask) >= 0):
print('Normalizing')
mask = normalize(mask)
assert(np.max(mask) <= 1)
assert(np.min(mask) >= 0)
# Resize mask to original image dimensions.
resized_mask = transform.resize(mask, (img_h, img_w))
assert(np.max(resized_mask) <= 1)
assert(np.min(resized_mask) >= 0)
# Threshold mask and get smallest bounding box coordinates.
heatmap = resized_mask
if method == 'mean':
threshold = alpha*heatmap.mean()
heatmap = heatmap >= threshold
elif method == 'min_max_diff':
threshold = alpha*(heatmap.max()-heatmap.min())
heatmap_m = heatmap - heatmap.min()
heatmap = heatmap_m >= threshold
heatmap[heatmap_m < threshold] = 0
elif method == 'energy':
heatmap_f = heatmap.flatten()
sorted_idx = np.argsort(heatmap_f)[::-1]
tot_energy = heatmap.sum()
heatmap_cum = np.cumsum(heatmap_f[sorted_idx])
ind = np.where(heatmap_cum >= alpha*tot_energy)[0][0]
heatmap_f = np.ones(heatmap_f.shape)
heatmap_f[sorted_idx[ind:]] = 0
heatmap = np.reshape(heatmap_f, heatmap.shape)
x = np.where(heatmap.sum(0) > 0)[0] + 1
y = np.where(heatmap.sum(1) > 0)[0] + 1
if len(x) == 0 or len(y) == 0:
return [synset, -1, -1, -1, -1], resized_mask, heatmap
return [synset, x[0],y[0],x[-1],y[-1]], resized_mask, heatmap
def compute_overlap(bb, bbgt):
assert(len(bbgt) == 4)
assert(len(bb) == 4)
ov_vector = []
bi=[max(bb[0],bbgt[0]), max(bb[1],bbgt[1]),min(bb[2],bbgt[2]), min(bb[3],bbgt[3])]
iw=bi[2]-bi[0]+1
ih=bi[3]-bi[1]+1
ov = -1
if iw>0 and ih>0:
# Compute overlap as area of intersection / area of union.
ua=((bb[2]-bb[0]+1)*(bb[3]-bb[1]+1)+(bbgt[2]-bbgt[0]+1)*(bbgt[3]-bbgt[1]+1)-iw*ih)
ov=iw*ih/float(ua)
return ov
else:
return 0
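# Worked example (illustrative boxes, inclusive pixel coordinates): for
# bb = [0, 0, 9, 9] and bbgt = [0, 5, 9, 14], each box covers 100 pixels and the
# intersection is 10 x 5 = 50 pixels, so compute_overlap returns
# 50 / (100 + 100 - 50) ~= 0.33.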
def debug(method='min_max_diff', indexes = [25, 100, 346, 75, 100], save_path='.'):
file_paths = np.loadtxt('./data/val_imdb_0_1000.txt', dtype=str)[indexes, 0]
for file_path in file_paths:
image = Image.open(file_path)
image_name = file_path.split('/')[-1]
filename = file_path.split('/')[-1].strip('.JPEG')
synset = file_path.split('/')[-2]
bboxes = None
with open('/scratch/shared/slow/ruthfong/ILSVRC2012/val.csv') as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
for row in csv_reader:
if row["fname"] == filename:
xmin = int(row["xmin"])
ymin = int(row["ymin"])
xmax = int(row["xmax"])
ymax = int(row["ymax"])
bbgt = (xmin, ymin, xmax, ymax)
resize_shape = (int(row["height"]), int(row["width"]))
break
f, ax = plt.subplots(3, 5, figsize=(5*4, 3*4))
plt.title(method)
ax[0][0].set_ylabel('mask')
ax[1][0].set_ylabel('heatmap (thres)')
ax[2][0].set_ylabel('BBox')
for i, mask_method in enumerate(['rise', 'guided_backprop', 'gradient', 'grad_cam', 'perturbations']):
data_dir = data_dirs[mask_method]
alpha = alphas[mask_method][method]
smooth = True if mask_method == 'perturbations' else False
bb_pred, mask, heatmap = generate_bbox(data_dir, file_path, synset, image_name,
method=method, alpha=alpha, smooth=smooth)
if mask is None:
continue
overlap = compute_overlap(bb_pred[1:], bbgt)
ax[0][i].imshow(mask, vmin=0, vmax=1, cmap='jet')
ax[0][i].set_title(mask_method)
ax[1][i].imshow(heatmap, vmin=0, vmax=1, cmap='jet')
(xmin, ymin, xmax, ymax) = bb_pred[1:]
(xmin_gt, ymin_gt, xmax_gt, ymax_gt) = bbgt
ax[2][i].imshow(image)
rect_gt = patches.Rectangle((xmin_gt, ymin_gt), xmax_gt-xmin_gt,
ymax_gt-ymin_gt, linewidth=1, edgecolor='r',facecolor='none')
rect_p = patches.Rectangle((xmin, ymin), xmax-xmin, ymax-ymin,
linewidth=1, edgecolor='b',facecolor='none')
ax[2][i].add_patch(rect_gt)
ax[2][i].add_patch(rect_p)
ax[2][i].set_title("Overlap: %.3f" % (overlap))
ax[2][i].legend((rect_gt, rect_p), ('Ground Truth', 'Predicted'))
plt.savefig(os.path.join(save_path, filename + '_' + method))
plt.close()
if __name__ == '__main__':
import argparse
import sys
import traceback
try:
parser = argparse.ArgumentParser()
parser.register('type', 'bool', str2bool)
parser.add_argument('--method', type=str, default='min_max_diff')
parser.add_argument('--save_path', type=str, default='.')
parser.add_argument('--indexes', type=int, nargs='*', default=[25, 100, 346, 75, 100])
args = parser.parse_args()
debug(method=args.method, indexes=args.indexes, save_path=args.save_path)
except:
traceback.print_exc(file=sys.stdout)
sys.exit(1) |
the-stack_0_18538 | """ Checkpointing log
:Author: Jonathan Karr <[email protected]>
:Author: Arthur Goldberg <[email protected]>
:Date: 2017-08-30
:Copyright: 2016-2020, Karr Lab
:License: MIT
"""
from bisect import bisect
import math
import numpy
import os
import pickle
import re
from de_sim.config import core
from de_sim.errors import SimulatorError
from wc_utils.util.misc import obj_to_str
MAX_TIME_PRECISION = core.get_config()['de_sim']['max_time_precision']
class Checkpoint(object):
""" Represents a simulation checkpoint
Attributes:
time (:obj:`float`): the checkpoint's simulated time, in simulation time units
state (:obj:`object`): the simulation application's state at time `time`
random_state (:obj:`object`): the state of the simulator's random number generator at time `time`
"""
def __init__(self, time, state, random_state):
self.time = time
self.state = state
self.random_state = random_state
def __str__(self):
""" Provide a human readable representation of this `Checkpoint`
Returns:
:obj:`str`: a human readable representation of this `Checkpoint`
"""
return obj_to_str(self, ['time', 'state', 'random_state'])
def __eq__(self, other):
""" Compare two checkpoints
Assumes that state objects implement the equality comparison operation `__eq__()`
Args:
other (:obj:`checkpoint`): other checkpoint
Returns:
:obj:`bool`: true if checkpoints are semantically equal
"""
if other.__class__ is not self.__class__:
return False
if other.time != self.time:
return False
if other.state != self.state:
return False
try:
numpy.testing.assert_equal(other.random_state, self.random_state)
except AssertionError:
return False
return True
def __ne__(self, other):
""" Compare two checkpoints
Args:
other (:obj:`checkpoint`): other checkpoint
Returns:
:obj:`bool`: true if checkpoints are semantically unequal
"""
return not self.__eq__(other)
class AccessCheckpoints(object):
""" Represents a directory containing simulation checkpoints
Attributes:
dir_path (:obj:`str`): a directory containing simulation checkpoints
last_dir_mod (:obj:`int`): time (in nanoseconds) at which `dir_path` was last modified
all_checkpoints (:obj:`list` of :obj:`str`): all the checkpoints in `dir_path`
"""
def __init__(self, dir_path):
self.dir_path = dir_path
self.last_dir_mod = os.stat(self.dir_path).st_mtime_ns
self.all_checkpoints = None
def set_checkpoint(self, checkpoint):
""" Save a checkpoint in the directory `self.dir_path`
Args:
checkpoint (:obj:`Checkpoint`): checkpoint
"""
file_name = self.get_file_name(checkpoint.time)
with open(file_name, 'wb') as file:
pickle.dump(checkpoint, file)
def get_checkpoint(self, time=None):
""" Get the latest checkpoint in directory `self.dir_path` with time before or equal to `time`
For example, consider checkpoints at 1.0 s, 1.5 s, and 2.0 s. If `time` = 1.5 s, then
return the checkpoint from 1.5 s. Return the same checkpoint if `time` = 1.9 s.
If no checkpoint with time <= `time` exists, then return the first checkpoint. E.g., if
`time` = 0.9 s, the checkpoint from 1.0 s would be returned.
Finally, if `time` is `None`, return the last checkpoint.
Args:
time (:obj:`float`, optional): time in simulated time units of desired checkpoint; if not provided,
the most recent checkpoint is returned
Returns:
:obj:`Checkpoint`: the most recent checkpoint before time `time`, or the most recent
checkpoint if `time` is not provided
"""
# get list of checkpoints
checkpoint_times = self.list_checkpoints()
# select closest checkpoint
if time is None:
nearest_time = checkpoint_times[-1]
else:
index = bisect(checkpoint_times, time) - 1
index = max(index, 0)
nearest_time = checkpoint_times[index]
file_name = self.get_file_name(nearest_time)
# load and return this checkpoint
with open(file_name, 'rb') as file:
return pickle.load(file)
def list_checkpoints(self, error_if_empty=True):
""" Get sorted list of times of saved checkpoints in checkpoint directory `self.dir_path`
To enhance performance the list of times is cached in attribute `all_checkpoints` and
reloaded if the directory is updated.
Args:
error_if_empty (:obj:`bool`, optional): if set, report an error if no checkpoints found
Returns:
:obj:`list`: sorted list of times of saved checkpoints
Raises:
:obj:`SimulatorError`: if `dirname` doesn't contain any checkpoints
"""
# reload all_checkpoints if they have not been obtained
# or self.dir_path has been modified since all_checkpoints was last obtained
if self.all_checkpoints is None or self.last_dir_mod < os.stat(self.dir_path).st_mtime_ns:
self.last_dir_mod = os.stat(self.dir_path).st_mtime_ns
# find checkpoint times
checkpoint_times = []
pattern = r'^(\d+\.\d{' + f'{MAX_TIME_PRECISION},{MAX_TIME_PRECISION}' + r'})\.pickle$'
for file_name in os.listdir(self.dir_path):
match = re.match(pattern, file_name)
if os.path.isfile(os.path.join(self.dir_path, file_name)) and match:
checkpoint_times.append(float(match.group(1)))
# sort by time
checkpoint_times.sort()
self.all_checkpoints = checkpoint_times
# error if no checkpoints found
if error_if_empty and not self.all_checkpoints:
raise SimulatorError("no checkpoints found in '{}'".format(self.dir_path))
# return list of checkpoint times
return self.all_checkpoints
def get_file_name(self, time):
""" Get file name for checkpoint at time `time`
Args:
time (:obj:`float`): time
Returns:
:obj:`str`: file name for checkpoint at time `time`
"""
filename_time = f'{time:.{MAX_TIME_PRECISION}f}'
if not math.isclose(float(filename_time), time):
raise SimulatorError(f"filename time {filename_time} is not close to time {time}")
return os.path.join(self.dir_path, f'{filename_time}.pickle')
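# Minimal usage sketch (assumes an existing, writable checkpoint directory):
#     access = AccessCheckpoints('/tmp/checkpoints')
#     access.set_checkpoint(Checkpoint(time=1.5, state={'species': 10}, random_state=None))
#     access.list_checkpoints()        # -> [1.5]
#     access.get_checkpoint(time=1.9)  # -> the checkpoint recorded at time 1.5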
|
the-stack_0_18539 | import matplotlib.pyplot as plt
import os
root_path = os.path.dirname(os.path.abspath(__file__))
# assumes variables.py also exposes a `variables` dict providing 'lags_dict' (used below)
from variables import variables, multi_step_lags
import sys
sys.path.append(root_path)
from models import multi_step_gbrt
if __name__ == '__main__':
multi_step_gbrt(
root_path=root_path,
station='Xianyang',
decomposer='dwt',
predict_pattern='forecast',
llags_dict = variables['lags_dict'],
model_id=1
)
plt.show()
|
the-stack_0_18540 | """ Create report for experiment that has already been run. """
import os
import pickle
import json
from typing import Dict, Any
from meta.report.plot import plot
from meta.report.tabulate import tabulate
from meta.utils.utils import save_dir_from_name
def report(config: Dict[str, Any]) -> None:
"""
Create a report of the saved results of a training run. The expected entries of
`config` are documented below.
Parameters
----------
save_name : str
Name of saved results directory to report results from.
tables : List[List[str]]
Specification of tables to create. Each element of `tables` is a list of strings
that specifies the contents of a single table. Each table will have a row for
each method used in the experiment we are reporting on, a column for each metric
whose name is in the corresponding element of `tables`.
name_subs : Dict[str, str]
Translation table from internal metric names to display metric names. Used to
make plots and tables that are more readable.
"""
# Check that requested results exist.
results_dir = save_dir_from_name(config["save_name"])
if not os.path.isdir(results_dir):
raise ValueError(f"Results directory '{results_dir}' does not exist.")
# Create save directory for this report.
save_name = f"{config['save_name']}_report"
original_save_name = str(save_name)
save_dir = save_dir_from_name(save_name)
n = 0
while os.path.isdir(save_dir):
n += 1
if n > 1:
index_start = save_name.rindex("_")
save_name = f"{save_name[:index_start]}_{n}"
else:
save_name += f"_{n}"
save_dir = save_dir_from_name(save_name)
os.makedirs(save_dir)
if original_save_name != save_name:
print(
f"There already exists saved results with name '{original_save_name}'."
f" Saving current results under name '{save_name}'."
)
# Save config.
config_path = os.path.join(save_dir, f"{save_name}_config.json")
with open(config_path, "w") as config_file:
json.dump(config, config_file, indent=4)
# Load checkpoint from saved results and get metrics.
checkpoint_path = os.path.join(results_dir, "checkpoint.pkl")
with open(checkpoint_path, "rb") as checkpoint_file:
checkpoint = pickle.load(checkpoint_file)
metrics = checkpoint["metrics"]
# Create plot.
plot_path = os.path.join(save_dir, f"{save_name}_plot.png")
summary = None
if isinstance(metrics, dict):
methods = list(metrics.keys())
summary = metrics["summary"]
plot_metrics = {
method: metrics[method]["mean"] for method in methods if method != "summary"
}
plot(plot_metrics, plot_path, summary)
# Create tables.
table_path = os.path.join(save_dir, f"{save_name}_table.tex")
tabulate(metrics, table_path, config["tables"], config["name_subs"])
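# Hypothetical invocation, assuming results were saved under the name
# "my_experiment" with metrics named "train_reward" and "eval_reward":
#     report({
#         "save_name": "my_experiment",
#         "tables": [["train_reward", "eval_reward"]],
#         "name_subs": {"train_reward": "Train Reward", "eval_reward": "Eval Reward"},
#     })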
|
the-stack_0_18541 | import json
from typing import Dict, Any
from .services import update_count
from .response import Response
def handler(event: Dict[str, Any], context: Any):
print(json.dumps(event))
path = event['pathParameters']['proxy']
if not path:
return Response.error(
400, body='no path given', error_type='BadRequest'
)
count = update_count(path)
return Response.success(body=json.dumps({
'path': path,
'count': count,
})) |
the-stack_0_18543 | #!/usr/bin/python
## main
''' Run this program to scan the sample and detect graphene'''
from coordinate_creator import coor_gen
from microscope_controller import esp, prior_motor
from datetime import datetime
from preprocessing import check_screen_scale_rate
import os
import sys
import pandas as pd
# check that the screen scale rate matches the expected value
check_screen_scale_rate(1)
# [TIME] setup a start_time and record progress
start_time = datetime.now()
progress = 0.
# newport controller device, baud rate, default axis
controller = esp(dev="COM3",b=921600,axis=1,reset=True,initpos=0.0,useaxis=[],timeout=0.5)
mt = prior_motor("COM1")
# Get the origin x, y (input_1_x & y)
input_1_x, input_1_y = controller.get_pos()
# Create folder that named by current time to store photos and log file
folder_name = str(start_time)[:19].replace(':', "'")
os.mkdir(folder_name)
resultPath = f'./{folder_name}/Detection result'
os.mkdir(resultPath)
# Perform 9 z scans to find the best-focused image, ranging from -20 to 20
imgPath = './'+ folder_name +'/'+ 'Origin.png'
origin_z = mt.focusLens(7, 30, imgPath)
# Write the info to a txt file
f = open(resultPath+'/Log file.txt', 'a')
f.write('Start_time: ' + str(start_time)[:-7] + '\n')
f.write('The Origin coordinate: x = ' + str(input_1_x) + ', y = ' + str(input_1_y) + '\n')
f.close()
# generate main corrd. and extra coord. (pd.dataframe)
main, extra, num_x, num_y = coor_gen(input_1_x, input_1_y)
# get the total length of corrdinates for looping later
row_len = main.shape[0]
# create z record
z_array = []
count_row = 1
reverse = 1
try:
# Start looping every coordinate and save screenshot
for i in range(row_len):
# Get the start point from corrdinate generator
abs_x = main.iloc[i][0]
abs_y = main.iloc[i][1]
# apply the start point to it
controller.x_y_move(abs_x, abs_y)
imgPath = './'+ folder_name +'/'+ str(i) + '.png'
# return the best quality z
if count_row % (num_y + 1) == 0:
count_row = 1
reverse *= -1
abs_z = mt.focusLens_fast2(2, 5, imgPath, reverse)
print(f'{i}th figure. Clear z is: {abs_z}')
progress += 1
print(f'\nCurrent progress: {round(progress/row_len*100)}%')
z_record = (abs_x, abs_y, abs_z)
z_array.append(z_record)
count_row += 1
except:
# Go back to origin x, y (input_1_x & y)
controller.x_y_move(input_1_x, input_1_y)
controller.close()
# Adjust the z to the original place
mt.move_z_pos(origin_z)
mt.close()
sys.exit()
# [TIME] Calculate the time differnece
end_time = datetime.now()
time_delta = (end_time - start_time)
# Go back to origin x, y (input_1_x & y)
controller.x_y_move(input_1_x, input_1_y)
del controller
# Adjust the z to the original place
mt.move_z_pos(origin_z)
del mt
# [Time] Print time information
print('Run finished.\nStart time: ' + str(start_time)[:-7] + '\nEnd time: ' + str(end_time)[:-7] + '\nElapsed time: ' + str(time_delta)[:-7])
# Write info to a txt
f = open(resultPath+'/Log file.txt', 'a')
f.write('End_time: ' + str(end_time)[:-7] + '\n')
f.write('Elapsed time: ' + str(time_delta)[:-7] + '\n\n')
f.close()
# Save the coordinates and z value
z_data = pd.DataFrame(z_array)
z_data.to_csv(resultPath+'/main_coordinates.txt', sep='\t', mode='w')
# Start TF2 Object Detection
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~
from graphene_detection import detect
detect(folder_name, resultPath, main, probability=0.3, flip_horizontally=False, grayscale=False)
|
the-stack_0_18545 | """Tests for the aerial_position module."""
from auvsi_suas.models.aerial_position import AerialPosition
from auvsi_suas.models.gps_position import GpsPosition
from django.test import TestCase
class TestAerialPositionModel(TestCase):
"""Tests the AerialPosition model."""
def assertDistanceEqual(self, pos1, pos2, dist, threshold=10):
"""AerialPosition distances are within threshold (ft)."""
self.assertAlmostEqual(pos1.distance_to(pos2), dist, delta=threshold)
self.assertAlmostEqual(pos2.distance_to(pos1), dist, delta=threshold)
def evaluate_distance_inputs(self, io_list):
"""Evaluates the distance_to calc with the given input list."""
for (lon1, lat1, alt1, lon2, lat2, alt2, dist_actual) in io_list:
pos1 = AerialPosition(
latitude=lat1, longitude=lon1, altitude_msl=alt1)
pos2 = AerialPosition(
latitude=lat2, longitude=lon2, altitude_msl=alt2)
self.assertDistanceEqual(pos1, pos2, dist_actual)
def test_clean(self):
"""Tests validation."""
pos = AerialPosition(latitude=0, longitude=0, altitude_msl=0)
pos.full_clean()
def test_distance_zero(self):
"""Tests distance calc for same position."""
self.evaluate_distance_inputs([
# (lon1, lat1, alt1, lon2, lat2, alt2, dist_actual)
(0, 0, 0, 0, 0, 0, 0),
(1, 2, 3, 1, 2, 3, 0),
(-30, 30, 100, -30, 30, 100, 0),
]) # yapf: disable
def test_distance_competition_amounts(self):
"""Tests distance calc for competition amounts."""
self.evaluate_distance_inputs([
# (lon1, lat1, alt1, lon2, lat2, alt2, dist_actual)
(-76.428709, 38.145306, 0, -76.426375, 38.146146, 0, 736.4),
(-76.428537, 38.145399, 0, -76.427818, 38.144686, 100, 344.4),
(-76.434261, 38.142471, 100, -76.418876, 38.147838, 800, 4873.7),
]) # yapf: disable
def test_duplicate_unequal(self):
"""Tests the duplicate function with unequal positions."""
pos1 = AerialPosition(latitude=0, longitude=0, altitude_msl=0)
pos2 = AerialPosition(latitude=1, longitude=1, altitude_msl=0)
pos3 = AerialPosition(latitude=0, longitude=0, altitude_msl=1)
self.assertFalse(pos1.duplicate(pos2))
self.assertFalse(pos1.duplicate(pos3))
def test_duplicate_equal(self):
"""Tests the duplicate function with equal positions."""
pos1 = AerialPosition(latitude=0, longitude=0, altitude_msl=0)
pos2 = AerialPosition(latitude=0, longitude=0, altitude_msl=0)
self.assertTrue(pos1.duplicate(pos2))
|
the-stack_0_18547 | class StarterPokemon:
"""
This class defines one of the starter Pokémon given to the player by
Professor Oak at the start of Pokémon Red, Green, Blue, FireRed, and
LeafGreen.
Actual Pokémon can be created by calling the specific classes defining the
starter Pokémon types. You can see more details about them at :ref:`starter`.
"""
def __init__(self):
self.name = None
self.evolution = None
self.ability = None
self.pokemon_type = None
def who_is_that_pokemon(self):
"""
Shows the Pokémon name and its evolution.
Usage:
.. doctest::
>>> import pokedex
>>> friend = pokedex.Bulbasaur()
>>> friend.who_is_that_pokemon()
This pokemon is Bulbasaur.
It will evolve into Ivysaur, Venusaur, Mega Venusaur, Gigantamax Venusaur.
>>>
"""
print(f"This pokemon is {self.name}.")
print(f"It will evolve into {', '.join(self.evolution)}.")
class Bulbasaur(StarterPokemon):
"""
Bulbasaur is a dual-type Grass/Poison Pokémon introduced in Generation I.
It evolves into Ivysaur starting at level 16, which evolves into Venusaur
starting at level 32.
Along with :class:`Charmander` and :class:`Squirtle`, Bulbasaur is one of
three starter Pokémon of Kanto available at the beginning of Pokémon Red,
Green, Blue, FireRed, and LeafGreen.
"""
def __init__(self):
self.name = "Bulbasaur"
self.pokemon_type = {"grass", "poison"}
self.ability = "Overgrow"
self.evolution = ["Ivysaur",
"Venusaur",
"Mega Venusaur",
"Gigantamax Venusaur"]
class Charmander(StarterPokemon):
"""
Charmander is a Fire-type Pokémon introduced in Generation I.
It evolves into Charmeleon starting at level 16, which evolves into
Charizard starting at level 36.
Along with :class:`Bulbasaur` and :class:`Squirtle`, Charmander is one of
three starter Pokémon of Kanto available at the beginning of Pokémon Red,
Green, Blue, FireRed, and LeafGreen.
.. note::
Charmeleon and Charizard are fire-type Pokémon, but Mega Charizard X and
Gigantamax Charizard are also Flying Pokémon, while Mega Charizard Y is a
Dragon-type Pokémon.
"""
def __init__(self):
self.name = "Charmander"
self.pokemon_type = {"fire"}
self.ability = "Blaze"
self.evolution = [
"Charmeleon",
"Charizard",
"Mega Charizard X",
"Mega Charizard Y",
"Gigantamax Charizard",
]
class Squirtle(StarterPokemon):
"""
Squirtle is a Water-type Pokémon introduced in Generation I.
It evolves into Wartortle starting at level 16, which evolves into Blastoise
starting at level 36.
Along with :class:`Bulbasaur` and :class:`Charmander`, Squirtle is one of
three starter Pokémon of Kanto available at the beginning of Pokémon Red,
Green, Blue, FireRed, and LeafGreen.
"""
def __init__(self):
self.name = "Squirtle"
self.pokemon_type = {"water"}
self.ability = "Torrent"
self.evolution = ["Wartortle", "Blastoise", "Mega Blastoise", "Gigantamax Blastoise"]
|
the-stack_0_18551 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.db import models
from django.db import router
from django.db import transaction
from django.db.models.deletion import Collector
from networkapi.models.BaseManager import BaseManager
class BaseModel(models.Model):
"""
Base class for classes that inherit from "django.db.models.Model".
Model classes that need to log their database write and delete
operations should inherit from this class.
"""
objects = BaseManager()
class Meta:
abstract = True
def __unicode__(self):
if hasattr(self, 'nome'):
return u'{0}'.format(self.nome)
elif hasattr(self, 'id'):
return u'{0}'.format(self.id)
else:
return u'{0}'.format(self.__str__())
def set_authenticated_user(self, user):
self.authenticated_user = user
def save(self, user=None, force_insert=False, force_update=False, commit=False, **kwargs):
if user:
self.set_authenticated_user(user)
super(BaseModel, self).save(force_insert, force_update, **kwargs)
if commit is True:
transaction.commit()
def delete(self, *args, **kwargs):
"""
        Replaces super(BaseModel, self).delete().
        Reason: when related objects are deleted in cascade, the default
        implementation does not carry the authenticated user attribute needed
        for logging.
"""
using = router.db_for_write(self.__class__, instance=self)
assert self._get_pk_val() is not None, "%s object can't be deleted because its %s attribute is set to None." % (
self._meta.object_name, self._meta.pk.attname)
collector = Collector(using=using)
collector.collect([self])
collector.delete()
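# Illustrative sketch (not part of the original module): a hypothetical Model
# subclass using BaseModel so that its save/delete operations are logged with
# the authenticated user. The model name, the field and "request_user" are
# assumptions for the example only (the "nome" field matches __unicode__ above).
#
#     class Equipamento(BaseModel):
#         nome = models.CharField(max_length=100)
#
#     equipamento = Equipamento(nome='switch-01')
#     equipamento.save(user=request_user)
#     equipamento.delete()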
|
the-stack_0_18552 | from pathlib import Path
from scrapli.driver import GenericDriver
from scrapli.driver.core import IOSXEDriver
from scrapli.response import Response
import nornir_scrapli
from nornir_scrapli.exceptions import NornirScrapliNoConfigModeGenericDriver
CONFIG_FILE = f"{Path(nornir_scrapli.__file__).parents[1]}/tests/files/send_configs_from_file"
def test_send_configs_from_file(nornir, monkeypatch):
from nornir_scrapli.tasks import send_configs_from_file
def mock_open(cls):
pass
def mock_send_configs_from_file(
cls,
file,
strip_prompt,
failed_when_contains="",
stop_on_failed=False,
privilege_level="",
timeout_ops=None,
):
with open(file, "r") as f:
configs = f.read().splitlines()
responses = []
response = Response(host="fake_as_heck", channel_input=configs[0])
response._record_response(b"")
responses.append(response)
response = Response(host="fake_as_heck", channel_input=configs[1])
response._record_response(b"")
responses.append(response)
return responses
monkeypatch.setattr(IOSXEDriver, "open", mock_open)
monkeypatch.setattr(IOSXEDriver, "send_configs_from_file", mock_send_configs_from_file)
result = nornir.run(
task=send_configs_from_file,
dry_run=False,
file=CONFIG_FILE,
)
assert result["sea-ios-1"][0].result == "interface loopback123\ndescription neat\n"
assert result["sea-ios-1"].failed is False
assert result["sea-ios-1"].changed is True
def test_send_configs_from_file_dry_run(nornir, monkeypatch):
from nornir_scrapli.tasks import send_configs_from_file
def mock_open(cls):
pass
def mock_acquire_priv(cls, priv):
return
monkeypatch.setattr(IOSXEDriver, "open", mock_open)
monkeypatch.setattr(IOSXEDriver, "acquire_priv", mock_acquire_priv)
result = nornir.run(
task=send_configs_from_file,
dry_run=True,
file=CONFIG_FILE,
)
assert result["sea-ios-1"].result is None
assert result["sea-ios-1"].failed is False
assert result["sea-ios-1"].changed is False
def test_send_configs_from_file_generic_driver(nornir_generic, monkeypatch):
from nornir_scrapli.tasks import send_configs_from_file
def mock_open(cls):
pass
monkeypatch.setattr(GenericDriver, "open", mock_open)
result = nornir_generic.run(
task=send_configs_from_file,
dry_run=True,
file="whatever",
)
assert (
"nornir_scrapli.exceptions.NornirScrapliNoConfigModeGenericDriver"
in result["sea-ios-1"].result
)
assert result["sea-ios-1"].failed is True
assert result["sea-ios-1"].changed is False
assert isinstance(result["sea-ios-1"].exception, NornirScrapliNoConfigModeGenericDriver)
|
the-stack_0_18553 | import json
import os
import pandas as pd
import sys
sys.path.append("..")
sys.path.append("../../column_matching")
import data_build_scripts.helpers as hlp
def main():
local_path = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(local_path, "dimensions_cities_build.json"))
data = json.load(f)
matching = hlp.return_matching_dict()
two_up = os.path.abspath(os.path.join(local_path, "../.."))
source_dir = os.path.join(two_up, data['source']) # should work in both mac and windows
target_dir = os.path.join(two_up, data['target'])
source = os.path.join(source_dir, data['cities']['folder'], data['cities']['file'])
df = pd.read_csv(source)
df = df.drop_duplicates(subset='fms_city_id', keep='last')
#df = df[data['column_order']]
target_folder = os.path.join(target_dir, data['output_folder'])
hlp.make_folder_if_not_exists(target_folder)
target = os.path.join(target_folder, data['output_file'])
df.to_csv(target, index=False)
if __name__ == "__main__":
    main()
|
the-stack_0_18557 | from bs4 import BeautifulSoup
from urllib.parse import urljoin
import repo
def crawl(url, queued_count, maximum_items, get_html, extract_content):
if not url:
print('URL not provided', url)
return
already_seen = _seen(url)
if already_seen:
print('URL already seen ', already_seen)
return
total = queued_count + repo.count_visited() + repo.count_queued()
if total >= maximum_items:
print('Exiting! queued + visited over maximum:', queued_count, total)
return
repo.add_to_queue(url)
links, content = _crawl(url, get_html, extract_content)
repo.move_from_queued_to_visited(url)
return links, content
def add_results_to_queue(urls, allow_url_filter):
if not urls:
return
for url in urls:
if allow_url_filter(url) and not _seen(url):
print('Add URL to visit queue', url)
repo.add_to_visit(url)
def _crawl(url, get_html, extract_content):
print('Crawl ->', url)
html = get_html(url)
soup = BeautifulSoup(html, 'html.parser')
links = _extract_links(url, soup)
content = extract_content(url, soup)
return links, content
def _extract_links(url, soup):
return list({
urljoin(url, a.get('href'))
for a in soup.find_all('a')
if a.get('href') and not(a.get('rel') and 'nofollow' in a.get('rel'))
})
def _seen(url):
return repo.is_visited(url) or repo.is_queued(url)
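# Illustrative sketch (assumption, not part of the original module): one way to
# wire the crawl loop together. The start URL, the limit and the callback
# implementations below are placeholders supplied by the caller.
#
#     import requests
#
#     def get_html(url):
#         return requests.get(url).text
#
#     def extract_content(url, soup):
#         return soup.title.string if soup.title else ''
#
#     result = crawl('https://example.com', 0, 100, get_html, extract_content)
#     if result:
#         links, content = result
#         add_results_to_queue(links, lambda url: url.startswith('https://example.com'))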
|
the-stack_0_18561 | #
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Test database cleanup.
"""
import json
import multiprocessing
import os
import unittest
from shutil import copyfile, rmtree
from codechecker_api.codeCheckerDBAccess_v6.ttypes import RunFilter, Severity
from libtest import codechecker
from libtest import env
class TestDbCleanup(unittest.TestCase):
def setUp(self):
self.test_workspace = os.environ['TEST_WORKSPACE']
test_class = self.__class__.__name__
print('Running ' + test_class + ' tests in ' + self.test_workspace)
self.codechecker_cfg = env.import_codechecker_cfg(self.test_workspace)
self.test_dir = os.path.join(self.test_workspace, 'test_files')
try:
os.makedirs(self.test_dir)
except os.error:
# Directory already exists.
pass
cc_package = env.codechecker_package()
original_severity_cfg = os.path.join(cc_package,
'config',
'checker_severity_map.json')
self.workspace_severity_cfg = os.path.join(self.test_workspace,
'checker_severity_map.json')
copyfile(original_severity_cfg, self.workspace_severity_cfg)
self.codechecker_cfg['check_env']['CC_SEVERITY_MAP_FILE'] = \
self.workspace_severity_cfg
def __create_test_dir(self):
makefile = "all:\n\t$(CXX) -c a/main.cpp -o /dev/null\n"
project_info = {
"name": "hello",
"clean_cmd": "",
"build_cmd": "make"
}
source_main = """
// Test file for db_cleanup
#include "f.h"
int main() { f(0); }
"""
source_f = """
// Test file for db_cleanup
int f(int x) { return 1 / x; }
"""
os.makedirs(os.path.join(self.test_dir, 'a'))
with open(os.path.join(self.test_dir, 'Makefile'), 'w',
encoding="utf-8", errors="ignore") as f:
f.write(makefile)
with open(os.path.join(self.test_dir, 'project_info.json'), 'w',
encoding="utf-8", errors="ignore") as f:
json.dump(project_info, f)
with open(os.path.join(self.test_dir, 'a', 'main.cpp'), 'w',
encoding="utf-8", errors="ignore") as f:
f.write(source_main)
with open(os.path.join(self.test_dir, 'a', 'f.h'), 'w',
encoding="utf-8", errors="ignore") as f:
f.write(source_f)
def __rename_project_dir(self):
os.rename(os.path.join(self.test_dir, 'a'),
os.path.join(self.test_dir, 'b'))
makefile = "all:\n\t$(CXX) -c b/main.cpp -o /dev/null\n"
with open(os.path.join(self.test_dir, 'Makefile'), 'w',
encoding="utf-8", errors="ignore") as f:
f.write(makefile)
def __get_files_in_report(self):
run_filter = RunFilter()
run_filter.names = ['db_cleanup_test']
run_filter.exactMatch = True
codechecker.check_and_store(self.codechecker_cfg,
'db_cleanup_test',
self.test_dir)
runs = self._cc_client.getRunData(run_filter, None, 0, None)
run_id = runs[0].runId
reports \
= self._cc_client.getRunResults([run_id], 10, 0, [], None, None,
False)
details = self._cc_client.getReportDetails(reports[0].reportId)
files = set()
files.update([bp.fileId for bp in details.pathEvents])
files.update([bp.fileId for bp in details.executionPath])
file_ids = set()
for file_id in files:
file_data = self._cc_client.getSourceFileData(file_id, False, None)
if file_data.fileId is not None:
file_ids.add(file_data.fileId)
return file_ids
    def __check_severity_of_reports(self):
        """
        Check whether the reports in the database have the same severity
        levels as in the severity map config file.
        """
run_filter = RunFilter()
run_filter.names = ['db_cleanup_test']
run_filter.exactMatch = True
runs = self._cc_client.getRunData(run_filter, None, 0, None)
run_id = runs[0].runId
reports \
= self._cc_client.getRunResults([run_id], 10, 0, [], None, None,
False)
with open(self.workspace_severity_cfg, 'r',
encoding="utf-8", errors="ignore") as severity_cgf_file:
severity_map = json.load(severity_cgf_file)
for report in reports:
severity_id = severity_map.get(report.checkerId, 'UNSPECIFIED')
self.assertEqual(Severity._VALUES_TO_NAMES[report.severity],
severity_id)
def test_garbage_file_collection(self):
event = multiprocessing.Event()
event.clear()
self.codechecker_cfg['viewer_port'] = env.get_free_port()
env.export_test_cfg(self.test_workspace,
{'codechecker_cfg': self.codechecker_cfg})
env.enable_auth(self.test_workspace)
server_access = codechecker.start_server(self.codechecker_cfg, event)
server_access['viewer_port'] \
= self.codechecker_cfg['viewer_port']
server_access['viewer_product'] \
= self.codechecker_cfg['viewer_product']
codechecker.add_test_package_product(server_access,
self.test_workspace)
self._cc_client = env.setup_viewer_client(self.test_workspace)
self.assertIsNotNone(self._cc_client)
self.__create_test_dir()
files_in_report_before = self.__get_files_in_report()
# Checker severity levels.
        self.__check_severity_of_reports()
self.__rename_project_dir()
# Delete previous analysis report directory.
rmtree(self.codechecker_cfg['reportdir'])
files_in_report_after = self.__get_files_in_report()
event.set()
event.clear()
# Change severity level of core.DivideZero to LOW.
with open(self.workspace_severity_cfg, 'r+',
encoding='utf-8', errors='ignore') as severity_cgf_file:
severity_map = json.load(severity_cgf_file)
severity_map['core.DivideZero'] = 'LOW'
severity_cgf_file.seek(0)
severity_cgf_file.truncate()
severity_cgf_file.write(str(json.dumps(severity_map)))
self.codechecker_cfg['viewer_port'] = env.get_free_port()
env.export_test_cfg(self.test_workspace,
{'codechecker_cfg': self.codechecker_cfg})
codechecker.start_server(self.codechecker_cfg,
event)
codechecker.login(self.codechecker_cfg,
self.test_workspace,
'cc',
'test')
self._cc_client = env.setup_viewer_client(self.test_workspace)
self.assertIsNotNone(self._cc_client)
self.assertEqual(len(files_in_report_before & files_in_report_after),
0)
for file_id in files_in_report_before:
f = self._cc_client.getSourceFileData(file_id, False, None)
self.assertIsNone(f.fileId)
# Checker severity levels.
        self.__check_severity_of_reports()
event.set()
|
the-stack_0_18563 | """Added auditing table
Revision ID: e1ae9340410e
Revises: b1b588677b3c
Create Date: 2020-05-02 22:30:36.285095
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e1ae9340410e'
down_revision = 'b1b588677b3c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('auditing',
sa.Column('id', sa.String(), nullable=False),
sa.Column('created_date', sa.DateTime(), nullable=True),
sa.Column('description', sa.String(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_auditing_created_date'), 'auditing', ['created_date'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_auditing_created_date'), table_name='auditing')
op.drop_table('auditing')
# ### end Alembic commands ###
|
the-stack_0_18564 | import discord, json, asyncio
DISCORD_BOT_TOKEN = input("Enter Discord token: ")
client = discord.Client()
async def svetofor():
await client.wait_until_ready()
sleep = 0.7
print("Working...")
while not client.is_closed():
await client.change_presence(status=discord.Status.online)
await asyncio.sleep(sleep)
await client.change_presence(status=discord.Status.idle)
await asyncio.sleep(sleep)
await client.change_presence(status=discord.Status.dnd)
await asyncio.sleep(sleep)
@client.event
async def on_command_error(error, ctx):
pass
@client.event
async def on_ready():
client.loop.create_task(svetofor())
client.run(DISCORD_BOT_TOKEN, bot=False) |
the-stack_0_18565 | import os, sys, re, math, datetime
import numpy as np
from scipy.spatial import distance
import cv2
import Tkinter, tkMessageBox
from config import *
reload(sys)
sys.setdefaultencoding('utf-8')
class Photo:
""" A structure maintaining photo related attributes.
# Member objects
path: Photo file path.
img: CV2 image object.
shape: Image shape.
size: Image size.
info: A string of photo file info (file name, size, last modified time).
feature: Feature vector.
"""
def __init__(self, path):
self.path = path
self.img = cv2.imread(path)
if self.img is not None:
self.shape = self.img.shape
self.size = self.img.size
self.info = '\n'.join(["File name:", self.path.split('/')[-1],'',
"File size (kB):", str(round(float(os.stat(self.path).st_size)/1000, 1)),'',
"Last modified:", str(datetime.datetime.fromtimestamp(os.stat(self.path).st_ctime)).split('.')[0]])
else:
self.shape = None
self.size = None
self.info = ""
self.feature = None
class Database:
""" Load and process photos from a directory.
# Member objects
sift (static): A CV2 SIFT operator object.
DIST_THRESH (static): photos with distance < DIST_THRESH are considered similar.
progress_bar: A ttk.Progressbar object for displaying progress.
crawler: A Python generator, emits a batch of similar photos per request.
"""
sift = cv2.xfeatures2d.SIFT_create()
DIST_THRESH = SIFT_THRESH
crawler = None
def __init__(self, progress_bar):
self.progress_bar = progress_bar
def load(self, path):
if path:
self.crawler = self.duplicatedPhotoGenerator(path)
else:
self.crawler = None
def getNextDuplicatedBatch(self):
if self.crawler is not None:
return self.crawler.next()
def isSimilarPhotos(self, p1, p2):
"""
        - If the 2 pictures have the same size, use the L1 norm as first-pass filtering.
- Otherwise:
1. Extract SIFT feature points for both photos.
2. Run k-means clustering over all SIFT feature points from both
photos.
3. Collect the distribution (histogram) of feature points over
different clusters as its signature.
            4. If the cosine distance between the 2 histograms is below the threshold,
then similar.
# Arguments:
p1: The 1st photo.
p2: The 2nd photo.
# Returns:
True if 2 photos are similar; False otherwise.
"""
# 1st pass: L1 norm
if p1.shape==p2.shape:
dist_l1 = cv2.norm(p1.img, p2.img, cv2.NORM_L1)/p1.size
if dist_l1 < L1_SAME_PHOTO_THRESH:
return True
if dist_l1 > L1_DIFFERENT_PHOTO_THRESH:
return False
# 2nd pass: SIFT + K-means
if p1.feature is None:
gray = cv2.cvtColor(p1.img, cv2.COLOR_BGR2GRAY)
keypoint, p1.feature = self.sift.detectAndCompute(gray, None)
if p2.feature is None:
gray = cv2.cvtColor(p2.img, cv2.COLOR_BGR2GRAY)
keypoint, p2.feature = self.sift.detectAndCompute(gray, None)
compactness,labels,centers = cv2.kmeans(np.concatenate((p1.feature,p2.feature)), 16, None, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_MAX_ITER, 1, 10), attempts=1, flags=cv2.KMEANS_PP_CENTERS)
hist1 = [0 for i in range(16)]
hist2 = [0 for i in range(16)]
for l in labels[:len(p1.feature), 0]: hist1[l]+=1
for l in labels[len(p1.feature):, 0]: hist2[l]+=1
dist = distance.cosine(hist1, hist2)
return (dist<self.DIST_THRESH)
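    # Illustrative sketch (assumption, not part of the original class): comparing
    # two photos with the two-pass check above. The file paths and the
    # progress_bar object are placeholders supplied by the caller.
    #
    #     db = Database(progress_bar)
    #     p1 = Photo('/photos/IMG_0001.jpg')
    #     p2 = Photo('/photos/IMG_0002.jpg')
    #     if p1.img is not None and p2.img is not None and db.isSimilarPhotos(p1, p2):
    #         print('similar photos')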
def duplicatedPhotoGenerator(self, path):
""" Iterate over all photos in directory. Yield similar photos as an array of Photo objects. """
prev_photo = None
self.duplicated_batch = []
self.progress_bar.start()
total_num_files = len(os.popen("ls %s/*" % re.escape(path)).read().splitlines())
for idx, photo_file in enumerate(os.popen("ls %s/*" % re.escape(path)).read().splitlines()):
self.progress_bar["value"] = 100.0*idx/total_num_files
self.progress_bar.update()
photo = Photo(photo_file)
if photo.img is None:
continue
if prev_photo is not None and self.isSimilarPhotos(prev_photo, photo):
self.duplicated_batch.append(photo)
else:
if len(self.duplicated_batch)>1:
self.progress_bar.stop()
yield self.duplicated_batch
self.progress_bar.start()
self.duplicated_batch = [photo]
prev_photo = photo
self.progress_bar.stop()
if len(self.duplicated_batch)>1: # the last batch
yield self.duplicated_batch
raise StopIteration
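# Illustrative sketch (assumption, not part of the original module): consuming
# the similar-photo batches until the generator is exhausted. The directory
# path and the progress_bar object are placeholders.
#
#     db = Database(progress_bar)
#     db.load('/photos/holiday')
#     try:
#         while True:
#             batch = db.getNextDuplicatedBatch()
#             print('Similar photos: ' + ', '.join(p.path for p in batch))
#     except StopIteration:
#         pass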
|
the-stack_0_18566 | """Test event comparison and reporting."""
from meetup2xibo.log_summarizer.event import Event
import pytest
from hypothesis import given, assume, example
import hypothesis.strategies as st
from datetime import datetime
SAMPLE_MEETUP_ID = 'qlpqsqyzhbqb'
SAMPLE_NAME = 'Arduino User Group'
SAMPLE_LOCATION = 'Conference Room 3'
SAMPLE_START_TIME = '2019-05-12 15:00:00'
SAMPLE_END_TIME = '2019-05-12 17:00:00'
MINIMUM_TEST_DATE = datetime(2019, 1, 1, 0, 0, 0)
MAXIMUM_TEST_DATE = datetime(2029, 12, 31, 23, 59, 59)
def iso_format(the_date_time):
"""Format a date/time in ISO YYYY-MM-DD hh:mm:ss format."""
return the_date_time.strftime('%Y-%m-%d %H:%M:%S')
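# For example (illustrative): iso_format(datetime(2019, 5, 12, 15, 0, 0))
# returns '2019-05-12 15:00:00', the same format as SAMPLE_START_TIME above.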
typical_dates = st.datetimes(min_value = MINIMUM_TEST_DATE, max_value = MAXIMUM_TEST_DATE)
iso_dates = typical_dates.map(iso_format)
#@pytest.fixture
#def crud_lister():
# """Return an event CRUD log line lister."""
# return CrudLister()
def make_event(
name=SAMPLE_NAME,
location=SAMPLE_LOCATION,
start_time=SAMPLE_START_TIME,
end_time=SAMPLE_END_TIME,
meetup_id=SAMPLE_MEETUP_ID):
"""Make an event with default sample values."""
return Event(name, start_time, end_time, meetup_id, location)
def test_url():
"""Test generating a URL from an event."""
event = make_event()
assert event.url == "https://www.meetup.com/NOVA-Makers/events/qlpqsqyzhbqb/"
def test_equals_identical():
"""Test comparing an event with it self."""
event = make_event()
assert event == event
def test_equals_same():
"""Test comparing events with the same value."""
event1 = make_event()
event2 = make_event()
assert event1 == event2
def test_equals_different_names():
"""Test comparing events with different names."""
event1 = make_event()
event2 = make_event(name="foo")
assert event1 != event2
def test_equals_different_locations():
"""Test comparing events with different locations."""
event1 = make_event()
event2 = make_event(location="foo")
assert event1 != event2
def test_equals_different_start_times():
"""Test comparing events with different start_times."""
event1 = make_event()
event2 = make_event(start_time="2019-05-12 10:00:00")
assert event1 != event2
def test_equals_different_end_times():
"""Test comparing events with different end_times."""
event1 = make_event()
event2 = make_event(end_time="2019-05-12 19:00:00")
assert event1 != event2
def test_equals_different_meetup_ids():
"""Test comparing events with different meetup IDs."""
event1 = make_event()
event2 = make_event(meetup_id="zvbxrpl")
assert event1 != event2
def test_differences_different_names():
"""Test listing differences for events with different names."""
event1 = make_event()
event2 = make_event(name="foo")
assert [('name', 'Arduino User Group', 'foo')] == event1.differences(event2)
def test_differences_different_locations():
"""Test listing differences for events with different locations."""
event1 = make_event()
event2 = make_event(location="foo")
assert [('location', 'Conference Room 3', 'foo')] == event1.differences(event2)
def test_differences_different_start_times():
"""Test listing differences for events with different start_times."""
event1 = make_event()
event2 = make_event(start_time="2019-05-12 10:00:00")
assert [('start time', '2019-05-12 15:00:00', '2019-05-12 10:00:00')] == event1.differences(event2)
def test_differences_different_end_times():
"""Test listing differences for events with different end_times."""
event1 = make_event()
event2 = make_event(end_time="2019-05-12 19:00:00")
assert [('end time', '2019-05-12 17:00:00', '2019-05-12 19:00:00')] == event1.differences(event2)
def test_differences_different_meetup_ids():
"""Test listing differences for events with different meetup IDs."""
event1 = make_event()
event2 = make_event(meetup_id="zvbxrpl")
assert [('Meetup ID', 'qlpqsqyzhbqb', 'zvbxrpl')] == event1.differences(event2)
def test_differences_multiple():
"""Test listing differences for events with multiple changes."""
event1 = make_event()
event2 = make_event(location="foo", start_time="2019-05-12 10:00:00")
expected_differences = [
('location', 'Conference Room 3', 'foo'),
('start time', '2019-05-12 15:00:00', '2019-05-12 10:00:00')
]
assert expected_differences == event1.differences(event2)
def test_equals_non_event():
"""Test comparing an event with a non-event."""
event = make_event()
assert event != "foo"
def test_hash_identical():
"""Test that an event hash is always the same."""
event = make_event()
assert hash(event) == hash(event)
def test_hash_same():
"""Test that two events with the same values hash the same."""
event1 = make_event()
event2 = make_event()
assert hash(event1) == hash(event2)
@given(st.text(max_size=15))
def test_hash_different_names(other_name):
"""Test hashing events with different names."""
assume(other_name != SAMPLE_NAME)
event1 = make_event()
event2 = make_event(name=other_name)
assert hash(event1) != hash(event2)
@given(st.text(max_size=15))
def test_hash_different_locations(other_location):
"""Test hashing events with different locations."""
assume(other_location != SAMPLE_LOCATION)
event1 = make_event()
event2 = make_event(location=other_location)
assert hash(event1) != hash(event2)
@given(iso_dates)
def test_hash_different_start_times(other_start_time):
"""Test hashing events with different start_times."""
assume(other_start_time != SAMPLE_START_TIME)
event1 = make_event()
event2 = make_event(start_time=other_start_time)
assert hash(event1) != hash(event2)
@given(iso_dates)
def test_hash_different_end_times(other_end_time):
"""Test hashing events with different end_times."""
assume(other_end_time != SAMPLE_END_TIME)
event1 = make_event()
event2 = make_event(end_time=other_end_time)
assert hash(event1) != hash(event2)
@given(st.text(min_size=8, max_size=10))
def test_hash_different_meetup_ids(other_meetup_id):
"""Test hashing events with different meetup IDs."""
assume(other_meetup_id != SAMPLE_MEETUP_ID)
event1 = make_event()
event2 = make_event(meetup_id=other_meetup_id)
assert hash(event1) != hash(event2)
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4 autoindent
|
the-stack_0_18567 | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
from MObject import MObject
class Bullet(MObject):
def __init__(self, posX, posY):
self.mObString = [["b"]]
self.height = 1
self.width = 1
self.posX = posX
self.posY = posY
self.speedX = 0
self.speedY = -4
|