function (string, lengths 11–56k) | repo_name (string, lengths 5–60) | features (sequence) |
---|---|---|
def __init__(self, key, child_proxy):
try:
self._key = int(key)
except ValueError:
self._key = key
self._child_proxy = child_proxy | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def __init__(self):
self._cache = {}
self._bucket_client = None
self._param_client = None
self._secret_client = None | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def get_file_content(self, filepath):
cache_key = ("FILE", filepath)
def getter():
with open(filepath, "r") as f:
return f.read()
return self._get_or_create_cached_value(cache_key, getter) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def getter():
return self._secret_client.get(name) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def get_bucket_file(self, key):
cache_key = ("BUCKET_FILE", key)
if not self._bucket_client:
self._bucket_client = get_bucket_client()
def getter():
return self._bucket_client.download_to_tmpfile(key)
return self._get_or_create_cached_value(cache_key, getter) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def getter():
return self._param_client.get(key) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def __init__(self, path=None, resolver=None):
self._path = path or ()
if not resolver:
resolver = Resolver()
self._resolver = resolver | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def _from_python(self, key, value):
new_path = self._path + (key,)
if isinstance(value, dict):
value = self.custom_classes.get(new_path, ConfigDict)(value, new_path)
elif isinstance(value, list):
value = self.custom_classes.get(new_path, ConfigList)(value, new_path)
elif isinstance(value, str):
match = self.PROXY_VAR_RE.match(value)
if match:
value = self._make_proxy(key, match)
return value | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def __len__(self):
return len(self._collection) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def __setitem__(self, key, value):
self._collection[key] = self._from_python(key, value) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def __init__(self, config_l, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = []
for key, value in enumerate(config_l):
self._collection.append(self._from_python(str(key), value)) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def __iter__(self):
for element in self._collection:
yield self._to_python(element) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def __init__(self, config_d, path=None, resolver=None):
super().__init__(path=path, resolver=resolver)
self._collection = {}
for key, value in config_d.items():
self._collection[key] = self._from_python(key, value) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def get(self, key, default=None):
try:
value = self[key]
except KeyError:
value = self._to_python(default)
return value | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def keys(self):
return self._collection.keys() | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def items(self):
for key, value in self._collection.items():
yield key, self._to_python(value) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def setdefault(self, key, default=None):
return self._collection.setdefault(key, self._from_python(key, default)) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def popitem(self):
key, value = self._collection.popitem()
return key, self._to_python(value) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def update(self, *args, **kwargs):
chain = []
for arg in args:
if isinstance(arg, dict):
iterator = arg.items()
else:
iterator = arg
chain = itertools.chain(chain, iterator)
if kwargs:
chain = itertools.chain(chain, kwargs.items())
# iterate over the accumulated chain (kwargs included), not just the last positional arg
for key, value in chain:
self._collection[key] = self._from_python(key, value) | zentralopensource/zentral | [
671,
87,
671,
23,
1445349783
] |
def get_schema(self):
"""Returns the set YAML schema for the metric class.
Returns:
YAML schema of the metrics type.
"""
return self._schema | kubeflow/pipelines | [
3125,
1400,
3125,
892,
1526085107
] |
def __init__(self, schema_file: str):
self._schema = artifact_utils.read_schema_file(schema_file)
self._type_name, self._metric_fields = artifact_utils.parse_schema(
self._schema)
self._values = {} | kubeflow/pipelines | [
3125,
1400,
3125,
892,
1526085107
] |
def __getattr__(self, name: str) -> Any:
"""Custom __getattr__ to allow access to metrics schema fields."""
if name not in self._metric_fields:
raise AttributeError('No field: {} in metrics.'.format(name))
return self._values[name] | kubeflow/pipelines | [
3125,
1400,
3125,
892,
1526085107
] |
def __init__(self):
super().__init__('confidence_metrics.yaml')
self._initialized = True | kubeflow/pipelines | [
3125,
1400,
3125,
892,
1526085107
] |
def __init__(self):
super().__init__('confusion_matrix.yaml')
self._matrix = [[]]
self._categories = []
self._initialized = True | kubeflow/pipelines | [
3125,
1400,
3125,
892,
1526085107
] |
def log_row(self, row_category: str, row: List[int]):
"""Logs a confusion matrix row.
Args:
row_category: Category to which the row belongs.
row: List of integers specifying the values for the row.
Raises:
ValueError: If row_category is not in the list of categories set in
set_categories or size of the row does not match the size of
categories.
"""
if row_category not in self._categories:
raise ValueError('Invalid category: {} passed. Expected one of: {}'.\
format(row_category, self._categories))
if len(row) != len(self._categories):
raise ValueError('Invalid row. Expected size: {} got: {}'.\
format(len(self._categories), len(row)))
self._matrix[self._categories.index(row_category)] = row | kubeflow/pipelines | [
3125,
1400,
3125,
892,
1526085107
] |
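A minimal, self-contained sketch of the validate-then-assign pattern that log_row implements above; the category list, matrix, and call values are invented for illustration and are not part of the kubeflow/pipelines API.

```python
# Illustrative only: a standalone mirror of the validation in log_row above.
# The real class tracks categories/matrix as instance state; here they are
# plain module-level variables with made-up values.
categories = ['cat', 'dog']
matrix = [[0] * len(categories) for _ in categories]

def log_row(row_category, row):
    if row_category not in categories:
        raise ValueError('Invalid category: {} passed. Expected one of: {}'.format(
            row_category, categories))
    if len(row) != len(categories):
        raise ValueError('Invalid row. Expected size: {} got: {}'.format(
            len(categories), len(row)))
    matrix[categories.index(row_category)] = row

log_row('cat', [10, 2])  # 10 true 'cat', 2 misclassified as 'dog'
log_row('dog', [1, 15])
```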
def __init__(self,inp='INP.mcnp'):
""" Wrapped Cylinder MCNPX Model of RPM8
Keywords:
inp -- desired name of the input deck
"""
# Material dictionary for the moderator, light guide, and detector
self.material = {'Moderator':None,'Detector':None,'LightGuide':None}
self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector
self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA
self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HDPE | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def __str__(self):
s = '\tMCNPX Model of Wrapped Cylinder\n'
s += '\t Cell Number Starts: {0:d}\n'.format(self.CellStartNum)
s += '\t Surface Number Starts: {0:d}\n'.format(self.SurfaceStartNum)
return s | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def setMaterial(self,massFraction,polymer):
"""
Sets the detector material
"""
M = Materials()
num = self.material['Detector']['mt']
if polymer == 'PS':
self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)
elif polymer == 'PEN':
self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)
else:
raise ValueError('Polymer {} is not in the material database'.format(polymer)) | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def calculateDetectorArea(self):
"""
Calculates the area used in a detector
"""
area = 0.0
r = self.geoParam['CylinderLightGuideRadius']
while(r + self.geoParam['DetectorThickness'] < self.geoParam['CylinderRadius']):
area -= math.pow(r,2)
r += self.geoParam['DetectorThickness']
area += math.pow(r,2)
r += self.geoParam['DetectorSpacing']
return math.pi*area | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
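For intuition, calculateDetectorArea sums one annulus per detector shell, pi * (r_outer^2 - r_inner^2). The sketch below reproduces that sum with invented geometry values; the real ones come from self.geoParam, so every number here is a placeholder.

```python
# Hedged sketch of the annulus sum in calculateDetectorArea; all numbers are
# placeholders, not values from murffer/DetectorSim.
import math

light_guide_radius = 2.5   # stands in for geoParam['CylinderLightGuideRadius']
detector_thickness = 0.2   # stands in for geoParam['DetectorThickness']
detector_spacing = 0.5     # stands in for geoParam['DetectorSpacing']
cylinder_radius = 4.0      # stands in for geoParam['CylinderRadius']

area = 0.0
r = light_guide_radius
while r + detector_thickness < cylinder_radius:
    inner, outer = r, r + detector_thickness
    area += math.pi * (outer ** 2 - inner ** 2)  # area of this detector shell
    r = outer + detector_spacing
print('total detector area: {:.3f} cm^2'.format(area))
```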
def runModel(self):
"""
Runs the model by submission to Torque / Maui
"""
qsub= subprocess.check_output('which qsub',shell=True).strip()
cmd = '#!/bin/bash\n'
cmd += '#PBS -N {0}\n#PBS -V\n#PBS -q gen1\n#PBS -l nodes=1:ppn=1\n'
cmd += 'cd $PBS_O_WORKDIR\nmpirun mcnpx inp={1} name={2}\n'
job = cmd.format('Job_RPMCylinder',self.inp,self.name)
with open('qsub','w') as o:
o.write(job)
subprocess.call(qsub+' qsub',shell=True)
subprocess.call('rm qsub',shell=True) | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def createInputDeck(self,cylinderPositions,inp=None,name=None):
""" createInputDeck
Creates an input deck of the given geometry
"""
self.inp = inp
self.name = name
if not inp:
self.inp = 'INP_Cylinder.mcnp'
if not name:
self.name = 'OUT_Cylinder.'
oFile = self.inp | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def getRunString(self):
runString ='c ------------------------------ Run Info ---------------------------------\n'
runString +='nps 1E6 \n'
runString +='IMP:N 1 {0:d}R 0 $ Particle Importances within cells \n'
runString +='c -------------- Output --------------------------------------------------\n'
runString +='PRDMP j j 1 $ Write a MCTAL File \n'
runString +='PRINT 40 \n'
runString +='c ------------------------------ Physics ---------------------------------\n'
runString +='MODE N \n'
runString +='PHYS:N 100 4j -1 2 \n'
runString +='CUT:N 2j 0 0 \n'
return runString | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def getMaterialString(self):
"""
Returns the MCNPX material string
"""
matString = 'm10 1001.70c -0.080538 $Lucite (PMMA / Plexiglass) rho = 1.19 g/cc\n'
matString += ' 6012.70c -0.599848 8016.70c -0.319614 \n'
matString += 'm204 7014.70c -0.755636 $air (US S. Atm at sea level) rho = 0.001225 \n'
matString += ' 8016.70c -0.231475 18036.70c -3.9e-005 18038.70c -8e-006\n'
matString += ' 18040.70c -0.012842 \n'
matString += 'm5 98252.66c 1 $ Cf-252, rho =15.1 g/cc wiki \n'
matString += 'm406 82204.70c -0.013781 $Lead, \n'
matString += ' 82206.70c -0.239557 82207.70c -0.220743 82208.70c -0.525919\n'
matString += 'm456 1001.70c -0.143716 $Polyethylene - rho = 0.93 g/cc \n'
matString += ' 6000.70c -0.856284 \n'
matString += 'm488 14028.70c -0.009187 $Steel, Stainless 316 rho = 7.92 \n'
matString += ' 14029.70c -0.000482 14030.70c -0.000331 24050.70c -0.007095\n'
matString += ' 24052.70c -0.142291 24053.70c -0.016443 24054.70c -0.004171\n'
matString += ' 25055.70c -0.02 26054.70c -0.037326 26056.70c -0.601748\n'
matString += ' 26057.70c -0.014024 26058.70c -0.001903 28058.70c -0.080873\n'
matString += ' 28060.70c -0.031984 28061.70c -0.001408 28062.70c -0.004546\n'
matString += ' 28064.70c -0.001189 42092.70c -0.003554 42094.70c -0.002264\n'
matString += ' 42095.70c -0.003937 42096.70c -0.004169 42097.70c -0.002412\n'
matString += ' 42098.70c -0.006157 42100.70c -0.002507 \n'
matString += 'mt3 poly.01t \n'
matString += 'mt456 poly.01t \n'
matString += 'mt10 poly.01t \n'
return matString | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def RunCylinder(l,p,cylinderPositions):
"""
Runs an mcnpx model of the cylinder of loading l, polymer p, with
cylinder positions cylinderPositions.
Keywords:
l - loading of the films
p - polymer
cylinderPositions - the cylinder positons
"""
# Creating input and output deck names
posString = ''
for pos in cylinderPositions:
posString += '{:2.1f}-'.format(pos[0])
posString = posString.rstrip('-')
inp='Cyl_{}LiF_{}_{}.mcnp'.format(int(l*100),p,posString)
name='OUTCyl_{}LiF_{}_{}.'.format(int(l*100),p,posString)
print inp
# Creating and running the model
m = CylinderRPM()
m.createSurfaceGeo()
m.setMaterial(l,p)
m.createDetectorCylinder()
m.createInputDeck(cylinderPositions,inp,name)
m.runModel() | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def PositionOptimization(loading,polymers,positions):
"""
Runs a matrix of loading, polymers and positions
"""
for l in loading:
for p in polymers:
for pos in positions:
RunCylinder(l,p,pos) | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def createInputPlotDecks():
positions = list()
positions.append(((4.23,10.16),(4.23,-10.16)))
positions.append(((4.23,7.625),(4.23,0),(4.23,-7.625)))
#positions.append(((4.23,9.15),(4.23,3.05),(4.23,-3.05),(4.23,-9.15)))
for pos in positions:
m = CylinderRPM()
m.createSurfaceGeo()
m.createDetectorCylinder()
inp='Cylinder_{}.mcnp'.format(len(pos))
name='OUTCylinder_{}.'.format(len(pos))
m.createInputDeck(pos,inp,name) | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def extractRunInfo(filename):
"""
Extracts the loading and polymer from the file name
"""
tokens = filename.split('_')
loading = tokens[1].strip('LiF')
polymer = tokens[2].strip('.m')
return (float(loading)/100, polymer) | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def GetInteractionRate(f,tallyNum=54,src=2.3E3):
"""
Returns the interaction rate of the mctal file
"""
m = mctal.MCTAL(f)
t = m.tallies[tallyNum]
return (t.data[-1]*src,t.errors[-1]*t.data[-1]*src) | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def summerize():
files = glob.glob('OUTCylinder*.m')
s = 'Polymer, loading, mass Li, count rate, error, count rate per mass\n'
for f in files:
runParam = extractRunInfo(f)
massLi = computeMassLi(runParam[1],runParam[0])
countRate = GetInteractionRate(f)
s += '{}, {:5.2f} , {:5.3f} , {:5.3f} , {:4.2f} , {:5.3f}\n'.format(runParam[1].ljust(7),runParam[0],massLi,countRate[0],countRate[1],countRate[0]/massLi)
print s | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def cleanup(path):
files = glob.glob(path+'/OUTCyl_*.m')
for f in files:
head,tail = os.path.split(f)
numCylinders = tail.count('-')+1
if numCylinders == 3:
newdir = 'ThreeCylPosOpt'
elif numCylinders == 4:
newdir = 'FourCylPosOpt'
elif numCylinders == 5:
newdir = 'FiveCylPosOpt'
os.rename(f,os.path.join(newdir,tail)) | murffer/DetectorSim | [
7,
5,
7,
1,
1380841989
] |
def __init__(self, version=None):
"""
Initialize a new Postgres object
:param version: version to use. If it is not set, use the latest version in the .pyembedpg directory; if none is
present, use the latest remote version. Use 'local' to use the postgres version installed locally on the machine
:return:
"""
home_dir = expanduser("~")
self._cache_dir = os.path.join(home_dir, PyEmbedPg.CACHE_DIRECTORY)
# if version is not specified, check local last version otherwise get last remote version
self.version = version
if not self.version:
self.version = self.get_latest_local_version()
if not self.version:
self.version = self.get_latest_remote_version()
if version == PyEmbedPg.LOCAL_VERSION:
full_path = spawn.find_executable('postgres')
if not full_path:
raise PyEmbedPgException('Cannot find postgres executable. Make sure it is in your path')
self._version_path = os.path.dirname(full_path)
else:
self._version_path = os.path.join(self._cache_dir, self.version) | Simulmedia/pyembedpg | [
31,
8,
31,
2,
1437597497
] |
def get_latest_remote_version(self):
"""
Return the latest version on the Postgres FTP server
:return: latest version installed locally on the Postgres FTP server
"""
response = requests.get(PyEmbedPg.DOWNLOAD_BASE_URL)
last_version_match = list(re.finditer('>v(?P<version>[^<]+)<', response.content.decode()))[-1]
return last_version_match.group('version') | Simulmedia/pyembedpg | [
31,
8,
31,
2,
1437597497
] |
def download_and_unpack(self):
# if the version we want to download already exists, do not do anything
if self.check_version_present():
logger.debug('Version {version} already present in cache'.format(version=self.version))
return
url = PyEmbedPg.DOWNLOAD_URL.format(version=self.version)
response = requests.get(url, stream=True)
if not response.ok:
raise PyEmbedPgException('Cannot download file {url}. Error: {error}'.format(url=url, error=response.content))
with tempfile.NamedTemporaryFile() as fd:
logger.debug('Downloading {url}'.format(url=url))
for block in response.iter_content(chunk_size=4096):
fd.write(block)
fd.flush()
# Unpack the file into temporary dir
temp_dir = tempfile.mkdtemp()
source_dir = os.path.join(temp_dir, 'postgresql-{version}'.format(version=self.version))
try:
# Can't use with context directly because of python 2.6
with closing(tarfile.open(fd.name)) as tar:
tar.extractall(temp_dir)
os.system(
'sh -c "cd {path} && ./configure --prefix={target_dir} && make install && cd contrib && make install"'.format(
path=source_dir,
target_dir=self._version_path)
)
finally:
shutil.rmtree(temp_dir, ignore_errors=True) | Simulmedia/pyembedpg | [
31,
8,
31,
2,
1437597497
] |
def __init__(self, bin_dir, ports):
self._ports = ports
self._postgres_cmd = os.path.join(bin_dir, 'postgres')
# init db
init_db = os.path.join(bin_dir, 'initdb')
self._temp_dir = tempfile.mkdtemp()
command = init_db + ' -D ' + self._temp_dir + ' -U ' + DatabaseRunner.ADMIN_USER
logger.debug('Running command: {command}'.format(command=command))
os.system(command)
# overwrite pg_hba.conf to only allow local access with password authentication
with open(os.path.join(self._temp_dir, 'pg_hba.conf'), 'w') as fd:
fd.write(
'# TYPE DATABASE USER ADDRESS METHOD\n'
'# "local" is for Unix domain socket connections only\n'
'local all {admin} trust\n'
'local all all md5\n'
'host all {admin} 127.0.0.1/32 trust\n'
'host all all 127.0.0.1/32 md5\n'
'# IPv6 local connections:\n'
'host all {admin} ::1/128 trust\n'
'host all all ::1/128 md5\n'.format(admin=DatabaseRunner.ADMIN_USER)
)
def can_connect(port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock.connect_ex(('127.0.0.1', port)) != 0
self.running_port = next((port for port in ports if can_connect(port)), None)
if self.running_port is None:
raise PyEmbedPgException('Cannot run postgres on any of these ports [{ports}]'.format(ports=', '.join((str(p) for p in ports))))
self.proc = Popen([self._postgres_cmd, '-D', self._temp_dir, '-p', str(self.running_port)])
logger.debug('Postgres started on port {port}...'.format(port=self.running_port))
# Loop until the server is started
logger.debug('Waiting for Postgres to start...')
start = time.time()
while time.time() - start < DatabaseRunner.TIMEOUT:
try:
with psycopg2.connect(database='postgres', user=DatabaseRunner.ADMIN_USER, host='localhost', port=self.running_port):
break
except OperationalError:
pass
time.sleep(0.1)
else:
raise PyEmbedPgException('Cannot start postgres after {timeout} seconds'.format(timeout=DatabaseRunner.TIMEOUT)) | Simulmedia/pyembedpg | [
31,
8,
31,
2,
1437597497
] |
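The port selection above relies on socket.connect_ex returning 0 only when something is already listening; here is a hedged, framework-free sketch of that probe (function and variable names are invented, not pyembedpg's).

```python
import socket

def port_is_free(port, host='127.0.0.1'):
    # connect_ex returns 0 on a successful connection, i.e. the port is taken;
    # any non-zero error code means nothing is listening there.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        return sock.connect_ex((host, port)) != 0
    finally:
        sock.close()

candidate_ports = [5432, 5433, 5434]
free_port = next((p for p in candidate_ports if port_is_free(p)), None)
print('first free port:', free_port)
```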
def __exit__(self, exc_type, exc_val, exc_tb):
self.shutdown() | Simulmedia/pyembedpg | [
31,
8,
31,
2,
1437597497
] |
def create_database(self, name, owner=None):
"""Create a new database
:param name: database name
:type name: basestring
:param owner: username of the owner or None if unspecified
:type owner: basestring
"""
with psycopg2.connect(database='postgres', user=DatabaseRunner.ADMIN_USER, host='localhost', port=self.running_port) as conn:
conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
sql = 'CREATE DATABASE {name} ' + ('WITH OWNER {owner}' if owner else '')
cursor.execute(sql.format(name=name, owner=owner)) | Simulmedia/pyembedpg | [
31,
8,
31,
2,
1437597497
] |
def _is_multiclass(context):
"""Returns True iff we're given a multiclass context."""
if not isinstance(context, subsettable_context.SubsettableContext):
raise TypeError("context must be a SubsettableContext object")
raw_context = context.raw_context
return raw_context.num_classes is not None | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def update_ops_fn(denominator_bound_variable, structure_memoizer,
value_memoizer):
"""Projects denominator_bound onto the feasible region."""
del value_memoizer
denominator_bound = tf.maximum(
structure_memoizer[defaults.DENOMINATOR_LOWER_BOUND_KEY],
tf.minimum(1.0, denominator_bound_variable))
return [denominator_bound_variable.assign(denominator_bound)] | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def _ratio(numerator_expression, denominator_expression):
"""Creates an `Expression` for a ratio.
The result of this function is an `Expression` representing:
numerator / denominator_bound
where denominator_bound satisfies the following:
denominator_lower_bound <= denominator_bound <= 1
The resulting `Expression` will include both the implicit denominator_bound
slack variable, and implicit constraints.
Args:
numerator_expression: `Expression`, the numerator of the ratio.
denominator_expression: `Expression`, the denominator of the ratio.
Returns:
An `Expression` representing the ratio.
Raises:
TypeError: if either numerator_expression or denominator_expression is not
an `Expression`.
"""
return expression.BoundedExpression(
lower_bound=_ratio_bound(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression,
lower_bound=True,
upper_bound=False),
upper_bound=_ratio_bound(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression,
lower_bound=False,
upper_bound=True)) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def negative_prediction_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a negative prediction rate.
A negative prediction rate is the number of examples within the given context
on which the model makes a negative prediction, divided by the number of
examples within the context. For multiclass problems, the positive_class
argument, which tells us which class (or classes) should be treated as
positive, must also be provided.
Please see the docstrings of negative_prediction_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the negative prediction rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if positive_class is provided for a non-multiclass context, or
is *not* provided for a multiclass context. In the latter case, an error
will also be raised if positive_class is an integer outside the range
[0,num_classes), or is a collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.negative_prediction_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"negative_prediction_rate unless it's also given a "
"multiclass context")
return binary_rates.negative_prediction_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def accuracy_rate(context,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for an accuracy rate.
An accuracy rate is the number of examples within the given context on which
the model makes a correct prediction, divided by the number of examples within
the context.
Please see the docstrings of accuracy_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the accuracy rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass).
ValueError: if the context doesn't contain labels.
"""
if _is_multiclass(context):
return multiclass_rates.accuracy_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return binary_rates.accuracy_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def false_negative_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false negative rate.
A false negative rate is the number of positively-labeled examples within the
given context on which the model makes a negative prediction, divided by the
number of positively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_negative_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false negative rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_negative_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"false_negative_rate unless it's also given a multiclass "
"context")
return binary_rates.false_negative_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def true_negative_rate(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true negative rate.
A true negative rate is the number of negatively-labeled examples within the
given context on which the model makes a negative prediction, divided by the
number of negatively-labeled examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of true_negative_rate() in binary_rates.py and
multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true negative rate.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_negative_rate(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError("positive_class cannot be provided to "
"true_negative_rate unless it's also given a multiclass "
"context")
return binary_rates.true_negative_rate(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def false_negative_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a false negative proportion.
A false negative proportion is the number of positively-labeled examples
within the given context on which the model makes a negative prediction,
divided by the total number of examples within the context. For multiclass
problems, the positive_class argument, which tells us which class (or classes)
should be treated as positive, must also be provided.
Please see the docstrings of false_negative_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the false negative proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.false_negative_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"false_negative_proportion unless it's also given a multiclass "
"context")
return binary_rates.false_negative_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def true_negative_proportion(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression` for a true negative proportion.
A true negative proportion is the number of negatively-labeled examples within
the given context on which the model makes a negative prediction, divided by
the total number of examples within the context. For multiclass problems, the
positive_class argument, which tells us which class (or classes) should be
treated as positive, must also be provided.
Please see the docstrings of true_negative_proportion() in binary_rates.py
and multiclass_rates.py for further details.
Args:
context: `SubsettableContext`, the block of data to use when calculating the
rate. If this is a multiclass context, we'll calculate the multiclass
version of the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the true negative proportion.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
if _is_multiclass(context):
return multiclass_rates.true_negative_proportion(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
if positive_class is not None:
raise ValueError(
"positive_class cannot be provided to "
"true_negative_proportion unless it's also given a multiclass "
"context")
return binary_rates.true_negative_proportion(
context=context,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def precision(context,
positive_class=None,
penalty_loss=defaults.DEFAULT_PENALTY_LOSS,
constraint_loss=defaults.DEFAULT_CONSTRAINT_LOSS):
"""Creates an `Expression`s for precision.
A precision is the number of positively-labeled examples within the given
context on which the model makes a positive prediction, divided by the number
of examples within the context on which the model makes a positive prediction.
For multiclass problems, the positive_class argument, which tells us which
class (or classes) should be treated as positive, must also be provided.
Args:
context: multiclass `SubsettableContext`, the block of data to use when
calculating the rate.
positive_class: None for a non-multiclass problem. Otherwise, an int, the
index of the class to treat as "positive", *or* a collection of
num_classes elements, where the ith element is the probability that the
ith class should be treated as "positive".
penalty_loss: `MulticlassLoss`, the (differentiable) loss function to use
when calculating the "penalty" approximation to the rate.
constraint_loss: `MulticlassLoss`, the (not necessarily differentiable) loss
function to use when calculating the "constraint" approximation to the
rate.
Returns:
An `Expression` representing the precision.
Raises:
TypeError: if the context is not a SubsettableContext, either loss is not a
BinaryClassificationLoss (if the context is non-multiclass) or a
MulticlassLoss (if the context is multiclass). In the latter case, an
error will also be raised if positive_class is a non-integer number.
ValueError: if the context doesn't contain labels, or positive_class is
provided for a non-multiclass context, or is *not* provided for a
multiclass context. In the latter case, an error will also be raised if
positive_class is an integer outside the range [0,num_classes), or is a
collection not containing num_classes elements.
"""
numerator_expression, denominator_expression = precision_ratio(
context=context,
positive_class=positive_class,
penalty_loss=penalty_loss,
constraint_loss=constraint_loss)
return _ratio(
numerator_expression=numerator_expression,
denominator_expression=denominator_expression) | google-research/tensorflow_constrained_optimization | [
282,
49,
282,
12,
1548359328
] |
def lhs_node(self, row):
pass | PeachstoneIO/peachbox | [
14,
4,
14,
7,
1427099581
] |
def import_row(self, row):  # renamed: 'import' is a reserved word, and 'self' is added since the body uses it
self.lhs_node(row.user_id)
self.rhs_node(row.review_id)
self.partition_key(row.time) | PeachstoneIO/peachbox | [
14,
4,
14,
7,
1427099581
] |
def __init__(self):
self.build_model() | PeachstoneIO/peachbox | [
14,
4,
14,
7,
1427099581
] |
def fill_review_id(self, row, field):
user_id = row['user_id']
product_id = row['product_id']
true_as_of_seconds = row['time']
return unicode(hash(user_id+product_id+str(true_as_of_seconds))) | PeachstoneIO/peachbox | [
14,
4,
14,
7,
1427099581
] |
def get_version():
version = ''
with open('grimreaper.py', 'r') as fd:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError("Cannot find version's information")
return version | matee911/GrimReapersPie | [
1,
1,
1,
3,
1442078546
] |
def __init__(self, store_name, context) -> None:
self.topic = f'{context.application_id}-{store_name}-changelog'
self.context = context
self.partition = context.task_id.partition
self.record_collector = context.state_record_collector | wintoncode/winton-kafka-streams | [
313,
56,
313,
13,
1499849928
] |
def test_sim_updates(self, disc_lr, gen_lr):
# player order does not matter.
# the number of updates does not matter for simultaneous updates.
learning_rates = gan.GANTuple(disc=disc_lr, gen=gen_lr)
drift_coeffs = drift_utils.get_dd_coeffs(
None, True, learning_rates, num_updates=None)
self.assertEqual(drift_coeffs.disc.self_norm, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.disc.other_norm, 0.0)
self.assertEqual(drift_coeffs.disc.other_dot_prod, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.gen.self_norm, 0.5 * gen_lr)
self.assertEqual(drift_coeffs.gen.other_norm, 0.0)
self.assertEqual(drift_coeffs.gen.other_dot_prod, 0.5 * gen_lr) | deepmind/dd_two_player_games | [
4,
2,
4,
1,
1633120481
] |
def test_alt_updates(self, disc_lr, gen_lr):
learning_rates = gan.GANTuple(disc=disc_lr, gen=gen_lr)
num_updates = gan.GANTuple(disc=1, gen=1)
drift_coeffs = drift_utils.get_dd_coeffs(
drift_utils.PlayerOrder.disc_first, False, learning_rates,
num_updates=num_updates)
self.assertEqual(drift_coeffs.disc.self_norm, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.disc.other_norm, 0.0)
self.assertEqual(drift_coeffs.disc.other_dot_prod, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.gen.self_norm, 0.5 * gen_lr)
self.assertEqual(drift_coeffs.gen.other_norm, 0.0)
self.assertEqual(
drift_coeffs.gen.other_dot_prod,
0.5 * gen_lr * (1 - 2 * disc_lr / gen_lr)) | deepmind/dd_two_player_games | [
4,
2,
4,
1,
1633120481
] |
def test_alt_updates_change_player_order(self, disc_lr, gen_lr):
learning_rates = gan.GANTuple(disc=disc_lr, gen=gen_lr)
num_updates = gan.GANTuple(disc=1, gen=1)
drift_coeffs = drift_utils.get_dd_coeffs(
drift_utils.PlayerOrder.gen_first, False, learning_rates,
num_updates=num_updates)
self.assertEqual(drift_coeffs.disc.self_norm, 0.5 * disc_lr)
self.assertEqual(drift_coeffs.disc.other_norm, 0.0)
self.assertEqual(
drift_coeffs.disc.other_dot_prod,
0.5 * disc_lr * (1 - 2 * gen_lr / disc_lr))
self.assertEqual(drift_coeffs.gen.self_norm, 0.5 * gen_lr)
self.assertEqual(drift_coeffs.gen.other_norm, 0.0)
self.assertEqual(drift_coeffs.gen.other_dot_prod, 0.5 * gen_lr) | deepmind/dd_two_player_games | [
4,
2,
4,
1,
1633120481
] |
def managed_task(task_function):
"""Decorator to manage task methods.
This records the status of the task in an entity and raises the
deferred.PermanentTaskFailure exception to prevent tasks from repeating upon
failure. In such cases, the exception message is recorded to the entity.
Args:
task_function: function, to be managed by the decorator.
Returns:
Wrapped function.
Raises:
deferred.PermanentTaskFailure: if anything at all goes wrong.
"""
@functools.wraps(task_function)
def wrapper(*args, **kwargs):
"""Wrapper for managed task decorator."""
status_entity = bootstrap_status_model.BootstrapStatus.get_or_insert(
task_function.__name__)
status_entity.description = _TASK_DESCRIPTIONS.get(
task_function.__name__, task_function.__name__)
status_entity.timestamp = datetime.datetime.utcnow()
try:
task_function(*args, **kwargs)
status_entity.success = True
status_entity.details = None
status_entity.put()
except Exception as e:
status_entity.success = False
status_entity.details = '{} {}'.format(str(type(e)), str(e))
status_entity.put()
raise deferred.PermanentTaskFailure(
'Task {} failed; error: {}'.format(
task_function.__name__, status_entity.details))
return wrapper | google/loaner | [
167,
61,
167,
23,
1522269186
] |
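A minimal, framework-free sketch of the same wrap/record/re-raise pattern that managed_task applies; the dict and RuntimeError below stand in for the BootstrapStatus entity and deferred.PermanentTaskFailure, and none of these names come from google/loaner.

```python
import functools

task_status = {}  # stand-in for the BootstrapStatus datastore entity

def managed(task_function):
    @functools.wraps(task_function)
    def wrapper(*args, **kwargs):
        try:
            task_function(*args, **kwargs)
            task_status[task_function.__name__] = {'success': True, 'details': None}
        except Exception as e:  # record the failure, then stop retries
            task_status[task_function.__name__] = {'success': False, 'details': str(e)}
            raise RuntimeError('Task {} failed; error: {}'.format(
                task_function.__name__, e))
    return wrapper

@managed
def sample_task():
    raise ValueError('boom')

# Calling sample_task() records {'success': False, ...} and re-raises RuntimeError.
```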
def bootstrap_datastore_yaml(wipe=True, **kwargs):
"""Bootstraps arbitrary datastore entities from supplied YAML input.
Args:
wipe: bool, whether to wipe all existing datastore models for any model
contained in the YAML.
**kwargs: keyword args including a user_email with which to run the
datastore methods (required for BigQuery streaming).
"""
with open(
os.path.join(os.path.dirname(__file__), 'bootstrap.yaml')) as yaml_file:
datastore_yaml.import_yaml(yaml_file.read(), kwargs['user_email'], wipe) | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def bootstrap_chrome_ous(**kwargs):
"""Bootstraps Chrome device OUs.
Args:
**kwargs: keyword args including a user_email with which to run the
Directory API client methods (required for BigQuery streaming).
"""
logging.info('Requesting delegated admin for bootstrap')
client = directory.DirectoryApiClient(user_email=kwargs['user_email'])
for org_unit_name, org_unit_path in constants.ORG_UNIT_DICT.iteritems():
logging.info(
'Creating org unit %s at path %s ...', org_unit_name, org_unit_path)
if client.get_org_unit(org_unit_path):
logging.warn(_ORG_UNIT_EXISTS_MSG, org_unit_name)
else:
client.insert_org_unit(org_unit_path) | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def bootstrap_bq_history(**kwargs):
"""Bootstraps BigQuery history tables for archival purposes.
Args:
**kwargs: keyword args including a user_email with which to run the
Directory API client methods (required for BigQuery streaming).
"""
del kwargs # Unused, but comes by default.
client = bigquery.BigQueryClient()
client.initialize_tables() | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def bootstrap_load_config_yaml(**kwargs):
"""Loads config_defaults.yaml into datastore.
Args:
**kwargs: Unused, but required for bootstrap tasks.
"""
del kwargs # Unused, but comes by default.
config_defaults = utils.load_config_from_yaml()
for name, value in config_defaults.iteritems():
if name == 'bootstrap_started':
config_model.Config.set(name, config_model.Config.get(name), False)
else:
config_model.Config.set(name, value, False) | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def _run_function_as_task(all_functions_list, function_name, kwargs=None):
"""Runs a specific function and its kwargs as an AppEngine task.
Args:
all_functions_list: string list, A list with all function names that are
registered as bootstrap functions on the Loaner app.
function_name: string, A specific function that should be ran as a task.
kwargs: dict, Optional kwargs to be passed to the function that will run.
Returns:
The deferred task from AppEngine taskqueue.
Raises:
Error: if requested bootstrap method is not allowed or does not exist.
"""
logging.debug('Running %s as a task.', function_name)
function = all_functions_list.get(function_name)
if function is None:
raise Error(
'Requested bootstrap method {} does not exist.'.format(function_name))
if not kwargs:
kwargs = {}
kwargs['user_email'] = user.get_user_email()
return deferred.defer(function, **kwargs) | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def _is_new_deployment():
"""Checks whether this is a new deployment.
A '0.0' version number and a missing bootstrap_datastore_yaml task
status indicate that this is a new deployment. The latter check
is to support backward-compatibility with early alpha versions that did not
have a version number.
Returns:
True if this is a new deployment, else False.
"""
return (config_model.Config.get('running_version') == '0.0' and
not bootstrap_status_model.BootstrapStatus.get_by_id(
'bootstrap_datastore_yaml')) | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def is_update():
"""Checks whether the application is in a state requiring an update.
Returns:
True if an update is available and this is not a new installation.
"""
if _is_new_deployment():
return False
return version.LooseVersion(constants.APP_VERSION) > version.LooseVersion(
config_model.Config.get('running_version')) | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def is_bootstrap_started():
"""Checks to see if bootstrap has started.
Returns:
True if the bootstrap has started, else False.
"""
if (config_model.Config.get('bootstrap_started') and
config_model.Config.get('bootstrap_completed')):
# If bootstrap was completed indicate that it is no longer in progress.
config_model.Config.set('bootstrap_started', False)
return config_model.Config.get('bootstrap_started') | google/loaner | [
167,
61,
167,
23,
1522269186
] |
def test_equality():
assert cirq_google.PhysicalZTag() == cirq_google.PhysicalZTag()
assert hash(cirq_google.PhysicalZTag()) == hash(cirq_google.PhysicalZTag()) | quantumlib/Cirq | [
3678,
836,
3678,
314,
1513294909
] |
def __init__(self):
"""Initialize class."""
self.verbose = False
self.home = os.getcwd() | major/monitorstack | [
1,
1,
1,
1,
1487693141
] |
def vlog(self, msg, *args):
"""Log a message to stderr only if verbose is enabled."""
if self.verbose:
self.log(msg, *args) | major/monitorstack | [
1,
1,
1,
1,
1487693141
] |
def cmd_folder(self):
"""Get the path to the plugin directory."""
return os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'plugins'
)
) | major/monitorstack | [
1,
1,
1,
1,
1487693141
] |
def get_command(self, ctx, name):
"""Load a command and run it."""
for _, pkg_name, _ in pkgutil.iter_modules([self.cmd_folder]):
if pkg_name == name:
mod = importlib.import_module(
'monitorstack.plugins.{}'.format(name)
)
return getattr(mod, 'cli')
else:
raise SystemExit('Module "{}" Not Found.'.format(name)) | major/monitorstack | [
1,
1,
1,
1,
1487693141
] |
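get_command discovers plugins with pkgutil.iter_modules and imports the match by name; below is a small hedged sketch of just the discovery step, using a placeholder directory rather than monitorstack's real plugin path.

```python
import pkgutil

plugin_dir = './plugins'  # placeholder path, not monitorstack's real layout
available = [name for _, name, _ in pkgutil.iter_modules([plugin_dir])]
print('discovered plugin modules:', available)
```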
def cli(*args, **kwargs):
"""A complex command line interface."""
try:
args[0].verbose = kwargs.get('verbose', False)
except IndexError: # pragma: no cover
pass | major/monitorstack | [
1,
1,
1,
1,
1487693141
] |
def process_result(results, output_format, **kwargs):
"""Render the output into the proper format."""
module_name = 'monitorstack.common.formatters'
method_name = 'write_{}'.format(output_format.replace('-', '_'))
output_formatter = getattr(
importlib.import_module(module_name),
method_name
)
# Force the output formatter into a list
if not isinstance(results, list): # pragma: no cover
results = [results]
exit_code = 0
for result in results:
output_formatter(result)
if result['exit_code'] != 0:
exit_code = result['exit_code']
else:
sys.exit(exit_code) | major/monitorstack | [
1,
1,
1,
1,
1487693141
] |
def _basic_alert():
return Alert('test_rule', {'abc': 123}, {'aws-firehose:alerts', 'aws-sns:test-output'}) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def _customized_alert():
return Alert(
'test_rule',
{'abc': 123},
{'aws-firehose:alerts', 'aws-sns:test-output', 'aws-s3:other-output'},
alert_id='abc-123',
attempts=1,
cluster='',
context={'rule': 'context'},
created=datetime.utcnow(),
dispatched=datetime.utcnow(),
log_source='source',
log_type='csv',
merge_by_keys=['abc'],
merge_window=timedelta(minutes=5),
outputs_sent={'aws-sns:test-output'},
rule_description='A Test Rule',
source_entity='entity',
source_service='s3',
staged=True
) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_init_invalid_kwargs(self):
"""Alert Class - Init With Invalid Kwargs"""
assert_raises(AlertCreationError, Alert, '', {}, set(), cluster='test', invalid='nonsense') | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_repr(self):
"""Alert Class - Complete Alert Representation"""
assert_is_instance(repr(self._basic_alert()), str)
assert_is_instance(repr(self._customized_alert()), str) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_dynamo_key(self):
"""Alert Class - Dynamo Key"""
alert = self._customized_alert()
assert_equal({'RuleName': 'test_rule', 'AlertID': 'abc-123'}, alert.dynamo_key) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_remaining_outputs_merge_enabled(self):
"""Alert Class - Remaining Outputs - With Merge Config"""
# Only the required firehose output shows as remaining
assert_equal({'aws-firehose:alerts'}, self._customized_alert().remaining_outputs) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_create_from_dynamo_record(self):
"""Alert Class - Create Alert from Dynamo Record"""
alert = self._customized_alert()
# Converting to a Dynamo record and back again should result in the exact same alert
record = alert.dynamo_record()
new_alert = Alert.create_from_dynamo_record(record)
assert_equal(alert.dynamo_record(), new_alert.dynamo_record()) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_output_dict(self):
"""Alert Class - Output Dict"""
alert = self._basic_alert()
result = alert.output_dict()
# Ensure result is JSON-serializable (no sets)
assert_is_instance(json.dumps(result), str)
# Ensure result is Athena compatible (no None values)
assert_not_in(None, list(result.values())) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_can_merge_too_far_apart(self):
"""Alert Class - Can Merge - False if Outside Merge Window"""
alert1 = Alert(
'', {'key': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=0),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'key': True}, set(),
created=datetime(year=2000, month=1, day=1, minute=11),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1)) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_can_merge_key_not_common(self):
"""Alert Class - Can Merge - False if Merge Key Not Present in Both Records"""
alert1 = Alert(
'', {'key': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
alert2 = Alert(
'', {'other': True}, set(),
merge_by_keys=['key'],
merge_window=timedelta(minutes=10)
)
assert_false(alert1.can_merge(alert2))
assert_false(alert2.can_merge(alert1)) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_can_merge_merge_keys_absent(self):
"""Alert Class - Can Merge - True if Merge Keys Do Not Exist in Either Record"""
alert1 = Alert('', {}, set(), merge_by_keys=['key'], merge_window=timedelta(minutes=10))
alert2 = Alert('', {}, set(), merge_by_keys=['key'], merge_window=timedelta(minutes=10))
assert_true(alert1.can_merge(alert2))
assert_true(alert2.can_merge(alert1)) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_compute_common_empty_record(self):
"""Alert Class - Compute Common - Empty Record List"""
assert_equal({}, Alert._compute_common([])) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_compute_common_top_level(self):
"""Alert Class - Compute Common - No Nested Dictionaries"""
record1 = {'a': 1, 'b': 2, 'c': 3}
record2 = {'b': 2, 'c': 3, 'd': 4}
record3 = {'c': 3, 'd': 4, 'e': 5}
assert_equal({'c': 3}, Alert._compute_common([record1, record2, record3])) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_compute_common_partial_nested(self):
"""Alert Class - Compute Common - Some Common Features in Nested Dictionary"""
# This is the example given in the docstring
record1 = {'abc': 123, 'nested': {'A': 1, 'B': 2}}
record2 = {'abc': 123, 'def': 456, 'nested': {'A': 1}}
assert_equal({'abc': 123, 'nested': {'A': 1}}, Alert._compute_common([record1, record2])) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_compute_common_many_nested(self):
"""Alert Class - Compute Common - Multiple Levels of Nesting"""
record1 = {
'a': {
'b': {
'c': 3,
'd': 4
},
'e': {
'h': {
'i': 9
}
},
'j': {}
}
}
record2 = {
'a': {
'b': {
'c': 3,
},
'e': {
'f': {
'g': 8
},
'h': {}
},
'j': {}
}
}
expected = {
'a': {
'b': {
'c': 3
},
'j': {}
}
}
assert_equal(expected, Alert._compute_common([record1, record2])) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
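A reference sketch consistent with the _compute_common tests above (not StreamAlert's actual implementation): keep a key only if every record has it, with identical values kept directly and differing nested dicts reduced recursively.

```python
def compute_common(records):
    """Recursively intersect a list of dicts, keeping only shared key/value pairs."""
    if not records:
        return {}
    result = {}
    first, rest = records[0], records[1:]
    for key, value in first.items():
        others = [r[key] for r in rest if key in r]
        if len(others) != len(rest):
            continue  # the key is missing from at least one record
        if all(other == value for other in others):
            result[key] = value  # identical everywhere (also covers empty dicts)
        elif isinstance(value, dict) and all(isinstance(o, dict) for o in others):
            nested = compute_common([value] + others)
            if nested:
                result[key] = nested  # keep only the shared nested subset
    return result
```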
def test_compute_diff_no_common(self):
"""Alert Class - Compute Diff - No Common Set"""
record = {'a': 1, 'b': 2, 'c': {'d': {'e': 3}}}
assert_equal(record, Alert._compute_diff({}, record)) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_compute_diff_top_level(self):
"""Alert Class - Compute Diff - Top Level Keys"""
common = {'c': 3}
record = {'a': 1, 'b': 2, 'c': 3}
assert_equal({'a': 1, 'b': 2}, Alert._compute_diff(common, record)) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
def test_compute_diff_nested(self):
"""Alert Class - Compute Diff - Difference in Nested Dictionary"""
# This is the example given in the docstring
common = {'abc': 123, 'nested': {'A': 1}}
record = {'abc': 123, 'nested': {'A': 1, 'B': 2}}
assert_equal({'nested': {'B': 2}}, Alert._compute_diff(common, record)) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |
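And a matching sketch of the difference step the _compute_diff tests describe: anything not covered by the common subset is kept, recursing into nested dicts (again, an illustrative reconstruction, not the library's code).

```python
def compute_diff(common, record):
    """Return the parts of record that are not already captured in common."""
    if not common:
        return record
    diff = {}
    for key, value in record.items():
        if key not in common:
            diff[key] = value
        elif isinstance(value, dict) and isinstance(common[key], dict):
            nested = compute_diff(common[key], value)
            if nested:
                diff[key] = nested  # only the nested keys that differ survive
        elif value != common[key]:
            diff[key] = value
    return diff
```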
def test_merge(self):
"""Alert Class - Merge - Create Merged Alert"""
# Example based on a CarbonBlack log
record1 = {
'alliance_data_virustotal': [],
'alliance_link_virustotal': '',
'alliance_score_virustotal': 0,
'cmdline': 'whoami',
'comms_ip': '1.2.3.4',
'hostname': 'my-computer-name',
'path': '/usr/bin/whoami',
'streamalert:ioc': {
'hello': 'world'
},
'timestamp': 1234.5678,
'username': 'user'
}
alert1 = Alert(
'RuleName', record1, {'aws-sns:topic'},
created=datetime(year=2000, month=1, day=1),
merge_by_keys=['hostname', 'username'],
merge_window=timedelta(minutes=5)
)
# Second alert has slightly different record and different outputs
record2 = copy.deepcopy(record1)
record2['streamalert:ioc'] = {'goodbye': 'world'}
record2['timestamp'] = 9999
alert2 = Alert(
'RuleName', record2, {'slack:channel'},
created=datetime(year=2000, month=1, day=2),
merge_by_keys=['hostname', 'username'],
merge_window=timedelta(minutes=5)
)
merged = Alert.merge([alert1, alert2])
assert_is_instance(merged, Alert)
assert_equal({'slack:channel'}, merged.outputs) # Most recent outputs were used
expected_record = {
'AlertCount': 2,
'AlertTimeFirst': '2000-01-01T00:00:00.000000Z',
'AlertTimeLast': '2000-01-02T00:00:00.000000Z',
'MergedBy': {
'hostname': 'my-computer-name',
'username': 'user'
},
'OtherCommonKeys': {
'alliance_data_virustotal': [],
'alliance_link_virustotal': '',
'alliance_score_virustotal': 0,
'cmdline': 'whoami',
'comms_ip': '1.2.3.4',
'path': '/usr/bin/whoami',
},
'ValueDiffs': {
'2000-01-01T00:00:00.000000Z': {
'streamalert:ioc': {'hello': 'world'},
'timestamp': 1234.5678
},
'2000-01-02T00:00:00.000000Z': {
'streamalert:ioc': {'goodbye': 'world'},
'timestamp': 9999
}
}
}
assert_equal(expected_record, merged.record) | airbnb/streamalert | [
2767,
342,
2767,
92,
1485047456
] |