filename | text
---|---
the-stack_0_13788 | from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
class PhasmaDevice(models.Model):
mac = models.CharField(_('media access control address'),
max_length=127,
primary_key=True,
help_text=_("MAC (Media Access Control) address of phasma device.")
)
name = models.CharField(_('name'),
max_length=127,
help_text=_("Name of phasma device.")
)
date_added = models.DateTimeField(_('date added'),
default=timezone.now,
help_text=_("Date when phasma device was added.")
)
date_updated = models.DateTimeField(_('date updated'),
auto_now=True,
help_text=_("Date when phasma device was updated.")
)
class Meta:
ordering = ('-date_added',)
def __str__(self) -> str:
return self.mac
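# Illustrative helper (added sketch, not part of the original model file): since
# `mac` is the primary key, devices are naturally upserted by MAC address.
def register_phasma_device(mac: str, name: str) -> "PhasmaDevice":
    """Create or update a PhasmaDevice record keyed by its MAC address."""
    device, _created = PhasmaDevice.objects.update_or_create(
        mac=mac,
        defaults={"name": name},
    )
    return device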
|
the-stack_0_13790 | __author__ = 'bptripp'
import numpy as np
import matplotlib.pyplot as plt
import cPickle
from quaternion import angle_between_quaterions
# def interpolate(point, angle, points, angles, values, sigma_p=.01, sigma_a=(4*np.pi/180)):
# """
# Gaussian kernel smoothing.
# """
# # q = to_quaternion(get_rotation_matrix(point, angle))
# # print(angle)
#
# weights = np.zeros(len(values))
# # foo = np.zeros(len(values))
# # bar = np.zeros(len(values))
# for i in range(len(values)):
# # q_i = to_quaternion(get_rotation_matrix(points[:,i], angles[:,i]))
#
# # print(q_i)
#
# # angle = angle_between_quaterions(q, q_i)
# # print(angle)
#
# position_distance = np.linalg.norm(point - points[:,i])
# angle_distance = angle[2] - angles[2,i];
#
# # weights[i] = np.exp( -(angle**2/2/sigma_a**2) )
# weights[i] = np.exp( -(angle_distance**2/2/sigma_a**2 + position_distance**2/2/sigma_p**2) )
# # weights[i] = np.exp( -(angle**2/2/sigma_a**2 + distance**2/2/sigma_p**2) )
# # foo[i] = np.exp( -(angle**2/2/sigma_a**2) )
# # bar[i] = np.exp( -(distance**2/2/sigma_p**2) )
#
# # print(weights)
# # print(np.sum(weights))
# # print(np.sum(foo))
# # print(np.sum(bar))
# return np.sum(weights * np.array(values)) / np.sum(weights)
def interpolate(quaternion, distance, quaternions, distances, values, sigma_a=(4*np.pi/180), sigma_d=.01):
"""
Gaussian kernel smoothing.
"""
weights = np.zeros(len(values))
    angle_threshold = np.cos(1.25*sigma_a)  # the dot product of two unit quaternions is cos(half the rotation angle between them), so this admits rotations up to roughly 2.5*sigma_a
distance_threshold = 2.5*sigma_d
# attempt fast estimate (only considering within-threshold points) ...
c = 0
for i in range(len(values)):
distance_difference = np.abs(distance - distances[i])
if distance_difference < distance_threshold and np.dot(quaternion, quaternions[i]) > angle_threshold:
c += 1
angle_difference = np.abs(angle_between_quaterions(quaternion, quaternions[i]))
weights[i] = np.exp( -(angle_difference**2/2/sigma_a**2 + distance_difference**2/2/sigma_d**2) )
# slow estimate if not enough matches ...
# print(c)
if c <= 3:
# print('slow estimate ' + str(c))
for i in range(len(values)):
distance_difference = np.abs(distance - distances[i])
angle_difference = np.abs(angle_between_quaterions(quaternion, quaternions[i]))
weights[i] = np.exp( -(angle_difference**2/2/sigma_a**2 + distance_difference**2/2/sigma_d**2) )
# print(weights)
# print(values)
return np.sum(weights * np.array(values)) / np.sum(weights)
def check_interpolate():
from perspective import get_quaternion_distance
point = np.array([1e-6,.1,.1])
angle = np.array([0,0,.9])
points = np.array([[1e-6,.1,.1], [1e-6,.12,.1]]).T
angles = np.array([[0,0,1], [0,0,1]]).T
values = np.array([0,1])
quaternion, distance = get_quaternion_distance(point[:,np.newaxis], angle[:,np.newaxis])
quaternions, distances = get_quaternion_distance(points, angles)
# print(quaternion)
# print(distance)
# print(quaternions)
# print(distances)
# estimate = interpolate(point, angle, points, angles, values, sigma_p=.01, sigma_a=(4*np.pi/180))
estimate = interpolate(quaternion[0], distance[0], quaternions, distances, values, sigma_d=.01, sigma_a=(4*np.pi/180))
print(estimate)
def test_interpolation_accuracy(points, angles, metrics, n_examples):
"""
Compare interpolated vs. actual metrics by leaving random
examples out of interpolation set and estimating them.
"""
from perspective import get_quaternion_distance
quaternions, distances = get_quaternion_distance(points, angles)
actuals = []
interpolateds = []
for i in range(n_examples):
print(i)
one = np.random.randint(0, len(metrics))
others = range(one)
others.extend(range(one+1, len(metrics)))
others = np.array(others)
actuals.append(metrics[one])
interpolated = interpolate(quaternions[one,:], distances[one], quaternions[others,:], distances[others], metrics[others],
sigma_d=.01, sigma_a=(8*np.pi/180))
interpolateds.append(interpolated)
# print(interpolated - metrics[one])
# print(np.corrcoef(actuals, interpolateds))
return actuals, interpolateds
def plot_interp_error_vs_density():
with open('spatula-perspectives-smoothed.pkl', 'rb') as f:
(points, angles, metrics, collisions, smoothed) = cPickle.load(f)
metrics = np.array(metrics)
smoothed = np.array(smoothed)
numbers = [250, 500, 1000, 2000, 4000]
metric_errors = []
smoothed_errors = []
for n in numbers:
actuals, interpolateds = test_interpolation_accuracy(points[:,:n], angles[:,:n], metrics[:n], 500)
metric_errors.append(np.mean( (np.array(actuals)-np.array(interpolateds))**2 )**.5)
actuals, interpolateds = test_interpolation_accuracy(points[:,:n], angles[:,:n], smoothed[:n], 500)
smoothed_errors.append(np.mean( (np.array(actuals)-np.array(interpolateds))**2 )**.5)
plt.plot(numbers, smoothed_errors)
plt.plot(numbers, metric_errors)
plt.show()
if __name__ == '__main__':
# check_interpolate()
plot_interp_error_vs_density()
|
the-stack_0_13792 | """Represents a Concrete Strategy Object class for parsing PDF files.
References:
Lesson 4, Concept 8: Exercise - Strategy Objects
https://classroom.udacity.com/nanodegrees/nd303/parts/bdd52131-b22e-4c57-b3f2-a03951c9d514/modules/5fe343a0-2926-4953-81bc-485ee835e1c6/lessons/cac8a587-58ea-44d2-927f-0c9badb7a8e9/concepts/8e2fb5c6-33ef-4b5b-a01d-8f422a88fa1b
Lesson 5, Concept 7: Exercise - Complex Strategy:
https://classroom.udacity.com/nanodegrees/nd303/parts/bdd52131-b22e-4c57-b3f2-a03951c9d514/modules/5fe343a0-2926-4953-81bc-485ee835e1c6/lessons/93decac5-5e75-4573-b28e-ad1218ec04d3/concepts/6733fc76-b1a7-4c42-9a67-622af43b8cd5
"""
from typing import List
import subprocess
import os
import random
from .IngestorInterface import IngestorInterface
from .QuoteModel import QuoteModel
class PDFIngestor(IngestorInterface):
"""Create an Concrete Class Object for parsing PDF file pathways.
param allowed_extensions: File pathway allowed in this ingestor.
"""
allowed_extensions = ['pdf']
@classmethod
def parse(cls, path: str) -> List[QuoteModel]:
"""Ingest PDF File, return list of quotes.
param path {str}: PDF file pathway, origin of quotes.
return: Quotes stored in PDF file.
"""
if not cls.can_ingest(path):
raise Exception('PDF-Only Diet, Cannot Ingest!')
tmp = f'./tmp/{random.randint(0, 100000000)}.txt'
call = subprocess.call(['pdftotext', path, tmp])
file_ref = open(tmp, "r")
quotes = []
for line in file_ref.readlines():
line = line.strip('\n\r').strip()
if len(line) > 0:
parse = line.split(' - ')
meme_text = QuoteModel(parse[0], parse[1])
quotes.append(meme_text)
file_ref.close()
os.remove(tmp)
return quotes
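# Hedged usage sketch (added): with the Strategy pattern described in the
# references above, a caller checks `can_ingest` and then delegates to `parse`.
# The sample path below is hypothetical.
def _example_pdf_ingest():
    sample = './quotes/sample_quotes.pdf'  # hypothetical file
    if PDFIngestor.can_ingest(sample):
        return PDFIngestor.parse(sample)
    return []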
|
the-stack_0_13793 | from six.moves import xrange
import tensorflow as tf
from .var_layer import VarLayer
from ..tf import sparse_tensor_diag_matmul
def conv(features, adj, weights):
degree = tf.sparse_reduce_sum(adj, axis=1) + 1
degree = tf.cast(degree, tf.float32)
degree = tf.pow(degree, -0.5)
adj = sparse_tensor_diag_matmul(adj, degree, transpose=True)
adj = sparse_tensor_diag_matmul(adj, degree, transpose=False)
output = tf.sparse_tensor_dense_matmul(adj, features)
features = tf.transpose(features)
features = tf.multiply(tf.multiply(degree, features), degree)
features = tf.transpose(features)
output = output + features
return tf.matmul(output, weights)
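# Added note on the math above: with degrees d_i = 1 + sum_j A_ij, `conv`
# computes D^-1/2 A D^-1/2 X W plus the degree-scaled self term
# D^-1/2 I D^-1/2 X W, which together give the renormalized GCN propagation
# D^-1/2 (A + I) D^-1/2 X W.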
class GCNN(VarLayer):
def __init__(self, in_channels, out_channels, adjs, **kwargs):
self.adjs = adjs
super(GCNN, self).__init__(
weight_shape=[in_channels, out_channels],
bias_shape=[out_channels],
**kwargs)
def _call(self, inputs):
batch_size = len(inputs)
outputs = []
for i in xrange(batch_size):
output = conv(inputs[i], self.adjs[i], self.vars['weights'])
if self.bias:
output = tf.nn.bias_add(output, self.vars['bias'])
output = self.act(output)
outputs.append(output)
return outputs
|
the-stack_0_13795 | #!/usr/bin/env python
# Copyright 2015-2017 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import datetime
import sys
import time
from paasta_tools.mesos_tools import get_mesos_master
from paasta_tools.utils import load_system_paasta_config
OUTPUT_FORMAT = "{:<30} {:<8} {:<20} {:<27} {}"
FRAMEWORK_NAME = "marathon"
MAX_BOUNCE_TIME_IN_HOURS = 4
def parse_args():
parser = argparse.ArgumentParser(
description='Find all containers serving previous push versions.',
)
parser.add_argument(
'--bounce-time', dest="bounce_time", type=int,
default=MAX_BOUNCE_TIME_IN_HOURS,
help=(
"Ignore versions that were launched in the last BOUNCE_TIME hours "
"because they probably are still bouncing."
),
)
return parser.parse_args()
def get_mesos_state():
state = get_mesos_master(use_mesos_cache=True).state
return state
def marathon_tasks(state):
for framework in state.get('frameworks', []):
if framework['name'].lower().startswith(FRAMEWORK_NAME):
for task in framework.get('tasks', []):
yield task
def create_slave_id_to_hostname_dict(state):
res = {}
for slave in state['slaves']:
res[slave['id']] = slave['hostname']
return res
def group_running_tasks_by_id_and_gitsha(state):
res = {}
for t in marathon_tasks(state):
if t['state'] == 'TASK_RUNNING':
task_id = t['name'][:t['name'].find('.', t['name'].find('.') + 1)]
gitsha = t['name'][len(task_id) + 1:t['name'].find('.', len(task_id) + 1)]
res.setdefault(task_id, {}).setdefault(gitsha, []).append(t)
return res
def detect_outdated_gitshas(versions, max_bounce_time_in_hours):
"""Find versions that should have drained more than 'max_bounce_time_in_hours' ago"""
if len(versions) < 2:
return []
deploy_time = {}
latest_deploy = 0
for version, tasks in versions.items():
deploy_time[version] = sum(t['statuses'][0]['timestamp'] for t in tasks) / len(tasks)
if (
deploy_time[version] > latest_deploy and
time.time() - deploy_time[version] > max_bounce_time_in_hours * 3600
):
latest_deploy = deploy_time[version]
return [version for version, dtime in deploy_time.items() if dtime < latest_deploy]
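# Added illustration with made-up task data: the newer gitsha was deployed more
# than MAX_BOUNCE_TIME_IN_HOURS ago, so the older gitsha is reported as outdated
# because it should already have drained.
def _example_detect_outdated_gitshas():
    now = time.time()
    versions = {
        'aaaaaaa': [{'statuses': [{'timestamp': now - 10 * 3600}]}],
        'bbbbbbb': [{'statuses': [{'timestamp': now - 5 * 3600}]}],
    }
    return detect_outdated_gitshas(versions, MAX_BOUNCE_TIME_IN_HOURS)  # ['aaaaaaa']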
def report_outdated_instances(task_id, gitsha, tasks, slave_id2hostname):
output = []
remedy = []
for t in tasks:
deploy_time = datetime.datetime.fromtimestamp(int(t['statuses'][0]['timestamp'])).strftime('%Y-%m-%d %H:%M:%S')
container_name = "mesos-{}.{}".format(
t['slave_id'],
t['statuses'][0]['container_status']['container_id']['value'],
)
hostname = slave_id2hostname[t['slave_id']]
hostname = hostname[:hostname.find('.')]
service_instance = task_id.replace('--', '_')
output.append(
OUTPUT_FORMAT.format(
service_instance[:30],
gitsha[3:],
deploy_time,
hostname,
container_name,
),
)
remedy.append('ssh {0} "sudo hadown {1}; sleep 10; sudo docker stop {2}; sudo haup {1}"'
.format(hostname, service_instance, container_name))
return output, remedy
def check_mesos_tasks(max_bounce_time_in_hours=MAX_BOUNCE_TIME_IN_HOURS):
output = []
remedy = []
state = get_mesos_state()
aggregated_tasks = group_running_tasks_by_id_and_gitsha(state)
slave_id2hostname = create_slave_id_to_hostname_dict(state)
for task_id, versions in aggregated_tasks.items():
for gitsha in detect_outdated_gitshas(versions, max_bounce_time_in_hours):
temp_output, temp_remedy = report_outdated_instances(
task_id, gitsha, versions[gitsha], slave_id2hostname,
)
output.extend(temp_output)
remedy.extend(temp_remedy)
return output, remedy
def main():
args = parse_args()
cluster = load_system_paasta_config().get_cluster()
output, remedy = check_mesos_tasks(args.bounce_time)
if output:
print("CRITICAL - There are {} tasks running in {} that are more than {}h older than their"
" last bounce.".format(len(output), cluster, args.bounce_time))
print(OUTPUT_FORMAT.format('SERVICE.INSTANCE', 'COMMIT', 'CREATED', 'HOSTNAME', 'CONTAINER'))
print('\n'.join(output))
print('')
print('Run the following commands to terminate them:')
print('{code}')
print('\n'.join(remedy))
print('{code}')
return 1
else:
print("OK - There are no outdated tasks in {}".format(cluster))
return 0
if __name__ == "__main__":
sys.exit(main())
|
the-stack_0_13796 | # Copyright (c) 2016, Matt Layman
import os
import sys
import tempfile
import unittest
try:
from unittest import mock
except ImportError:
import mock
from tap import TAPTestRunner
from tap.runner import TAPTestResult, _tracker
class TestTAPTestRunner(unittest.TestCase):
def test_has_tap_test_result(self):
runner = TAPTestRunner()
self.assertEqual(runner.resultclass, TAPTestResult)
def test_runner_uses_outdir(self):
"""Test that the test runner sets the outdir so that TAP
files will be written to that location.
Setting class attributes to get the right behavior is a dirty hack, but
the unittest classes aren't very extensible.
"""
# Save the previous outdir in case **this** execution was using it.
previous_outdir = _tracker.outdir
outdir = tempfile.mkdtemp()
TAPTestRunner.set_outdir(outdir)
self.assertEqual(outdir, _tracker.outdir)
_tracker.outdir = previous_outdir
def test_runner_uses_format(self):
"""Test that format is set on TAPTestResult FORMAT."""
# Save the previous format in case **this** execution was using it.
previous_format = TAPTestResult.FORMAT
fmt = "{method_name}: {short_description}"
TAPTestRunner.set_format(fmt)
self.assertEqual(fmt, TAPTestResult.FORMAT)
TAPTestResult.FORMAT = previous_format
def test_runner_uses_combined(self):
"""Test that output is combined."""
# Save previous combined in case **this** execution was using it.
previous_combined = _tracker.combined
TAPTestRunner.set_combined(True)
self.assertTrue(_tracker.combined)
_tracker.combined = previous_combined
@mock.patch('sys.exit')
def test_bad_format_string(self, fake_exit):
"""A bad format string exits the runner."""
previous_format = TAPTestResult.FORMAT
bad_format = "Not gonna work {sort_desc}"
TAPTestRunner.set_format(bad_format)
result = TAPTestResult(None, True, 1)
test = mock.Mock()
result._description(test)
self.assertTrue(fake_exit.called)
TAPTestResult.FORMAT = previous_format
def test_runner_sets_tracker_for_streaming(self):
"""The tracker is set for streaming mode."""
previous_streaming = _tracker.streaming
previous_stream = _tracker.stream
runner = TAPTestRunner()
runner.set_stream(True)
self.assertTrue(_tracker.streaming)
self.assertTrue(_tracker.stream, sys.stdout)
_tracker.streaming = previous_streaming
_tracker.stream = previous_stream
def test_runner_stream_to_devnull_for_streaming(self):
previous_streaming = _tracker.streaming
previous_stream = _tracker.stream
runner = TAPTestRunner()
runner.set_stream(True)
self.assertTrue(runner.stream.stream.name, os.devnull)
_tracker.streaming = previous_streaming
_tracker.stream = previous_stream
def test_runner_uses_header(self):
"""Test that the case header can be turned off."""
# Save previous header in case **this** execution was using it.
previous_header = _tracker.header
TAPTestRunner.set_header(False)
self.assertFalse(_tracker.header)
TAPTestRunner.set_header(True)
self.assertTrue(_tracker.header)
_tracker.header = previous_header
|
the-stack_0_13799 | # Aggregator class
# : contains aggregate methods
import logging
from Queue import Empty
from threading import Thread, Event
from user_agents import parse
logger = logging.getLogger('pwstat_aggregator')
class Aggregator(Thread):
def __init__(self, queue, writer, stat_list):
Thread.__init__(self)
self.queue = queue
self.writer = writer
self.stat_list = stat_list
# match dictionary: used in calculate function
# example 'stat_name': self.function_name
self.match = {'timestamp': self.get_timestamp,
'user-agent': self.get_user_agent,
}
# validate statistics specified
for stat in stat_list:
assert stat in self.match, \
"'{}' did not match any stats in aggregator. Available stats: {}".format(stat, self.match.keys())
self._stop = Event()
# default thread method
def run(self):
logger.info("Aggregator starting...")
while True:
try:
# timeout after 5 seconds
temp = self.queue.get(timeout=5)
self.writer.write(self.calculate(temp))
except Empty:
pass
if self._stop.isSet():
logger.info("Aggregator stopping...")
break
return
# thread termination method
def stop(self):
self._stop.set()
# aggregate functions
def get_timestamp(self, data):
return str(data[2])
def get_user_agent(self, data):
ua_string = data[0]['user-agent']
user_agent = parse(ua_string)
return 'mobile' if user_agent.is_mobile else 'desktop' if user_agent.is_pc else 'other'
# main method
def calculate(self, data):
# call aggregate functions and return dictionary of stats
# data has the form (headers, body, timestamp)
logger.info("Starting aggregation...")
result = dict()
for stat in self.stat_list:
stat_function = self.match[stat]
result[stat] = stat_function(data)
return result
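# Hedged usage sketch (added): the writer is a stand-in -- anything with a
# `write(dict)` method works -- and queue items are (headers, body, timestamp)
# tuples as described in `calculate` above.
def _example_aggregator_usage():
    from Queue import Queue
    class PrintWriter(object):
        def write(self, stats):
            print(stats)
    aggregator = Aggregator(Queue(), PrintWriter(), ['timestamp', 'user-agent'])
    aggregator.start()
    # ... a producer puts (headers, body, timestamp) tuples on the queue ...
    aggregator.stop()
    aggregator.join()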
|
the-stack_0_13800 | def is_prime(n):
if n <= 1:
return False
elif n <= 3:
return True
elif n % 2 == 0 or n % 3 == 0:
return False
i = 5
while i * i <= n:
if n % i == 0 or n % (i + 2) == 0:
return False
i += 6
return True
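# Added note: after 2 and 3 are ruled out, every remaining prime has the form
# 6k - 1 or 6k + 1, so the loop above only tests divisors i and i + 2 while
# stepping i by 6, up to sqrt(n).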
p = int(input().strip())
for _ in range(p):
n = int(input().strip())
if is_prime(n):
print("Prime")
else:
print("Not prime")
|
the-stack_0_13801 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""create is_encrypted
Revision ID: 1507a7289a2f
Revises: e3a246e0dc1
Create Date: 2015-08-18 18:57:51.927315
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.engine.reflection import Inspector
# revision identifiers, used by Alembic.
revision = '1507a7289a2f'
down_revision = 'e3a246e0dc1'
branch_labels = None
depends_on = None
connectionhelper = sa.Table(
'connection', sa.MetaData(), sa.Column('id', sa.Integer, primary_key=True), sa.Column('is_encrypted')
)
def upgrade():
# first check if the user already has this done. This should only be
# true for users who are upgrading from a previous version of Airflow
# that predates Alembic integration
conn = op.get_bind()
inspector = Inspector.from_engine(conn)
# this will only be true if 'connection' already exists in the db,
# but not if alembic created it in a previous migration
if 'connection' in inspector.get_table_names():
col_names = [c['name'] for c in inspector.get_columns('connection')]
if 'is_encrypted' in col_names:
return
op.add_column('connection', sa.Column('is_encrypted', sa.Boolean, unique=False, default=False))
conn = op.get_bind()
conn.execute(connectionhelper.update().values(is_encrypted=False))
def downgrade():
op.drop_column('connection', 'is_encrypted')
|
the-stack_0_13802 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests exercising the various classes in xmppserver.py."""
import unittest
import base64
import xmppserver
class XmlUtilsTest(unittest.TestCase):
def testParseXml(self):
xml_text = """<foo xmlns=""><bar xmlns=""><baz/></bar></foo>"""
xml = xmppserver.ParseXml(xml_text)
self.assertEqual(xml.toxml(), xml_text)
def testCloneXml(self):
xml = xmppserver.ParseXml('<foo/>')
xml_clone = xmppserver.CloneXml(xml)
xml_clone.setAttribute('bar', 'baz')
self.assertEqual(xml, xml)
self.assertEqual(xml_clone, xml_clone)
self.assertNotEqual(xml, xml_clone)
def testCloneXmlUnlink(self):
xml_text = '<foo/>'
xml = xmppserver.ParseXml(xml_text)
xml_clone = xmppserver.CloneXml(xml)
xml.unlink()
self.assertEqual(xml.parentNode, None)
self.assertNotEqual(xml_clone.parentNode, None)
self.assertEqual(xml_clone.toxml(), xml_text)
class StanzaParserTest(unittest.TestCase):
def setUp(self):
self.stanzas = []
def FeedStanza(self, stanza):
# We can't append stanza directly because it is unlinked after
# this callback.
self.stanzas.append(stanza.toxml())
def testBasic(self):
parser = xmppserver.StanzaParser(self)
parser.FeedString('<foo')
self.assertEqual(len(self.stanzas), 0)
parser.FeedString('/><bar></bar>')
self.assertEqual(self.stanzas[0], '<foo/>')
self.assertEqual(self.stanzas[1], '<bar/>')
def testStream(self):
parser = xmppserver.StanzaParser(self)
parser.FeedString('<stream')
self.assertEqual(len(self.stanzas), 0)
parser.FeedString(':stream foo="bar" xmlns:stream="baz">')
self.assertEqual(self.stanzas[0],
'<stream:stream foo="bar" xmlns:stream="baz"/>')
def testNested(self):
parser = xmppserver.StanzaParser(self)
parser.FeedString('<foo')
self.assertEqual(len(self.stanzas), 0)
parser.FeedString(' bar="baz"')
parser.FeedString('><baz/><blah>meh</blah></foo>')
self.assertEqual(self.stanzas[0],
'<foo bar="baz"><baz/><blah>meh</blah></foo>')
class JidTest(unittest.TestCase):
def testBasic(self):
jid = xmppserver.Jid('foo', 'bar.com')
self.assertEqual(str(jid), '[email protected]')
def testResource(self):
jid = xmppserver.Jid('foo', 'bar.com', 'resource')
self.assertEqual(str(jid), '[email protected]/resource')
def testGetBareJid(self):
jid = xmppserver.Jid('foo', 'bar.com', 'resource')
self.assertEqual(str(jid.GetBareJid()), '[email protected]')
class IdGeneratorTest(unittest.TestCase):
def testBasic(self):
id_generator = xmppserver.IdGenerator('foo')
for i in xrange(0, 100):
self.assertEqual('foo.%d' % i, id_generator.GetNextId())
class HandshakeTaskTest(unittest.TestCase):
def setUp(self):
self.Reset()
def Reset(self):
self.data_received = 0
self.handshake_done = False
self.jid = None
def SendData(self, _):
self.data_received += 1
def SendStanza(self, _, unused=True):
self.data_received += 1
def HandshakeDone(self, jid):
self.handshake_done = True
self.jid = jid
def DoHandshake(self, resource_prefix, resource, username,
initial_stream_domain, auth_domain, auth_stream_domain):
self.Reset()
handshake_task = (
xmppserver.HandshakeTask(self, resource_prefix, True))
stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>')
stream_xml.setAttribute('to', initial_stream_domain)
self.assertEqual(self.data_received, 0)
handshake_task.FeedStanza(stream_xml)
self.assertEqual(self.data_received, 2)
if auth_domain:
username_domain = '%s@%s' % (username, auth_domain)
else:
username_domain = username
auth_string = base64.b64encode('\0%s\0bar' % username_domain)
auth_xml = xmppserver.ParseXml('<auth>%s</auth>'% auth_string)
handshake_task.FeedStanza(auth_xml)
self.assertEqual(self.data_received, 3)
stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>')
stream_xml.setAttribute('to', auth_stream_domain)
handshake_task.FeedStanza(stream_xml)
self.assertEqual(self.data_received, 5)
bind_xml = xmppserver.ParseXml(
'<iq type="set"><bind><resource>%s</resource></bind></iq>' % resource)
handshake_task.FeedStanza(bind_xml)
self.assertEqual(self.data_received, 6)
self.assertFalse(self.handshake_done)
session_xml = xmppserver.ParseXml(
'<iq type="set"><session></session></iq>')
handshake_task.FeedStanza(session_xml)
self.assertEqual(self.data_received, 7)
self.assertTrue(self.handshake_done)
self.assertEqual(self.jid.username, username)
self.assertEqual(self.jid.domain,
auth_stream_domain or auth_domain or
initial_stream_domain)
self.assertEqual(self.jid.resource,
'%s.%s' % (resource_prefix, resource))
handshake_task.FeedStanza('<ignored/>')
self.assertEqual(self.data_received, 7)
def DoHandshakeUnauthenticated(self, resource_prefix, resource, username,
initial_stream_domain):
self.Reset()
handshake_task = (
xmppserver.HandshakeTask(self, resource_prefix, False))
stream_xml = xmppserver.ParseXml('<stream:stream xmlns:stream="foo"/>')
stream_xml.setAttribute('to', initial_stream_domain)
self.assertEqual(self.data_received, 0)
handshake_task.FeedStanza(stream_xml)
self.assertEqual(self.data_received, 2)
self.assertFalse(self.handshake_done)
auth_string = base64.b64encode('\0%s\0bar' % username)
auth_xml = xmppserver.ParseXml('<auth>%s</auth>'% auth_string)
handshake_task.FeedStanza(auth_xml)
self.assertEqual(self.data_received, 3)
self.assertTrue(self.handshake_done)
self.assertEqual(self.jid, None)
handshake_task.FeedStanza('<ignored/>')
self.assertEqual(self.data_received, 3)
def testBasic(self):
self.DoHandshake('resource_prefix', 'resource',
'foo', 'bar.com', 'baz.com', 'quux.com')
def testDomainBehavior(self):
self.DoHandshake('resource_prefix', 'resource',
'foo', 'bar.com', 'baz.com', 'quux.com')
self.DoHandshake('resource_prefix', 'resource',
'foo', 'bar.com', 'baz.com', '')
self.DoHandshake('resource_prefix', 'resource',
'foo', 'bar.com', '', '')
self.DoHandshake('resource_prefix', 'resource',
'foo', '', '', '')
def testBasicUnauthenticated(self):
self.DoHandshakeUnauthenticated('resource_prefix', 'resource',
'foo', 'bar.com')
class FakeSocket(object):
"""A fake socket object used for testing.
"""
def __init__(self):
self._sent_data = []
def GetSentData(self):
return self._sent_data
# socket-like methods.
def fileno(self):
return 0
def setblocking(self, int):
pass
def getpeername(self):
return ('', 0)
def send(self, data):
self._sent_data.append(data)
pass
def close(self):
pass
class XmppConnectionTest(unittest.TestCase):
def setUp(self):
self.connections = set()
self.fake_socket = FakeSocket()
# XmppConnection delegate methods.
def OnXmppHandshakeDone(self, xmpp_connection):
self.connections.add(xmpp_connection)
def OnXmppConnectionClosed(self, xmpp_connection):
self.connections.discard(xmpp_connection)
def ForwardNotification(self, unused_xmpp_connection, notification_stanza):
for connection in self.connections:
connection.ForwardNotification(notification_stanza)
def testBasic(self):
socket_map = {}
xmpp_connection = xmppserver.XmppConnection(
self.fake_socket, socket_map, self, ('', 0), True)
self.assertEqual(len(socket_map), 1)
self.assertEqual(len(self.connections), 0)
xmpp_connection.HandshakeDone(xmppserver.Jid('foo', 'bar'))
self.assertEqual(len(socket_map), 1)
self.assertEqual(len(self.connections), 1)
sent_data = self.fake_socket.GetSentData()
# Test subscription request.
self.assertEqual(len(sent_data), 0)
xmpp_connection.collect_incoming_data(
'<iq><subscribe xmlns="google:push"></subscribe></iq>')
self.assertEqual(len(sent_data), 1)
# Test acks.
xmpp_connection.collect_incoming_data('<iq type="result"/>')
self.assertEqual(len(sent_data), 1)
# Test notification.
xmpp_connection.collect_incoming_data(
'<message><push xmlns="google:push"/></message>')
self.assertEqual(len(sent_data), 2)
# Test unexpected stanza.
def SendUnexpectedStanza():
xmpp_connection.collect_incoming_data('<foo/>')
self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedStanza)
# Test unexpected notifier command.
def SendUnexpectedNotifierCommand():
xmpp_connection.collect_incoming_data(
'<iq><foo xmlns="google:notifier"/></iq>')
self.assertRaises(xmppserver.UnexpectedXml,
SendUnexpectedNotifierCommand)
# Test close.
xmpp_connection.close()
self.assertEqual(len(socket_map), 0)
self.assertEqual(len(self.connections), 0)
def testBasicUnauthenticated(self):
socket_map = {}
xmpp_connection = xmppserver.XmppConnection(
self.fake_socket, socket_map, self, ('', 0), False)
self.assertEqual(len(socket_map), 1)
self.assertEqual(len(self.connections), 0)
xmpp_connection.HandshakeDone(None)
self.assertEqual(len(socket_map), 0)
self.assertEqual(len(self.connections), 0)
# Test unexpected stanza.
def SendUnexpectedStanza():
xmpp_connection.collect_incoming_data('<foo/>')
self.assertRaises(xmppserver.UnexpectedXml, SendUnexpectedStanza)
# Test redundant close.
xmpp_connection.close()
self.assertEqual(len(socket_map), 0)
self.assertEqual(len(self.connections), 0)
class FakeXmppServer(xmppserver.XmppServer):
"""A fake XMPP server object used for testing.
"""
def __init__(self):
self._socket_map = {}
self._fake_sockets = set()
self._next_jid_suffix = 1
xmppserver.XmppServer.__init__(self, self._socket_map, ('', 0))
def GetSocketMap(self):
return self._socket_map
def GetFakeSockets(self):
return self._fake_sockets
def AddHandshakeCompletedConnection(self):
"""Creates a new XMPP connection and completes its handshake.
"""
xmpp_connection = self.handle_accept()
jid = xmppserver.Jid('user%s' % self._next_jid_suffix, 'domain.com')
self._next_jid_suffix += 1
xmpp_connection.HandshakeDone(jid)
# XmppServer overrides.
def accept(self):
fake_socket = FakeSocket()
self._fake_sockets.add(fake_socket)
return (fake_socket, ('', 0))
def close(self):
self._fake_sockets.clear()
xmppserver.XmppServer.close(self)
class XmppServerTest(unittest.TestCase):
def setUp(self):
self.xmpp_server = FakeXmppServer()
def AssertSentDataLength(self, expected_length):
for fake_socket in self.xmpp_server.GetFakeSockets():
self.assertEqual(len(fake_socket.GetSentData()), expected_length)
def testBasic(self):
socket_map = self.xmpp_server.GetSocketMap()
self.assertEqual(len(socket_map), 1)
self.xmpp_server.AddHandshakeCompletedConnection()
self.assertEqual(len(socket_map), 2)
self.xmpp_server.close()
self.assertEqual(len(socket_map), 0)
def testMakeNotification(self):
notification = self.xmpp_server.MakeNotification('channel', 'data')
expected_xml = (
'<message>'
' <push channel="channel" xmlns="google:push">'
' <data>%s</data>'
' </push>'
'</message>' % base64.b64encode('data'))
self.assertEqual(notification.toxml(), expected_xml)
def testSendNotification(self):
# Add a few connections.
for _ in xrange(0, 7):
self.xmpp_server.AddHandshakeCompletedConnection()
self.assertEqual(len(self.xmpp_server.GetFakeSockets()), 7)
self.AssertSentDataLength(0)
self.xmpp_server.SendNotification('channel', 'data')
self.AssertSentDataLength(1)
def testEnableDisableNotifications(self):
# Add a few connections.
for _ in xrange(0, 5):
self.xmpp_server.AddHandshakeCompletedConnection()
self.assertEqual(len(self.xmpp_server.GetFakeSockets()), 5)
self.AssertSentDataLength(0)
self.xmpp_server.SendNotification('channel', 'data')
self.AssertSentDataLength(1)
self.xmpp_server.EnableNotifications()
self.xmpp_server.SendNotification('channel', 'data')
self.AssertSentDataLength(2)
self.xmpp_server.DisableNotifications()
self.xmpp_server.SendNotification('channel', 'data')
self.AssertSentDataLength(2)
self.xmpp_server.DisableNotifications()
self.xmpp_server.SendNotification('channel', 'data')
self.AssertSentDataLength(2)
self.xmpp_server.EnableNotifications()
self.xmpp_server.SendNotification('channel', 'data')
self.AssertSentDataLength(3)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13803 | # -*- coding: utf-8 -*-
import torch
import random
import inspect
import numpy as np
from itertools import islice, repeat
import os
def check_path(path, exist_ok=False, log=print):
"""Check if `path` exists, makedirs if not else warning/IOError."""
if os.path.exists(path):
if exist_ok:
log(f"path {path} exists, may overwrite...")
else:
raise IOError(f"path {path} exists, stop.")
else:
os.makedirs(os.path.dirname(path), exist_ok=True)
def split_corpus(path, shard_size, default=None):
"""yield a `list` containing `shard_size` line of `path`,
or repeatly generate `default` if `path` is None.
"""
if path is not None:
return _split_corpus(path, shard_size)
else:
return repeat(default)
def _split_corpus(path, shard_size):
"""Yield a `list` containing `shard_size` line of `path`.
"""
with open(path, "rb") as f:
if shard_size <= 0:
yield f.readlines()
else:
while True:
shard = list(islice(f, shard_size))
if not shard:
break
yield shard
def aeq(*args):
"""
Assert all arguments have the same value
"""
arguments = (arg for arg in args)
first = next(arguments)
assert all(arg == first for arg in arguments), \
"Not all arguments have the same value: " + str(args)
def sequence_mask(lengths, max_len=None):
"""
Creates a boolean mask from sequence lengths.
"""
batch_size = lengths.numel()
max_len = max_len or lengths.max()
return (torch.arange(0, max_len, device=lengths.device)
.type_as(lengths)
.repeat(batch_size, 1)
.lt(lengths.unsqueeze(1)))
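# Added worked example (recent PyTorch): sequence_mask(torch.tensor([1, 3]), max_len=4)
# -> tensor([[ True, False, False, False],
#            [ True,  True,  True, False]])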
def tile(x, count, dim=0):
"""
Tiles x on dimension dim count times.
"""
perm = list(range(len(x.size())))
if dim != 0:
perm[0], perm[dim] = perm[dim], perm[0]
x = x.permute(perm)
out_size = list(x.size())
out_size[0] *= count
batch = x.size(0)
x = x.contiguous().view(batch, -1) \
.transpose(0, 1) \
.repeat(count, 1) \
.transpose(0, 1) \
.contiguous() \
.view(*out_size)
if dim != 0:
x = x.permute(perm).contiguous()
return x
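# Added worked example: copies are interleaved along `dim`, e.g.
# tile(torch.tensor([[1, 2], [3, 4]]), 2, dim=0)
# -> tensor([[1, 2], [1, 2], [3, 4], [3, 4]])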
def use_gpu(opt):
"""
Creates a boolean if gpu used
"""
return (hasattr(opt, 'gpu_ranks') and len(opt.gpu_ranks) > 0) or \
(hasattr(opt, 'gpu') and opt.gpu > -1)
def set_random_seed(seed, is_cuda):
"""Sets the random seed."""
if seed > 0:
torch.manual_seed(seed)
# this one is needed for torchtext random call (shuffled iterator)
# in multi gpu it ensures datasets are read in the same order
random.seed(seed)
# some cudnn methods can be random even after fixing the seed
# unless you tell it to be deterministic
torch.backends.cudnn.deterministic = True
        # This one is needed for various transforms
np.random.seed(seed)
if is_cuda and seed > 0:
# These ensure same initialization in multi gpu mode
torch.cuda.manual_seed(seed)
def generate_relative_positions_matrix(length, max_relative_positions,
cache=False):
"""Generate the clipped relative positions matrix
for a given length and maximum relative positions"""
if cache:
distance_mat = torch.arange(-length+1, 1, 1).unsqueeze(0)
else:
range_vec = torch.arange(length)
range_mat = range_vec.unsqueeze(-1).expand(-1, length).transpose(0, 1)
distance_mat = range_mat - range_mat.transpose(0, 1)
distance_mat_clipped = torch.clamp(distance_mat,
min=-max_relative_positions,
max=max_relative_positions)
# Shift values to be >= 0
final_mat = distance_mat_clipped + max_relative_positions
return final_mat
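# Added worked example: generate_relative_positions_matrix(3, 2) returns
# tensor([[2, 3, 4],
#         [1, 2, 3],
#         [0, 1, 2]])
# i.e. clipped relative offsets shifted by max_relative_positions to be >= 0.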
def relative_matmul(x, z, transpose):
"""Helper function for relative positions attention."""
batch_size = x.shape[0]
heads = x.shape[1]
length = x.shape[2]
x_t = x.permute(2, 0, 1, 3)
x_t_r = x_t.reshape(length, heads * batch_size, -1)
if transpose:
z_t = z.transpose(1, 2)
x_tz_matmul = torch.matmul(x_t_r, z_t)
else:
x_tz_matmul = torch.matmul(x_t_r, z)
x_tz_matmul_r = x_tz_matmul.reshape(length, batch_size, heads, -1)
x_tz_matmul_r_t = x_tz_matmul_r.permute(1, 2, 0, 3)
return x_tz_matmul_r_t
def fn_args(fun):
"""Returns the list of function arguments name."""
return inspect.getfullargspec(fun).args
def report_matrix(row_label, column_label, matrix):
header_format = "{:>10.10} " + "{:>10.7} " * len(row_label)
row_format = "{:>10.10} " + "{:>10.7f} " * len(row_label)
output = header_format.format("", *row_label) + '\n'
for word, row in zip(column_label, matrix):
max_index = row.index(max(row))
row_format = row_format.replace(
"{:>10.7f} ", "{:*>10.7f} ", max_index + 1)
row_format = row_format.replace(
"{:*>10.7f} ", "{:>10.7f} ", max_index)
output += row_format.format(word, *row) + '\n'
row_format = "{:>10.10} " + "{:>10.7f} " * len(row_label)
return output
def check_model_config(model_config, root):
# we need to check the model path + any tokenizer path
for model in model_config["models"]:
model_path = os.path.join(root, model)
if not os.path.exists(model_path):
raise FileNotFoundError(
"{} from model {} does not exist".format(
model_path, model_config["id"]))
if "tokenizer" in model_config.keys():
if "params" in model_config["tokenizer"].keys():
for k, v in model_config["tokenizer"]["params"].items():
if k.endswith("path"):
tok_path = os.path.join(root, v)
if not os.path.exists(tok_path):
raise FileNotFoundError(
"{} from model {} does not exist".format(
tok_path, model_config["id"]))
|
the-stack_0_13804 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark for KPL implementation of vocabulary columns from lists with dense inputs."""
from tensorflow.python import keras
from tensorflow.python.compat import v2_compat
from tensorflow.python.eager.def_function import function as tf_function
from tensorflow.python.feature_column import feature_column_v2 as fcv2
from tensorflow.python.framework import dtypes as dt
from tensorflow.python.keras.layers.preprocessing import string_lookup
from tensorflow.python.keras.layers.preprocessing.benchmarks import feature_column_benchmark as fc_bm
from tensorflow.python.platform import test as tf_test
# This is required as of 3/2021 because otherwise we drop into graph mode.
v2_compat.enable_v2_behavior()
NUM_REPEATS = 10
BATCH_SIZES = [32, 256]
def embedding_varlen(batch_size, max_length):
"""Benchmark a variable-length embedding."""
# Data and constants.
vocab = fc_bm.create_vocabulary(32768)
data = fc_bm.create_string_data(
max_length, batch_size * NUM_REPEATS, vocab, pct_oov=0.15)
# Keras implementation
model = keras.Sequential()
model.add(keras.Input(shape=(max_length,), name="data", dtype=dt.string))
model.add(string_lookup.StringLookup(vocabulary=vocab, mask_token=None))
# FC implementation
fc = fcv2.categorical_column_with_vocabulary_list(
key="data", vocabulary_list=vocab, num_oov_buckets=1)
# Wrap the FC implementation in a tf.function for a fair comparison
@tf_function()
def fc_fn(tensors):
fc.transform_feature(fcv2.FeatureTransformationCache(tensors), None)
# Benchmark runs
keras_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
k_avg_time = fc_bm.run_keras(keras_data, model, batch_size, NUM_REPEATS)
fc_data = {
"data": data.to_tensor(default_value="", shape=(batch_size, max_length))
}
fc_avg_time = fc_bm.run_fc(fc_data, fc_fn, batch_size, NUM_REPEATS)
return k_avg_time, fc_avg_time
class BenchmarkLayer(fc_bm.LayerBenchmark):
"""Benchmark the layer forward pass."""
def benchmark_layer(self):
for batch in BATCH_SIZES:
name = "vocab_list|dense|batch_%s" % batch
k_time, f_time = embedding_varlen(batch_size=batch, max_length=256)
self.report(name, k_time, f_time, NUM_REPEATS)
if __name__ == "__main__":
tf_test.main()
|
the-stack_0_13807 | """
Devices.
"""
import attr
import anyio
from typing import List
from .event import DeviceLocated, DeviceNotFound, DeviceValue, DeviceException
from .error import IsDirError
import logging
logger = logging.getLogger(__name__)
__all__ = ["Device"]
@attr.s
class NoLocationKnown(RuntimeError):
device = attr.ib()
@attr.s
class NotADevice(RuntimeError):
id = attr.ib()
dev_classes = dict()
def register(cls):
dev_classes[cls.family] = cls
def split_id(id): # pylint: disable=redefined-builtin
try:
a, b, c = (int(x, 16) for x in id.split("."))
except ValueError:
raise NotADevice(id) from None
return a, b, c
class _Value:
def __init__(self, path, typ):
self.path = path
self.typ = typ
def __repr__(self):
return "<%s: %s %s>" % (self.__class__.__name__, self.path, self.typ)
class _RValue(_Value):
def __init__(self, path, typ):
super().__init__(path, typ)
if typ in {"f", "g", "p", "t"}:
self.conv = float
elif typ in {"i", "u"}:
self.conv = int
elif typ == "y":
self.conv = lambda x: bool(int(x))
elif typ == "b":
self.conv = lambda x: x
else:
self.conv = lambda x: x.decode("utf-8")
class _WValue(_Value):
def __init__(self, path, typ):
super().__init__(path, typ)
if typ == "b":
self.conv = lambda x: x
elif typ == "y":
self.conv = lambda x: b"1" if x else b"0"
else:
self.conv = lambda x: str(x).encode("utf-8")
class SimpleValue(_RValue):
"""Accessor for direct attribute access"""
async def __get__(slf, self, cls): # pylint: disable=no-self-argument
res = await self.dev.attr_get(*slf.path)
return slf.conv(res)
class SimpleGetter(_RValue):
"""Accessor for get_* function"""
def __get__(slf, self, cls): # pylint: disable=no-self-argument
async def getter():
res = await self.dev.attr_get(*slf.path)
return slf.conv(res)
return getter
class SimpleSetter(_WValue):
"""Accessor for set_* function"""
def __get__(slf, self, cls): # pylint: disable=no-self-argument
async def setter(val):
await self.dev.attr_set(*slf.path, value=slf.conv(val))
return setter
class MultiValue(_RValue):
"""Accessor for direct array access"""
async def __get__(slf, self, cls): # pylint: disable=no-self-argument
p = slf.path[:-1] + (slf.path[-1] + ".ALL",)
res = await self.dev.attr_get(*p)
conv = slf.conv
return [conv(v) for v in res.split(b",")]
class MultiGetter(_RValue):
"""Accessor for array get_* function"""
def __get__(slf, self, cls): # pylint: disable=no-self-argument
async def getter():
p = slf.path[:-1] + (slf.path[-1] + ".ALL",)
res = await self.dev.attr_get(*p)
conv = slf.conv
return [conv(v) for v in res.split(b",")]
return getter
class MultiSetter(_WValue):
"""Accessor for array set_* function"""
def __get__(slf, self, cls): # pylint: disable=no-self-argument
async def setter(val):
conv = slf.conv
p = slf.path[:-1] + (slf.path[-1] + ".ALL",)
val = b",".join(conv(v) for v in val)
await self.dev.attr_set(*p, value=val)
return setter
class _IdxObj:
def __init__(self, dev, ary):
self.dev = dev
self.ary = ary
async def __getitem__(self, idx):
if self.ary.num:
idx = str(idx)
else:
idx = chr(ord("A") + idx)
p = self.ary.path[:-1] + (self.ary.path[-1] + "." + idx,)
res = await self.dev.attr_get(*p)
return self.ary.conv(res)
async def set(self, idx, val):
if self.ary.num:
idx = str(idx)
else:
idx = chr(ord("A") + idx)
p = self.ary.path[:-1] + (self.ary.path[-1] + "." + idx,)
await self.dev.attr_set(*p, value=val)
class ArrayValue(_RValue):
"""Accessor for direct array element access"""
def __init__(self, path, typ, num):
super().__init__(path, typ)
self.num = num
def __get__(slf, self, cls): # pylint: disable=no-self-argument
return _IdxObj(self.dev, slf)
class ArrayGetter(_RValue):
"""Accessor for array element get_* function"""
def __init__(self, path, typ, num):
super().__init__(path, typ)
self.num = num
def __get__(slf, self, cls): # pylint: disable=no-self-argument
async def getter(idx):
if slf.num:
idx = str(idx)
else:
idx = chr(ord("A") + idx)
p = slf.path[:-1] + (slf.path[-1] + "." + idx,)
res = await self.dev.attr_get(*p)
return slf.conv(res)
return getter
class ArraySetter(_WValue):
"""Accessor for array element set_* function"""
def __init__(self, path, typ, num):
super().__init__(path, typ)
self.num = num
def __get__(slf, self, cls): # pylint: disable=no-self-argument
async def setter(idx, val):
if slf.num:
idx = str(idx)
else:
idx = chr(ord("A") + idx)
p = slf.path[:-1] + (slf.path[-1] + "." + idx,)
await self.dev.attr_set(*p, value=slf.conv(val))
return setter
class SubDir:
_subdirs = set()
dev = None # needs to be filled by subclass
def __getattr__(self, name):
if name not in self._subdirs:
return super().__getattribute__(name)
c = getattr(self, "_cls_" + name)(self)
c.dev = self.dev
return c
async def setup_accessors(server, cls, typ, *subdir):
cls.fields = {}
for d in await server.dir("structure", typ, *subdir):
dd = subdir + (d,)
try:
v = await server.attr_get("structure", typ, *dd)
except IsDirError:
t = typ
class SubPath(SubDir):
typ = t
subdir = dd
def __init__(self, base):
self.base = base
def __repr__(self):
return "<%s %s %s>" % (
self.__class__.__name__,
self.base,
self.subdir,
)
def __get__(self, obj, cls):
if obj is None:
return cls
try:
return getattr(obj, "_" + self.dd[-1])
except AttributeError:
c = getattr(cls, self.__name__)()
setattr(obj, "_" + self.dd[-1], c)
c.dev = obj.dev
return c
SubPath.__name__ = "_cls_" + d
setattr(cls, "_cls_" + d, SubPath)
cls._subdirs.add(d)
await setup_accessors(server, SubPath, typ, *dd)
else:
v = v.decode("utf-8").split(",")
try:
v[1] = int(v[1])
v[2] = int(v[2])
v[4] = int(v[4])
except ValueError:
raise ValueError("broken setup vector", (typ, dd), v) from None
if v[1] == 0:
if d.endswith(".0"):
num = True
elif d.endswith(".A"):
num = False
else:
num = None
v[1] = num
cls.fields[d] = v
if num is None:
if v[3] in {"ro", "rw"}:
if hasattr(cls, d):
logger.debug("%s: not overwriting %s", cls, d)
else:
setattr(cls, d, SimpleValue(dd, v[0]))
setattr(cls, "get_" + d, SimpleGetter(dd, v[0]))
if v[3] in {"wo", "rw"}:
setattr(cls, "set_" + d, SimpleSetter(dd, v[0]))
else:
d = d[:-2]
dd = subdir + (d,)
if v[3] in {"ro", "rw"}:
if hasattr(cls, d):
logger.debug("%s: not overwriting %s", cls, d)
else:
setattr(cls, d, ArrayValue(dd, v[0], num))
setattr(cls, "get_" + d, ArrayGetter(dd, v[0], num))
if v[3] in {"wo", "rw"}:
setattr(cls, "set_" + d, ArraySetter(dd, v[0], num))
if v[3] in {"ro", "rw"}:
if hasattr(cls, d + "_all"):
logger.debug("%s: not overwriting %s", cls, d + "_all")
else:
setattr(cls, d + "_all", MultiValue(dd, v[0]))
setattr(cls, "get_" + d + "_all", MultiGetter(dd, v[0]))
if v[3] in {"wo", "rw"}:
setattr(cls, "set_" + d + "_all", MultiSetter(dd, v[0]))
class Device(SubDir):
"""Base class for devices.
A device may or may not have a known location.
Whenever a device is located, poll activity is auto-started.
"""
_did_setup = False
_poll: dict = None
bus = None
_events = None
_wait_bus = None
def __new__(cls, service, id): # pylint: disable=redefined-builtin
family_id, code, chksum = split_id(id)
cls = dev_classes.get(family_id) # pylint: disable=self-cls-assignment
if cls is None:
class cls(Device): # pylint: disable=function-redefined
family = family_id
cls.__name__ = "Device_%02x" % (family_id,)
dev_classes[family_id] = cls
self = object.__new__(cls)
self.id = id.upper()
self.family = family_id
self.code = code
self.chksum = chksum
self.service = service
self.bus = None
self._unseen = 0
self._events = []
self._wait_bus = anyio.create_event()
self._poll = {} # name > poll task scopes
self._intervals = {}
self._task_lock = anyio.create_lock()
return self
def __init__(self, service, id): # pylint: disable=redefined-builtin,unused-argument
logger.debug("NewDev %s", id)
@property
def dev(self):
return self
def queue_event(self, evt):
"""Remember this event. Used if an event arrives when the device
hasn't yet been set up by high-level code
"""
self._events.append(evt)
@property
def queued_events(self):
"""Return queued events. Shall be called exactly once."""
e, self._events = self._events, None
if e is None:
raise RuntimeError("You cannot call `queued_events` " "more than once")
return iter(e)
@classmethod
async def setup_struct(cls, server):
"""Read the device's structural data from OWFS
and add methods to access the fields"""
if cls._did_setup is not False:
return
cls._did_setup = None
try:
fc = "%02X" % (cls.family)
await setup_accessors(server, cls, fc)
except BaseException:
cls._did_setup = False
raise
else:
cls._did_setup = True
def __eq__(self, x):
x = getattr(x, "id", x)
return self.id == x
def __hash__(self):
return hash(self.id)
def __repr__(self):
return "<%s:%s @ %s>" % (self.__class__.__name__, self.id, self.bus)
def buses(self):
return set()
async def locate(self, bus):
"""The device has been seen here."""
if self.bus is bus:
return
self.bus = bus
await self._wait_bus.set()
await self.service.push_event(DeviceLocated(self))
for typ, val in self._intervals.items():
await self._set_poll_task(typ, val)
async def wait_bus(self):
await self._wait_bus.wait()
async def delocate(self, bus):
"""The device is no longer located here."""
if self.bus is bus:
self._wait_bus = anyio.create_event()
await self._delocate()
async def _delocate(self):
await self.bus._del_device(self)
self.bus = None
for t in self._poll.values():
await t.cancel()
self._poll = {}
await self.service.push_event(DeviceNotFound(self))
async def attr_get(self, *attrs: List[str]):
"""Read this attribute (ignoring device struct)"""
if self.bus is None:
raise NoLocationKnown(self)
return await self.bus.attr_get(self.id, *attrs)
async def attr_set(self, *attrs: List[str], value):
"""Write this attribute (ignoring device struct)"""
if self.bus is None:
raise NoLocationKnown(self)
return await self.bus.attr_set(self.id, *attrs, value=value)
async def get(self, *attrs):
"""Read this attribute (following device struct)"""
dev = self
for k in attrs:
if isinstance(k, int):
dev = dev[k]
else:
dev = getattr(dev, k)
return await dev
async def set(self, *attrs, value):
"""Write this attribute (following device struct)"""
dev = self
for k in attrs[:-1]:
if isinstance(k, int):
dev = dev[k]
else:
dev = getattr(dev, k)
if isinstance(dev, _IdxObj):
await dev.set(attrs[-1], value)
else:
await getattr(dev, "set_" + attrs[-1])(value)
def polling_items(self):
"""Enumerate poll variants supported by this device.
This is a generator. If you override, call::
yield from super().polling_items()
See the associated ``poll_<name>`` methods on
:class:`anyio_owfs.bus.Bus` for details.
Special return values:
* "alarm": you need to implement ``.stop_alarm``
"""
if False: # pylint: disable=using-constant-test
yield None
def polling_interval(self, typ: str):
"""Return the interval WRT how often to poll for this type.
The default implementation looks up the "interval_<typ>" attribute
or returns ``None`` if that doesn't exist.
"""
try:
return self._intervals[typ]
except KeyError:
return getattr(self, "interval_" + typ, None)
async def set_polling_interval(self, typ: str, value: float = 0):
if isinstance(typ, str):
styp = typ
else:
styp = "/".join(str(x) for x in typ)
if value > 0:
self._intervals[styp] = value
else:
self._intervals.pop(styp, None)
if self.bus is not None:
if hasattr(self, "poll_" + styp):
await self.bus.update_poll()
else:
await self._set_poll_task(typ, value)
async def _set_poll_task(self, typ, value):
async with self._task_lock:
try:
task = self._poll.pop(typ)
except KeyError:
pass
else:
await task.cancel()
if not value:
return
*p, n = typ.split("/") if isinstance(typ, str) else typ
s = self
for pp in p:
if isinstance(pp, int):
s = s[pp]
else:
s = getattr(s, pp)
if isinstance(n, int) or hasattr(s, "get_" + n):
self._poll[typ] = await self.service.add_task(self._poll_task, s, n, typ, value)
else:
raise RuntimeError("%r: No poll for %s" % (self, typ))
async def _poll_task(self, s, n, typ, value):
await anyio.sleep(value / 5)
while True:
try:
if isinstance(n, int):
v = await s[n]
else:
v = await getattr(s, n)
except Exception as exc:
logger.exception("Reader at %s %s", self, typ)
await self.service.push_event(DeviceException(self, typ, exc))
else:
await self.service.push_event(DeviceValue(self, typ, v))
await anyio.sleep(value)
async def poll_alarm(self):
"""Tells the device not to trigger an alarm any more.
You *need* to override this if your device can trigger an alarm
condition. Also, this method *must* disable the alarm; your
application can re-enable it later, when processing the
:class:`anyio_owfs.event.DeviceAlarm` event.
"""
pass # pylint
raise NotImplementedError(
"<%s> (%02x) needs 'poll_alarm'" % (self.__class__.__name__, self.family)
)
@register
class SwitchDevice(Device):
family = 0x1F
def buses(self):
"""Return a list of the connected buses.
'main' should be processed first.
"""
b = []
b.append((self.id, "main"))
b.append((self.id, "aux"))
return b
async def poll_alarm(self):
"""Clear alarm"""
raise RuntimeError("TODO")
# res = await self.get_event_all()
# await self.set_clearalarm(1)
@register
class TemperatureDevice(Device):
family = 0x10
interval_temperature = None
alarm_temperature = None
async def poll_alarm(self):
"""Turn off alarm condition by adapting the temperature bounds"""
self.alarm_temperature = t = await self.latesttemp
reasons = {"temp": t}
t_h = await self.temphigh
if t > t_h:
await self.set_temphigh(int(t + 2))
reasons["high"] = t_h
t_l = await self.templow
if t < t_l:
await self.set_templow(int(t - 1))
reasons["low"] = t_l
return reasons
def polling_items(self):
yield from super().polling_items()
yield "temperature"
yield "alarm"
async def poll_temperature(self):
# t = await self.latesttemp
t = await self.temperature
await self.service.push_event(DeviceValue(self, "temperature", t))
# @property
# def temperature(self):
# return self.latesttemp
@register
class TemperatureBDevice(Device):
family = 0x28
@register
class VoltageDevice(Device):
family = 0x20
interval_voltage = None
alarm_voltage = None
async def poll_alarm(self):
"""Turn off alarm condition by adapting the voltage bounds"""
reasons = {}
v = await self.voltage_all
ah = await self.alarm.high_all
al = await self.alarm.low_all
power = await self.set_alarm.unset
if power:
reasons["power_on"] = True
await self.set_alarm.set_unset(0)
if any(ah):
vh = await self.set_alarm.set_high_all
for i in range(4):
if not ah[i]:
continue
reasons["high_" + str(i)] = vh[i]
await self.set_alarm.set_high(i, 0)
if any(al):
vl = await self.set_alarm.set_low_all
for i in range(4):
if not al[i]:
continue
reasons["low_" + str(i)] = vl[i]
await self.set_alarm.set_low(i, 0)
for i in range(4):
reasons["volt_" + str(i)] = v[i]
return reasons
def polling_items(self):
yield from super().polling_items()
yield "voltage"
yield "alarm"
async def poll_voltage(self):
v = await self.volt_all
await self.service.push_event(DeviceValue(self, "volt_all", v))
@register
class PIODevice(Device):
family = 0x05
# dumb slave, no special handling
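# Hedged sketch (added, not part of the original module): a minimal device class
# following the conventions documented above -- set `family`, register the
# class, yield poll variants from `polling_items`, and implement the matching
# `poll_<name>` plus `poll_alarm`. The 0x26 family code and the `humidity`
# accessor are assumptions for illustration only.
@register
class ExampleHumidityDevice(Device):
    family = 0x26  # assumed family code
    def polling_items(self):
        yield from super().polling_items()
        yield "humidity"
    async def poll_humidity(self):
        # `humidity` would be an accessor generated by setup_accessors
        h = await self.humidity
        await self.service.push_event(DeviceValue(self, "humidity", h))
    async def poll_alarm(self):
        # assume this family does not signal alarm conditions
        return {}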
|
the-stack_0_13808 | import torch
from .observation_type import ObservationType
def get_tensorrt_backend_config_dict():
""" Get the backend config dictionary for tensorrt backend
NOTE: Current api will change in the future, it's just to unblock experimentation for
new backends, please don't use it right now.
"""
weighted_op_qint8_dtype_config = {
# optional, input activation dtype
"input_dtype": torch.qint8,
# optional, weight dtype
"weight_dtype": torch.qint8,
# optional, bias dtype
"bias_dtype": torch.float,
# optional, output activation dtype
"output_dtype": torch.qint8
}
linear_module_config = {
# Please see README under this folder for pattern format
"pattern": torch.nn.Linear,
"observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
"dtype_configs": [
weighted_op_qint8_dtype_config,
]
}
conv_module_config = {
"pattern": torch.nn.Conv2d,
"observation_type": ObservationType.OUTPUT_USE_DIFFERENT_OBSERVER_AS_INPUT,
"dtype_configs": [
weighted_op_qint8_dtype_config,
]
}
return {
# optional
"name": "tensorrt",
"configs": [
linear_module_config,
conv_module_config,
]
}
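# Hedged usage sketch (added): the dict above is meant to be passed to the FX
# graph mode quantization entry points via their `backend_config_dict` argument;
# the exact import path and call signature below are assumptions and have
# changed across PyTorch releases.
#
# from torch.ao.quantization.quantize_fx import prepare_fx
# backend_config_dict = get_tensorrt_backend_config_dict()
# prepared = prepare_fx(float_model, {"": qconfig}, backend_config_dict=backend_config_dict)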
|
the-stack_0_13811 | # -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Deepomatic SAS
http://www.deepomatic.com/
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
from deepomatic.api.exceptions import HTTPRetryError
from tenacity import (Retrying, after_log, before_log)
logger = logging.getLogger(__name__)
def retry(apply_func, retry_if, wait, stop, **kwargs):
retryer = Retrying(retry=retry_if,
wait=wait,
stop=stop,
before=before_log(logger, logging.DEBUG),
after=after_log(logger, logging.DEBUG),
**kwargs)
return retryer(apply_func)
def warn_on_http_retry_error(http_func, suffix='', reraise=True):
# http helper can raise a HTTPRetryError
try:
# this should be an http_helper call
return http_func()
except HTTPRetryError as e:
last_attempt = e.last_attempt
last_exception = last_attempt.exception(timeout=0)
msg = "HTTPHelper failed to refresh task status. In the last attempt, "
if last_exception is None:
last_response = last_attempt.result()
msg += 'the status code was {}.'.format(last_response.status_code)
else:
            msg += 'an exception occurred: {}.'.format(last_exception)
if suffix:
msg += ' ' + suffix
logger.warning(msg)
if reraise:
raise
return None
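# Hedged usage sketch (illustrative only, not part of the original module): combine
# the retry() helper above with tenacity's stock predicates; `fetch_status` stands
# in for any callable that actually performs the HTTP request.
def fetch_with_retry(fetch_status):
    from tenacity import retry_if_exception_type, stop_after_attempt, wait_fixed
    return retry(fetch_status,
                 retry_if=retry_if_exception_type(ConnectionError),
                 wait=wait_fixed(2),
                 stop=stop_after_attempt(5))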
|
the-stack_0_13813 | #!/usr/bin/env python
from tornado import httpclient, simple_httpclient, netutil
from tornado.escape import json_decode, utf8, _unicode, recursive_unicode, native_str
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders
from tornado.iostream import IOStream
from tornado.simple_httpclient import SimpleAsyncHTTPClient
from tornado.testing import AsyncHTTPTestCase, LogTrapTestCase, AsyncTestCase
from tornado.util import b, bytes_type
from tornado.web import Application, RequestHandler
import os
import shutil
import socket
import sys
import tempfile
try:
import ssl
except ImportError:
ssl = None
class HandlerBaseTestCase(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([('/', self.__class__.Handler)])
def fetch_json(self, *args, **kwargs):
response = self.fetch(*args, **kwargs)
response.rethrow()
return json_decode(response.body)
class HelloWorldRequestHandler(RequestHandler):
def initialize(self, protocol="http"):
self.expected_protocol = protocol
def get(self):
assert self.request.protocol == self.expected_protocol
self.finish("Hello world")
def post(self):
self.finish("Got %d bytes in POST" % len(self.request.body))
class BaseSSLTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_ssl_version(self):
raise NotImplementedError()
def setUp(self):
super(BaseSSLTest, self).setUp()
# Replace the client defined in the parent class.
# Some versions of libcurl have deadlock bugs with ssl,
# so always run these tests with SimpleAsyncHTTPClient.
self.http_client = SimpleAsyncHTTPClient(io_loop=self.io_loop,
force_instance=True)
def get_app(self):
return Application([('/', HelloWorldRequestHandler,
dict(protocol="https"))])
def get_httpserver_options(self):
# Testing keys were generated with:
# openssl req -new -keyout tornado/test/test.key -out tornado/test/test.crt -nodes -days 3650 -x509
test_dir = os.path.dirname(__file__)
return dict(ssl_options=dict(
certfile=os.path.join(test_dir, 'test.crt'),
keyfile=os.path.join(test_dir, 'test.key'),
ssl_version=self.get_ssl_version()))
def fetch(self, path, **kwargs):
self.http_client.fetch(self.get_url(path).replace('http', 'https'),
self.stop,
validate_cert=False,
**kwargs)
return self.wait()
class SSLTestMixin(object):
def test_ssl(self):
response = self.fetch('/')
self.assertEqual(response.body, b("Hello world"))
def test_large_post(self):
response = self.fetch('/',
method='POST',
body='A'*5000)
self.assertEqual(response.body, b("Got 5000 bytes in POST"))
def test_non_ssl_request(self):
# Make sure the server closes the connection when it gets a non-ssl
# connection, rather than waiting for a timeout or otherwise
# misbehaving.
self.http_client.fetch(self.get_url("/"), self.stop,
request_timeout=3600,
connect_timeout=3600)
response = self.wait()
self.assertEqual(response.code, 599)
# Python's SSL implementation differs significantly between versions.
# For example, SSLv3 and TLSv1 throw an exception if you try to read
# from the socket before the handshake is complete, but the default
# of SSLv23 allows it.
class SSLv23Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self): return ssl.PROTOCOL_SSLv23
class SSLv3Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self): return ssl.PROTOCOL_SSLv3
class TLSv1Test(BaseSSLTest, SSLTestMixin):
def get_ssl_version(self): return ssl.PROTOCOL_TLSv1
if hasattr(ssl, 'PROTOCOL_SSLv2'):
class SSLv2Test(BaseSSLTest):
def get_ssl_version(self): return ssl.PROTOCOL_SSLv2
def test_sslv2_fail(self):
# This is really more of a client test, but run it here since
# we've got all the other ssl version tests here.
# Clients should have SSLv2 disabled by default.
try:
# The server simply closes the connection when it gets
# an SSLv2 ClientHello packet.
# request_timeout is needed here because on some platforms
# (cygwin, but not native windows python), the close is not
# detected promptly.
response = self.fetch('/', request_timeout=1)
except ssl.SSLError:
# In some python/ssl builds the PROTOCOL_SSLv2 constant
# exists but SSLv2 support is still compiled out, which
# would result in an SSLError here (details vary depending
# on python version). The important thing is that
                # SSLv2 requests don't succeed, so we can just ignore
# the errors here.
return
self.assertEqual(response.code, 599)
if ssl is None:
del BaseSSLTest
del SSLv23Test
del SSLv3Test
del TLSv1Test
elif getattr(ssl, 'OPENSSL_VERSION_INFO', (0,0)) < (1,0):
# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
# ClientHello messages, which are rejected by SSLv3 and TLSv1
# servers. Note that while the OPENSSL_VERSION_INFO was formally
# introduced in python3.2, it was present but undocumented in
# python 2.7
del SSLv3Test
del TLSv1Test
class MultipartTestHandler(RequestHandler):
def post(self):
self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
"argument": self.get_argument("argument"),
"filename": self.request.files["files"][0].filename,
"filebody": _unicode(self.request.files["files"][0]["body"]),
})
class RawRequestHTTPConnection(simple_httpclient._HTTPConnection):
def set_request(self, request):
self.__next_request = request
def _on_connect(self, parsed):
self.stream.write(self.__next_request)
self.__next_request = None
self.stream.read_until(b("\r\n\r\n"), self._on_headers)
# This test is also called from wsgi_test
class HTTPConnectionTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_handlers(self):
return [("/multipart", MultipartTestHandler),
("/hello", HelloWorldRequestHandler)]
def get_app(self):
return Application(self.get_handlers())
def raw_fetch(self, headers, body):
client = SimpleAsyncHTTPClient(self.io_loop)
conn = RawRequestHTTPConnection(self.io_loop, client,
httpclient.HTTPRequest(self.get_url("/")),
None, self.stop,
1024*1024)
conn.set_request(
b("\r\n").join(headers +
[utf8("Content-Length: %d\r\n" % len(body))]) +
b("\r\n") + body)
response = self.wait()
client.close()
response.rethrow()
return response
def test_multipart_form(self):
# Encodings here are tricky: Headers are latin1, bodies can be
# anything (we use utf8 by default).
response = self.raw_fetch([
b("POST /multipart HTTP/1.0"),
b("Content-Type: multipart/form-data; boundary=1234567890"),
b("X-Header-encoding-test: \xe9"),
],
b("\r\n").join([
b("Content-Disposition: form-data; name=argument"),
b(""),
u"\u00e1".encode("utf-8"),
b("--1234567890"),
u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"),
b(""),
u"\u00fa".encode("utf-8"),
b("--1234567890--"),
b(""),
]))
data = json_decode(response.body)
self.assertEqual(u"\u00e9", data["header"])
self.assertEqual(u"\u00e1", data["argument"])
self.assertEqual(u"\u00f3", data["filename"])
self.assertEqual(u"\u00fa", data["filebody"])
def test_100_continue(self):
# Run through a 100-continue interaction by hand:
# When given Expect: 100-continue, we get a 100 response after the
# headers, and then the real response after the body.
stream = IOStream(socket.socket(), io_loop=self.io_loop)
stream.connect(("localhost", self.get_http_port()), callback=self.stop)
self.wait()
stream.write(b("\r\n").join([b("POST /hello HTTP/1.1"),
b("Content-Length: 1024"),
b("Expect: 100-continue"),
b("\r\n")]), callback=self.stop)
self.wait()
stream.read_until(b("\r\n\r\n"), self.stop)
data = self.wait()
self.assertTrue(data.startswith(b("HTTP/1.1 100 ")), data)
stream.write(b("a") * 1024)
stream.read_until(b("\r\n"), self.stop)
first_line = self.wait()
self.assertTrue(first_line.startswith(b("HTTP/1.1 200")), first_line)
stream.read_until(b("\r\n\r\n"), self.stop)
header_data = self.wait()
headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b("Got 1024 bytes in POST"))
class EchoHandler(RequestHandler):
def get(self):
self.write(recursive_unicode(self.request.arguments))
class TypeCheckHandler(RequestHandler):
def prepare(self):
self.errors = {}
fields = [
('method', str),
('uri', str),
('version', str),
('remote_ip', str),
('protocol', str),
('host', str),
('path', str),
('query', str),
]
for field, expected_type in fields:
self.check_type(field, getattr(self.request, field), expected_type)
self.check_type('header_key', self.request.headers.keys()[0], str)
self.check_type('header_value', self.request.headers.values()[0], str)
self.check_type('cookie_key', self.request.cookies.keys()[0], str)
self.check_type('cookie_value', self.request.cookies.values()[0].value, str)
# secure cookies
self.check_type('arg_key', self.request.arguments.keys()[0], str)
self.check_type('arg_value', self.request.arguments.values()[0][0], bytes_type)
def post(self):
self.check_type('body', self.request.body, bytes_type)
self.write(self.errors)
def get(self):
self.write(self.errors)
def check_type(self, name, obj, expected_type):
actual_type = type(obj)
if expected_type != actual_type:
self.errors[name] = "expected %s, got %s" % (expected_type,
actual_type)
class HTTPServerTest(AsyncHTTPTestCase, LogTrapTestCase):
def get_app(self):
return Application([("/echo", EchoHandler),
("/typecheck", TypeCheckHandler),
])
def test_query_string_encoding(self):
response = self.fetch("/echo?foo=%C3%A9")
data = json_decode(response.body)
self.assertEqual(data, {u"foo": [u"\u00e9"]})
def test_types(self):
headers = {"Cookie": "foo=bar"}
response = self.fetch("/typecheck?foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
data = json_decode(response.body)
self.assertEqual(data, {})
class XHeaderTest(HandlerBaseTestCase):
class Handler(RequestHandler):
def get(self):
self.write(dict(remote_ip=self.request.remote_ip))
def get_httpserver_options(self):
return dict(xheaders=True)
def test_ip_headers(self):
self.assertEqual(self.fetch_json("/")["remote_ip"],
"127.0.0.1")
valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
"4.4.4.4")
valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
self.assertEqual(
self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
"2620:0:1cfe:face:b00c::3")
invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
self.assertEqual(
self.fetch_json("/", headers=invalid_chars)["remote_ip"],
"127.0.0.1")
invalid_host = {"X-Real-IP": "www.google.com"}
self.assertEqual(
self.fetch_json("/", headers=invalid_host)["remote_ip"],
"127.0.0.1")
class UnixSocketTest(AsyncTestCase, LogTrapTestCase):
"""HTTPServers can listen on Unix sockets too.
Why would you want to do this? Nginx can proxy to backends listening
on unix sockets, for one thing (and managing a namespace for unix
sockets can be easier than managing a bunch of TCP port numbers).
Unfortunately, there's no way to specify a unix socket in a url for
an HTTP client, so we have to test this by hand.
"""
def setUp(self):
super(UnixSocketTest, self).setUp()
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
super(UnixSocketTest, self).tearDown()
def test_unix_socket(self):
sockfile = os.path.join(self.tmpdir, "test.sock")
sock = netutil.bind_unix_socket(sockfile)
app = Application([("/hello", HelloWorldRequestHandler)])
server = HTTPServer(app, io_loop=self.io_loop)
server.add_socket(sock)
stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop)
stream.connect(sockfile, self.stop)
self.wait()
stream.write(b("GET /hello HTTP/1.0\r\n\r\n"))
stream.read_until(b("\r\n"), self.stop)
response = self.wait()
self.assertEqual(response, b("HTTP/1.0 200 OK\r\n"))
stream.read_until(b("\r\n\r\n"), self.stop)
headers = HTTPHeaders.parse(self.wait().decode('latin1'))
stream.read_bytes(int(headers["Content-Length"]), self.stop)
body = self.wait()
self.assertEqual(body, b("Hello world"))
if not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin':
del UnixSocketTest
|
the-stack_0_13814 | #!/usr/bin/env python3
import os
from torch.utils.data import Dataset, ConcatDataset
from torchvision.datasets.omniglot import Omniglot
import numpy as np
import torch
from torchvision import transforms
class TripletOmniglot(Dataset):
"""
[[Source]]()
**Description**
This class provides an interface to the Omniglot dataset.
The Omniglot dataset was introduced by Lake et al., 2015.
Omniglot consists of 1623 character classes from 50 different alphabets, each containing 20 samples.
While the original dataset is separated in background and evaluation sets,
this class concatenates both sets and leaves to the user the choice of classes splitting
as was done in Ravi and Larochelle, 2017.
The background and evaluation splits are available in the `torchvision` package.
**References**
1. Lake et al. 2015. “Human-Level Concept Learning through Probabilistic Program Induction.” Science.
2. Ravi and Larochelle. 2017. “Optimization as a Model for Few-Shot Learning.” ICLR.
**Arguments**
* **root** (str) - Path to download the data.
* **transform** (Transform, *optional*, default=None) - Input pre-processing.
* **target_transform** (Transform, *optional*, default=None) - Target pre-processing.
* **download** (bool, *optional*, default=False) - Whether to download the dataset.
**Example**
~~~python
omniglot = l2l.vision.datasets.FullOmniglot(root='./data',
transform=transforms.Compose([
transforms.Resize(28, interpolation=LANCZOS),
transforms.ToTensor(),
lambda x: 1.0 - x,
]),
download=True)
omniglot = l2l.data.MetaDataset(omniglot)
~~~
"""
def __init__(self, root, transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
# Set up both the background and eval dataset
omni_background = Omniglot(self.root, background=True, download=download)
# Eval labels also start from 0.
# It's important to add 964 to label values in eval so they don't overwrite background dataset.
omni_evaluation = Omniglot(self.root,
background=False,
download=download,
target_transform=lambda x: x + len(omni_background._characters))
        omniglot_background_labels = set(range(len(omni_background._characters)))
        omniglot_evaluation_labels = set(range(964, 964 + len(omni_evaluation._characters)))
        self.labels_set = list(omniglot_background_labels.union(omniglot_evaluation_labels))
        del omniglot_background_labels
        del omniglot_evaluation_labels
self.dataset = ConcatDataset((omni_background, omni_evaluation))
self._bookkeeping_path = os.path.join(self.root, 'omniglot-bookkeeping.pkl')
self.all_data = self.flatten(self.dataset.datasets)
self.all_labels = np.array([i[1] for i in self.all_data])
self.label_to_indices = {label: np.where(self.all_labels == int(label))[0] for label in self.labels_set}
self.split_train_test_valid()
def __len__(self):
return len(self.dataset)
def split_train_test_valid(self):
np.random.shuffle(self.labels_set)
self.labels = {"train": [], "validation": [], "test": []}
self.indexes = {"train": 0, "validation": 0, "test": 0}
total_labels = len(self.labels_set)
total_train_labels = int(total_labels * 0.8)
total_valid_labels = int(total_train_labels * 0.1)
self.labels["train"] = self.labels_set[: total_train_labels]
self.labels["validation"] = self.labels["train"][-total_valid_labels: ]
self.labels["train"] = self.labels["train"][: total_train_labels - total_valid_labels]
self.labels["test"] = self.labels_set[total_train_labels: ]
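        # Worked example (descriptive note): with all 1623 Omniglot classes,
        # total_train_labels = int(1623 * 0.8) = 1298 and total_valid_labels =
        # int(1298 * 0.1) = 129, giving 1169 train, 129 validation and 325 test classes.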
def flatten(self,dataset):
return [item for sublist in dataset for item in sublist]
    def sample(self, type, k_shots=1):
        if k_shots == 1:
nbr_positive_sample = 4
nbr_negative_sample = 8
labels = self.labels[type]
index = self.indexes[type] % len(labels)
#1. select a anchor label
anchor_label = labels[index]
#2. we know that positive files should have same label as anchor
            positive_label = anchor_label  # not strictly needed to declare, kept for readability
            #3. we need to select 1 anchor and 1 positive for the support set plus 2 positives for the query set, and all of these should be different
used_positive_indexes = set(np.random.choice(self.label_to_indices[anchor_label], nbr_positive_sample, replace=False))
            #4. Now that we have selected 4 positive examples, place them into the corresponding slots
used_positive_indexes = list(used_positive_indexes)
# [x1a, x1p_1, x1p_2, x1p_3 ... x1p_n] - image tensors
positive_x = [np.array(self.all_data[used_positive_indexes[idx]][0]) for idx in range(nbr_positive_sample)]
# [y1a, y1p_1, y1p_2, y1p_3 ... y1p_n] - image labels
# positive_y = [self.all_labels[used_positive_indexes[idx]] for idx in range(nbr_positive_sample)]
            #5. Now that all the positive examples are ready, we can do the same operation to get negative indexes.
            # Picking negative examples follows a different rule: any random classes may be chosen, but if the same class happens to repeat we need to be sure different indexes are used.
remainingLabels = list(set(labels) - set([anchor_label]))
negative_labels = list(np.random.choice(remainingLabels, nbr_negative_sample // 2, replace=False))
used_negative_indexes = [idx for label in negative_labels for idx in np.random.choice(self.label_to_indices[label], 2, replace=False)]
# [x1n_1, x1n_2, x1n_3, ... x1n_n] - image tensors
negative_x = [np.array(self.all_data[used_negative_indexes[idx]][0]) for idx in range(nbr_negative_sample)]
# [y1n_1, y1n_2, y1n_3, ... y1n_n] - image labels
# negative_y = [self.all_labels[used_negative_indexes[idx]] for idx in range(nbr_negative_sample)]
X_support = []
y_support = []
# Transform
if self.transform is not None:
for idx, x in enumerate(positive_x):
positive_x[idx] = self.transform(positive_x[idx])
for idx, x in enumerate(negative_x):
negative_x[idx] = self.transform(negative_x[idx])
X_support.append(torch.stack([positive_x[0], positive_x[0], positive_x[0], positive_x[0]]).float())
X_support.append(torch.stack([positive_x[1], positive_x[1], positive_x[1], positive_x[1]]).float())
X_support.append(torch.stack([negative_x[0], negative_x[2], negative_x[4], negative_x[6]]).float())
y_support.append([0,0,0,0])
y_support.append([0,0,0,0])
y_support.append([1,2,3,4])
X_query = []
y_query = []
X_query.append(torch.stack([positive_x[2], positive_x[2], positive_x[2], positive_x[2]]).float())
X_query.append(torch.stack([positive_x[3], positive_x[3], positive_x[3], positive_x[3]]).float())
X_query.append(torch.stack([negative_x[1], negative_x[3], negative_x[5], negative_x[7]]).float())
y_query.append([0,0,0,0])
y_query.append([0,0,0,0])
y_query.append([1,2,3,4])
y_support = (torch.Tensor(y_support)).long()
y_query = (torch.Tensor(y_query)).long()
# Update index value
self.indexes[type] += 1
return X_support ,y_support, X_query, y_query
elif k_shots == 5:
nbr_positive_sample = 10
nbr_negative_labels = 4
nbr_negative_sample = 40
labels = self.labels[type]
index = self.indexes[type] % len(labels)
#1. select a anchor label
anchor_label = labels[index]
#2. we know that positive files should have same label as anchor
            positive_label = anchor_label  # not strictly needed to declare, kept for readability
            #3. we need to select 1 anchor and 4 positives for the support set plus 5 positives for the query set, and all of these should be different => 10 positive samples
used_positive_indexes = set(np.random.choice(self.label_to_indices[anchor_label], nbr_positive_sample, replace=False))
            #4. Now that we have selected 10 positive examples, place them into the corresponding slots
used_positive_indexes = list(used_positive_indexes)
# [x1a, x1p_1, x1p_2, x1p_3 ... x1p_n] - image tensors
positive_x = [np.array(self.all_data[used_positive_indexes[idx]][0]) for idx in range(nbr_positive_sample)]
# [y1a, y1p_1, y1p_2, y1p_3 ... y1p_n] - image labels
# positive_y = [self.all_labels[used_positive_indexes[idx]] for idx in range(nbr_positive_sample)]
            #5. Now that all the positive examples are ready, we can do the same operation to get negative indexes.
            # Picking negative examples follows a different rule: any random classes may be chosen, but if the same class happens to repeat we need to be sure different indexes are used.
remainingLabels = list(set(labels) - set([anchor_label]))
negative_labels = list(np.random.choice(remainingLabels, nbr_negative_labels, replace=False))
used_negative_indexes = [idx for label in negative_labels for idx in np.random.choice(self.label_to_indices[label], 10, replace=False)]
# [x1n_1, x1n_2, x1n_3, ... x1n_n] - image tensors
negative_x = [np.array(self.all_data[used_negative_indexes[idx]][0]) for idx in range(nbr_negative_sample)]
# [y1n_1, y1n_2, y1n_3, ... y1n_n] - image labels
# negative_y = [self.all_labels[used_negative_indexes[idx]] for idx in range(nbr_negative_sample)]
X_support = []
y_support = []
# Transform
if self.transform is not None:
for idx, x in enumerate(positive_x):
positive_x[idx] = self.transform(positive_x[idx])
for idx, x in enumerate(negative_x):
negative_x[idx] = self.transform(negative_x[idx])
X_support.append(torch.stack([positive_x[0] for i in range(20)]).float())
X_support.append(torch.stack([positive_x[i] for i in range(1,5) for x in range(5) ] ).float())
X_support.append(torch.stack([negative_x[i] for i in range(0,40,2)]).float())
y_support.append([0 for i in range(20)])
y_support.append([0 for i in range(20)])
y_support.append([i for i in range(1,5) for x in range(5)])
X_query = []
y_query = []
X_query.append(torch.stack([positive_x[5] for i in range(20)]).float())
X_query.append(torch.stack([positive_x[i] for i in range(6,10) for x in range(5) ]).float())
X_query.append(torch.stack([negative_x[i] for i in range(1,40,2)]).float())
y_query.append([0 for i in range(20)])
y_query.append([0 for i in range(20)])
y_query.append([i for i in range(1,5) for x in range(5)])
y_support = (torch.Tensor(y_support)).long()
y_query = (torch.Tensor(y_query)).long()
# Update index value
self.indexes[type] += 1
return X_support ,y_support, X_query, y_query
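# Episode layout produced by TripletOmniglot.sample() (descriptive note, added for clarity):
#   k_shots == 1: support and query each hold 3 stacked tensors of 4 images
#     (the anchor repeated, a positive repeated, 4 distinct negatives) with labels
#     [0,0,0,0], [0,0,0,0], [1,2,3,4].
#   k_shots == 5: support and query each hold 3 stacked tensors of 20 images
#     (the anchor repeated 20x, 4 positives repeated 5x each, 5 samples from each of
#     4 negative classes) with labels of all zeros for the first two tensors and
#     [1]*5 + [2]*5 + [3]*5 + [4]*5 for the negatives.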
if __name__ == "__main__":
import matplotlib.pyplot as plt
def show_triplets(data,label):
"""Display image for testing"""
fig = plt.figure(figsize=(60, 60))
plt.title(label,pad =20)
plt.axis('off')
axislist = [fig.add_subplot(20,3,i) for i in range(1,61)]
for i in axislist:
i.set_axis_off()
img_counter = 0
label_counter = 60
for number , ax in enumerate(axislist):
if number % 3 == 0 :
img_counter = number // 3
label_counter = 60 + number // 3
print(number , img_counter , label_counter )
ax.imshow(data[img_counter].permute(1, 2, 0).cpu().int())
ax.title.set_text(data[label_counter].numpy())
img_counter = img_counter + 20
label_counter = label_counter +20
plt.show()
def show_helper(tripletx,triplety,title):
#for i in range(len(tripletx)):
# show_triplets(*tripletx[i],*triplety[i])
show_triplets([*tripletx[0],*tripletx[1],*tripletx[2],*triplety[0],*triplety[1],*triplety[2]],title)
transform = transforms.Compose([transforms.ToTensor()])
omniglot = TripletOmniglot(root='~/data', download=True,
transform = transforms.Compose([transforms.ToTensor(),transforms.Resize((28,28)),])
)
""" print(cub.data['validation'])
print(cub.data['validation']['image_data'])
print(cub.data['validation']['class_dict']) """
a = omniglot.sample('train',5)
show_helper(a[0],a[1],"Support Images")
show_helper(a[2],a[3],"Query Images")
|
the-stack_0_13815 | import django_filters
import netaddr
from django.core.exceptions import ValidationError
from django.db.models import Q
from netaddr.core import AddrFormatError
from nautobot.dcim.models import Device, Interface, Region, Site
from nautobot.extras.filters import (
CustomFieldModelFilterSet,
CreatedUpdatedFilterSet,
StatusModelFilterSetMixin,
)
from nautobot.tenancy.filters import TenancyFilterSet
from nautobot.utilities.filters import (
BaseFilterSet,
MultiValueCharFilter,
MultiValueNumberFilter,
NameSlugSearchFilterSet,
NumericArrayFilter,
TagFilter,
TreeNodeMultipleChoiceFilter,
)
from nautobot.virtualization.models import VirtualMachine, VMInterface
from .choices import *
from .models import (
Aggregate,
IPAddress,
Prefix,
RIR,
Role,
RouteTarget,
Service,
VLAN,
VLANGroup,
VRF,
)
__all__ = (
"AggregateFilterSet",
"IPAddressFilterSet",
"PrefixFilterSet",
"RIRFilterSet",
"RoleFilterSet",
"RouteTargetFilterSet",
"ServiceFilterSet",
"VLANFilterSet",
"VLANGroupFilterSet",
"VRFFilterSet",
)
class VRFFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
import_target_id = django_filters.ModelMultipleChoiceFilter(
field_name="import_targets",
queryset=RouteTarget.objects.all(),
label="Import target",
)
import_target = django_filters.ModelMultipleChoiceFilter(
field_name="import_targets__name",
queryset=RouteTarget.objects.all(),
to_field_name="name",
label="Import target (name)",
)
export_target_id = django_filters.ModelMultipleChoiceFilter(
field_name="export_targets",
queryset=RouteTarget.objects.all(),
label="Export target",
)
export_target = django_filters.ModelMultipleChoiceFilter(
field_name="export_targets__name",
queryset=RouteTarget.objects.all(),
to_field_name="name",
label="Export target (name)",
)
tag = TagFilter()
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(rd__icontains=value) | Q(description__icontains=value))
class Meta:
model = VRF
fields = ["id", "name", "rd", "enforce_unique"]
class RouteTargetFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
importing_vrf_id = django_filters.ModelMultipleChoiceFilter(
field_name="importing_vrfs",
queryset=VRF.objects.all(),
label="Importing VRF",
)
importing_vrf = django_filters.ModelMultipleChoiceFilter(
field_name="importing_vrfs__rd",
queryset=VRF.objects.all(),
to_field_name="rd",
label="Import VRF (RD)",
)
exporting_vrf_id = django_filters.ModelMultipleChoiceFilter(
field_name="exporting_vrfs",
queryset=VRF.objects.all(),
label="Exporting VRF",
)
exporting_vrf = django_filters.ModelMultipleChoiceFilter(
field_name="exporting_vrfs__rd",
queryset=VRF.objects.all(),
to_field_name="rd",
label="Export VRF (RD)",
)
tag = TagFilter()
def search(self, queryset, name, value):
if not value.strip():
return queryset
return queryset.filter(Q(name__icontains=value) | Q(description__icontains=value))
class Meta:
model = RouteTarget
fields = ["id", "name"]
class RIRFilterSet(
BaseFilterSet,
NameSlugSearchFilterSet,
CustomFieldModelFilterSet,
CreatedUpdatedFilterSet,
):
class Meta:
model = RIR
fields = ["id", "name", "slug", "is_private", "description"]
class AggregateFilterSet(BaseFilterSet, TenancyFilterSet, CustomFieldModelFilterSet, CreatedUpdatedFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
family = django_filters.NumberFilter(field_name="prefix", lookup_expr="family")
prefix = django_filters.CharFilter(
method="filter_prefix",
label="Prefix",
)
rir_id = django_filters.ModelMultipleChoiceFilter(
queryset=RIR.objects.all(),
label="RIR (ID)",
)
rir = django_filters.ModelMultipleChoiceFilter(
field_name="rir__slug",
queryset=RIR.objects.all(),
to_field_name="slug",
label="RIR (slug)",
)
tag = TagFilter()
class Meta:
model = Aggregate
fields = ["id", "date_added"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(description__icontains=value)
try:
prefix = str(netaddr.IPNetwork(value.strip()).cidr)
qs_filter |= Q(prefix__net_contains_or_equals=prefix)
except (AddrFormatError, ValueError):
pass
return queryset.filter(qs_filter)
def filter_prefix(self, queryset, name, value):
if not value.strip():
return queryset
try:
query = str(netaddr.IPNetwork(value).cidr)
return queryset.filter(prefix=query)
except (AddrFormatError, ValueError):
return queryset.none()
class RoleFilterSet(
BaseFilterSet,
NameSlugSearchFilterSet,
CustomFieldModelFilterSet,
CreatedUpdatedFilterSet,
):
q = django_filters.CharFilter(
method="search",
label="Search",
)
class Meta:
model = Role
fields = ["id", "name", "slug"]
class PrefixFilterSet(
BaseFilterSet,
TenancyFilterSet,
StatusModelFilterSetMixin,
CustomFieldModelFilterSet,
CreatedUpdatedFilterSet,
):
q = django_filters.CharFilter(
method="search",
label="Search",
)
family = django_filters.NumberFilter(field_name="prefix", lookup_expr="family")
prefix = django_filters.CharFilter(
method="filter_prefix",
label="Prefix",
)
within = django_filters.CharFilter(
method="search_within",
label="Within prefix",
)
within_include = django_filters.CharFilter(
method="search_within_include",
label="Within and including prefix",
)
contains = django_filters.CharFilter(
method="search_contains",
label="Prefixes which contain this prefix or IP",
)
mask_length = django_filters.NumberFilter(field_name="prefix", lookup_expr="net_mask_length")
mask_length__gte = django_filters.NumberFilter(field_name="prefix", lookup_expr="net_mask_length__gte")
mask_length__lte = django_filters.NumberFilter(field_name="prefix", lookup_expr="net_mask_length__lte")
vrf_id = django_filters.ModelMultipleChoiceFilter(
queryset=VRF.objects.all(),
label="VRF",
)
vrf = django_filters.ModelMultipleChoiceFilter(
field_name="vrf__rd",
queryset=VRF.objects.all(),
to_field_name="rd",
label="VRF (RD)",
)
present_in_vrf_id = django_filters.ModelChoiceFilter(
queryset=VRF.objects.all(), method="filter_present_in_vrf", label="VRF"
)
present_in_vrf = django_filters.ModelChoiceFilter(
queryset=VRF.objects.all(),
method="filter_present_in_vrf",
to_field_name="rd",
label="VRF (RD)",
)
region_id = TreeNodeMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name="site__region",
lookup_expr="in",
label="Region (ID)",
)
region = TreeNodeMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name="site__region",
lookup_expr="in",
to_field_name="slug",
label="Region (slug)",
)
site_id = django_filters.ModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label="Site (ID)",
)
site = django_filters.ModelMultipleChoiceFilter(
field_name="site__slug",
queryset=Site.objects.all(),
to_field_name="slug",
label="Site (slug)",
)
vlan_id = django_filters.ModelMultipleChoiceFilter(
queryset=VLAN.objects.all(),
label="VLAN (ID)",
)
vlan_vid = django_filters.NumberFilter(
field_name="vlan__vid",
label="VLAN number (1-4095)",
)
role_id = django_filters.ModelMultipleChoiceFilter(
queryset=Role.objects.all(),
label="Role (ID)",
)
role = django_filters.ModelMultipleChoiceFilter(
field_name="role__slug",
queryset=Role.objects.all(),
to_field_name="slug",
label="Role (slug)",
)
tag = TagFilter()
class Meta:
model = Prefix
fields = ["id", "is_pool"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(description__icontains=value)
try:
prefix = str(netaddr.IPNetwork(value.strip()).cidr)
qs_filter |= Q(prefix__net_contains_or_equals=prefix)
except (AddrFormatError, ValueError):
pass
return queryset.filter(qs_filter)
def filter_prefix(self, queryset, name, value):
if not value.strip():
return queryset
try:
query = str(netaddr.IPNetwork(value).cidr)
return queryset.filter(prefix=query)
except (AddrFormatError, ValueError):
return queryset.none()
def search_within(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(netaddr.IPNetwork(value).cidr)
return queryset.filter(prefix__net_contained=query)
except (AddrFormatError, ValueError):
return queryset.none()
def search_within_include(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(netaddr.IPNetwork(value).cidr)
return queryset.filter(prefix__net_contained_or_equal=query)
except (AddrFormatError, ValueError):
return queryset.none()
def search_contains(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
# Searching by prefix
if "/" in value:
return queryset.filter(prefix__net_contains_or_equals=str(netaddr.IPNetwork(value).cidr))
# Searching by IP address
else:
return queryset.filter(prefix__net_contains=str(netaddr.IPAddress(value)))
except (AddrFormatError, ValueError):
return queryset.none()
def filter_present_in_vrf(self, queryset, name, vrf):
if vrf is None:
            return queryset.none()
return queryset.filter(Q(vrf=vrf) | Q(vrf__export_targets__in=vrf.import_targets.all()))
class IPAddressFilterSet(
BaseFilterSet,
TenancyFilterSet,
StatusModelFilterSetMixin,
CustomFieldModelFilterSet,
CreatedUpdatedFilterSet,
):
q = django_filters.CharFilter(
method="search",
label="Search",
)
family = django_filters.NumberFilter(field_name="address", lookup_expr="family")
parent = django_filters.CharFilter(
method="search_by_parent",
label="Parent prefix",
)
address = MultiValueCharFilter(
method="filter_address",
label="Address",
)
mask_length = django_filters.NumberFilter(
method="filter_mask_length",
label="Mask length",
)
vrf_id = django_filters.ModelMultipleChoiceFilter(
queryset=VRF.objects.all(),
label="VRF",
)
vrf = django_filters.ModelMultipleChoiceFilter(
field_name="vrf__rd",
queryset=VRF.objects.all(),
to_field_name="rd",
label="VRF (RD)",
)
present_in_vrf_id = django_filters.ModelChoiceFilter(
queryset=VRF.objects.all(), method="filter_present_in_vrf", label="VRF"
)
present_in_vrf = django_filters.ModelChoiceFilter(
queryset=VRF.objects.all(),
method="filter_present_in_vrf",
to_field_name="rd",
label="VRF (RD)",
)
device = MultiValueCharFilter(
method="filter_device",
field_name="name",
label="Device (name)",
)
device_id = MultiValueNumberFilter(
method="filter_device",
field_name="pk",
label="Device (ID)",
)
virtual_machine = MultiValueCharFilter(
method="filter_virtual_machine",
field_name="name",
label="Virtual machine (name)",
)
virtual_machine_id = MultiValueNumberFilter(
method="filter_virtual_machine",
field_name="pk",
label="Virtual machine (ID)",
)
interface = django_filters.ModelMultipleChoiceFilter(
field_name="interface__name",
queryset=Interface.objects.all(),
to_field_name="name",
label="Interface (name)",
)
interface_id = django_filters.ModelMultipleChoiceFilter(
field_name="interface",
queryset=Interface.objects.all(),
label="Interface (ID)",
)
vminterface = django_filters.ModelMultipleChoiceFilter(
field_name="vminterface__name",
queryset=VMInterface.objects.all(),
to_field_name="name",
label="VM interface (name)",
)
vminterface_id = django_filters.ModelMultipleChoiceFilter(
field_name="vminterface",
queryset=VMInterface.objects.all(),
label="VM interface (ID)",
)
assigned_to_interface = django_filters.BooleanFilter(
method="_assigned_to_interface",
label="Is assigned to an interface",
)
role = django_filters.MultipleChoiceFilter(choices=IPAddressRoleChoices)
tag = TagFilter()
class Meta:
model = IPAddress
fields = ["id", "dns_name"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(dns_name__icontains=value) | Q(description__icontains=value) | Q(address__istartswith=value)
return queryset.filter(qs_filter)
def search_by_parent(self, queryset, name, value):
value = value.strip()
if not value:
return queryset
try:
query = str(netaddr.IPNetwork(value.strip()).cidr)
return queryset.filter(address__net_host_contained=query)
except (AddrFormatError, ValueError):
return queryset.none()
def filter_address(self, queryset, name, value):
try:
return queryset.filter(address__net_in=value)
except ValidationError:
return queryset.none()
def filter_mask_length(self, queryset, name, value):
if not value:
return queryset
return queryset.filter(address__net_mask_length=value)
def filter_present_in_vrf(self, queryset, name, vrf):
if vrf is None:
            return queryset.none()
return queryset.filter(Q(vrf=vrf) | Q(vrf__export_targets__in=vrf.import_targets.all()))
def filter_device(self, queryset, name, value):
devices = Device.objects.filter(**{"{}__in".format(name): value})
if not devices.exists():
return queryset.none()
interface_ids = []
for device in devices:
interface_ids.extend(device.vc_interfaces.values_list("id", flat=True))
return queryset.filter(interface__in=interface_ids)
def filter_virtual_machine(self, queryset, name, value):
virtual_machines = VirtualMachine.objects.filter(**{"{}__in".format(name): value})
if not virtual_machines.exists():
return queryset.none()
interface_ids = []
for vm in virtual_machines:
interface_ids.extend(vm.interfaces.values_list("id", flat=True))
return queryset.filter(vminterface__in=interface_ids)
def _assigned_to_interface(self, queryset, name, value):
return queryset.exclude(assigned_object_id__isnull=value)
class VLANGroupFilterSet(
BaseFilterSet,
NameSlugSearchFilterSet,
CustomFieldModelFilterSet,
CreatedUpdatedFilterSet,
):
region_id = TreeNodeMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name="site__region",
lookup_expr="in",
label="Region (ID)",
)
region = TreeNodeMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name="site__region",
lookup_expr="in",
to_field_name="slug",
label="Region (slug)",
)
site_id = django_filters.ModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label="Site (ID)",
)
site = django_filters.ModelMultipleChoiceFilter(
field_name="site__slug",
queryset=Site.objects.all(),
to_field_name="slug",
label="Site (slug)",
)
class Meta:
model = VLANGroup
fields = ["id", "name", "slug", "description"]
class VLANFilterSet(
BaseFilterSet,
TenancyFilterSet,
StatusModelFilterSetMixin,
CustomFieldModelFilterSet,
CreatedUpdatedFilterSet,
):
q = django_filters.CharFilter(
method="search",
label="Search",
)
region_id = TreeNodeMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name="site__region",
lookup_expr="in",
label="Region (ID)",
)
region = TreeNodeMultipleChoiceFilter(
queryset=Region.objects.all(),
field_name="site__region",
lookup_expr="in",
to_field_name="slug",
label="Region (slug)",
)
site_id = django_filters.ModelMultipleChoiceFilter(
queryset=Site.objects.all(),
label="Site (ID)",
)
site = django_filters.ModelMultipleChoiceFilter(
field_name="site__slug",
queryset=Site.objects.all(),
to_field_name="slug",
label="Site (slug)",
)
group_id = django_filters.ModelMultipleChoiceFilter(
queryset=VLANGroup.objects.all(),
label="Group (ID)",
)
group = django_filters.ModelMultipleChoiceFilter(
field_name="group__slug",
queryset=VLANGroup.objects.all(),
to_field_name="slug",
label="Group",
)
role_id = django_filters.ModelMultipleChoiceFilter(
queryset=Role.objects.all(),
label="Role (ID)",
)
role = django_filters.ModelMultipleChoiceFilter(
field_name="role__slug",
queryset=Role.objects.all(),
to_field_name="slug",
label="Role (slug)",
)
tag = TagFilter()
class Meta:
model = VLAN
fields = ["id", "vid", "name"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(description__icontains=value)
try:
qs_filter |= Q(vid=int(value.strip()))
except ValueError:
pass
return queryset.filter(qs_filter)
class ServiceFilterSet(BaseFilterSet, CreatedUpdatedFilterSet):
q = django_filters.CharFilter(
method="search",
label="Search",
)
device_id = django_filters.ModelMultipleChoiceFilter(
queryset=Device.objects.all(),
label="Device (ID)",
)
device = django_filters.ModelMultipleChoiceFilter(
field_name="device__name",
queryset=Device.objects.all(),
to_field_name="name",
label="Device (name)",
)
virtual_machine_id = django_filters.ModelMultipleChoiceFilter(
queryset=VirtualMachine.objects.all(),
label="Virtual machine (ID)",
)
virtual_machine = django_filters.ModelMultipleChoiceFilter(
field_name="virtual_machine__name",
queryset=VirtualMachine.objects.all(),
to_field_name="name",
label="Virtual machine (name)",
)
port = NumericArrayFilter(field_name="ports", lookup_expr="contains")
tag = TagFilter()
class Meta:
model = Service
fields = ["id", "name", "protocol"]
def search(self, queryset, name, value):
if not value.strip():
return queryset
qs_filter = Q(name__icontains=value) | Q(description__icontains=value)
return queryset.filter(qs_filter)
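# Hedged usage sketch (illustrative, not part of the module): like any django-filter
# FilterSet, these classes are evaluated by passing GET-style data plus a base
# queryset and reading the filtered `.qs`; the field names below come from
# PrefixFilterSet defined above.
def _example_prefix_lookup():
    filterset = PrefixFilterSet(
        data={"within": "10.0.0.0/16", "family": "4"},
        queryset=Prefix.objects.all(),
    )
    return filterset.qs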
|
the-stack_0_13816 | """Support for monitoring if a sensor value is below/above a threshold."""
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
_LOGGER = logging.getLogger(__name__)
ATTR_HYSTERESIS = "hysteresis"
ATTR_LOWER = "lower"
ATTR_POSITION = "position"
ATTR_SENSOR_VALUE = "sensor_value"
ATTR_TYPE = "type"
ATTR_UPPER = "upper"
CONF_HYSTERESIS = "hysteresis"
CONF_LOWER = "lower"
CONF_UPPER = "upper"
DEFAULT_NAME = "Threshold"
DEFAULT_HYSTERESIS = 0.0
POSITION_ABOVE = "above"
POSITION_BELOW = "below"
POSITION_IN_RANGE = "in_range"
POSITION_UNKNOWN = "unknown"
TYPE_LOWER = "lower"
TYPE_RANGE = "range"
TYPE_UPPER = "upper"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_HYSTERESIS, default=DEFAULT_HYSTERESIS): vol.Coerce(float),
vol.Optional(CONF_LOWER): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UPPER): vol.Coerce(float),
}
)
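# Example configuration (hypothetical entity and values) accepted by the schema above:
#
# binary_sensor:
#   - platform: threshold
#     entity_id: sensor.outside_temperature
#     lower: 10
#     upper: 25
#     hysteresis: 0.5
#     device_class: cold
#     name: Comfortable temperature range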
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Threshold sensor."""
entity_id = config.get(CONF_ENTITY_ID)
name = config.get(CONF_NAME)
lower = config.get(CONF_LOWER)
upper = config.get(CONF_UPPER)
hysteresis = config.get(CONF_HYSTERESIS)
device_class = config.get(CONF_DEVICE_CLASS)
async_add_entities(
[
ThresholdSensor(
hass, entity_id, name, lower, upper, hysteresis, device_class
)
],
)
class ThresholdSensor(BinarySensorEntity):
"""Representation of a Threshold sensor."""
def __init__(self, hass, entity_id, name, lower, upper, hysteresis, device_class):
"""Initialize the Threshold sensor."""
self._hass = hass
self._entity_id = entity_id
self._name = name
self._threshold_lower = lower
self._threshold_upper = upper
self._hysteresis = hysteresis
self._device_class = device_class
self._state_position = POSITION_UNKNOWN
self._state = None
self.sensor_value = None
@callback
def async_threshold_sensor_state_listener(event):
"""Handle sensor state changes."""
new_state = event.data.get("new_state")
if new_state is None:
return
try:
self.sensor_value = (
None
if new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
else float(new_state.state)
)
except (ValueError, TypeError):
self.sensor_value = None
_LOGGER.warning("State is not numerical")
self._update_state()
self.async_write_ha_state()
async_track_state_change_event(
hass, [entity_id], async_threshold_sensor_state_listener
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the sensor class of the sensor."""
return self._device_class
@property
def threshold_type(self):
"""Return the type of threshold this sensor represents."""
if self._threshold_lower is not None and self._threshold_upper is not None:
return TYPE_RANGE
if self._threshold_lower is not None:
return TYPE_LOWER
if self._threshold_upper is not None:
return TYPE_UPPER
@property
def extra_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_ENTITY_ID: self._entity_id,
ATTR_HYSTERESIS: self._hysteresis,
ATTR_LOWER: self._threshold_lower,
ATTR_POSITION: self._state_position,
ATTR_SENSOR_VALUE: self.sensor_value,
ATTR_TYPE: self.threshold_type,
ATTR_UPPER: self._threshold_upper,
}
@callback
def _update_state(self):
"""Update the state."""
def below(threshold):
"""Determine if the sensor value is below a threshold."""
return self.sensor_value < (threshold - self._hysteresis)
def above(threshold):
"""Determine if the sensor value is above a threshold."""
return self.sensor_value > (threshold + self._hysteresis)
if self.sensor_value is None:
self._state_position = POSITION_UNKNOWN
self._state = False
elif self.threshold_type == TYPE_LOWER:
if below(self._threshold_lower):
self._state_position = POSITION_BELOW
self._state = True
elif above(self._threshold_lower):
self._state_position = POSITION_ABOVE
self._state = False
elif self.threshold_type == TYPE_UPPER:
if above(self._threshold_upper):
self._state_position = POSITION_ABOVE
self._state = True
elif below(self._threshold_upper):
self._state_position = POSITION_BELOW
self._state = False
elif self.threshold_type == TYPE_RANGE:
if below(self._threshold_lower):
self._state_position = POSITION_BELOW
self._state = False
if above(self._threshold_upper):
self._state_position = POSITION_ABOVE
self._state = False
elif above(self._threshold_lower) and below(self._threshold_upper):
self._state_position = POSITION_IN_RANGE
self._state = True
|
the-stack_0_13820 | #!/usr/bin/env python3
#
# Copyright 2019 PSB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""./iJO1366star/ec_model_2019_06_25_CALIBRATOR_get_reaction_flux_control_and_differential_reactions.py
This script contains an exemplary use of the AutoPACMEN Model Calibrator
Python functions as described in AutoPACMEN's manual.
"""
# External modules
import cobra
# Internal modules
from ec_model_2019_06_25_data_scenarios_for_optimization import ec_model_scenarios_for_optimization
from ec_model_2019_06_25_data_set_up_model import set_up_ec_model_with_sbml
from autopacmen.submodules.reaction_flux_control_by_scenario import reaction_flux_control_by_scenario
from autopacmen.submodules.get_differential_reactions import get_differential_reactions
from autopacmen.submodules.helper_general import json_write
# Set-up of project
flux_control_folder = "iJO1366star/ec_model_2019_06_25_output_optimization/flux_control_data_2019_06_25_manual_changes/"
project_name = "psb_orth"
# Read SBML model
print("Reading SBML model...")
original_thermogecko_sbml_path: str = "./iJO1366star/ec_model_2019_06_25_output_optimization/iJO1366_sMOMENT_2019_06_25_STANDARD_EXCHANGE_SCENARIO_MANUAL_CHANGES.xml"
model: cobra.Model = set_up_ec_model_with_sbml(
original_thermogecko_sbml_path, .095)
# Set protein bound
model.reactions.get_by_id("ER_pool_TG_").upper_bound = .095
# Get flux controlling proteins
print("Getting flux control files...")
reaction_flux_control_by_scenario(
model, flux_control_folder, project_name, ec_model_scenarios_for_optimization)
# Get differential proteins
print("Getting differential reactions (Growth)...")
unique_differential_reactions_of_scenarios, _ = \
get_differential_reactions(list(ec_model_scenarios_for_optimization.keys()), flux_control_folder, project_name,
ec_model_scenarios_for_optimization,
threshold=(.1) / 1000, print_result=True)
# Get unique reactions in MATLAB style
for scenario_key in unique_differential_reactions_of_scenarios.keys():
print(f"% {scenario_key}")
unique_reactions = unique_differential_reactions_of_scenarios[scenario_key]
for unique_reaction in unique_reactions:
print(f'"R_{unique_reaction}",')
json_write("./iJO1366star/ec_model_2019_06_25_output_optimization/iJO1366_sMOMENT_2019_06_25_STANDARD_EXCHANGE_SCENARIO_MANUAL_CHANGES_unique_differential_reactions_of_scenarios.json",
unique_differential_reactions_of_scenarios)
|
the-stack_0_13821 | # Copyright (C) 2020
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
"""
Get detailed info about any user
"""
import os
from telethon.tl.functions.photos import GetUserPhotosRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import MessageEntityMentionName
from telebot import CMD_HELP
TMP_DOWNLOAD_DIRECTORY = "./"
@telebot.on(admin_cmd(pattern="whois(?: |$)(.*)"))
async def who(event):
""" For .whois command, get info about a user. """
if event.fwd_from:
return
if not os.path.isdir(TMP_DOWNLOAD_DIRECTORY):
os.makedirs(TMP_DOWNLOAD_DIRECTORY)
replied_user = await get_user(event)
caption = await fetch_info(replied_user, event)
message_id_to_reply = event.message.reply_to_msg_id
replied_user_profile_photos = await borg(
GetUserPhotosRequest(
user_id=replied_user.user.id, offset=42, max_id=0, limit=80
)
)
if not message_id_to_reply:
message_id_to_reply = None
await borg.send_message(
event.chat_id,
caption,
reply_to=message_id_to_reply,
parse_mode="HTML",
file=replied_user.profile_photo,
force_document=False,
silent=True,
)
await event.delete()
async def get_user(event):
""" Get the user from argument or replied message. """
if event.reply_to_msg_id:
previous_message = await event.get_reply_message()
if previous_message.forward:
replied_user = await event.client(
GetFullUserRequest(previous_message.forward.sender_id)
)
else:
replied_user = await event.client(
GetFullUserRequest(previous_message.sender_id)
)
else:
user = event.pattern_match.group(1)
if user.isnumeric():
user = int(user)
if not user:
self_user = await event.client.get_me()
user = self_user.id
if event.message.entities is not None:
probable_user_mention_entity = event.message.entities[0]
if isinstance(probable_user_mention_entity, MessageEntityMentionName):
user_id = probable_user_mention_entity.user_id
replied_user = await event.client(GetFullUserRequest(user_id))
return replied_user
try:
user_object = await event.client.get_entity(user)
replied_user = await event.client(GetFullUserRequest(user_object.id))
except (TypeError, ValueError) as err:
await event.edit("**ERROR**\n" + str(err))
return None
replied_user_profile_photos = await borg(
GetUserPhotosRequest(
user_id=replied_user.user.id, offset=42, max_id=0, limit=80
)
)
return replied_user
async def fetch_info(replied_user, event):
""" Get details from the User object. """
user_id = replied_user.user.id
first_name = replied_user.user.first_name
last_name = replied_user.user.last_name
username = replied_user.user.username
user_bio = replied_user.about
is_bot = replied_user.user.bot
restricted = replied_user.user.restricted
verified = replied_user.user.verified
first_name = first_name.replace("\u2060", "") if first_name else (" ")
last_name = last_name.replace("\u2060", "") if last_name else (" ")
replied_user_profile_photos = await event.client(
GetUserPhotosRequest(
user_id=replied_user.user.id, offset=42, max_id=0, limit=80
)
)
replied_user_profile_photos_count = "NaN"
try:
replied_user_profile_photos_count = replied_user_profile_photos.count
except AttributeError:
pass
username = "@{}".format(username) if username else ("This User has no Username")
user_bio = "This User has no About" if not user_bio else user_bio
if user_id != (await event.client.get_me()).id:
common_chat = replied_user.common_chats_count
else:
common_chat = "I've seen them in... Wow. Are they stalking me? "
common_chat += "They're in all the same places I am... oh. It's me."
caption = "<u><b>Dᴇᴛᴀɪʟᴇᴅ UsᴇʀIɴғᴏ</b></u>\n\n"
caption += f"✯ <b>Fɪʀsᴛ Nᴀᴍᴇ</b>: <code>{first_name}</code> \n"
caption += f"✯ <b>Lᴀsᴛ Nᴀᴍᴇ</b>: <code>{last_name}</code> \n"
caption += f"✯ <b>UsᴇʀNᴀᴍᴇ</b>: <code>{username}</code> \n"
caption += f"✯ <b>Is Bᴏᴛ</b>: <code>{is_bot}</code> \n"
caption += f"✯ <b>Is Rᴇsᴛʀɪᴄᴛᴇᴅ</b>: <code>{restricted}</code> \n"
caption += f"✯ <b>Is Vᴇʀɪғɪᴇᴅ ʙʏ Tᴇʟᴇɢʀᴀᴍ</b>: <code>{verified}</code> \n"
caption += f"✯ <b>ID</b>: <code>{user_id}</code> \n"
caption += f"✯ <b>Bɪᴏ</b>: <code>{user_bio}</code> \n\n"
caption += f"✯ <b>Nᴜᴍʙᴇʀ ᴏғ Pʀᴏғɪʟᴇ Pɪᴄs</b>: <code>{replied_user_profile_photos_count}</code> \n"
caption += f"\n✯ <b>Cᴏᴍᴍᴏɴ Cʜᴀᴛs</b>: <code>{common_chat} </code>\n\n"
caption += f"✯ <b>Pᴇʀᴍᴀɴᴇɴᴛ Lɪɴᴋ</b>: "
caption += f'<a href="tg://user?id={user_id}">{first_name}</a>'
return caption
CMD_HELP.update(
{
"whois": ".whois <id/reply to mssg>\nUse - Get full details about a persons telegram account."
}
)
|
the-stack_0_13823 | import os
import shutil
from collections import OrderedDict
from shaper import manager, libs
def test_create_folder():
temp_dir_name = 'test_folder'
manager.create_folders(temp_dir_name)
assert os.path.isdir(temp_dir_name)
shutil.rmtree(temp_dir_name)
def test_read_properties(test_assets_root):
input_dir = test_assets_root / 'input'
filename_data_map = manager.read_properties(input_dir)
for filename, data in filename_data_map.items():
assert os.path.splitext(filename)[1] in libs.PARSERS_MAPPING
assert data is not None
assert isinstance(data, (dict, OrderedDict))
def test_forward_path_parser():
datastructure = {
'g/e/c6': 'c6',
'a/d/c3': 'c3',
'a/b/c1': 'c1',
'a/c4': 'c4',
'a/b/c2': 'c2',
'g/c5': 'c5',
}
expected = {
'a': {
'b': {'c1': 'c1', 'c2': 'c2'},
'c4': 'c4',
'd': {'c3': 'c3'},
},
'g': {
'c5': 'c5',
'e': {'c6': 'c6'},
},
}
assert expected == manager.forward_path_parser(datastructure)
def test_backward_path_parser():
datastructure = OrderedDict(
[
(
'a',
OrderedDict(
[
('c4.py', 'c4'),
('d', OrderedDict([('c3.py', 'c3')])),
('b', OrderedDict([('c2.py', 'c2'), ('c1.py', 'c1')])),
],
),
),
('g', OrderedDict([('c5.py', 'c5'), ('e', OrderedDict([('c6.py', 'c6')]))])),
],
)
expected = {
'a/b/c1.py': 'c1',
'a/b/c2.py': 'c2',
'a/c4.py': 'c4',
'a/d/c3.py': 'c3',
'g/c5.py': 'c5',
'g/e/c6.py': 'c6',
}
assert expected == manager.backward_path_parser(datastructure)
|
the-stack_0_13825 | import logging
from unittest import TestCase
import requests_mock
from parameterized import parameterized
from hvac.adapters import JSONAdapter
from hvac.api.auth_methods import Okta
class TestOkta(TestCase):
TEST_MOUNT_POINT = "okta-test"
TEST_USERNAME = "hvac-person"
@parameterized.expand(
[
(
"success",
dict(),
None,
),
]
)
@requests_mock.Mocker()
def test_login(self, label, test_params, raises, requests_mocker):
test_policies = [
"default",
]
expected_status_code = 200
mock_url = (
"http://localhost:8200/v1/auth/{mount_point}/login/{username}".format(
mount_point=self.TEST_MOUNT_POINT,
username=self.TEST_USERNAME,
)
)
mock_response = {
"lease_id": "",
"data": None,
"warnings": None,
"auth": {
"client_token": "64d2a8f2-2a2f-5688-102b-e6088b76e344",
"accessor": "18bb8f89-826a-56ee-c65b-1736dc5ea27d",
"policies": ["default"],
"metadata": {"username": self.TEST_USERNAME, "policies": "default"},
},
"lease_duration": 7200,
"renewable": True,
}
requests_mocker.register_uri(
method="POST",
url=mock_url,
status_code=expected_status_code,
json=mock_response,
)
okta = Okta(adapter=JSONAdapter())
if raises is not None:
with self.assertRaises(raises):
okta.login(
username=self.TEST_USERNAME,
password="badpassword",
mount_point=self.TEST_MOUNT_POINT,
**test_params
)
else:
login_response = okta.login(
username=self.TEST_USERNAME,
password="badpassword",
mount_point=self.TEST_MOUNT_POINT,
**test_params
)
logging.debug("login_response: %s" % login_response)
self.assertEqual(
first=login_response["auth"]["policies"],
second=test_policies,
)
|
the-stack_0_13827 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for the psort CLI tool."""
from __future__ import unicode_literals
import argparse
import os
import unittest
try:
import resource
except ImportError:
resource = None
from plaso.cli import psort_tool
from plaso.cli.helpers import interface as helpers_interface
from plaso.cli.helpers import manager as helpers_manager
from plaso.lib import errors
from plaso.output import interface as output_interface
from plaso.output import manager as output_manager
from tests import test_lib as shared_test_lib
from tests.cli import test_lib
class TestInputReader(object):
"""Test input reader."""
def __init__(self):
"""Initialize the reader."""
super(TestInputReader, self).__init__()
self.read_called = False
def Read(self):
"""Mock a read operation by user."""
self.read_called = True
return 'foobar'
class TestOutputModuleArgumentHelper(helpers_interface.ArgumentsHelper):
"""Test argument helper for the test output module."""
NAME = 'test_missing'
@classmethod
def AddArguments(cls, argument_group):
"""Mock the add argument section."""
pass
# pylint: disable=arguments-differ
@classmethod
def ParseOptions(cls, options, output_module):
"""Provide a test parse options section."""
if not isinstance(output_module, TestOutputModuleMissingParameters):
raise errors.BadConfigObject((
'Output module is not an instance of '
'TestOutputModuleMissingParameters'))
missing = getattr(options, 'missing', None)
if missing:
output_module.SetMissingValue('missing', missing)
parameters = getattr(options, 'parameters', None)
if parameters:
output_module.SetMissingValue('parameters', parameters)
class TestOutputModuleMissingParameters(output_interface.LinearOutputModule):
"""Test output module that is missing some parameters."""
NAME = 'test_missing'
_HEADER = (
'date,time,timezone,MACB,source,sourcetype,type,user,host,'
'short,desc,version,filename,inode,notes,format,extra\n')
# For test purpose assign these as class attributes.
missing = None
parameters = None
def GetMissingArguments(self):
"""Return a list of missing parameters."""
missing_parameters = []
if self.missing is None:
missing_parameters.append('missing')
if self.parameters is None:
missing_parameters.append('parameters')
return missing_parameters
@classmethod
def SetMissingValue(cls, attribute, value):
"""Set missing value."""
setattr(cls, attribute, value)
def WriteEventBody(self, event):
"""Writes the body of an event object to the output.
Args:
event (EventObject): event.
"""
message, _ = self._output_mediator.GetFormattedMessages(event)
source_short, source_long = self._output_mediator.GetFormattedSources(event)
self._output_writer.Write('{0:s}/{1:s} {2:s}\n'.format(
source_short, source_long, message))
def WriteHeader(self):
"""Writes the header to the output."""
self._output_writer.Write(self._HEADER)
class PsortToolTest(test_lib.CLIToolTestCase):
"""Tests for the psort tool."""
if resource is None:
_EXPECTED_PROCESSING_OPTIONS = """\
usage: psort_test.py [--temporary_directory DIRECTORY] [--disable_zeromq]
[--worker-memory-limit SIZE]
Test argument parser.
optional arguments:
--disable_zeromq, --disable-zeromq
Disable queueing using ZeroMQ. A Multiprocessing queue
will be used instead.
--temporary_directory DIRECTORY, --temporary-directory DIRECTORY
Path to the directory that should be used to store
temporary files created during processing.
--worker-memory-limit SIZE, --worker_memory_limit SIZE
Maximum amount of memory (data segment and shared
memory) a worker process is allowed to consume in
bytes, where 0 represents no limit. The default limit
is 2147483648 (2 GiB). If a worker process exceeds
this limit is is killed by the main (foreman) process.
"""
else:
_EXPECTED_PROCESSING_OPTIONS = """\
usage: psort_test.py [--process_memory_limit SIZE]
[--temporary_directory DIRECTORY] [--disable_zeromq]
[--worker-memory-limit SIZE]
Test argument parser.
optional arguments:
--disable_zeromq, --disable-zeromq
Disable queueing using ZeroMQ. A Multiprocessing queue
will be used instead.
--process_memory_limit SIZE, --process-memory-limit SIZE
Maximum amount of memory (data segment) a process is
allowed to allocate in bytes, where 0 represents no
limit. The default limit is 4294967296 (4 GiB). This
applies to both the main (foreman) process and the
worker processes. This limit is enforced by the
operating system and will supersede the worker memory
limit (--worker_memory_limit).
--temporary_directory DIRECTORY, --temporary-directory DIRECTORY
Path to the directory that should be used to store
temporary files created during processing.
--worker-memory-limit SIZE, --worker_memory_limit SIZE
Maximum amount of memory (data segment and shared
memory) a worker process is allowed to consume in
bytes, where 0 represents no limit. The default limit
is 2147483648 (2 GiB). If a worker process exceeds
this limit is is killed by the main (foreman) process.
"""
# TODO: add test for _CreateOutputModule.
# TODO: add test for _FormatStatusTableRow.
# TODO: add test for _GetAnalysisPlugins.
# TODO: add test for _ParseAnalysisPluginOptions.
# TODO: add test for _ParseProcessingOptions.
# TODO: add test for _ParseInformationalOptions.
# TODO: add test for _PrintStatusHeader.
# TODO: add test for _PrintStatusUpdate.
# TODO: add test for _PrintStatusUpdateStream.
def testAddProcessingOptions(self):
"""Tests the AddProcessingOptions function."""
argument_parser = argparse.ArgumentParser(
prog='psort_test.py',
description='Test argument parser.', add_help=False,
formatter_class=test_lib.SortedArgumentsHelpFormatter)
test_tool = psort_tool.PsortTool()
test_tool.AddProcessingOptions(argument_parser)
output = self._RunArgparseFormatHelp(argument_parser)
self.assertEqual(output, self._EXPECTED_PROCESSING_OPTIONS)
def testListLanguageIdentifiers(self):
"""Tests the ListLanguageIdentifiers function."""
output_writer = test_lib.TestOutputWriter(encoding='utf-8')
test_tool = psort_tool.PsortTool(output_writer=output_writer)
test_tool.ListLanguageIdentifiers()
output = output_writer.ReadOutput()
number_of_tables = 0
lines = []
for line in output.split(b'\n'):
line = line.strip()
lines.append(line)
if line.startswith(b'*****') and line.endswith(b'*****'):
number_of_tables += 1
self.assertIn('Language identifiers', lines[1])
lines = frozenset(lines)
self.assertEqual(number_of_tables, 1)
expected_line = b'en : English'
self.assertIn(expected_line, lines)
def testParseArguments(self):
"""Tests the ParseArguments function."""
output_writer = test_lib.TestOutputWriter(encoding='utf-8')
test_tool = psort_tool.PsortTool(output_writer=output_writer)
result = test_tool.ParseArguments()
self.assertFalse(result)
# TODO: check output.
# TODO: improve test coverage.
def testParseOptions(self):
"""Tests the ParseOptions function."""
output_writer = test_lib.TestOutputWriter(encoding='utf-8')
test_tool = psort_tool.PsortTool(output_writer=output_writer)
options = test_lib.TestOptions()
options.output_format = 'null'
options.storage_file = self._GetTestFilePath(['psort_test.plaso'])
test_tool.ParseOptions(options)
options = test_lib.TestOptions()
with self.assertRaises(errors.BadConfigOption):
test_tool.ParseOptions(options)
options = test_lib.TestOptions()
options.storage_file = self._GetTestFilePath(['psort_test.plaso'])
with self.assertRaises(errors.BadConfigOption):
test_tool.ParseOptions(options)
# TODO: improve test coverage.
def testProcessStorageWithMissingParameters(self):
"""Tests the ProcessStorage function with parameters missing."""
input_reader = TestInputReader()
output_writer = test_lib.TestOutputWriter(encoding='utf-8')
test_tool = psort_tool.PsortTool(
input_reader=input_reader, output_writer=output_writer)
options = test_lib.TestOptions()
options.storage_file = self._GetTestFilePath(['psort_test.plaso'])
options.output_format = 'test_missing'
output_manager.OutputManager.RegisterOutput(
TestOutputModuleMissingParameters)
helpers_manager.ArgumentHelperManager.RegisterHelper(
TestOutputModuleArgumentHelper)
lines = []
with shared_test_lib.TempDirectory() as temp_directory:
temp_file_name = os.path.join(temp_directory, 'output.txt')
options.write = temp_file_name
test_tool.ParseOptions(options)
test_tool.ProcessStorage()
with open(temp_file_name, 'rb') as file_object:
for line in file_object.readlines():
lines.append(line.strip())
self.assertTrue(input_reader.read_called)
self.assertEqual(TestOutputModuleMissingParameters.missing, 'foobar')
self.assertEqual(TestOutputModuleMissingParameters.parameters, 'foobar')
expected_line = (
'FILE/OS Metadata Modification Time OS:/tmp/test/test_data/syslog '
'Type: file')
self.assertIn(expected_line, lines)
output_manager.OutputManager.DeregisterOutput(
TestOutputModuleMissingParameters)
helpers_manager.ArgumentHelperManager.DeregisterHelper(
TestOutputModuleArgumentHelper)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13829 | from optimism.JaxConfig import *
from optimism import ReadMesh
from optimism.material import Neohookean
from optimism.material import LinearElastic
from optimism import EquationSolver
from optimism.SparseCholesky import SparseCholesky as Cholesky
from optimism import SparseMatrixAssembler
from optimism import FunctionSpace
from optimism import Mechanics
from optimism import VTKWriter
from optimism import Mesh
from optimism import Mechanics
from optimism import QuadratureRule
from optimism import TractionBC
from optimism.inverse import NonlinearSolve
from optimism.Mesh import EssentialBC
from optimism.Mesh import DofManager
from optimism.Timer import Timer
from optimism import Objective
from jax.experimental import optimizers
from optimism.test.MeshFixture import MeshFixture
shapeOpt = True
if shapeOpt:
import optimism.inverse.ShapeOpt as DesignOpt
else:
import optimism.inverse.TopOpt as DesignOpt
props = {'elastic modulus': 10.0,
'poisson ratio': 0.3}
dProps = {'elastic modulus': 1e-1,
'poisson ratio': 0.0}
materialModel = Neohookean.create_material_model_functions(props)
meshDistortionModel = Neohookean.create_material_model_functions(dProps)
settings = EquationSolver.get_settings(max_cg_iters=100,
max_trust_iters=500,
min_tr_size=1e-13,
tol=1e-10)
maxTraction = 0.025
numSteps = 15
N = 8
M = 56
top = 'nodeset_1'
topSide = 'sideset_1'
bottom = 'nodeset_2'
class Buckle(MeshFixture):
def __init__(self):
self.designStep=0
self.w = 0.07
self.archRadius = 1.5
self.mesh, _ = \
self.create_arch_mesh_disp_and_edges(N, M,
self.w, self.archRadius, 0.5*self.w)
assert(self.mesh.nodeSets['push'].shape[0] == 2)
EBCs = [EssentialBC(nodeSet='left', field=0),
EssentialBC(nodeSet='left', field=1),
EssentialBC(nodeSet='right', field=0),
EssentialBC(nodeSet='right', field=1)]
self.dofManager = DofManager(self.mesh, self.mesh.coords.shape, EBCs)
self.triQuadRule = QuadratureRule.create_quadrature_rule_on_triangle(degree=1)
self.U = np.zeros(self.mesh.coords.shape)
self.Uu = self.dofManager.get_unknown_values(self.U)
meshfs = FunctionSpace.construct_weighted_function_space(self.mesh, self.triQuadRule)
self.meshMechFuncs = Mechanics.create_mechanics_functions(meshfs,
'plane strain',
meshDistortionModel)
self.meshMechState = self.meshMechFuncs.compute_initial_state()
def energy_func(self, Uu, p):
fs = DesignOpt.create_function_space(self.mesh,
self.triQuadRule,
p)
mechFuncs = Mechanics.create_mechanics_functions(fs,
'plane strain',
materialModel)
U = create_field(Uu, p, self.mesh, self.dofManager)
pushDisps = U[self.mesh.nodeSets['push'],1]
tractionEnergy = -p[0] * np.sum(pushDisps)
mechanicalEnergy = mechFuncs.compute_strain_energy(U, p[1])
return mechanicalEnergy + tractionEnergy
def reaction_func(self, Uu, p):
return grad(self.energy_func,1)(Uu,p)[0]
def compute_volume(self, p):
fs = DesignOpt.create_function_space(self.mesh, self.triQuadRule, p)
totalVolContrib = np.sum(fs.vols.ravel())
return totalVolContrib
def compute_mesh_mechanical_energy(self, p):
return self.meshMechFuncs.compute_strain_energy(p[2]-self.mesh.coords, self.meshMechState) if shapeOpt else 0.0
def assemble_sparse(self, Uu, p):
fs = DesignOpt.create_function_space(self.mesh, self.triQuadRule, p)
mechFuncs = Mechanics.create_mechanics_functions(fs,
'plane strain',
materialModel)
U = create_field(Uu, p, self.mesh, self.dofManager)
elementStiffnesses = mechFuncs.compute_element_stiffnesses(U, p[1])
return SparseMatrixAssembler.assemble_sparse_stiffness_matrix(elementStiffnesses,
self.mesh.conns,
self.dofManager)
def create_params_from_design_vars(self, chi):
return DesignOpt.create_parameters_from_design_vars(chi, self.mesh, self.dofManager)
def simulate(self, designParams, output_func):
traction = 0.0
self.objective.p = Objective.param_index_update(self.objective.p, 0, traction)
Uu = 0.0*self.Uu
U = create_field(Uu, self.objective.p, self.mesh, self.dofManager)
pushDisps = U[self.mesh.nodeSets['push'],1]
output_func(U, self.objective.p, pushDisps, 0)
work = 0.0
for i in range(numSteps):
tractionInc = maxTraction/numSteps
traction -= tractionInc
p = Objective.param_index_update(self.objective.p, 0, traction)
Uu = NonlinearSolve.nonlinear_solve(self.objective, settings, Uu, designParams)
U = create_field(Uu, p, self.mesh, self.dofManager)
pushDispsNew = U[self.mesh.nodeSets['push'],1]
work += p[0] * np.sum(pushDispsNew - pushDisps)
pushDisps = pushDispsNew
output_func(U, p, pushDisps, i+1)
# reverse the loading
for i in range(numSteps):
tractionInc = maxTraction/numSteps
traction += tractionInc
p = Objective.param_index_update(self.objective.p, 0, traction)
Uu = NonlinearSolve.nonlinear_solve(self.objective, settings, Uu, designParams)
U = create_field(Uu, p, self.mesh, self.dofManager)
pushDispsNew = U[self.mesh.nodeSets['push'],1]
work += p[0] * np.sum(pushDispsNew - pushDisps)
pushDisps = pushDispsNew
output_func(U, p, pushDisps, numSteps+i+1)
q = Objective.param_index_update(self.objective.p, 2, designParams)
volume = self.compute_volume(q)
meshMechanicalEnergy = 100*self.compute_mesh_mechanical_energy(q)
work *= -100
print('volume, work, mesh energy = ', volume, work, meshMechanicalEnergy)
designObjective = work + volume + meshMechanicalEnergy
if designObjective != designObjective:
designObjective = np.inf
return designObjective
def run(self):
chi = DesignOpt.create_initial_design_vars(self.mesh, self.dofManager, self.triQuadRule)
designParams = DesignOpt.create_parameters_from_design_vars(chi, self.mesh, self.dofManager)
initialDisp = 0.0
p = Objective.Params(initialDisp, None, designParams)
fs = DesignOpt.create_function_space(self.mesh, self.triQuadRule, designParams)
mechFuncs = Mechanics.create_mechanics_functions(fs,
'plane strain',
materialModel)
state = mechFuncs.compute_initial_state()
p = Objective.param_index_update(p, 1, state)
self.objective = Objective.Objective(self.energy_func, self.Uu, p,
Objective.PrecondStrategy(self.assemble_sparse))
def loss(chi):
print('forward run\n')
designParams = self.create_params_from_design_vars(chi)
return self.simulate(designParams, lambda U, p, disp, i: None)
def debug_loss(chi):
print('debug run\n')
fs = DesignOpt.create_function_space(self.mesh, self.triQuadRule, self.objective.p[3])
mechFuncs = Mechanics.create_mechanics_functions(fs,
'plane strain',
materialModel)
currentMesh = DesignOpt.create_current_mesh(self.mesh, self.objective.p[3])
post_process = PostProcess(currentMesh, self.dofManager, self.triQuadRule, mechFuncs,
'arch-'+str(self.designStep).zfill(2))
self.designStep+=1
designParams = self.create_params_from_design_vars(chi)
return self.simulate(designParams, post_process)
learningRate = 0.001
numLearningSteps = 201
opt_init, opt_update, get_params = optimizers.adam(learningRate)
opt_state = opt_init(chi)
def step(step, opt_state):
value, grads = value_and_grad(loss)(get_params(opt_state))
opt_state = opt_update(step, grads, opt_state)
return value, opt_state
for i in range(numLearningSteps):
chi = get_params(opt_state)
# set the parameters to the correct design variable outside of the A.D.
# need to make sure this parameter is exposed internally to get correct gradients
self.objective.p = Objective.param_index_update(self.objective.p, 2, self.create_params_from_design_vars(chi))
if i%5==0:
debug_loss(get_params(opt_state))
value, opt_state = step(i, opt_state)
print('design objective = ', value)
if value != value: return
def create_field(Uu, p, mesh, dofManager):
return dofManager.create_field(Uu, get_ubcs(p, mesh, dofManager))
def get_ubcs(p, mesh, dofManager):
V = np.zeros(mesh.coords.shape)
return dofManager.get_bc_values(V)
class PostProcess:
def __init__(self, mesh, dofManager, triQuadRule, mechFuncs, filename):
self.disp = [0,]
self.force = [0,]
self.mesh = mesh
self.dofManager = dofManager
self.triQuadRule = triQuadRule
self.mechFuncs = mechFuncs
self.filename=filename
def __call__(self, U, p, pushDisps, i):
self.disp.append(np.average(pushDisps))
self.force.append(2*p[0])
with open(self.filename+'.npz', 'wb') as f:
np.savez(f,
disp=np.array(self.disp),
force=np.array(self.force))
self.plot_solution(U, self.get_output_name(i), p)
def get_output_name(self, N):
return self.filename + '.' + str(N).zfill(3)
def plot_solution(self, U, plotName, p):
fs = DesignOpt.create_function_space(self.mesh, self.triQuadRule, p)
mechFuncs = Mechanics.create_mechanics_functions(fs,
'plane strain',
materialModel)
writer = VTKWriter.VTKWriter(self.mesh, baseFileName=plotName)
writer.add_nodal_field(name='displacement',
nodalData=U,
fieldType=VTKWriter.VTKFieldType.VECTORS)
bcs = np.array(self.dofManager.isBc, dtype=int)
writer.add_nodal_field(name='bcs',
nodalData=bcs,
fieldType=VTKWriter.VTKFieldType.VECTORS,
dataType=VTKWriter.VTKDataType.INT)
strainEnergyDensities, stresses = \
mechFuncs.compute_output_energy_densities_and_stresses(U, p[1])
strainEnergyDensities = np.squeeze(strainEnergyDensities)
stresses = np.squeeze(stresses)
writer.add_cell_field(name='strain_energy_density',
cellData=strainEnergyDensities,
fieldType=VTKWriter.VTKFieldType.SCALARS)
writer.add_cell_field(name='stress',
cellData=stresses,
fieldType=VTKWriter.VTKFieldType.TENSORS)
if not shapeOpt:
elementPhases = p[2]
writer.add_cell_field(name='phase',
cellData=elementPhases,
fieldType=VTKWriter.VTKFieldType.SCALARS)
writer.write()
##### RUN IT #####
app = Buckle()
app.run()
|
the-stack_0_13831 | import logging
from datetime import time
from calendar_view.core.config import CalendarConfig
from calendar_view.core.event import Event
class InputData(object):
def __init__(self, config: CalendarConfig, events: list):
self.config = config
self.events = events
def validate_config(config: CalendarConfig):
config.validate()
def validate_event(event: Event, config: CalendarConfig):
start_date, end_date = config.get_date_range()
start_time = time(hour=config.get_hours_range()[0])
end_hour = config.get_hours_range()[1]
if not (start_date <= event.get_start_date(config) <= end_date):
logging.warning("Event can't be shown, because it is not in configured date range: {} not in [{}, {}]".format(
event.get_start_date(config).strftime('%Y-%m-%d'),
start_date.strftime('%Y-%m-%d'),
end_date.strftime('%Y-%m-%d')
))
if event.start_time < start_time:
logging.warning("Event can't be shown, because its start is before time range: {} is before {}".format(
event.start_time.strftime('%H:%M'),
start_time.strftime('%H:%M')
))
if end_hour < 24 and time(hour=end_hour) < event.end_time:
logging.warning("Event can't be shown, because its end is after time range: {} is before {}".format(
event.end_time.strftime('%H:%M'),
time(hour=end_hour).strftime('%H:%M')
))
def validate_events(events: list, config: CalendarConfig):
for e in events:
validate_event(e, config)
def validate_data(data: InputData):
validate_config(data.config)
validate_events(data.events, data.config)
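# Hedged usage sketch: CalendarConfig and Event construction is elided here
# because their constructors are not shown in this module; validate_data is
# the single entry point that checks the config itself and then every event
# against the configured date and hour ranges.
# data = InputData(config, events)
# validate_data(data)  # logs a warning for every event that falls outside the range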
|
the-stack_0_13833 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <[email protected]>
# Copyright (C) 2012 Lars Buitinck <[email protected]>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Construct a corpus from a Wikipedia (or other MediaWiki-based) database dump.
If you have the `pattern` package installed, this module will use a fancy
lemmatization to get a lemma of each token (instead of plain alphabetic
tokenizer). The package is available at https://github.com/clips/pattern .
See scripts/process_wiki.py for a canned (example) script based on this
module.
"""
import bz2
import logging
import re
from xml.etree.cElementTree import iterparse # LXML isn't faster, so let's go with the built-in solution
import multiprocessing
from gensim import utils
# cannot import whole gensim.corpora, because that imports wikicorpus...
from gensim.corpora.dictionary import Dictionary
from gensim.corpora.textcorpus import TextCorpus
logger = logging.getLogger('gensim.corpora.wikicorpus')
# ignore articles shorter than ARTICLE_MIN_WORDS characters (after full preprocessing)
ARTICLE_MIN_WORDS = 50
RE_P0 = re.compile('<!--.*?-->', re.DOTALL | re.UNICODE) # comments
RE_P1 = re.compile('<ref([> ].*?)(</ref>|/>)', re.DOTALL | re.UNICODE) # footnotes
RE_P2 = re.compile("(\n\[\[[a-z][a-z][\w-]*:[^:\]]+\]\])+$", re.UNICODE) # links to languages
RE_P3 = re.compile("{{([^}{]*)}}", re.DOTALL | re.UNICODE) # template
RE_P4 = re.compile("{{([^}]*)}}", re.DOTALL | re.UNICODE) # template
RE_P5 = re.compile('\[(\w+):\/\/(.*?)(( (.*?))|())\]', re.UNICODE) # remove URL, keep description
RE_P6 = re.compile("\[([^][]*)\|([^][]*)\]", re.DOTALL | re.UNICODE) # simplify links, keep description
RE_P7 = re.compile('\n\[\[[iI]mage(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of images
RE_P8 = re.compile('\n\[\[[fF]ile(.*?)(\|.*?)*\|(.*?)\]\]', re.UNICODE) # keep description of files
RE_P9 = re.compile('<nowiki([> ].*?)(</nowiki>|/>)', re.DOTALL | re.UNICODE) # outside links
RE_P10 = re.compile('<math([> ].*?)(</math>|/>)', re.DOTALL | re.UNICODE) # math content
RE_P11 = re.compile('<(.*?)>', re.DOTALL | re.UNICODE) # all other tags
RE_P12 = re.compile('\n(({\|)|(\|-)|(\|}))(.*?)(?=\n)', re.UNICODE) # table formatting
RE_P13 = re.compile('\n(\||\!)(.*?\|)*([^|]*?)', re.UNICODE) # table cell formatting
RE_P14 = re.compile('\[\[Category:[^][]*\]\]', re.UNICODE) # categories
# Remove File and Image template
RE_P15 = re.compile('\[\[([fF]ile:|[iI]mage)[^]]*(\]\])', re.UNICODE)
# MediaWiki namespaces (https://www.mediawiki.org/wiki/Manual:Namespace) that
# ought to be ignored
IGNORED_NAMESPACES = ['Wikipedia', 'Category', 'File', 'Portal', 'Template',
'MediaWiki', 'User', 'Help', 'Book', 'Draft',
'WikiProject', 'Special', 'Talk']
def filter_wiki(raw):
"""
Filter out wiki mark-up from `raw`, leaving only text. `raw` is either unicode
or utf-8 encoded string.
"""
# parsing of the wiki markup is not perfect, but sufficient for our purposes
# contributions to improving this code are welcome :)
text = utils.to_unicode(raw, 'utf8', errors='ignore')
text = utils.decode_htmlentities(text) # '&nbsp;' --> '\xa0'
return remove_markup(text)
def remove_markup(text):
text = re.sub(RE_P2, "", text) # remove the last list (=languages)
# the wiki markup is recursive (markup inside markup etc)
# instead of writing a recursive grammar, here we deal with that by removing
# markup in a loop, starting with inner-most expressions and working outwards,
# for as long as something changes.
text = remove_template(text)
text = remove_file(text)
iters = 0
while True:
old, iters = text, iters + 1
text = re.sub(RE_P0, "", text) # remove comments
text = re.sub(RE_P1, '', text) # remove footnotes
text = re.sub(RE_P9, "", text) # remove outside links
text = re.sub(RE_P10, "", text) # remove math content
text = re.sub(RE_P11, "", text) # remove all remaining tags
text = re.sub(RE_P14, '', text) # remove categories
text = re.sub(RE_P5, '\\3', text) # remove urls, keep description
text = re.sub(RE_P6, '\\2', text) # simplify links, keep description only
# remove table markup
text = text.replace('||', '\n|') # each table cell on a separate line
text = re.sub(RE_P12, '\n', text) # remove formatting lines
text = re.sub(RE_P13, '\n\\3', text) # leave only cell content
# remove empty mark-up
text = text.replace('[]', '')
if old == text or iters > 2: # stop if nothing changed between two iterations or after a fixed number of iterations
break
# the following is needed to make the tokenizer see '[[socialist]]s' as a single word 'socialists'
# TODO is this really desirable?
text = text.replace('[', '').replace(']', '') # promote all remaining markup to plain text
return text
def remove_template(s):
"""Remove template wikimedia markup.
Return a copy of `s` with all the wikimedia markup template removed. See
http://meta.wikimedia.org/wiki/Help:Template for wikimedia templates
details.
    Note: Since templates can be nested, it is difficult to remove them using
    regular expressions.
"""
# Find the start and end position of each template by finding the opening
# '{{' and closing '}}'
n_open, n_close = 0, 0
starts, ends = [], []
in_template = False
prev_c = None
for i, c in enumerate(iter(s)):
if not in_template:
if c == '{' and c == prev_c:
starts.append(i - 1)
in_template = True
n_open = 1
if in_template:
if c == '{':
n_open += 1
elif c == '}':
n_close += 1
if n_open == n_close:
ends.append(i)
in_template = False
n_open, n_close = 0, 0
prev_c = c
# Remove all the templates
s = ''.join([s[end + 1:start] for start, end in
zip(starts + [None], [-1] + ends)])
return s
def remove_file(s):
"""Remove the 'File:' and 'Image:' markup, keeping the file caption.
Return a copy of `s` with all the 'File:' and 'Image:' markup replaced by
their corresponding captions. See http://www.mediawiki.org/wiki/Help:Images
for the markup details.
"""
# The regex RE_P15 match a File: or Image: markup
for match in re.finditer(RE_P15, s):
m = match.group(0)
caption = m[:-2].split('|')[-1]
s = s.replace(m, caption, 1)
return s
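# Hedged illustration (not part of the original module) of the two helpers above:
#   remove_template('before {{Infobox {{nested}} x}} after')  ->  'before  after'
#   remove_file('[[File:Foo.jpg|thumb|A caption]]')            ->  'A caption'
# i.e. nested {{...}} blocks are dropped entirely, while File:/Image: links are
# replaced by their caption text.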
def tokenize(content):
"""
Tokenize a piece of text from wikipedia. The input string `content` is assumed
to be mark-up free (see `filter_wiki()`).
    Return list of tokens as utf8 bytestrings. Ignore words shorter than 2 or longer
    than 15 characters (not bytes!).
"""
# TODO maybe ignore tokens with non-latin characters? (no chinese, arabic, russian etc.)
return [token.encode('utf8') for token in utils.tokenize(content, lower=True, errors='ignore')
if 2 <= len(token) <= 15 and not token.startswith('_')]
def get_namespace(tag):
"""Returns the namespace of tag."""
m = re.match("^{(.*?)}", tag)
namespace = m.group(1) if m else ""
if not namespace.startswith("http://www.mediawiki.org/xml/export-"):
raise ValueError("%s not recognized as MediaWiki dump namespace"
% namespace)
return namespace
_get_namespace = get_namespace
def extract_pages(f, filter_namespaces=False):
"""
Extract pages from a MediaWiki database dump = open file-like object `f`.
Return an iterable over (str, str, str) which generates (title, content, pageid) triplets.
"""
elems = (elem for _, elem in iterparse(f, events=("end",)))
    # We can't rely on the namespace for database dumps, since it has changed
    # every time a small modification to the format is made. So, determine
# those from the first element we find, which will be part of the metadata,
# and construct element paths.
elem = next(elems)
namespace = get_namespace(elem.tag)
ns_mapping = {"ns": namespace}
page_tag = "{%(ns)s}page" % ns_mapping
text_path = "./{%(ns)s}revision/{%(ns)s}text" % ns_mapping
title_path = "./{%(ns)s}title" % ns_mapping
ns_path = "./{%(ns)s}ns" % ns_mapping
pageid_path = "./{%(ns)s}id" % ns_mapping
for elem in elems:
if elem.tag == page_tag:
title = elem.find(title_path).text
text = elem.find(text_path).text
if filter_namespaces:
ns = elem.find(ns_path).text
if ns not in filter_namespaces:
text = None
pageid = elem.find(pageid_path).text
yield title, text or "", pageid # empty page will yield None
# Prune the element tree, as per
# http://www.ibm.com/developerworks/xml/library/x-hiperfparse/
# except that we don't need to prune backlinks from the parent
# because we don't use LXML.
# We do this only for <page>s, since we need to inspect the
# ./revision/text element. The pages comprise the bulk of the
# file, so in practice we prune away enough.
elem.clear()
_extract_pages = extract_pages # for backward compatibility
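# Hedged usage sketch (the dump file name is made up): extract_pages can also be
# used on its own to stream (title, text, pageid) triples from a compressed dump.
# with bz2.BZ2File('enwiki-latest-pages-articles.xml.bz2') as dump:
#     for title, text, pageid in extract_pages(dump, filter_namespaces=('0',)):
#         print(pageid, title)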
def process_article(args):
"""
Parse a wikipedia article, returning its content as a list of tokens
(utf8-encoded strings).
"""
text, lemmatize, title, pageid = args
text = filter_wiki(text)
if lemmatize:
result = utils.lemmatize(text)
else:
result = tokenize(text)
return result, title, pageid
class WikiCorpus(TextCorpus):
"""
Treat a wikipedia articles dump (\*articles.xml.bz2) as a (read-only) corpus.
The documents are extracted on-the-fly, so that the whole (massive) dump
can stay compressed on disk.
>>> wiki = WikiCorpus('enwiki-20100622-pages-articles.xml.bz2') # create word->word_id mapping, takes almost 8h
>>> MmCorpus.serialize('wiki_en_vocab200k.mm', wiki) # another 8h, creates a file in MatrixMarket format plus file with id->word
"""
def __init__(self, fname, processes=None, lemmatize=utils.has_pattern(), dictionary=None, filter_namespaces=('0',)):
"""
Initialize the corpus. Unless a dictionary is provided, this scans the
corpus once, to determine its vocabulary.
If `pattern` package is installed, use fancier shallow parsing to get
token lemmas. Otherwise, use simple regexp tokenization. You can override
this automatic logic by forcing the `lemmatize` parameter explicitly.
"""
self.fname = fname
self.filter_namespaces = filter_namespaces
self.metadata = False
if processes is None:
processes = max(1, multiprocessing.cpu_count() - 1)
self.processes = processes
self.lemmatize = lemmatize
if dictionary is None:
self.dictionary = Dictionary(self.get_texts())
else:
self.dictionary = dictionary
def get_texts(self):
"""
Iterate over the dump, returning text version of each article as a list
of tokens.
Only articles of sufficient length are returned (short articles & redirects
etc are ignored).
Note that this iterates over the **texts**; if you want vectors, just use
the standard corpus interface instead of this function::
>>> for vec in wiki_corpus:
>>> print(vec)
"""
articles, articles_all = 0, 0
positions, positions_all = 0, 0
texts = ((text, self.lemmatize, title, pageid) for title, text, pageid in extract_pages(bz2.BZ2File(self.fname), self.filter_namespaces))
pool = multiprocessing.Pool(self.processes)
# process the corpus in smaller chunks of docs, because multiprocessing.Pool
# is dumb and would load the entire input into RAM at once...
for group in utils.chunkize(texts, chunksize=10 * self.processes, maxsize=1):
for tokens, title, pageid in pool.imap(process_article, group): # chunksize=10):
articles_all += 1
positions_all += len(tokens)
# article redirects and short stubs are pruned here
if len(tokens) < ARTICLE_MIN_WORDS or any(title.startswith(ignore + ':') for ignore in IGNORED_NAMESPACES):
continue
articles += 1
positions += len(tokens)
if self.metadata:
yield (tokens, (pageid, title))
else:
yield tokens
pool.terminate()
logger.info(
"finished iterating over Wikipedia corpus of %i documents with %i positions"
" (total %i articles, %i positions before pruning articles shorter than %i words)",
articles, positions, articles_all, positions_all, ARTICLE_MIN_WORDS)
self.length = articles # cache corpus length
# endclass WikiCorpus
|
the-stack_0_13835 | import torch
import torch.nn as nn
from models.deconv import *
from torchvision.models.utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1,deconv=None):
"""3x3 convolution with padding"""
if deconv:
return deconv(in_planes, out_planes, kernel_size=3, stride=stride, padding=dilation, dilation=dilation,groups=groups)
else:
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,padding=dilation, bias=False, dilation=dilation,groups=groups)#
def conv1x1(in_planes, out_planes, stride=1,deconv=None):
"""1x1 convolution"""
if deconv:
return deconv(in_planes, out_planes, kernel_size=1, stride=stride)
else:
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None, deconv=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride,deconv=deconv)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes,deconv=deconv)
self.downsample = downsample
self.stride = stride
if not deconv:
self.bn1 = norm_layer(planes)
self.bn2 = norm_layer(planes)
def forward(self, x):
identity = x
out = self.conv1(x)
if hasattr(self,'bn1'):
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if hasattr(self, 'bn2'):
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None,deconv=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width,deconv=deconv)
self.conv2 = conv3x3(width, width, stride, groups, dilation,deconv=deconv)
self.conv3 = conv1x1(width, planes * self.expansion,deconv=deconv)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
if not deconv:
self.bn1 = norm_layer(width)
self.bn2 = norm_layer(width)
self.bn3 = norm_layer(planes * self.expansion)
def forward(self, x):
identity = x
out = self.conv1(x)
if hasattr(self,'bn1'):
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if hasattr(self, 'bn2'):
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
if hasattr(self, 'bn3'):
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None,deconv=None,delinear=None,channel_deconv=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
if not deconv:
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = norm_layer(self.inplanes)
else:
self.conv1 = deconv(3, self.inplanes, kernel_size=7, stride=2, padding=3, freeze=True, n_iter=15,sampling_stride=3)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0],deconv=deconv)
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0],deconv=deconv)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1],deconv=deconv)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2],deconv=deconv)
if channel_deconv:
self.deconv1 =channel_deconv()
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
if delinear:
self.fc = delinear(512 * block.expansion, num_classes)
else:
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d) or isinstance(m,FastDeconv):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False,deconv=None):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride,deconv=deconv),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer,deconv=deconv))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer,deconv=deconv))
return nn.Sequential(*layers)
def _forward_impl(self, x):
x = self.conv1(x)
if hasattr(self,'bn1'):
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
if hasattr(self, 'deconv1'):
x = self.deconv1(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, planes, pretrained, progress, deconv,delinear,channel_deconv, **kwargs):
model = ResNet(block, planes,deconv=deconv,delinear=delinear,channel_deconv=channel_deconv, **kwargs)
"""
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict)
"""
return model
def resnet18d(deconv, delinear,channel_deconv, pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, deconv=deconv,delinear=delinear,channel_deconv=channel_deconv,
**kwargs)
def resnet34d(deconv, delinear,channel_deconv, pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, deconv=deconv,delinear=delinear,channel_deconv=channel_deconv,
**kwargs)
def resnet50d(deconv, delinear,channel_deconv,pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,deconv=deconv,delinear=delinear,channel_deconv=channel_deconv,
**kwargs)
def resnet101d(deconv,delinear, channel_deconv,pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,deconv=deconv,delinear=delinear,channel_deconv=channel_deconv,
**kwargs)
def resnet152d(deconv, delinear,channel_deconv,pretrained=False, progress=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,deconv=deconv,delinear=delinear,channel_deconv=channel_deconv,
**kwargs)
def resnext50_32x4d(deconv, delinear,channel_deconv,pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress,deconv=deconv,delinear=delinear,channel_deconv=channel_deconv, **kwargs)
def resnext101_32x8d(deconv, delinear,channel_deconv,pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress,deconv=deconv,delinear=delinear,channel_deconv=channel_deconv, **kwargs)
def wide_resnet50_2d(deconv,delinear, channel_deconv,pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, deconv=deconv,delinear=delinear,channel_deconv=channel_deconv, **kwargs)
def wide_resnet101_2d(deconv, delinear,channel_deconv, pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, deconv=deconv,delinear=delinear,channel_deconv=channel_deconv, **kwargs)
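if __name__ == '__main__':
    # Hypothetical smoke test (not part of the original module): with deconv,
    # delinear and channel_deconv all set to None the factories fall back to the
    # standard Conv2d/BatchNorm/Linear layers, so this builds a plain ResNet-18
    # and pushes a dummy batch through it.
    model = resnet18d(deconv=None, delinear=None, channel_deconv=None, num_classes=10)
    dummy = torch.randn(2, 3, 224, 224)
    print(model(dummy).shape)  # expected: torch.Size([2, 10])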
|
the-stack_0_13836 | import logging
import os
import click
from cligj import (
precision_opt, indent_opt, compact_opt, projection_geographic_opt,
projection_mercator_opt, projection_projected_opt,
use_rs_opt, geojson_type_feature_opt, geojson_type_bbox_opt,
geojson_type_collection_opt)
from .helpers import write_features, to_lower
import rasterio
from rasterio.rio import options
from rasterio.warp import transform_bounds
logger = logging.getLogger(__name__)
# Bounds command.
@click.command(short_help="Write bounding boxes to stdout as GeoJSON.")
# One or more files, the bounds of each are a feature in the collection
# object or feature sequence.
@click.argument('INPUT', nargs=-1, type=click.Path(), required=True)
@precision_opt
@indent_opt
@compact_opt
@projection_geographic_opt
@projection_projected_opt
@projection_mercator_opt
@click.option(
'--dst-crs', default='', metavar="EPSG:NNNN", callback=to_lower,
help="Output in specified coordinates.")
@options.sequence_opt
@use_rs_opt
@geojson_type_feature_opt(True)
@geojson_type_bbox_opt(False)
@click.pass_context
def bounds(ctx, input, precision, indent, compact, projection, dst_crs,
sequence, use_rs, geojson_type):
"""Write bounding boxes to stdout as GeoJSON for use with, e.g.,
geojsonio
$ rio bounds *.tif | geojsonio
If a destination crs is passed via dst_crs, it takes precedence over
the projection parameter.
"""
import rasterio.warp
dump_kwds = {'sort_keys': True}
if indent:
dump_kwds['indent'] = indent
if compact:
dump_kwds['separators'] = (',', ':')
stdout = click.get_text_stream('stdout')
# This is the generator for (feature, bbox) pairs.
class Collection:
def __init__(self, env):
self._xs = []
self._ys = []
self.env = env
@property
def bbox(self):
return min(self._xs), min(self._ys), max(self._xs), max(self._ys)
def __call__(self):
for i, path in enumerate(input):
with rasterio.open(path) as src:
bounds = src.bounds
if dst_crs:
bbox = transform_bounds(src.crs,
dst_crs, *bounds)
elif projection == 'mercator':
bbox = transform_bounds(src.crs,
{'init': 'epsg:3857'}, *bounds)
elif projection == 'geographic':
bbox = transform_bounds(src.crs,
{'init': 'epsg:4326'}, *bounds)
else:
bbox = bounds
if precision >= 0:
bbox = [round(b, precision) for b in bbox]
yield {
'type': 'Feature',
'bbox': bbox,
'geometry': {
'type': 'Polygon',
'coordinates': [[
[bbox[0], bbox[1]],
[bbox[2], bbox[1]],
[bbox[2], bbox[3]],
[bbox[0], bbox[3]],
[bbox[0], bbox[1]]]]},
'properties': {
'id': str(i),
'title': path,
'filename': os.path.basename(path)}}
self._xs.extend(bbox[::2])
self._ys.extend(bbox[1::2])
try:
with ctx.obj['env'] as env:
write_features(
stdout, Collection(env), sequence=sequence,
geojson_type=geojson_type, use_rs=use_rs,
**dump_kwds)
except Exception:
logger.exception("Exception caught during processing")
raise click.Abort()
|
the-stack_0_13837 | import sys
import selectors
import socket
import os
sys.path.insert(0, "../")
from messages.dns_request_message import *
from messages.dns_response_message import *
from messages.client_req_lb_message import *
from messages.client_res_lb_message import *
from messages.content_related_messages import *
from config import *
from edgeServer.edgeServer import md5
############# Get IP of load balancer from DNS
s = socket.socket()
host = DNS_IP ## DNS_IP
port = DNS_PORT
s.connect((host, port))
print("Requesting IP from DNS")
msg = DNSRequestMessage(1, "www.mycdn.com")
msg.send(s)
msg = DNSResponseMessage()
msg.receive(s)
ipblocks = msg.ipblocks
print(ipblocks)
s.close()
############# Request file from load balancer
def connectLB(ipblocks):
"""
Method to connect to LBs
IP blocks contains the DNS response
"""
err_count = 0
for host, port in ipblocks:
s = socket.socket()
try:
print("Connecting ",host,":",port)
s.connect((host, port))
print("Connected ",host,":",port)
break
except socket.error:
err_count += 1
print("Connection failed ",host,":",port)
continue
if err_count == 2:
print("Load Balancer could not be reached!")
return s,0
else:
print("Connection established to the load balancer")
return s,1
# s,err = connectLB(ipblocks)
# if err==0:
# raise Exception("Load Balancer could not be reached!")
############# Request file from redirected IP of edge server
def requestFile(edgeIP,edgePort,content_id,seq_no=0):
## Sequence number is zero for initial request
## returns last sequence number it received
## -2 if complete file is received
## -1 if nothing is received
## else the sequence number
soc = socket.socket() # Create a socket object
soc.settimeout(30)
try:
print("Connecting to edge server ip: ",edgeIP)
sys.stdout.flush()
soc.connect((edgeIP, edgePort))
except Exception as e:
print("Unable to connect to edge server ip: ",edgeIP)
return -1
last_seq_number_recv = -1
message = ContentRequestMessage(content_id, seq_no)
message.send(soc)
file_des = FileDescriptionMessage(0, 0, '', '')
try:
file_des.receive(soc)
except:
print("Unable to get file details")
print("Last Sequence Number received: ",last_seq_number_recv)
return last_seq_number_recv
print(file_des.file_name)
print(file_des.content_id)
print(file_des.file_size)
if seq_no!=0:
param = 'ab'
else:
param = 'wb'
with open('rec_' + file_des.file_name, param) as f:
print('file opened')
print("Content ID: ",file_des.content_id)
if seq_no!=0:
f.seek(seq_no*1018)
file_size = file_des.file_size
total_received=seq_no*1018
while True:
msg = ContentMessage(content_id, seq_no)
try:
msg.receive(soc,file_size,total_received)
except Exception as e:
print("Last Sequence Number received: ",last_seq_number_recv)
print(e)
return last_seq_number_recv
print("Sequence no: ",msg.seq_no)
last_seq_number_recv = msg.seq_no
data = msg.data
total_received+=len(data)
# print(len(data))
if not data:
break
f.write(data)
f.close()
soc.close()
############# Verify file, close connections and show success
if md5('rec_'+file_des.file_name)==file_des.md5_val:
print("File download success!")
else:
print("MD5 does not match")
os.remove('rec_'+file_des.file_name)
print("Try downloading again")
## TODO What to do with the file then???
return -2
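# Hedged illustration of the resume protocol implemented above (IP and content
# id are made up): a first call starts from chunk 0; a non-negative return value
# is the last 1018-byte chunk received, so the download can be resumed from the
# next chunk; -2 means the transfer completed (the MD5 check above decides
# whether the downloaded file is kept).
# last = requestFile('10.0.0.5', EDGE_SERVER_PORT, content_id=3, seq_no=0)
# if last >= 0:
#     requestFile('10.0.0.5', EDGE_SERVER_PORT, content_id=3, seq_no=last + 1)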
while True:
contentreq = input("Enter content id: ")
try:
contentReq = int(contentreq)
except:
print("Enter only numbers.")
continue
if(contentReq<=0):
print("Content id cannot be less than 1")
continue
seqNo = -1
location_id = int(sys.argv[1])
n_msg = ClientReqLBMessage(contentReq,location_id)
prev_edge_ip = n_msg.prev_edge_ip
while True:
# seqNo = requestFile(msg.ip, EDGE_SERVER_PORT ,contentReq)
if seqNo != -2:
## TO DO get new edge server from load balancer
s, err = connectLB(ipblocks)
if err==0:
input("Load Balancer could not be reached!")
n_msg = ClientReqLBMessage(contentReq,location_id,prev_edge_ip)
try:
input("Press enter to request new edge server")
n_msg.send(s)
print('Hi')
n_msg = ClientResLBMessage()
n_msg.receive(s)
prev_edge_ip = n_msg.ip
input("Press enter to connect to edge server!")
if n_msg.ip=='0.0.0.0':
print("No edge servers available.")
input("Press enter to try again!")
continue
seqNo = requestFile(n_msg.ip, EDGE_SERVER_PORT ,contentReq, seqNo+1)
except:
print("Error communicating with LB")
input("Press enter to request another/same file!")
# break
s.close()
else:
break
#############
|
the-stack_0_13838 | # coding=utf-8
# Copyright 2019 The Google UDA Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The BERT model.
Part of the code is from https://github.com/google-research/bert
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import json
import math
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
      vocab_size: Vocabulary size of `input_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
      hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
      initializer_range: The stddev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file, model_dropout):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
config = cls.from_dict(json.loads(text))
if model_dropout != -1:
config.hidden_dropout_prob = model_dropout
config.attention_probs_dropout_prob = model_dropout
return config
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
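# Hypothetical usage sketch (the vocab size is made up): in the training pipeline
# a config is normally loaded with BertConfig.from_json_file(path, model_dropout),
# but it can also be built directly and round-tripped through a plain dict.
# config = BertConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12)
# clone = BertConfig.from_dict(config.to_dict())
# print(clone.to_json_string())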
def bert_embedding(config,
is_training,
input_ids,
input_mask,
token_type_ids=None,
use_one_hot_embeddings=True,
scope=None):
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.variable_scope("bert", scope, reuse=tf.AUTO_REUSE):
with tf.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(embedding_output, embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
embedding_output = embedding_postprocessor(
input_tensor=embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
return embedding_output, embedding_table
def bert_attention(config,
is_training,
input_ids,
input_mask,
embedding_output,
scope=None):
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
with tf.variable_scope("bert", scope, reuse=tf.AUTO_REUSE):
with tf.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
all_encoder_layers = transformer_model(
input_tensor=embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
sequence_output = all_encoder_layers[-1]
return sequence_output
def bert_pooler(config,
is_training,
sequence_output,
scope=None):
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
with tf.variable_scope("bert", scope, reuse=tf.AUTO_REUSE):
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
clas_rep = tf.squeeze(sequence_output[:, 0:1, :], axis=1)
pooled_output = tf.layers.dense(
clas_rep,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
return pooled_output
def bert_model(config,
is_training,
input_ids,
input_mask,
token_type_ids=None,
input_embedding=None,
output_type="pooled",
use_one_hot_embeddings=True,
scope=None):
"""doc."""
assert output_type in ["embedding", "pooled", "sequence"], (
"Unsupported output type {}".format(output_type))
if input_embedding is None:
embedding_output, embedding_table = bert_embedding(
config,
is_training,
input_ids,
input_mask,
token_type_ids,
use_one_hot_embeddings,
scope)
if output_type == "embedding":
return embedding_output, embedding_table
sequence_output = bert_attention(
config,
is_training,
input_ids,
input_mask,
embedding_output,
scope)
if output_type == "sequence":
return sequence_output
pooled_output = bert_pooler(
config,
is_training,
sequence_output,
scope)
if output_type == "pooled":
return pooled_output
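# Wiring sketch (illustrative; the placeholder shapes and eval-mode flag are
# assumptions): output_type selects how far the staged helpers above run, i.e.
# "embedding" stops after bert_embedding, "sequence" adds bert_attention, and
# "pooled" additionally runs bert_pooler.
def _bert_model_example(config):
  input_ids = tf.placeholder(tf.int32, shape=[None, 128])
  input_mask = tf.placeholder(tf.int32, shape=[None, 128])
  return bert_model(config, is_training=False, input_ids=input_ids,
                    input_mask=input_mask, output_type="pooled")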
def gelu(input_tensor):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
input_tensor: float Tensor to perform activation.
Returns:
`input_tensor` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.erf(input_tensor / tf.sqrt(2.0)))
return input_tensor * cdf
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear",
this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that's not a string is already an activation
# function, so we just return it.
  if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
    dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, 1.0 - dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
return tf.contrib.layers.layer_norm(
inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.truncated_normal_initializer(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.nn.embedding_lookup()`. One hot is better
for TPUs.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
if use_one_hot_embeddings:
flat_input_ids = tf.reshape(input_ids, [-1])
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
if seq_length > max_position_embeddings:
raise ValueError("The seq length (%d) cannot be greater than "
"`max_position_embeddings` (%d)" %
(seq_length, max_position_embeddings))
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range))
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
full_position_embeddings = tf.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range))
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
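# Broadcast sketch (illustrative): a [2, 3] padding mask expands to a [2, 3, 3]
# attention mask in which every query position shares the same key mask.
def _attention_mask_example():
  mask_2d = tf.constant([[1, 1, 0], [1, 0, 0]], dtype=tf.int32)
  return create_attention_mask_from_input_mask(mask_2d, mask_2d)  # shape [2, 3, 3]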
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
  corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
  In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
    the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
    attention_probs_dropout_prob: (optional) float. Dropout probability of the
      attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(output_tensor, [0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
if do_return_2d_tensor:
    # `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
    # `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.variable_scope("attention"):
attention_heads = []
with tf.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.variable_scope("output"):
attention_output = tf.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.variable_scope("intermediate"):
intermediate_output = tf.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.variable_scope("output"):
layer_output = tf.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
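# Usage sketch (illustrative; all sizes are assumptions): a small 2-layer, 4-head
# encoder maps a [batch, seq, hidden] input to an output of the same shape.
def _transformer_model_example():
  inputs = tf.placeholder(tf.float32, shape=[8, 16, 64])
  return transformer_model(inputs,
                           hidden_size=64,
                           num_hidden_layers=2,
                           num_attention_heads=4,
                           intermediate_size=256)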
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
    specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
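# Static/dynamic sketch (illustrative): with an unknown batch dimension the helper
# returns a scalar tensor for dim 0 and a plain python int for dim 1.
def _get_shape_list_example():
  ids = tf.placeholder(tf.int32, shape=[None, 128])
  batch_size, seq_length = get_shape_list(ids, expected_rank=2)
  return batch_size, seq_length  # (int32 scalar Tensor, 128)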
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
  if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
|
the-stack_0_13839 | from decimal import Decimal
from django.contrib.auth import get_user_model
from django.core.validators import MaxLengthValidator, MinValueValidator
from rest_framework import serializers
from snippets.models import LANGUAGE_CHOICES, STYLE_CHOICES, Snippet, SnippetViewer
class LanguageSerializer(serializers.Serializer):
name = serializers.ChoiceField(
choices=LANGUAGE_CHOICES, default='python', help_text='The name of the programming language')
read_only_nullable = serializers.CharField(read_only=True, allow_null=True)
class Meta:
ref_name = None
class ExampleProjectSerializer(serializers.Serializer):
project_name = serializers.CharField(label='project name custom title', help_text='Name of the project')
github_repo = serializers.CharField(required=True, help_text='Github repository of the project')
class Meta:
ref_name = 'Project'
class UnixTimestampField(serializers.DateTimeField):
def to_representation(self, value):
""" Return epoch time for a datetime object or ``None``"""
from django.utils.dateformat import format
try:
return int(format(value, 'U'))
except (AttributeError, TypeError):
return None
def to_internal_value(self, value):
import datetime
return datetime.datetime.fromtimestamp(int(value))
class Meta:
swagger_schema_fields = {
'format': 'integer',
'title': 'Client date time suu',
'description': 'Date time in unix timestamp format',
}
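# Conversion sketch for UnixTimestampField (illustrative, not part of the original
# module; assumes it runs inside a configured Django project):
#
#   field = UnixTimestampField()
#   dt = field.to_internal_value(1609459200)   # datetime parsed from epoch seconds
#   field.to_representation(dt)                # rendered back to an int via the 'U' format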
class SnippetSerializer(serializers.Serializer):
"""SnippetSerializer classdoc
create: docstring for create from serializer classdoc
"""
id = serializers.IntegerField(read_only=True, help_text="id serializer help text")
created = UnixTimestampField(read_only=True)
owner = serializers.PrimaryKeyRelatedField(
queryset=get_user_model().objects.all(),
default=serializers.CurrentUserDefault(),
help_text="The ID of the user that created this snippet; if none is provided, "
"defaults to the currently logged in user."
)
owner_as_string = serializers.PrimaryKeyRelatedField(
help_text="The ID of the user that created this snippet.",
read_only=True,
source='owner',
)
title = serializers.CharField(required=False, allow_blank=True, max_length=100)
code = serializers.CharField(style={'base_template': 'textarea.html'})
tags = serializers.ListField(child=serializers.CharField())
linenos = serializers.BooleanField(required=False)
language = LanguageSerializer(help_text="Sample help text for language")
styles = serializers.MultipleChoiceField(choices=STYLE_CHOICES, default=['solarized-dark'])
lines = serializers.ListField(child=serializers.IntegerField(), allow_null=True, required=False)
example_projects = serializers.ListSerializer(child=ExampleProjectSerializer(), read_only=True,
validators=[MaxLengthValidator(100)])
difficulty_factor = serializers.FloatField(help_text="this is here just to test FloatField",
read_only=True, default=lambda: 6.9)
rate_as_string = serializers.DecimalField(max_digits=6, decimal_places=3, default=Decimal('0.0'),
validators=[MinValueValidator(Decimal('0.0'))])
rate = serializers.DecimalField(max_digits=6, decimal_places=3, default=Decimal('0.0'), coerce_to_string=False,
validators=[MinValueValidator(Decimal('0.0'))])
nullable_secondary_language = LanguageSerializer(allow_null=True)
def create(self, validated_data):
"""
Create and return a new `Snippet` instance, given the validated data.
"""
del validated_data['styles']
del validated_data['lines']
del validated_data['difficulty_factor']
return Snippet.objects.create(**validated_data)
def update(self, instance, validated_data):
"""
Update and return an existing `Snippet` instance, given the validated data.
"""
instance.title = validated_data.get('title', instance.title)
instance.code = validated_data.get('code', instance.code)
instance.linenos = validated_data.get('linenos', instance.linenos)
instance.language = validated_data.get('language', instance.language)
instance.style = validated_data.get('style', instance.style)
instance.save()
return instance
class SnippetViewerSerializer(serializers.ModelSerializer):
class Meta:
model = SnippetViewer
fields = '__all__'
|
the-stack_0_13840 | from datetime import date
from django.db import models
from django import forms
from django.forms import ModelForm, Textarea, TextInput, NumberInput
#from django.forms.extras.widgets import Select, SelectDateWidget
from django.forms.widgets import EmailInput
from django.conf import settings
from django.core.exceptions import ValidationError
#from captcha.fields import CaptchaField
from landpage.models import LandpageContactMessage
class ContactForm(forms.ModelForm):
#captcha = CaptchaField()
class Meta:
model = LandpageContactMessage
fields = ['name', 'email', 'phone', 'message']
labels = {
}
widgets = {
'name': TextInput(attrs={'class': u'form-control', 'placeholder': u'Enter Full Name'}),
'email': EmailInput(attrs={'class': u'form-control', 'placeholder': u'Enter Email'}),
'phone': TextInput(attrs={'class': u'form-control', 'placeholder': u'Enter Phone'}),
            'message': Textarea(attrs={'class': u'form-control', 'placeholder': u'Enter Message'}),
}
def clean_phone(self):
phone = self.cleaned_data.get('phone', None)
# clean phone by removing all non-numerals
phone = ''.join(x for x in phone if x.isdigit())
        min_length = 10
        max_length = 13
        if len(phone) < min_length:
            raise ValidationError('Must be at least a 10 digit phone number.')
        if len(phone) > max_length:
            raise ValidationError('Must be at maximum 13 digits long.')
return phone
class ForgotPasswordForm(forms.Form):
email = forms.EmailField(
label='Email',
max_length=100,
widget=forms.TextInput(
attrs={'class': 'form-control', 'placeholder': 'Enter Email'}),
)
#captcha = CaptchaField()
# Captcha Setup:
# http://django-simple-captcha.readthedocs.org/en/latest/usage.html#installation
|
the-stack_0_13841 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Backward compatibility of configs.
Instructions to bump version:
+ It's not needed to bump version if new keys are added.
It's only needed when backward-incompatible changes happen
(i.e., some existing keys disappear, or the meaning of a key changes)
+ To bump version, do the following:
1. Increment _C.VERSION in defaults.py
2. Add a converter in this file.
Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X,
and a function "downgrade" which in-place downgrades config from X to X-1
In each function, VERSION is left unchanged.
Each converter assumes that its input has the relevant keys
(i.e., the input is not a partial config).
3. Run the tests (test_config.py) to make sure the upgrade & downgrade
functions are consistent.
"""
import logging
from typing import List, Optional, Tuple
from .config import CfgNode as CN
from .defaults import _C
__all__ = ["upgrade_config", "downgrade_config"]
def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
"""
Upgrade a config from its current version to a newer version.
Args:
cfg (CfgNode):
to_version (int): defaults to the latest version.
"""
cfg = cfg.clone()
if to_version is None:
to_version = _C.VERSION
assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
cfg.VERSION, to_version
)
for k in range(cfg.VERSION, to_version):
converter = globals()["ConverterV" + str(k + 1)]
converter.upgrade(cfg)
cfg.VERSION = k + 1
return cfg
def downgrade_config(cfg: CN, to_version: int) -> CN:
"""
Downgrade a config from its current version to an older version.
Args:
cfg (CfgNode):
to_version (int):
Note:
A general downgrade of arbitrary configs is not always possible due to the
different functionalities in different versions.
The purpose of downgrade is only to recover the defaults in old versions,
allowing it to load an old partial yaml config.
Therefore, the implementation only needs to fill in the default values
in the old version when a general downgrade is not possible.
"""
cfg = cfg.clone()
assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
cfg.VERSION, to_version
)
for k in range(cfg.VERSION, to_version, -1):
converter = globals()["ConverterV" + str(k)]
converter.downgrade(cfg)
cfg.VERSION = k - 1
return cfg
def guess_version(cfg: CN, filename: str) -> int:
"""
Guess the version of a partial config where the VERSION field is not specified.
Returns the version, or the latest if cannot make a guess.
This makes it easier for users to migrate.
"""
logger = logging.getLogger(__name__)
def _has(name: str) -> bool:
cur = cfg
for n in name.split("."):
if n not in cur:
return False
cur = cur[n]
return True
# Most users' partial configs have "MODEL.WEIGHT", so guess on it
ret = None
if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
ret = 1
if ret is not None:
logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
else:
ret = _C.VERSION
logger.warning(
"Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
filename, ret
)
)
return ret
def _rename(cfg: CN, old: str, new: str) -> None:
old_keys = old.split(".")
new_keys = new.split(".")
def _set(key_seq: List[str], val: str) -> None:
cur = cfg
for k in key_seq[:-1]:
if k not in cur:
cur[k] = CN()
cur = cur[k]
cur[key_seq[-1]] = val
def _get(key_seq: List[str]) -> CN:
cur = cfg
for k in key_seq:
cur = cur[k]
return cur
def _del(key_seq: List[str]) -> None:
cur = cfg
for k in key_seq[:-1]:
cur = cur[k]
del cur[key_seq[-1]]
if len(cur) == 0 and len(key_seq) > 1:
_del(key_seq[:-1])
_set(new_keys, _get(old_keys))
_del(old_keys)
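# Behaviour sketch for _rename (illustrative): the old leaf is copied to the new
# location and then deleted, pruning any parent node that becomes empty.
def _rename_example():
    cfg = CN()
    cfg.MODEL = CN()
    cfg.MODEL.WEIGHT = "model.pth"
    _rename(cfg, "MODEL.WEIGHT", "MODEL.WEIGHTS")
    return cfg.MODEL.WEIGHTS  # "model.pth"; MODEL.WEIGHT is gone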
class _RenameConverter:
"""
A converter that handles simple rename.
"""
RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name)
@classmethod
def upgrade(cls, cfg: CN) -> None:
for old, new in cls.RENAME:
_rename(cfg, old, new)
@classmethod
def downgrade(cls, cfg: CN) -> None:
for old, new in cls.RENAME[::-1]:
_rename(cfg, new, old)
class ConverterV1(_RenameConverter):
RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
class ConverterV2(_RenameConverter):
"""
A large bulk of rename, before public release.
"""
RENAME = [
("MODEL.WEIGHT", "MODEL.WEIGHTS"),
("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
(
"MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
"MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
),
(
"MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
"MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
),
(
"MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
"MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
),
("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
("TEST.AUG_ON", "TEST.AUG.ENABLED"),
("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
]
@classmethod
def upgrade(cls, cfg: CN) -> None:
super().upgrade(cfg)
if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
_rename(
cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
)
_rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
else:
_rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
_rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
@classmethod
def downgrade(cls, cfg: CN) -> None:
super().downgrade(cfg)
_rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
_rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version
|
the-stack_0_13842 | # -*- coding: utf-8 -*-
import re
import scrapy
import json
from locations.items import GeojsonPointItem
class StopAndShopSpider(scrapy.Spider):
#download_delay = 0.2
name = "stop_and_shop"
item_attributes = {'brand': "Stop and Shop"}
allowed_domains = ["stopandshop.com"]
start_urls = (
'https://stopandshop.com/apis/store-locator/locator/v1/stores/STSH?storeType=GROCERY&q=11797&maxDistance=1000000&details=true',
)
def parse(self, response):
        data = response.json()
        # The endpoint returns a JSON object whose "stores" key holds the store list.
        for item in data.get("stores", []):
            properties = {
                'ref': item['storeNo'],
                'name': item['name'],
                'addr_full': (item['address1'] + ' ' + item['address2']).strip(),
                'city': item['city'],
                'state': item['state'],
                'postcode': item['zip'],
                'country': 'US',
                'lat': float(item['latitude']),
                'lon': float(item['longitude']),
            }
            yield GeojsonPointItem(**properties)
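# Run sketch (illustrative): from the Scrapy project that contains this spider,
# `scrapy crawl stop_and_shop -o stores.json` writes the yielded items to a JSON file.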
|
the-stack_0_13845 | # filter.py
from pyspark import SparkContext
sc = SparkContext("local", "Filter app")
words = sc.parallelize(
["scala",
"java",
"hadoop",
"spark",
"akka",
"spark vs hadoop",
"pyspark",
"pyspark and spark"]
)
# words_filter = words.filter(lambda x: 'spark' in x)
def g(x):
    # keep only the elements that contain the substring "spark"
    return "spark" in x
words_filter = words.filter(g)
filtered = words_filter.collect()
print("Fitered RDD -> %s" % (filtered)) |
the-stack_0_13847 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck
#
# License: BSD 3 clause
from functools import update_wrapper
import functools
import sklearn
import numpy as np
import scipy.sparse as sp
import scipy
import scipy.stats
from scipy.sparse.linalg import lsqr as sparse_lsqr # noqa
import threadpoolctl
from .._config import config_context, get_config
from ..externals._packaging.version import parse as parse_version
np_version = parse_version(np.__version__)
sp_version = parse_version(scipy.__version__)
if sp_version >= parse_version("1.4"):
from scipy.sparse.linalg import lobpcg
else:
# Backport of lobpcg functionality from scipy 1.4.0, can be removed
# once support for sp_version < parse_version('1.4') is dropped
# mypy error: Name 'lobpcg' already defined (possibly by an import)
from ..externals._lobpcg import lobpcg # type: ignore # noqa
try:
from scipy.optimize._linesearch import line_search_wolfe2, line_search_wolfe1
except ImportError: # SciPy < 1.8
from scipy.optimize.linesearch import line_search_wolfe2, line_search_wolfe1 # type: ignore # noqa
def _object_dtype_isnan(X):
return X != X
# TODO: replace by copy=False, when only scipy > 1.1 is supported.
def _astype_copy_false(X):
"""Returns the copy=False parameter for
{ndarray, csr_matrix, csc_matrix}.astype when possible,
otherwise don't specify
"""
if sp_version >= parse_version("1.1") or not sp.issparse(X):
return {"copy": False}
else:
return {}
def _joblib_parallel_args(**kwargs):
"""Set joblib.Parallel arguments in a compatible way for 0.11 and 0.12+
For joblib 0.11 this maps both ``prefer`` and ``require`` parameters to
a specific ``backend``.
Parameters
----------
prefer : str in {'processes', 'threads'} or None
Soft hint to choose the default backend if no specific backend
was selected with the parallel_backend context manager.
require : 'sharedmem' or None
        Hard constraint to select the backend. If set to 'sharedmem',
the selected backend will be single-host and thread-based even
if the user asked for a non-thread based backend with
parallel_backend.
See joblib.Parallel documentation for more details
"""
import joblib
if parse_version(joblib.__version__) >= parse_version("0.12"):
return kwargs
extra_args = set(kwargs.keys()).difference({"prefer", "require"})
if extra_args:
raise NotImplementedError(
"unhandled arguments %s with joblib %s"
% (list(extra_args), joblib.__version__)
)
args = {}
if "prefer" in kwargs:
prefer = kwargs["prefer"]
if prefer not in ["threads", "processes", None]:
raise ValueError("prefer=%s is not supported" % prefer)
args["backend"] = {
"threads": "threading",
"processes": "multiprocessing",
None: None,
}[prefer]
if "require" in kwargs:
require = kwargs["require"]
if require not in [None, "sharedmem"]:
raise ValueError("require=%s is not supported" % require)
if require == "sharedmem":
args["backend"] = "threading"
return args
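# Mapping sketch (illustrative): under joblib < 0.12 the soft/hard hints collapse to
# a backend name, while newer joblib receives the keyword arguments unchanged, e.g.
#   _joblib_parallel_args(prefer="threads")    -> {"backend": "threading"}
#   _joblib_parallel_args(prefer="processes")  -> {"backend": "multiprocessing"}
#   _joblib_parallel_args(require="sharedmem") -> {"backend": "threading"}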
class loguniform(scipy.stats.reciprocal):
"""A class supporting log-uniform random variables.
Parameters
----------
low : float
The minimum value
high : float
The maximum value
Methods
-------
rvs(self, size=None, random_state=None)
Generate log-uniform random variables
The most useful method for Scikit-learn usage is highlighted here.
For a full list, see
`scipy.stats.reciprocal
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.reciprocal.html>`_.
This list includes all functions of ``scipy.stats`` continuous
distributions such as ``pdf``.
Notes
-----
This class generates values between ``low`` and ``high`` or
low <= loguniform(low, high).rvs() <= high
The logarithmic probability density function (PDF) is uniform. When
``x`` is a uniformly distributed random variable between 0 and 1, ``10**x``
are random variables that are equally likely to be returned.
This class is an alias to ``scipy.stats.reciprocal``, which uses the
reciprocal distribution:
https://en.wikipedia.org/wiki/Reciprocal_distribution
Examples
--------
>>> from sklearn.utils.fixes import loguniform
>>> rv = loguniform(1e-3, 1e1)
>>> rvs = rv.rvs(random_state=42, size=1000)
>>> rvs.min() # doctest: +SKIP
0.0010435856341129003
>>> rvs.max() # doctest: +SKIP
9.97403052786026
"""
def _take_along_axis(arr, indices, axis):
"""Implements a simplified version of np.take_along_axis if numpy
version < 1.15"""
if np_version >= parse_version("1.15"):
return np.take_along_axis(arr=arr, indices=indices, axis=axis)
else:
if axis is None:
arr = arr.flatten()
if not np.issubdtype(indices.dtype, np.intp):
raise IndexError("`indices` must be an integer array")
if arr.ndim != indices.ndim:
raise ValueError(
"`indices` and `arr` must have the same number of dimensions"
)
shape_ones = (1,) * indices.ndim
dest_dims = list(range(axis)) + [None] + list(range(axis + 1, indices.ndim))
# build a fancy index, consisting of orthogonal aranges, with the
# requested index inserted at the right location
fancy_index = []
for dim, n in zip(dest_dims, arr.shape):
if dim is None:
fancy_index.append(indices)
else:
ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim + 1 :]
fancy_index.append(np.arange(n).reshape(ind_shape))
fancy_index = tuple(fancy_index)
return arr[fancy_index]
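# Usage sketch (illustrative): combined with argsort this sorts each row, matching
# np.take_along_axis on numpy >= 1.15.
def _take_along_axis_example():
    arr = np.array([[3, 1, 2], [30, 10, 20]])
    order = np.argsort(arr, axis=1)
    return _take_along_axis(arr, order, axis=1)  # [[1, 2, 3], [10, 20, 30]]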
# remove when https://github.com/joblib/joblib/issues/1071 is fixed
def delayed(function):
"""Decorator used to capture the arguments of a function."""
@functools.wraps(function)
def delayed_function(*args, **kwargs):
return _FuncWrapper(function), args, kwargs
return delayed_function
class _FuncWrapper:
""" "Load the global configuration before calling the function."""
def __init__(self, function):
self.function = function
self.config = get_config()
update_wrapper(self, self.function)
def __call__(self, *args, **kwargs):
with config_context(**self.config):
return self.function(*args, **kwargs)
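# Propagation sketch (illustrative): a function captured with `delayed` re-enters the
# scikit-learn config that was active at capture time, even when it later runs in a
# joblib worker.
def _delayed_example():
    func, args, kwargs = delayed(get_config)()
    return func(*args, **kwargs)["assume_finite"]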
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0):
"""Implements a simplified linspace function as of numpy version >= 1.16.
As of numpy 1.16, the arguments start and stop can be array-like and
there is an optional argument `axis`.
For simplicity, we only allow 1d array-like to be passed to start and stop.
See: https://github.com/numpy/numpy/pull/12388 and numpy 1.16 release
notes about start and stop arrays for linspace logspace and geomspace.
Returns
-------
out : ndarray of shape (num, n_start) or (num,)
The output array with `n_start=start.shape[0]` columns.
"""
if np_version < parse_version("1.16"):
start = np.asanyarray(start) * 1.0
stop = np.asanyarray(stop) * 1.0
dt = np.result_type(start, stop, float(num))
if dtype is None:
dtype = dt
if start.ndim == 0 == stop.ndim:
return np.linspace(
start=start,
stop=stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
)
if start.ndim != 1 or stop.ndim != 1 or start.shape != stop.shape:
raise ValueError("start and stop must be 1d array-like of same shape.")
n_start = start.shape[0]
out = np.empty((num, n_start), dtype=dtype)
step = np.empty(n_start, dtype=np.float)
for i in range(n_start):
out[:, i], step[i] = np.linspace(
start=start[i],
stop=stop[i],
num=num,
endpoint=endpoint,
retstep=True,
dtype=dtype,
)
if axis != 0:
out = np.moveaxis(out, 0, axis)
if retstep:
return out, step
else:
return out
else:
return np.linspace(
start=start,
stop=stop,
num=num,
endpoint=endpoint,
retstep=retstep,
dtype=dtype,
axis=axis,
)
# Rename the `method` kwarg to `interpolation` for NumPy < 1.22, because
# `interpolation` kwarg was deprecated in favor of `method` in NumPy >= 1.22.
def _percentile(a, q, *, method="linear", **kwargs):
return np.percentile(a, q, interpolation=method, **kwargs)
if np_version < parse_version("1.22"):
percentile = _percentile
else: # >= 1.22
from numpy import percentile # type: ignore # noqa
# compatibility fix for threadpoolctl >= 3.0.0
# since version 3 it's possible to setup a global threadpool controller to avoid
# looping through all loaded shared libraries each time.
# the global controller is created during the first call to threadpoolctl.
def _get_threadpool_controller():
if not hasattr(threadpoolctl, "ThreadpoolController"):
return None
if not hasattr(sklearn, "_sklearn_threadpool_controller"):
sklearn._sklearn_threadpool_controller = threadpoolctl.ThreadpoolController()
return sklearn._sklearn_threadpool_controller
def threadpool_limits(limits=None, user_api=None):
controller = _get_threadpool_controller()
if controller is not None:
return controller.limit(limits=limits, user_api=user_api)
else:
return threadpoolctl.threadpool_limits(limits=limits, user_api=user_api)
threadpool_limits.__doc__ = threadpoolctl.threadpool_limits.__doc__
def threadpool_info():
controller = _get_threadpool_controller()
if controller is not None:
return controller.info()
else:
return threadpoolctl.threadpool_info()
threadpool_info.__doc__ = threadpoolctl.threadpool_info.__doc__
|
the-stack_0_13848 | # vim:ts=4:sw=4:sts=4:et
# -*- coding: utf-8 -*-
"""Additional auxiliary data types"""
from itertools import islice
class Matrix:
"""Simple matrix data type.
Of course there are much more advanced matrix data types for Python (for
instance, the C{ndarray} data type of Numeric Python) and this implementation
does not want to compete with them. The only role of this data type is to
provide a convenient interface for the matrices returned by the C{Graph}
object (for instance, allow indexing with tuples in the case of adjacency
matrices and so on).
"""
def __init__(self, data=None):
"""Initializes a matrix.
@param data: the elements of the matrix as a list of lists, or C{None} to
create a 0x0 matrix.
"""
self._nrow, self._ncol, self._data = 0, 0, []
self.data = data
@classmethod
def Fill(cls, value, *args):
"""Creates a matrix filled with the given value
@param value: the value to be used
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped.
"""
if len(args) < 1:
raise TypeError("expected an integer or a tuple")
if len(args) == 1:
if hasattr(args[0], "__len__"):
height, width = int(args[0][0]), int(args[0][1])
else:
height, width = int(args[0]), int(args[0])
else:
height, width = int(args[0]), int(args[1])
mtrx = [[value] * width for _ in range(height)]
return cls(mtrx)
@classmethod
def Zero(cls, *args):
"""Creates a matrix filled with zeros.
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped.
"""
result = cls.Fill(0, *args)
return result
@classmethod
def Identity(cls, *args):
"""Creates an identity matrix.
@keyword shape: the shape of the matrix. Can be a single integer,
two integers or a tuple. If a single integer is
given here, the matrix is assumed to be square-shaped.
"""
result = cls.Fill(0, *args)
for i in range(min(result.shape)):
result._data[i][i] = 1
return result
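    # Constructor sketch (illustrative): Matrix.Identity(2) -> Matrix([[1, 0], [0, 1]])
    # and Matrix.Fill(7, 2, 3) -> a 2 x 3 matrix filled with sevens.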
def _set_data(self, data=None):
"""Sets the data stored in the matrix"""
if data is not None:
self._data = [list(row) for row in data]
self._nrow = len(self._data)
if self._nrow > 0:
self._ncol = max(len(row) for row in self._data)
else:
self._ncol = 0
for row in self._data:
if len(row) < self._ncol:
row.extend([0] * (self._ncol - len(row)))
def _get_data(self):
"""Returns the data stored in the matrix as a list of lists"""
return [list(row) for row in self._data]
data = property(_get_data, _set_data)
@property
def shape(self):
"""Returns the shape of the matrix as a tuple"""
return self._nrow, self._ncol
def __add__(self, other):
"""Adds the given value to the matrix.
@param other: either a scalar or a matrix. Scalars will
be added to each element of the matrix. Matrices will
be added together elementwise.
@return: the result matrix
"""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
return self.__class__(
[
[a + b for a, b in zip(row_a, row_b)]
for row_a, row_b in zip(self, other)
]
)
else:
return self.__class__([[item + other for item in row] for row in self])
def __eq__(self, other):
"""Checks whether a given matrix is equal to another one"""
return (
isinstance(other, Matrix)
and self._nrow == other._nrow
and self._ncol == other._ncol
and self._data == other._data
)
def __getitem__(self, i):
"""Returns a single item, a row or a column of the matrix
@param i: if a single integer, returns the M{i}th row as a list. If a
slice, returns the corresponding rows as another L{Matrix} object. If
a 2-tuple, the first element of the tuple is used to select a row and
the second is used to select a column.
"""
if isinstance(i, int):
return list(self._data[i])
elif isinstance(i, slice):
return self.__class__(self._data[i])
elif isinstance(i, tuple):
try:
first = i[0]
except IndexError:
first = slice(None)
try:
second = i[1]
except IndexError:
second = slice(None)
if type(first) == slice and type(second) == slice:
return self.__class__(row[second] for row in self._data[first])
elif type(first) == slice:
return [row[second] for row in self._data[first]]
else:
return self._data[first][second]
else:
raise IndexError("invalid matrix index")
def __hash__(self):
"""Returns a hash value for a matrix."""
        return hash((self._nrow, self._ncol, tuple(tuple(row) for row in self._data)))
def __iadd__(self, other):
"""In-place addition of a matrix or scalar."""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
for row_a, row_b in zip(self._data, other):
for i in range(len(row_a)):
row_a[i] += row_b[i]
else:
for row in self._data:
for i in range(len(row)):
row[i] += other
return self
def __isub__(self, other):
"""In-place subtraction of a matrix or scalar."""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
for row_a, row_b in zip(self._data, other):
for i in range(len(row_a)):
row_a[i] -= row_b[i]
else:
for row in self._data:
for i in range(len(row)):
row[i] -= other
return self
def __ne__(self, other):
"""Checks whether a given matrix is not equal to another one"""
return not self == other
def __setitem__(self, i, value):
"""Sets a single item, a row or a column of the matrix
@param i: if a single integer, sets the M{i}th row as a list. If a
slice, sets the corresponding rows from another L{Matrix} object.
If a 2-tuple, the first element of the tuple is used to select a row
and the second is used to select a column.
@param value: the new value
"""
if isinstance(i, int):
# Setting a row
if len(value) != len(self._data[i]):
raise ValueError("new value must have %d items" % self._ncol)
self._data[i] = list(value)
elif isinstance(i, slice):
# Setting multiple rows
if len(value) != len(self._data[i]):
raise ValueError("new value must have %d items" % self._ncol)
if any(len(row) != self._ncol for row in value):
raise ValueError("rows of new value must have %d items" % self._ncol)
self._data[i] = [list(row) for row in value]
elif isinstance(i, tuple):
try:
first = i[0]
except IndexError:
first = slice(None)
try:
second = i[1]
except IndexError:
second = slice(None)
if type(first) == slice and type(second) == slice:
# Setting a submatrix
# TODO
raise NotImplementedError
elif type(first) == slice:
# Setting a submatrix
raise NotImplementedError
else:
# Setting a single element
self._data[first][second] = value
else:
raise IndexError("invalid matrix index")
def __sub__(self, other):
"""Subtracts the given value from the matrix.
@param other: either a scalar or a matrix. Scalars will
be subtracted from each element of the matrix. Matrices will
be subtracted together elementwise.
@return: the result matrix
"""
if isinstance(other, Matrix):
if self.shape != other.shape:
raise ValueError("matrix shapes do not match")
return self.__class__(
[
[a - b for a, b in zip(row_a, row_b)]
for row_a, row_b in zip(self, other)
]
)
else:
return self.__class__([[item - other for item in row] for row in self])
def __repr__(self):
class_name = self.__class__.__name__
rows = ("[%s]" % ", ".join(repr(item) for item in row) for row in self)
return "%s([%s])" % (class_name, ", ".join(rows))
def __str__(self):
rows = ("[%s]" % ", ".join(repr(item) for item in row) for row in self)
return "[%s]" % "\n ".join(rows)
def __iter__(self):
"""Support for iteration.
This is actually implemented as a generator, so there is no need for a
separate iterator class. The generator returns I{copies} of the rows in
the matrix as lists to avoid messing around with the internals. Feel
free to do anything with the copies, the changes won't be reflected in
the original matrix."""
return (list(row) for row in self._data)
def __plot__(self, context, bbox, palette, **kwds):
"""Plots the matrix to the given Cairo context in the given box
Besides the usual self-explanatory plotting parameters (C{context},
C{bbox}, C{palette}), it accepts the following keyword arguments:
- C{style}: the style of the plot. C{boolean} is useful for plotting
matrices with boolean (C{True}/C{False} or 0/1) values: C{False}
will be shown with a white box and C{True} with a black box.
C{palette} uses the given palette to represent numbers by colors,
the minimum will be assigned to palette color index 0 and the maximum
will be assigned to the length of the palette. C{None} draws transparent
cell backgrounds only. The default style is C{boolean} (but it may
change in the future). C{None} values in the matrix are treated
specially in both cases: nothing is drawn in the cell corresponding
to C{None}.
- C{square}: whether the cells of the matrix should be square or not.
Default is C{True}.
- C{grid_width}: line width of the grid shown on the matrix. If zero or
negative, the grid is turned off. The grid is also turned off if the size
of a cell is less than three times the given line width. Default is C{1}.
Fractional widths are also allowed.
- C{border_width}: line width of the border drawn around the matrix.
If zero or negative, the border is turned off. Default is C{1}.
- C{row_names}: the names of the rows
- C{col_names}: the names of the columns.
- C{values}: values to be displayed in the cells. If C{None} or
C{False}, no values are displayed. If C{True}, the values come
from the matrix being plotted. If it is another matrix, the
values of that matrix are shown in the cells. In this case,
the shape of the value matrix must match the shape of the
matrix being plotted.
- C{value_format}: a format string or a callable that specifies how
the values should be plotted. If it is a callable, it must be a
function that expects a single value and returns a string.
Example: C{"%#.2f"} for floating-point numbers with always exactly
two digits after the decimal point. See the Python documentation of
the C{%} operator for details on the format string. If the format
string is not given, it defaults to the C{str} function.
If only the row names or the column names are given and the matrix
is square-shaped, the same names are used for both column and row
names.
"""
grid_width = float(kwds.get("grid_width", 1.0))
border_width = float(kwds.get("border_width", 1.0))
style = kwds.get("style", "boolean")
row_names = kwds.get("row_names")
col_names = kwds.get("col_names", row_names)
values = kwds.get("values")
value_format = kwds.get("value_format", str)
# Validations
if style not in ("boolean", "palette", "none", None):
raise ValueError("invalid style")
if style == "none":
style = None
if row_names is None and col_names is not None:
row_names = col_names
if row_names is not None:
row_names = [str(name) for name in islice(row_names, self._nrow)]
if len(row_names) < self._nrow:
row_names.extend([""] * (self._nrow - len(row_names)))
if col_names is not None:
col_names = [str(name) for name in islice(col_names, self._ncol)]
if len(col_names) < self._ncol:
col_names.extend([""] * (self._ncol - len(col_names)))
if values is False:
values = None
if values is True:
values = self
if isinstance(values, list):
            values = Matrix(values)
if values is not None and not isinstance(values, Matrix):
raise TypeError("values must be None, False, True or a matrix")
if values is not None and values.shape != self.shape:
raise ValueError("values must be a matrix of size %s" % self.shape)
# Calculate text extents if needed
if row_names is not None or col_names is not None:
te = context.text_extents
space_width = te(" ")[4]
max_row_name_width = max([te(s)[4] for s in row_names]) + space_width
max_col_name_width = max([te(s)[4] for s in col_names]) + space_width
else:
max_row_name_width, max_col_name_width = 0, 0
# Calculate sizes
total_width = float(bbox.width) - max_row_name_width
total_height = float(bbox.height) - max_col_name_width
dx = total_width / self.shape[1]
dy = total_height / self.shape[0]
if kwds.get("square", True):
dx, dy = min(dx, dy), min(dx, dy)
total_width, total_height = dx * self.shape[1], dy * self.shape[0]
ox = bbox.left + (bbox.width - total_width - max_row_name_width) / 2.0
oy = bbox.top + (bbox.height - total_height - max_col_name_width) / 2.0
ox += max_row_name_width
oy += max_col_name_width
# Determine rescaling factors for the palette if needed
if style == "palette":
mi, ma = self.min(), self.max()
color_offset = mi
color_ratio = (len(palette) - 1) / float(ma - mi)
# Validate grid width
if dx < 3 * grid_width or dy < 3 * grid_width:
grid_width = 0.0
if grid_width > 0:
context.set_line_width(grid_width)
else:
# When the grid width is zero, we will still stroke the
# rectangles, but with the same color as the fill color
# of the cell - otherwise we would get thin white lines
# between the cells as a drawing artifact
context.set_line_width(1)
# Draw row names (if any)
context.set_source_rgb(0.0, 0.0, 0.0)
if row_names is not None:
x, y = ox, oy
for heading in row_names:
_, _, _, h, xa, _ = context.text_extents(heading)
context.move_to(x - xa - space_width, y + (dy + h) / 2.0)
context.show_text(heading)
y += dy
# Draw column names (if any)
if col_names is not None:
context.save()
context.translate(ox, oy)
context.rotate(-1.5707963285) # pi/2
x, y = 0.0, 0.0
for heading in col_names:
_, _, _, h, _, _ = context.text_extents(heading)
context.move_to(x + space_width, y + (dx + h) / 2.0)
context.show_text(heading)
y += dx
context.restore()
# Draw matrix
x, y = ox, oy
if style is None:
fill = lambda: None # noqa: E731
else:
fill = context.fill_preserve
for row in self:
for item in row:
if item is None:
x += dx
continue
if style == "boolean":
if item:
context.set_source_rgb(0.0, 0.0, 0.0)
else:
context.set_source_rgb(1.0, 1.0, 1.0)
elif style == "palette":
cidx = int((item - color_offset) * color_ratio)
if cidx < 0:
cidx = 0
context.set_source_rgba(*palette.get(cidx))
context.rectangle(x, y, dx, dy)
if grid_width > 0:
fill()
context.set_source_rgb(0.5, 0.5, 0.5)
context.stroke()
else:
fill()
context.stroke()
x += dx
x, y = ox, y + dy
# Draw cell values
if values is not None:
x, y = ox, oy
context.set_source_rgb(0.0, 0.0, 0.0)
for row in values.data:
if hasattr(value_format, "__call__"):
values = [value_format(item) for item in row]
else:
values = [value_format % item for item in row]
for item in values:
th, tw = context.text_extents(item)[3:5]
context.move_to(x + (dx - tw) / 2.0, y + (dy + th) / 2.0)
context.show_text(item)
x += dx
x, y = ox, y + dy
# Draw borders
if border_width > 0:
context.set_line_width(border_width)
context.set_source_rgb(0.0, 0.0, 0.0)
context.rectangle(ox, oy, dx * self.shape[1], dy * self.shape[0])
context.stroke()
def min(self, dim=None):
"""Returns the minimum of the matrix along the given dimension
@param dim: the dimension. 0 means determining the column minimums, 1 means
determining the row minimums. If C{None}, the global minimum is
returned.
"""
if dim == 1:
return [min(row) for row in self._data]
if dim == 0:
return [min(row[idx] for row in self._data) for idx in range(self._ncol)]
return min(min(row) for row in self._data)
def max(self, dim=None):
"""Returns the maximum of the matrix along the given dimension
@param dim: the dimension. 0 means determining the column maximums, 1 means
determining the row maximums. If C{None}, the global maximum is
returned.
"""
if dim == 1:
return [max(row) for row in self._data]
if dim == 0:
return [max(row[idx] for row in self._data) for idx in range(self._ncol)]
return max(max(row) for row in self._data)
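# Illustrative sketch (not part of the original class): assuming the usual
# list-of-lists constructor, this shows the dim semantics of min()/max() above.
def _example_matrix_reductions():
    m = Matrix([[1, 2], [3, 4]])
    assert m.min() == 1 and m.max() == 4
    assert m.min(dim=0) == [1, 2] and m.max(dim=0) == [3, 4]  # column-wise
    assert m.min(dim=1) == [1, 3] and m.max(dim=1) == [2, 4]  # row-wise
    return m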
class DyadCensus(tuple):
"""Dyad census of a graph.
This is a pretty simple class - basically it is a tuple, but it allows
the user to refer to its individual items by the names C{mutual} (or
C{mut}), C{asymmetric} (or C{asy} or C{asym} or C{asymm}) and C{null}.
Examples:
>>> from igraph import Graph
>>> g=Graph.Erdos_Renyi(100, 0.2, directed=True)
>>> dc=g.dyad_census()
>>> print(dc.mutual) #doctest:+SKIP
179
>>> print(dc["asym"]) #doctest:+SKIP
1609
>>> print(tuple(dc), list(dc)) #doctest:+SKIP
(179, 1609, 3162) [179, 1609, 3162]
>>> print(sorted(dc.as_dict().items())) #doctest:+ELLIPSIS
[('asymmetric', ...), ('mutual', ...), ('null', ...)]
"""
_remap = {
"mutual": 0,
"mut": 0,
"sym": 0,
"symm": 0,
"asy": 1,
"asym": 1,
"asymm": 1,
"asymmetric": 1,
"null": 2,
}
def __getitem__(self, idx):
return tuple.__getitem__(self, self._remap.get(idx, idx))
def __getattr__(self, attr):
if attr in self._remap:
return tuple.__getitem__(self, self._remap[attr])
raise AttributeError("no such attribute: %s" % attr)
def __repr__(self):
return "DyadCensus((%d, %d, %d))" % self
def __str__(self):
return "%d mutual, %d asymmetric, %d null dyads" % self
def as_dict(self):
"""Converts the dyad census to a dict using the known dyad names."""
return {"mutual": self[0], "asymmetric": self[1], "null": self[2]}
class TriadCensus(tuple):
"""Triad census of a graph.
This is a pretty simple class - basically it is a tuple, but it allows
the user to refer to its individual items by the following triad names:
- C{003} -- the empty graph
- C{012} -- a graph with a single directed edge (C{A --> B, C})
- C{102} -- a graph with a single mutual edge (C{A <-> B, C})
- C{021D} -- the binary out-tree (C{A <-- B --> C})
- C{021U} -- the binary in-tree (C{A --> B <-- C})
- C{021C} -- the directed line (C{A --> B --> C})
- C{111D} -- C{A <-> B <-- C}
- C{111U} -- C{A <-> B --> C}
- C{030T} -- C{A --> B <-- C, A --> C}
- C{030C} -- C{A <-- B <-- C, A --> C}
- C{201} -- C{A <-> B <-> C}
- C{120D} -- C{A <-- B --> C, A <-> C}
- C{120U} -- C{A --> B <-- C, A <-> C}
- C{120C} -- C{A --> B --> C, A <-> C}
    - C{210} -- C{A --> B <-> C, A <-> C}
- C{300} -- the complete graph (C{A <-> B <-> C, A <-> C})
Attribute and item accessors are provided. Due to the syntax of Python,
attribute names are not allowed to start with a number, therefore the
triad names must be prepended with a lowercase C{t} when accessing
them as attributes. This is not necessary with the item accessor syntax.
Examples:
>>> from igraph import Graph
>>> g=Graph.Erdos_Renyi(100, 0.2, directed=True)
>>> tc=g.triad_census()
>>> print(tc.t003) #doctest:+SKIP
39864
>>> print(tc["030C"]) #doctest:+SKIP
1206
"""
_remap = {
"003": 0,
"012": 1,
"102": 2,
"021D": 3,
"021U": 4,
"021C": 5,
"111D": 6,
"111U": 7,
"030T": 8,
"030C": 9,
"201": 10,
"120D": 11,
"120U": 12,
"120C": 13,
"210": 14,
"300": 15,
}
def __getitem__(self, idx):
if isinstance(idx, str):
idx = idx.upper()
return tuple.__getitem__(self, self._remap.get(idx, idx))
def __getattr__(self, attr):
if isinstance(attr, str) and attr[0] == "t" and attr[1:].upper() in self._remap:
return tuple.__getitem__(self, self._remap[attr[1:].upper()])
raise AttributeError("no such attribute: %s" % attr)
def __repr__(self):
return "TriadCensus((%s))" % ", ".join(str(item) for item in self)
def __str__(self):
maxidx = len(self)
maxcount = max(self)
numwidth = len(str(maxcount))
captionwidth = max(len(key) for key in self._remap)
colcount = 4
        rowcount = maxidx // colcount
if rowcount * colcount < maxidx:
rowcount += 1
invmap = dict((v, k) for k, v in self._remap.items())
result, row, idx = [], [], 0
for _ in range(rowcount):
for _ in range(colcount):
if idx >= maxidx:
break
row.append(
"%-*s: %*d"
% (captionwidth, invmap.get(idx, ""), numwidth, self[idx])
)
idx += 1
result.append(" | ".join(row))
row = []
return "\n".join(result)
class UniqueIdGenerator:
"""A dictionary-like class that can be used to assign unique IDs to
names (say, vertex names).
Usage:
>>> gen = UniqueIdGenerator()
>>> gen["A"]
0
>>> gen["B"]
1
>>> gen["C"]
2
>>> gen["A"] # Retrieving already existing ID
0
>>> gen.add("D") # Synonym of gen["D"]
3
>>> len(gen) # Number of already used IDs
4
>>> "C" in gen
True
>>> "E" in gen
False
"""
    def __init__(self, id_generator=None, initial=None):
        """Creates a new unique ID generator. `id_generator` specifies how we
assign new IDs to elements that do not have an ID yet. If it is `None`,
elements will be assigned integer identifiers starting from 0. If it is
an integer, elements will be assigned identifiers starting from the given
integer. If it is an iterator or generator, its `next` method will be
called every time a new ID is needed."""
if id_generator is None:
id_generator = 0
if isinstance(id_generator, int):
import itertools
self._generator = itertools.count(id_generator)
else:
self._generator = id_generator
self._ids = {}
if initial:
for value in initial:
self.add(value)
def __contains__(self, item):
"""Checks whether `item` already has an ID or not."""
return item in self._ids
def __getitem__(self, item):
"""Retrieves the ID corresponding to `item`. Generates a new ID for
`item` if it is the first time we request an ID for it."""
try:
return self._ids[item]
except KeyError:
self._ids[item] = next(self._generator)
return self._ids[item]
def __setitem__(self, item, value):
"""Overrides the ID for `item`."""
self._ids[item] = value
    def __len__(self):
        """Returns the number of items"""
return len(self._ids)
def reverse_dict(self):
"""Returns the reverse mapping, i.e., the one that maps from generated
IDs to their corresponding objects"""
return dict((v, k) for k, v in self._ids.items())
def values(self):
"""Returns the values stored so far. If the generator generates items
according to the standard sorting order, the values returned will be
exactly in the order they were added. This holds for integer IDs for
instance (but for many other ID generators as well)."""
return sorted(list(self._ids.keys()), key=self._ids.__getitem__)
add = __getitem__
|
the-stack_0_13851 | from __future__ import unicode_literals
import pytest
from toolz import dissoc, merge
from eth_utils import (
encode_hex,
)
from eth_tester.exceptions import (
ValidationError,
)
from eth_tester.validation import DefaultValidator
@pytest.fixture
def validator():
_validator = DefaultValidator()
return _validator
@pytest.mark.parametrize(
"block_hash,is_valid",
(
(1, False),
(True, False),
(b'\x00' * 32, True),
(b'\xff' * 32, True),
('\x00' * 32, False),
(encode_hex(b'\x00' * 32), False),
),
)
def test_block_hash_output_validation(validator, block_hash, is_valid):
if is_valid:
validator.validate_outbound_block_hash(block_hash)
else:
with pytest.raises(ValidationError):
validator.validate_outbound_block_hash(block_hash)
ZERO_32BYTES = b'\x00' * 32
ZERO_8BYTES = b'\x00' * 8
ZERO_ADDRESS = b'\x00' * 20
ADDRESS_A = b'\x00' * 19 + b'\x01'
TOPIC_A = b'\x00' * 31 + b'\x01'
TOPIC_B = b'\x00' * 31 + b'\x02'
HASH32_AS_TEXT = '\x00' * 32
HASH31 = b'\x00' * 31
def _make_legacy_txn(
hash=ZERO_32BYTES,
nonce=0,
block_hash=ZERO_32BYTES,
block_number=0,
transaction_index=0,
_from=ZERO_ADDRESS,
to=ZERO_ADDRESS,
value=0,
gas=21000,
gas_price=1,
data=b'',
v=0,
r=0,
s=0
):
return {
"type": '0x0',
"hash": hash,
"nonce": nonce,
"block_hash": block_hash,
"block_number": block_number,
"transaction_index": transaction_index,
"from": _from,
"to": to,
"value": value,
"gas": gas,
"gas_price": gas_price,
"data": data,
"v": v,
"r": r,
"s": s,
}
def _make_access_list_txn(chain_id=131277322940537, access_list=[], **kwargs,):
legacy_kwargs = dissoc(dict(**kwargs), "chain_id", "access_list")
return merge(
_make_legacy_txn(**legacy_kwargs),
{
"type": "0x1",
"chain_id": chain_id,
"access_list": access_list,
}
)
# This is an outbound transaction so we still keep the gas_price for now since the gas_price is
# the min(max_fee_per_gas, base_fee_per_gas + max_priority_fee_per_gas).
# TODO: Sometime in 2022 the inclusion of gas_price may be removed from dynamic fee
# transactions and we can get rid of this behavior.
# https://github.com/ethereum/execution-specs/pull/251
def _make_dynamic_fee_txn(
chain_id=131277322940537,
max_fee_per_gas=2000000000,
max_priority_fee_per_gas=1000000000,
access_list=[],
**kwargs,
):
legacy_kwargs = dissoc(
dict(**kwargs),
"chain_id", "max_fee_per_gas", "max_priority_fee_per_gas", "access_list"
)
return merge(
_make_access_list_txn(
chain_id=chain_id, access_list=access_list, **legacy_kwargs
),
{
"type": "0x2",
"max_fee_per_gas": max_fee_per_gas,
"max_priority_fee_per_gas": max_priority_fee_per_gas,
}
)
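# Illustrative sketch (not part of the original tests): the effective gas price
# rule referenced in the note above, written out as a tiny helper; the example
# numbers in the comment are assumptions, not values used by the tests.
def _effective_gas_price(max_fee_per_gas, max_priority_fee_per_gas, base_fee_per_gas):
    # e.g. min(2_000_000_000, 1_000_000_000 + 1_000_000_000) == 2_000_000_000
    return min(max_fee_per_gas, base_fee_per_gas + max_priority_fee_per_gas)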
@pytest.mark.parametrize(
"transaction,is_valid",
(
(_make_legacy_txn(), True),
(_make_access_list_txn(), True),
(_make_dynamic_fee_txn(), True),
(_make_legacy_txn(hash=HASH32_AS_TEXT), False),
(_make_legacy_txn(hash=HASH31), False),
(_make_legacy_txn(nonce=-1), False),
(_make_legacy_txn(nonce=1.0), False),
(_make_legacy_txn(nonce=True), False),
(_make_legacy_txn(value=-1), False),
(_make_legacy_txn(value=1.0), False),
(_make_legacy_txn(value=True), False),
(_make_legacy_txn(block_number=-1), False),
(_make_legacy_txn(block_number=1.0), False),
(_make_legacy_txn(block_number=True), False),
(_make_legacy_txn(gas=-1), False),
(_make_legacy_txn(gas=1.0), False),
(_make_legacy_txn(gas=True), False),
(_make_legacy_txn(gas_price=-1), False),
(_make_legacy_txn(gas_price=1.0), False),
(_make_legacy_txn(gas_price=True), False),
(_make_legacy_txn(data=''), False),
(_make_legacy_txn(data='0x'), False),
(_make_legacy_txn(block_hash=HASH32_AS_TEXT), False),
(_make_legacy_txn(block_hash=HASH31), False),
(_make_legacy_txn(transaction_index=None, block_hash=None, block_number=None), True),
(_make_access_list_txn(transaction_index=None, block_hash=None, block_number=None,), True),
(_make_dynamic_fee_txn(transaction_index=None, block_hash=None, block_number=None,), True),
(_make_access_list_txn(chain_id='1'), False),
(_make_dynamic_fee_txn(chain_id='1'), False),
(_make_access_list_txn(chain_id=-1), False),
(_make_dynamic_fee_txn(chain_id=-1), False),
(_make_access_list_txn(chain_id=None), False),
(_make_dynamic_fee_txn(chain_id=None), False),
(_make_legacy_txn(v=0), True),
(_make_dynamic_fee_txn(v=0), True),
(_make_access_list_txn(v=0), True),
(_make_legacy_txn(v=1), True),
(_make_dynamic_fee_txn(v=1), True),
(_make_access_list_txn(v=1), True),
(_make_legacy_txn(v=27), True),
(_make_access_list_txn(v=27), False),
(_make_dynamic_fee_txn(v=27), False),
(_make_dynamic_fee_txn(max_fee_per_gas=1.0), False),
(_make_dynamic_fee_txn(max_priority_fee_per_gas=1.0), False),
(_make_dynamic_fee_txn(max_fee_per_gas='1'), False),
(_make_dynamic_fee_txn(max_priority_fee_per_gas='1'), False),
(_make_access_list_txn(access_list=((b'\xf0' * 20, (0, 2)),),), True),
(_make_dynamic_fee_txn(access_list=((b'\xef' * 20, (1, 2, 3, 4)),),), True),
(_make_access_list_txn(access_list=()), True),
(_make_dynamic_fee_txn(access_list=()), True),
(_make_access_list_txn(access_list=((b'\xf0' * 19, (0, 2)),),), False),
(_make_dynamic_fee_txn(access_list=((b'\xf0' * 19, ()),),), False),
(_make_access_list_txn(access_list=((b'\xf0' * 20, ('0', 2)),),), False),
(_make_dynamic_fee_txn(access_list=((b'\xf0' * 20, (b'', 1)),),), False),
(_make_dynamic_fee_txn(access_list=(('', (1, 2)),),), False),
)
)
def test_transaction_output_validation(validator, transaction, is_valid):
if is_valid:
validator.validate_outbound_transaction(transaction)
else:
with pytest.raises(ValidationError):
validator.validate_outbound_transaction(transaction)
def _make_log(_type="mined",
log_index=0,
transaction_index=0,
transaction_hash=ZERO_32BYTES,
block_hash=ZERO_32BYTES,
block_number=0,
address=ZERO_ADDRESS,
data=b'',
topics=None):
return {
"type": _type,
"log_index": log_index,
"transaction_index": transaction_index,
"transaction_hash": transaction_hash,
"block_hash": block_hash,
"block_number": block_number,
"address": address,
"data": data,
"topics": topics or [],
}
@pytest.mark.parametrize(
"log_entry,is_valid",
(
(_make_log(), True),
(_make_log(_type="pending", transaction_index=None, block_hash=None, block_number=None), True),
(_make_log(_type="invalid-type"), False),
(_make_log(transaction_index=-1), False),
(_make_log(block_number=-1), False),
(_make_log(transaction_hash=HASH31), False),
(_make_log(transaction_hash=HASH32_AS_TEXT), False),
(_make_log(block_hash=HASH31), False),
(_make_log(block_hash=HASH32_AS_TEXT), False),
(_make_log(address=encode_hex(ADDRESS_A)), False),
(_make_log(data=''), False),
(_make_log(data=None), False),
(_make_log(topics=[HASH32_AS_TEXT]), False),
(_make_log(topics=[HASH31]), False),
(_make_log(topics=[TOPIC_A, TOPIC_B]), True),
(_make_log(address=ADDRESS_A), True),
),
)
def test_log_entry_output_validation(validator, log_entry, is_valid):
if is_valid:
validator.validate_outbound_log_entry(log_entry)
else:
with pytest.raises(ValidationError):
validator.validate_outbound_log_entry(log_entry)
def _make_receipt(transaction_hash=ZERO_32BYTES,
transaction_index=0,
block_number=0,
block_hash=ZERO_32BYTES,
cumulative_gas_used=0,
gas_used=21000,
effective_gas_price=1000000000,
contract_address=None,
logs=None,
state_root=b'\x00',
_type='0x0'):
return {
"transaction_hash": transaction_hash,
"transaction_index": transaction_index,
"block_number": block_number,
"block_hash": block_hash,
"cumulative_gas_used": cumulative_gas_used,
"gas_used": gas_used,
"effective_gas_price": effective_gas_price,
"contract_address": contract_address,
"logs": logs or [],
"state_root": state_root,
"type": _type,
}
@pytest.mark.parametrize(
"receipt,is_valid",
(
(_make_receipt(), True),
(_make_receipt(transaction_hash=HASH32_AS_TEXT), False),
(_make_receipt(transaction_hash=HASH31), False),
(_make_receipt(block_hash=HASH32_AS_TEXT), False),
(_make_receipt(block_hash=HASH31), False),
(_make_receipt(transaction_index=-1), False),
(_make_receipt(transaction_index=1.0), False),
(_make_receipt(transaction_index=True), False),
(_make_receipt(block_number=-1), False),
(_make_receipt(block_number=1.0), False),
(_make_receipt(block_number=True), False),
(_make_receipt(gas_used=-1), False),
(_make_receipt(gas_used=1.0), False),
(_make_receipt(gas_used=True), False),
(_make_receipt(cumulative_gas_used=-1), False),
(_make_receipt(cumulative_gas_used=1.0), False),
(_make_receipt(cumulative_gas_used=True), False),
(_make_receipt(contract_address=ZERO_ADDRESS), True),
(_make_receipt(contract_address=encode_hex(ZERO_ADDRESS)), False),
(_make_receipt(logs=[_make_log()]), True),
(_make_receipt(logs=[_make_log(_type="invalid")]), False),
),
)
def test_receipt_output_validation(validator, receipt, is_valid):
if is_valid:
validator.validate_outbound_receipt(receipt)
else:
with pytest.raises(ValidationError):
validator.validate_outbound_receipt(receipt)
def _make_block(number=0,
hash=ZERO_32BYTES,
parent_hash=ZERO_32BYTES,
nonce=ZERO_8BYTES,
sha3_uncles=ZERO_32BYTES,
logs_bloom=0,
transactions_root=ZERO_32BYTES,
receipts_root=ZERO_32BYTES,
state_root=ZERO_32BYTES,
miner=ZERO_ADDRESS,
difficulty=0,
total_difficulty=0,
size=0,
extra_data=ZERO_32BYTES,
gas_limit=30029122, # gas limit at London fork block 12965000 on mainnet
gas_used=21000,
timestamp=4000000,
transactions=None,
uncles=None,
base_fee_per_gas=1000000000):
block = {
"number": number,
"hash": hash,
"parent_hash": parent_hash,
"nonce": nonce,
"sha3_uncles": sha3_uncles,
"logs_bloom": logs_bloom,
"transactions_root": transactions_root,
"receipts_root": receipts_root,
"state_root": state_root,
"miner": miner,
"difficulty": difficulty,
"total_difficulty": total_difficulty,
"size": size,
"extra_data": extra_data,
"gas_limit": gas_limit,
"gas_used": gas_used,
"timestamp": timestamp,
"transactions": transactions or [],
"uncles": uncles or [],
"base_fee_per_gas": base_fee_per_gas,
}
return block
@pytest.mark.parametrize(
"block,is_valid",
(
(_make_block(), True),
(_make_block(number=-1), False),
(_make_block(number=1.0), False),
(_make_block(number=True), False),
(_make_block(hash=HASH32_AS_TEXT), False),
(_make_block(hash=HASH31), False),
(_make_block(parent_hash=HASH32_AS_TEXT), False),
(_make_block(parent_hash=HASH31), False),
(_make_block(nonce=-1), False),
(_make_block(nonce=1.0), False),
(_make_block(nonce=True), False),
(_make_block(sha3_uncles=HASH32_AS_TEXT), False),
(_make_block(logs_bloom=-1), False),
(_make_block(logs_bloom=1.0), False),
(_make_block(logs_bloom=True), False),
(_make_block(transactions_root=HASH32_AS_TEXT), False),
(_make_block(transactions_root=HASH31), False),
(_make_block(receipts_root=HASH32_AS_TEXT), False),
(_make_block(receipts_root=HASH31), False),
(_make_block(state_root=HASH32_AS_TEXT), False),
(_make_block(state_root=HASH31), False),
(_make_block(miner=encode_hex(ADDRESS_A)), False),
(_make_block(difficulty=-1), False),
(_make_block(difficulty=1.0), False),
(_make_block(difficulty=True), False),
(_make_block(total_difficulty=-1), False),
(_make_block(total_difficulty=1.0), False),
(_make_block(total_difficulty=True), False),
(_make_block(size=-1), False),
(_make_block(size=1.0), False),
(_make_block(size=True), False),
(_make_block(extra_data=HASH32_AS_TEXT), False),
(_make_block(extra_data=HASH31), False),
(_make_block(gas_limit=-1), False),
(_make_block(gas_limit=1.0), False),
(_make_block(gas_limit=True), False),
(_make_block(gas_used=-1), False),
(_make_block(gas_used=1.0), False),
(_make_block(gas_used=True), False),
(_make_block(timestamp=-1), False),
(_make_block(timestamp=1.0), False),
(_make_block(timestamp=True), False),
(_make_block(base_fee_per_gas=1000000000), True),
(_make_block(base_fee_per_gas=-1000000000), False),
(_make_block(base_fee_per_gas=1000000000.0), False),
(_make_block(base_fee_per_gas='1000000000'), False),
(_make_block(uncles=[ZERO_32BYTES]), True),
(_make_block(uncles=[ZERO_32BYTES, HASH32_AS_TEXT]), False),
(_make_block(transactions=[ZERO_32BYTES]), True),
(_make_block(transactions=[_make_legacy_txn()]), True),
(_make_block(transactions=[ZERO_32BYTES, _make_legacy_txn()]), False),
(_make_block(transactions=[_make_access_list_txn()]), True),
(_make_block(transactions=[ZERO_32BYTES, _make_access_list_txn()]), False),
(_make_block(transactions=[_make_dynamic_fee_txn()]), True),
(_make_block(transactions=[ZERO_32BYTES, _make_dynamic_fee_txn()]), False),
(_make_block(transactions=[ZERO_32BYTES, HASH32_AS_TEXT]), False),
)
)
def test_block_output_validation(validator, block, is_valid):
if is_valid:
validator.validate_outbound_block(block)
else:
with pytest.raises(ValidationError):
validator.validate_outbound_block(block)
@pytest.mark.parametrize(
"accounts,is_valid",
(
([ADDRESS_A], True),
([ADDRESS_A, encode_hex(ADDRESS_A)], False),
),
)
def test_accounts_output_validation(validator, accounts, is_valid):
if is_valid:
validator.validate_outbound_accounts(accounts)
else:
with pytest.raises(ValidationError):
validator.validate_outbound_accounts(accounts)
|
the-stack_0_13852 | import torch
from . import iou3d_cuda
def boxes_iou_bev(boxes_a, boxes_b):
"""Calculate boxes IoU in the bird view.
Args:
boxes_a (torch.Tensor): Input boxes a with shape (M, 5).
boxes_b (torch.Tensor): Input boxes b with shape (N, 5).
Returns:
ans_iou (torch.Tensor): IoU result with shape (M, N).
"""
ans_iou = boxes_a.new_zeros(
torch.Size((boxes_a.shape[0], boxes_b.shape[0])))
iou3d_cuda.boxes_iou_bev_gpu(boxes_a.contiguous(), boxes_b.contiguous(),
ans_iou)
return ans_iou
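# Illustrative sketch (not part of the original module): boxes_iou_bev takes
# (M, 5) and (N, 5) CUDA tensors of [x1, y1, x2, y2, ry] boxes and returns an
# (M, N) IoU matrix; the best-match reduction below is only an example.
def _example_best_match(boxes_a, boxes_b):
    """For each box in ``boxes_a``, return the IoU and index of its best match in ``boxes_b``."""
    ious = boxes_iou_bev(boxes_a, boxes_b)  # (M, N)
    best_iou, best_idx = ious.max(dim=1)    # both (M,)
    return best_iou, best_idx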
def nms_gpu(boxes, scores, thresh, pre_maxsize=None, post_max_size=None):
"""Nms function with gpu implementation.
Args:
boxes (torch.Tensor): Input boxes with the shape of [N, 5]
([x1, y1, x2, y2, ry]).
scores (torch.Tensor): Scores of boxes with the shape of [N].
thresh (int): Threshold.
pre_maxsize (int): Max size of boxes before nms. Default: None.
post_maxsize (int): Max size of boxes after nms. Default: None.
Returns:
torch.Tensor: Indexes after nms.
"""
order = scores.sort(0, descending=True)[1]
if pre_maxsize is not None:
order = order[:pre_maxsize]
boxes = boxes[order].contiguous()
keep = torch.zeros(boxes.size(0), dtype=torch.long)
num_out = iou3d_cuda.nms_gpu(boxes, keep, thresh, boxes.device.index)
keep = order[keep[:num_out].cuda(boxes.device)].contiguous()
if post_max_size is not None:
keep = keep[:post_max_size]
return keep
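# Illustrative sketch (not part of the original module): typical use of nms_gpu
# in a detection head; the 0.7 threshold and the size caps are assumptions.
def _example_nms(boxes, scores):
    keep = nms_gpu(boxes, scores, thresh=0.7, pre_maxsize=4096, post_max_size=500)
    return boxes[keep], scores[keep]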
def nms_weighted_gpu(boxes, scores, thresh, det_boxes=None, pre_maxsize=None, post_max_size=None):
order = scores.sort(0, descending=True)[1]
if pre_maxsize is not None:
order = order[:pre_maxsize]
boxes = boxes[order].contiguous()
if det_boxes is not None:
det_boxes = det_boxes[order].contiguous()
keep_boxes = torch.zeros_like(boxes)
keep_det_boxes = torch.zeros_like(det_boxes)
keep_scores = torch.zeros_like(scores)
keep = torch.zeros(boxes.size(0), dtype=torch.long)
keep_pos = 0
while(boxes.size(0)):
        ans_iou = boxes_iou_bev(boxes[0:1], boxes).squeeze(0)  # (1, N) -> (N,)
selected = ans_iou >= thresh
###Hard code demo/1013044.bin
selected[0] = True
###
weights = scores[selected] * ans_iou[selected]
boxes[0, :4] = (weights.unsqueeze(1) * boxes[selected, :4]).sum(0) / (weights.sum() + 1e-6)
keep_boxes[keep_pos] = boxes[0]
if det_boxes is not None:
            det_boxes[0, :4] = (weights.unsqueeze(1) * det_boxes[selected, :4]).sum(0) / (weights.sum() + 1e-6)
keep_det_boxes[keep_pos] = det_boxes[0]
keep_scores[keep_pos] = scores[0]
keep[keep_pos] = keep_pos
keep_pos += 1
boxes = boxes[~selected]
scores = scores[~selected]
det_boxes = det_boxes[~selected]
if post_max_size is not None:
keep_pos = min(keep_pos, post_max_size)
return keep[:keep_pos], keep_det_boxes, keep_scores
# selected = nms_gpu(boxes, scores, thresh, pre_maxsize=pre_maxsize, post_max_size=post_max_size)
# weighted_bboxs[:, :4] = (scores * ious * boxes[:, :4])
# weighted_bboxs = weighted_bboxs[selected]
def nms_normal_gpu(boxes, scores, thresh):
"""Normal non maximum suppression on GPU.
Args:
boxes (torch.Tensor): Input boxes with shape (N, 5).
scores (torch.Tensor): Scores of predicted boxes with shape (N).
thresh (torch.Tensor): Threshold of non maximum suppression.
Returns:
torch.Tensor: Remaining indices with scores in descending order.
"""
order = scores.sort(0, descending=True)[1]
boxes = boxes[order].contiguous()
keep = torch.zeros(boxes.size(0), dtype=torch.long)
num_out = iou3d_cuda.nms_normal_gpu(boxes, keep, thresh,
boxes.device.index)
return order[keep[:num_out].cuda(boxes.device)].contiguous()
|
the-stack_0_13853 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import site
import unittest
import paddle
import paddle.static as static
import tempfile
import subprocess
import numpy as np
from paddle.vision.transforms import Compose, Normalize
from paddle.utils.cpp_extension.extension_utils import run_cmd
from paddle.fluid.framework import _test_eager_guard
def custom_relu_dynamic(func, device, dtype, np_x, use_func=True):
paddle.set_device(device)
t = paddle.to_tensor(np_x, dtype=dtype)
t.stop_gradient = False
out = func(t) if use_func else paddle.nn.functional.relu(t)
out.stop_gradient = False
out.backward()
if t.grad is None:
return out.numpy(), t.grad
else:
return out.numpy(), t.grad.numpy()
def custom_relu_static(func,
device,
dtype,
np_x,
use_func=True,
test_infer=False):
paddle.enable_static()
paddle.set_device(device)
with static.scope_guard(static.Scope()):
with static.program_guard(static.Program()):
x = static.data(name='X', shape=[None, 8], dtype=dtype)
x.stop_gradient = False
out = func(x) if use_func else paddle.nn.functional.relu(x)
static.append_backward(out)
exe = static.Executor()
exe.run(static.default_startup_program())
# in static mode, x data has been covered by out
out_v = exe.run(static.default_main_program(),
feed={'X': np_x},
fetch_list=[out.name])
paddle.disable_static()
return out_v
def custom_relu_static_pe(func, device, dtype, np_x, use_func=True):
paddle.enable_static()
paddle.set_device(device)
    places = static.cpu_places() if device == 'cpu' else static.cuda_places()
with static.scope_guard(static.Scope()):
with static.program_guard(static.Program()):
x = static.data(name='X', shape=[None, 8], dtype=dtype)
x.stop_gradient = False
out = func(x) if use_func else paddle.nn.functional.relu(x)
static.append_backward(out)
exe = static.Executor()
exe.run(static.default_startup_program())
# in static mode, x data has been covered by out
compiled_prog = static.CompiledProgram(
static.default_main_program()).with_data_parallel(
loss_name=out.name, places=places)
out_v = exe.run(compiled_prog,
feed={'X': np_x},
fetch_list=[out.name])
paddle.disable_static()
return out_v
def custom_relu_static_inference(func, device, np_data, np_label, path_prefix):
paddle.set_device(device)
with static.scope_guard(static.Scope()):
with static.program_guard(static.Program()):
# simple module
data = static.data(name='data',
shape=[None, 1, 28, 28],
dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
hidden = static.nn.fc(data, size=128)
hidden = func(hidden)
hidden = static.nn.fc(hidden, size=128)
predict = static.nn.fc(hidden, size=10, activation='softmax')
loss = paddle.nn.functional.cross_entropy(input=hidden, label=label)
avg_loss = paddle.mean(loss)
opt = paddle.optimizer.SGD(learning_rate=0.1)
opt.minimize(avg_loss)
# run start up model
exe = static.Executor()
exe.run(static.default_startup_program())
# train
for i in range(4):
avg_loss_v = exe.run(static.default_main_program(),
feed={
'data': np_data,
'label': np_label
},
fetch_list=[avg_loss])
# save inference model
static.save_inference_model(path_prefix, [data], [predict], exe)
# get train predict value
predict_v = exe.run(static.default_main_program(),
feed={
'data': np_data,
'label': np_label
},
fetch_list=[predict])
return predict_v
def custom_relu_double_grad_dynamic(func, device, dtype, np_x, use_func=True):
paddle.set_device(device)
t = paddle.to_tensor(np_x, dtype=dtype, stop_gradient=False)
out = func(t) if use_func else paddle.nn.functional.relu(t)
out.stop_gradient = False
dx = paddle.grad(outputs=[out],
inputs=[t],
create_graph=True,
retain_graph=True)
dx[0].backward()
assert dx[0].grad is not None
return dx[0].numpy(), dx[0].grad.numpy()
class TestNewCustomOpSetUpInstall(unittest.TestCase):
def setUp(self):
cur_dir = os.path.dirname(os.path.abspath(__file__))
# compile, install the custom op egg into site-packages under background
if os.name == 'nt':
cmd = 'cd /d {} && python custom_relu_setup.py install'.format(
cur_dir)
else:
cmd = 'cd {} && {} custom_relu_setup.py install'.format(
cur_dir, sys.executable)
run_cmd(cmd)
        # NOTE(Aurelius84): Normally, users would not need the following code.
        # But we simulate `pip install` within the current process, so the running
        # interpreter does not notice that sys.path has been updated; update it manually.
# See: https://stackoverflow.com/questions/56974185/import-runtime-installed-module-using-pip-in-python-3
if os.name == 'nt':
# NOTE(zhouwei25): getsitepackages on windows will return a list: [python install dir, site packages dir]
site_dir = site.getsitepackages()[1]
else:
site_dir = site.getsitepackages()[0]
custom_egg_path = [
x for x in os.listdir(site_dir) if 'custom_relu_module_setup' in x
]
assert len(custom_egg_path
) == 1, "Matched egg number is %d." % len(custom_egg_path)
sys.path.append(os.path.join(site_dir, custom_egg_path[0]))
# usage: import the package directly
import custom_relu_module_setup
        # `custom_relu_dup` is the same as `custom_relu`
self.custom_ops = [
custom_relu_module_setup.custom_relu,
custom_relu_module_setup.custom_relu_dup
]
self.dtypes = ['float32', 'float64']
if paddle.is_compiled_with_cuda():
self.dtypes.append('float16')
self.devices = ['cpu']
if paddle.is_compiled_with_cuda():
self.devices.append('gpu')
# config seed
SEED = 2021
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def test_static(self):
for device in self.devices:
for dtype in self.dtypes:
if device == 'cpu' and dtype == 'float16':
continue
x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
for custom_op in self.custom_ops:
out = custom_relu_static(custom_op, device, dtype, x)
pd_out = custom_relu_static(custom_op, device, dtype, x,
False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
def test_static_pe(self):
for device in self.devices:
for dtype in self.dtypes:
if device == 'cpu' and dtype == 'float16':
continue
x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
for custom_op in self.custom_ops:
out = custom_relu_static_pe(custom_op, device, dtype, x)
pd_out = custom_relu_static_pe(custom_op, device, dtype, x,
False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
def func_dynamic(self):
for device in self.devices:
for dtype in self.dtypes:
if device == 'cpu' and dtype == 'float16':
continue
x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
for custom_op in self.custom_ops:
out, x_grad = custom_relu_dynamic(custom_op, device, dtype,
x)
pd_out, pd_x_grad = custom_relu_dynamic(
custom_op, device, dtype, x, False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
self.assertTrue(
np.array_equal(x_grad, pd_x_grad),
"custom op x grad: {},\n paddle api x grad: {}".format(
x_grad, pd_x_grad))
def test_dynamic(self):
with _test_eager_guard():
self.func_dynamic()
self.func_dynamic()
def test_static_save_and_load_inference_model(self):
paddle.enable_static()
np_data = np.random.random((1, 1, 28, 28)).astype("float32")
np_label = np.random.random((1, 1)).astype("int64")
path_prefix = "custom_op_inference/custom_relu"
for device in self.devices:
predict = custom_relu_static_inference(self.custom_ops[0], device,
np_data, np_label,
path_prefix)
# load inference model
with static.scope_guard(static.Scope()):
exe = static.Executor()
[inference_program, feed_target_names,
fetch_targets] = static.load_inference_model(path_prefix, exe)
predict_infer = exe.run(inference_program,
feed={feed_target_names[0]: np_data},
fetch_list=fetch_targets)
self.assertTrue(
np.array_equal(predict, predict_infer),
"custom op predict: {},\n custom op infer predict: {}".
format(predict, predict_infer))
paddle.disable_static()
def test_static_save_and_run_inference_predictor(self):
paddle.enable_static()
np_data = np.random.random((1, 1, 28, 28)).astype("float32")
np_label = np.random.random((1, 1)).astype("int64")
path_prefix = "custom_op_inference/custom_relu"
from paddle.inference import Config
from paddle.inference import create_predictor
for device in self.devices:
predict = custom_relu_static_inference(self.custom_ops[0], device,
np_data, np_label,
path_prefix)
# load inference model
config = Config(path_prefix + ".pdmodel",
path_prefix + ".pdiparams")
predictor = create_predictor(config)
input_tensor = predictor.get_input_handle(
predictor.get_input_names()[0])
input_tensor.reshape(np_data.shape)
input_tensor.copy_from_cpu(np_data.copy())
predictor.run()
output_tensor = predictor.get_output_handle(
predictor.get_output_names()[0])
predict_infer = output_tensor.copy_to_cpu()
self.assertTrue(
                np.isclose(predict, predict_infer, rtol=5e-5).all(),
"custom op predict: {},\n custom op infer predict: {}".format(
predict, predict_infer))
paddle.disable_static()
def test_func_double_grad_dynamic(self):
for device in self.devices:
for dtype in self.dtypes:
if device == 'cpu' and dtype == 'float16':
continue
x = np.random.uniform(-1, 1, [4, 8]).astype(dtype)
out, dx_grad = custom_relu_double_grad_dynamic(
self.custom_ops[0], device, dtype, x)
pd_out, pd_dx_grad = custom_relu_double_grad_dynamic(
self.custom_ops[0], device, dtype, x, False)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
self.assertTrue(
np.array_equal(dx_grad, pd_dx_grad),
"custom op dx grad: {},\n paddle api dx grad: {}".format(
dx_grad, pd_dx_grad))
def test_with_dataloader(self):
for device in self.devices:
paddle.set_device(device)
# data loader
transform = Compose(
[Normalize(mean=[127.5], std=[127.5], data_format='CHW')])
train_dataset = paddle.vision.datasets.MNIST(mode='train',
transform=transform)
train_loader = paddle.io.DataLoader(train_dataset,
batch_size=64,
shuffle=True,
drop_last=True,
num_workers=0)
for batch_id, (image, _) in enumerate(train_loader()):
out = self.custom_ops[0](image)
pd_out = paddle.nn.functional.relu(image)
self.assertTrue(
np.array_equal(out, pd_out),
"custom op out: {},\n paddle api out: {}".format(
out, pd_out))
if batch_id == 5:
break
if __name__ == '__main__':
unittest.main()
|
the-stack_0_13855 | from dataclasses import asdict, dataclass
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple
from .regime import Regime
class ClassementState(Enum):
ACTIVE = 'ACTIVE'
SUPPRIMEE = 'SUPPRIMEE'
@dataclass
class Classement:
rubrique: str
regime: Regime
alinea: Optional[str] = None
state: ClassementState = ClassementState.ACTIVE
@staticmethod
def from_dict(dict_: Dict[str, Any]) -> 'Classement':
dict_ = dict_.copy()
dict_['rubrique'] = str(dict_['rubrique'])
dict_['regime'] = Regime(dict_['regime'])
dict_['alinea'] = dict_.get('alinea')
dict_['state'] = ClassementState(dict_.get('state') or ClassementState.ACTIVE.value)
return Classement(**dict_)
def to_dict(self) -> Dict[str, Any]:
res = asdict(self)
res['regime'] = self.regime.value
res['state'] = self.state.value
return res
@dataclass
class ClassementWithAlineas:
rubrique: str
regime: Regime
alineas: List[str]
@staticmethod
def from_dict(dict_: Dict[str, Any]) -> 'ClassementWithAlineas':
dict_ = dict_.copy()
dict_['rubrique'] = str(dict_['rubrique'])
dict_['regime'] = Regime(dict_['regime'])
return ClassementWithAlineas(**dict_)
def to_dict(self) -> Dict[str, Any]:
res = asdict(self)
res['regime'] = self.regime.value
return res
def group_classements_by_alineas(classements: List[Classement]) -> List[ClassementWithAlineas]:
rubrique_regime_to_alineas: Dict[Tuple[str, Regime], List[str]] = {}
for classement in classements:
key = (classement.rubrique, classement.regime)
if key not in rubrique_regime_to_alineas:
rubrique_regime_to_alineas[key] = []
if classement.alinea:
rubrique_regime_to_alineas[key].append(classement.alinea)
return [ClassementWithAlineas(rub, reg, als) for (rub, reg), als in rubrique_regime_to_alineas.items()]
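# Illustrative sketch (not part of the original module): two classements sharing
# the same (rubrique, regime) pair collapse into a single ClassementWithAlineas
# whose alineas are concatenated; Regime.A and rubrique '1510' are assumed values.
def _example_grouping() -> List[ClassementWithAlineas]:
    classements = [
        Classement(rubrique='1510', regime=Regime.A, alinea='1'),
        Classement(rubrique='1510', regime=Regime.A, alinea='2'),
    ]
    grouped = group_classements_by_alineas(classements)
    # grouped[0].alineas == ['1', '2']
    return grouped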
def ensure_rubrique(candidate: str) -> str:
if len(candidate) != 4 or candidate[0] not in '1234':
raise ValueError(f'Incorrect rubrique value, got {candidate}')
try:
int(candidate)
except ValueError:
raise ValueError(f'Incorrect rubrique value, got {candidate}')
return candidate
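# Illustrative note (not part of the original module): ensure_rubrique('1510')
# returns '1510' unchanged, while ensure_rubrique('151') (wrong length) and
# ensure_rubrique('abcd') (not starting with 1-4) both raise ValueError.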
|
the-stack_0_13856 | from __future__ import print_function
import argparse
import os
import time
import numpy as np
import torch
import torch.optim as optim
import torchvision.datasets as dset
import torchvision.transforms as tforms
from torchvision.utils import save_image
import torch.utils.data as data
from torch.utils.data import Dataset
from PIL import Image
import os.path
import errno
import codecs
# needed by ColorMNIST.download below (raw MNIST readers from torchvision)
from torchvision.datasets.mnist import read_image_file, read_label_file
import lib.layers as layers
import lib.utils as utils
import lib.multiscale_parallel as multiscale_parallel
import lib.modules as modules
import lib.thops as thops
from train_misc import standard_normal_logprob
from train_misc import set_cnf_options, count_nfe, count_parameters, count_total_time, count_nfe_gate
from train_misc import add_spectral_norm, spectral_norm_power_iteration
from train_misc import create_regularization_fns, get_regularization, append_regularization_to_log
from tensorboardX import SummaryWriter
# go fast boi!!
torch.backends.cudnn.benchmark = True
SOLVERS = ["dopri5", "bdf", "rk4", "midpoint", 'adams', 'explicit_adams']
GATES = ["cnn1", "cnn2", "rnn"]
parser = argparse.ArgumentParser("Continuous Normalizing Flow")
parser.add_argument("--data", choices=["colormnist", "mnist", "svhn", "cifar10", 'lsun_church'], type=str, default="mnist")
parser.add_argument("--dims", type=str, default="8,32,32,8")
parser.add_argument("--strides", type=str, default="2,2,1,-2,-2")
parser.add_argument("--num_blocks", type=int, default=1, help='Number of stacked CNFs.')
parser.add_argument("--conv", type=eval, default=True, choices=[True, False])
parser.add_argument(
"--layer_type", type=str, default="ignore",
choices=["ignore", "concat", "concat_v2", "squash", "concatsquash", "concatcoord", "hyper", "blend"]
)
parser.add_argument("--divergence_fn", type=str, default="approximate", choices=["brute_force", "approximate"])
parser.add_argument(
"--nonlinearity", type=str, default="softplus", choices=["tanh", "relu", "softplus", "elu", "swish"]
)
parser.add_argument("--seed", type=int, default=0)
parser.add_argument('--solver', type=str, default='dopri5', choices=SOLVERS)
parser.add_argument('--atol', type=float, default=1e-5)
parser.add_argument('--rtol', type=float, default=1e-5)
parser.add_argument("--step_size", type=float, default=None, help="Optional fixed step size.")
parser.add_argument('--gate', type=str, default='cnn1', choices=GATES)
parser.add_argument('--scale', type=float, default=1.0)
parser.add_argument('--scale_fac', type=float, default=1.0)
parser.add_argument('--scale_std', type=float, default=1.0)
parser.add_argument('--eta', default=0.1, type=float,
                    help='tuning parameter that allows us to trade off the competing goals of '
'minimizing the prediction loss and maximizing the gate rewards ')
parser.add_argument('--rl-weight', default=0.01, type=float,
help='rl weight')
parser.add_argument('--gamma', default=0.99, type=float,
help='discount factor, default: (0.99)')
parser.add_argument('--test_solver', type=str, default=None, choices=SOLVERS + [None])
parser.add_argument('--test_atol', type=float, default=None)
parser.add_argument('--test_rtol', type=float, default=None)
parser.add_argument("--imagesize", type=int, default=None)
parser.add_argument("--alpha", type=float, default=1e-6)
parser.add_argument('--time_length', type=float, default=1.0)
parser.add_argument('--train_T', type=eval, default=True)
parser.add_argument("--num_epochs", type=int, default=500)
parser.add_argument("--batch_size", type=int, default=200)
parser.add_argument(
"--batch_size_schedule", type=str, default="", help="Increases the batchsize at every given epoch, dash separated."
)
parser.add_argument("--test_batch_size", type=int, default=200)
parser.add_argument("--lr", type=float, default=1e-3)
parser.add_argument("--warmup_iters", type=float, default=1000)
parser.add_argument("--weight_decay", type=float, default=0.0)
parser.add_argument("--spectral_norm_niter", type=int, default=10)
parser.add_argument("--weight_y", type=float, default=0.5)
parser.add_argument("--annealing_std", type=eval, default=False, choices=[True, False])
parser.add_argument("--y_class", type=int, default=10)
parser.add_argument("--y_color", type=int, default=10)
parser.add_argument("--add_noise", type=eval, default=True, choices=[True, False])
parser.add_argument("--batch_norm", type=eval, default=False, choices=[True, False])
parser.add_argument('--residual', type=eval, default=False, choices=[True, False])
parser.add_argument('--autoencode', type=eval, default=False, choices=[True, False])
parser.add_argument('--rademacher', type=eval, default=True, choices=[True, False])
parser.add_argument('--spectral_norm', type=eval, default=False, choices=[True, False])
parser.add_argument('--multiscale', type=eval, default=False, choices=[True, False])
parser.add_argument('--parallel', type=eval, default=False, choices=[True, False])
parser.add_argument('--conditional', type=eval, default=False, choices=[True, False])
parser.add_argument('--controlled_tol', type=eval, default=False, choices=[True, False])
parser.add_argument("--train_mode", choices=["semisup", "sup", "unsup"], type=str, default="semisup")
parser.add_argument("--condition_ratio", type=float, default=0.5)
parser.add_argument("--dropout_rate", type=float, default=0.0)
parser.add_argument("--cond_nn", choices=["linear", "mlp"], type=str, default="linear")
# Regularizations
parser.add_argument('--l1int', type=float, default=None, help="int_t ||f||_1")
parser.add_argument('--l2int', type=float, default=None, help="int_t ||f||_2")
parser.add_argument('--dl2int', type=float, default=None, help="int_t ||f^T df/dt||_2")
parser.add_argument('--JFrobint', type=float, default=None, help="int_t ||df/dx||_F")
parser.add_argument('--JdiagFrobint', type=float, default=None, help="int_t ||df_i/dx_i||_F")
parser.add_argument('--JoffdiagFrobint', type=float, default=None, help="int_t ||df/dx - df_i/dx_i||_F")
parser.add_argument("--time_penalty", type=float, default=0, help="Regularization on the end_time.")
parser.add_argument(
"--max_grad_norm", type=float, default=1e10,
help="Max norm of graidents (default is just stupidly high to avoid any clipping)"
)
parser.add_argument("--begin_epoch", type=int, default=1)
parser.add_argument("--resume", type=str, default=None)
parser.add_argument("--save", type=str, default="experiments/cnf")
parser.add_argument("--val_freq", type=int, default=1)
parser.add_argument("--log_freq", type=int, default=1)
# for disentanglement
parser.add_argument('--beta', default=0.01, type=float, help='disentanglement weight')
args = parser.parse_args()
import lib.odenvp_conditional_rl_2cond_multiscale as odenvp
# set seed
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# logger
utils.makedirs(args.save)
logger = utils.get_logger(logpath=os.path.join(args.save, 'logs'), filepath=os.path.abspath(__file__)) # write to log file
writer = SummaryWriter(os.path.join(args.save, 'tensorboard')) # write to tensorboard
if args.layer_type == "blend":
logger.info("!! Setting time_length from None to 1.0 due to use of Blend layers.")
args.time_length = 1.0
logger.info(args)
class ColorMNIST(data.Dataset):
"""
ColorMNIST
"""
urls = [
'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
raw_folder = 'raw'
processed_folder = 'processed'
training_file = 'training.pt'
test_file = 'test.pt'
def __init__(self, root, train=True, transform=None, target_transform=None, download=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
self.train_data, self.train_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.training_file))
self.train_data = np.tile(self.train_data[:, :, :, np.newaxis], 3)
else:
self.test_data, self.test_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.test_file))
self.test_data = np.tile(self.test_data[:, :, :, np.newaxis], 3)
self.pallette = [[31, 119, 180],
[255, 127, 14],
[44, 160, 44],
[214, 39, 40],
[148, 103, 189],
[140, 86, 75],
[227, 119, 194],
[127, 127, 127],
[188, 189, 34],
[23, 190, 207]]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
img, target = self.train_data[index].copy(), self.train_labels[index]
else:
img, target = self.test_data[index].copy(), self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
y_color_digit = np.random.randint(0, args.y_color)
c_digit = self.pallette[y_color_digit]
img[:, :, 0] = img[:, :, 0] / 255 * c_digit[0]
img[:, :, 1] = img[:, :, 1] / 255 * c_digit[1]
img[:, :, 2] = img[:, :, 2] / 255 * c_digit[2]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, [target,torch.from_numpy(np.array(y_color_digit))]
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import gzip
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for url in self.urls:
print('Downloading ' + url)
data = urllib.request.urlopen(url)
filename = url.rpartition('/')[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
with open(file_path, 'wb') as f:
f.write(data.read())
with open(file_path.replace('.gz', ''), 'wb') as out_f, \
gzip.GzipFile(file_path) as zip_f:
out_f.write(zip_f.read())
os.unlink(file_path)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = 'train' if self.train is True else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
def add_noise(x):
"""
[0, 1] -> [0, 255] -> add noise -> [0, 1]
"""
if args.add_noise:
noise = x.new().resize_as_(x).uniform_()
x = x * 255 + noise
x = x / 256
return x
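# Worked example of the dequantization above (pure arithmetic, no new behaviour):
#   x = 0.5           -> x * 255 = 127.5
#   + u, u ~ U[0, 1)  -> value in [127.5, 128.5)
#   / 256             -> value in [127.5/256, 128.5/256)
# i.e. every 8-bit level is smeared uniformly over a bin of width 1/256.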
def update_lr(optimizer, itr):
iter_frac = min(float(itr + 1) / max(args.warmup_iters, 1), 1.0)
lr = args.lr * iter_frac
for param_group in optimizer.param_groups:
param_group["lr"] = lr
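# Worked example of the linear warm-up above, assuming the defaults lr=1e-3 and
# warmup_iters=1000: itr=0 gives 1e-3 * 1/1000 = 1e-6, itr=499 gives 5e-4, and
# from itr=999 onwards the learning rate stays at the full 1e-3.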
def update_scale_std(model, epoch):
epoch_frac = 1.0 - float(epoch - 1) / max(args.num_epochs + 1, 1)
scale_std = args.scale_std * epoch_frac
model.set_scale_std(scale_std)
def get_train_loader(train_set, epoch):
if args.batch_size_schedule != "":
epochs = [0] + list(map(int, args.batch_size_schedule.split("-")))
n_passed = sum(np.array(epochs) <= epoch)
current_batch_size = int(args.batch_size * n_passed)
else:
current_batch_size = args.batch_size
train_loader = torch.utils.data.DataLoader(
dataset=train_set, batch_size=current_batch_size, shuffle=True, drop_last=True, pin_memory=True
)
logger.info("===> Using batch size {}. Total {} iterations/epoch.".format(current_batch_size, len(train_loader)))
return train_loader
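# Worked example of --batch_size_schedule, assuming "20-40" with batch_size=200:
# epochs before 20 use 200, epochs in [20, 40) use 400 and epochs >= 40 use 600,
# since n_passed counts how many boundaries (including the implicit 0) have passed.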
def get_dataset(args):
trans = lambda im_size: tforms.Compose([tforms.Resize(im_size), tforms.ToTensor(), add_noise])
if args.data == "mnist":
im_dim = 1
im_size = 28 if args.imagesize is None else args.imagesize
train_set = dset.MNIST(root="../data", train=True, transform=trans(im_size), download=True)
test_set = dset.MNIST(root="../data", train=False, transform=trans(im_size), download=True)
    elif args.data == "colormnist":
im_dim = 3
im_size = 28 if args.imagesize is None else args.imagesize
train_set = ColorMNIST(root="../data", train=True, transform=trans(im_size), download=True)
test_set = ColorMNIST(root="../data", train=False, transform=trans(im_size), download=True)
elif args.data == "svhn":
im_dim = 3
im_size = 32 if args.imagesize is None else args.imagesize
train_set = dset.SVHN(root="../data", split="train", transform=trans(im_size), download=True)
test_set = dset.SVHN(root="../data", split="test", transform=trans(im_size), download=True)
elif args.data == "cifar10":
im_dim = 3
im_size = 32 if args.imagesize is None else args.imagesize
train_set = dset.CIFAR10(
root="../data", train=True, transform=tforms.Compose([
tforms.Resize(im_size),
tforms.RandomHorizontalFlip(),
tforms.ToTensor(),
add_noise,
]), download=True
)
test_set = dset.CIFAR10(root="../data", train=False, transform=trans(im_size), download=True)
elif args.data == 'celeba':
im_dim = 3
im_size = 64 if args.imagesize is None else args.imagesize
train_set = dset.CelebA(
train=True, transform=tforms.Compose([
tforms.ToPILImage(),
tforms.Resize(im_size),
tforms.RandomHorizontalFlip(),
tforms.ToTensor(),
add_noise,
])
)
test_set = dset.CelebA(
train=False, transform=tforms.Compose([
tforms.ToPILImage(),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
elif args.data == 'lsun_church':
im_dim = 3
im_size = 64 if args.imagesize is None else args.imagesize
train_set = dset.LSUN(
'../data', ['church_outdoor_train'], transform=tforms.Compose([
tforms.Resize(96),
tforms.RandomCrop(64),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
test_set = dset.LSUN(
'../data', ['church_outdoor_val'], transform=tforms.Compose([
tforms.Resize(96),
tforms.RandomCrop(64),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
elif args.data == 'imagenet_64':
im_dim = 3
im_size = 64 if args.imagesize is None else args.imagesize
train_set = dset.ImageFolder(
train=True, transform=tforms.Compose([
tforms.ToPILImage(),
tforms.Resize(im_size),
tforms.RandomHorizontalFlip(),
tforms.ToTensor(),
add_noise,
])
)
test_set = dset.ImageFolder(
train=False, transform=tforms.Compose([
tforms.ToPILImage(),
tforms.Resize(im_size),
tforms.ToTensor(),
add_noise,
])
)
data_shape = (im_dim, im_size, im_size)
if not args.conv:
data_shape = (im_dim * im_size * im_size,)
test_loader = torch.utils.data.DataLoader(
dataset=test_set, batch_size=args.test_batch_size, shuffle=False, drop_last=True
)
return train_set, test_loader, data_shape
def compute_bits_per_dim(x, model):
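    # Change-of-variables objective: the flow maps x -> z with log-density correction
    # delta_logp, so logp(x) = logp(z) - delta_logp under a standard-normal prior on z.
    # Bits/dim converts the per-dimension negative log-likelihood from nats to bits and
    # adds the log(256) offset that accounts for the 8-bit dequantization of the inputs.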
zero = torch.zeros(x.shape[0], 1).to(x)
    # Don't use data parallelism if batch size is small.
# if x.shape[0] < 200:
# model = model.module
z, delta_logp, atol, rtol, logp_actions, nfe = model(x, zero) # run model forward
logpz = standard_normal_logprob(z).view(z.shape[0], -1).sum(1, keepdim=True) # logp(z)
logpx = logpz - delta_logp
logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches
bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2)
return bits_per_dim, atol, rtol, logp_actions, nfe
def compute_bits_per_dim_conditional(x, y, y_color, model):
zero = torch.zeros(x.shape[0], 1).to(x)
y_onehot = thops.onehot(y, num_classes=model.module.y_class).to(x)
y_onehot_color = thops.onehot(y_color, num_classes=model.module.y_color).to(x)
    # Don't use data parallelism if batch size is small.
# if x.shape[0] < 200:
# model = model.module
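    # The conditional model returns per-scale supervised latents z plus unsupervised
    # latents z_unsup; below, the first half of each supervised block is treated as the
    # class code and the second half as the color code before evaluating their priors.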
z, z_unsup, delta_logp, atol, rtol, logp_actions, nfe = model(x, zero) # run model forward
z_unsup = torch.cat(z_unsup, 1)
z_sup_class = [o[:,:int(np.prod(o.size()[1:])*0.5)] for o in z]
z_sup_class = torch.cat(z_sup_class,1)
z_sup_color = [o[:,int(np.prod(o.size()[1:])*0.5):] for o in z]
z_sup_color = torch.cat(z_sup_color,1)
# prior
mean, logs = model.module._prior(y_onehot)
mean_color, logs_color = model.module._prior_color(y_onehot_color)
logpz_sup = modules.GaussianDiag.logp(mean, logs, z_sup_class).view(-1,1) # logp(z)_sup
beta_logpz_sup = logpz_sup * (1.0 - args.beta * torch.exp(logpz_sup) / torch.tensor(model.module.y_class).to(logpz_sup))
logpz_color_sup = modules.GaussianDiag.logp(mean_color, logs_color, z_sup_color).view(-1,1) # logp(z)_color_sup
beta_logpz_color_sup = logpz_color_sup * (1.0 - args.beta * torch.exp(logpz_color_sup) / torch.tensor(model.module.y_color).to(logpz_color_sup))
logpz_unsup = standard_normal_logprob(z_unsup).view(z_unsup.shape[0], -1).sum(1, keepdim=True)
logpz = beta_logpz_sup + beta_logpz_color_sup + logpz_unsup
logpx = logpz - delta_logp
logpx_per_dim = torch.sum(logpx) / x.nelement() # averaged over batches
bits_per_dim = -(logpx_per_dim - np.log(256)) / np.log(2)
# dropout
if args.dropout_rate > 0:
z_sup_class = model.module.dropout(z_sup_class)
z_sup_color = model.module.dropout_color(z_sup_color)
# compute xentropy loss
y_logits = model.module.project_class(z_sup_class)
loss_xent = model.module.loss_class(y_logits, y.to(x.get_device()))
y_predicted = np.argmax(y_logits.cpu().detach().numpy(), axis=1)
y_logits_color = model.module.project_color(z_sup_color)
loss_xent_color = model.module.loss_class(y_logits_color, y_color.to(x.get_device()))
y_color_predicted = np.argmax(y_logits_color.cpu().detach().numpy(), axis=1)
return bits_per_dim, loss_xent, loss_xent_color, y_predicted, y_color_predicted, atol, rtol, logp_actions, nfe
def create_model(args, data_shape, regularization_fns):
hidden_dims = tuple(map(int, args.dims.split(",")))
strides = tuple(map(int, args.strides.split(",")))
if args.multiscale:
model = odenvp.ODENVP(
(args.batch_size, *data_shape),
n_blocks=args.num_blocks,
intermediate_dims=hidden_dims,
nonlinearity=args.nonlinearity,
alpha=args.alpha,
cnf_kwargs={"T": args.time_length, "train_T": args.train_T, "regularization_fns": regularization_fns, "solver": args.solver, "atol": args.atol, "rtol": args.rtol, "scale": args.scale, "scale_fac": args.scale_fac, "scale_std": args.scale_std, "gate": args.gate},
condition_ratio=args.condition_ratio * 2.,
dropout_rate=args.dropout_rate,
cond_nn=args.cond_nn,
y_class = args.y_class,
y_color = args.y_color)
elif args.parallel:
model = multiscale_parallel.MultiscaleParallelCNF(
(args.batch_size, *data_shape),
n_blocks=args.num_blocks,
intermediate_dims=hidden_dims,
alpha=args.alpha,
time_length=args.time_length,
)
else:
if args.autoencode:
def build_cnf():
autoencoder_diffeq = layers.AutoencoderDiffEqNet(
hidden_dims=hidden_dims,
input_shape=data_shape,
strides=strides,
conv=args.conv,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
odefunc = layers.AutoencoderODEfunc(
autoencoder_diffeq=autoencoder_diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
cnf = layers.CNF(
odefunc=odefunc,
T=args.time_length,
regularization_fns=regularization_fns,
solver=args.solver,
)
return cnf
else:
def build_cnf():
diffeq = layers.ODEnet(
hidden_dims=hidden_dims,
input_shape=data_shape,
strides=strides,
conv=args.conv,
layer_type=args.layer_type,
nonlinearity=args.nonlinearity,
)
odefunc = layers.ODEfunc(
diffeq=diffeq,
divergence_fn=args.divergence_fn,
residual=args.residual,
rademacher=args.rademacher,
)
cnf = layers.CNF(
odefunc=odefunc,
T=args.time_length,
train_T=args.train_T,
regularization_fns=regularization_fns,
solver=args.solver,
)
return cnf
chain = [layers.LogitTransform(alpha=args.alpha)] if args.alpha > 0 else [layers.ZeroMeanTransform()]
chain = chain + [build_cnf() for _ in range(args.num_blocks)]
if args.batch_norm:
chain.append(layers.MovingBatchNorm2d(data_shape[0]))
model = layers.SequentialFlow(chain)
return model
if __name__ == "__main__":
    # get device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
cvt = lambda x: x.type(torch.float32).to(device, non_blocking=True)
# load dataset
train_set, test_loader, data_shape = get_dataset(args)
# build model
regularization_fns, regularization_coeffs = create_regularization_fns(args)
model = create_model(args, data_shape, regularization_fns)
if args.spectral_norm: add_spectral_norm(model, logger)
set_cnf_options(args, model)
logger.info(model)
logger.info("Number of trainable parameters: {}".format(count_parameters(model)))
writer.add_text('info', "Number of trainable parameters: {}".format(count_parameters(model)))
# optimizer
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
# set initial iter
itr = 1
# set the meters
time_epoch_meter = utils.RunningAverageMeter(0.97)
time_meter = utils.RunningAverageMeter(0.97)
loss_meter = utils.RunningAverageMeter(0.97) # track total loss
nll_meter = utils.RunningAverageMeter(0.97) # track negative log-likelihood
xent_meter = utils.RunningAverageMeter(0.97) # track xentropy score
xent_color_meter = utils.RunningAverageMeter(0.97) # track xentropy score
error_meter = utils.RunningAverageMeter(0.97) # track error score
error_color_meter = utils.RunningAverageMeter(0.97)
steps_meter = utils.RunningAverageMeter(0.97)
grad_meter = utils.RunningAverageMeter(0.97)
tt_meter = utils.RunningAverageMeter(0.97)
# restore parameters
if args.resume is not None:
checkpt = torch.load(args.resume, map_location=lambda storage, loc: storage)
model.load_state_dict(checkpt["state_dict"])
if "optim_state_dict" in checkpt.keys():
optimizer.load_state_dict(checkpt["optim_state_dict"])
# Manually move optimizer state to device.
for state in optimizer.state.values():
for k, v in state.items():
if torch.is_tensor(v):
state[k] = cvt(v)
args.begin_epoch = checkpt['epoch'] + 1
itr = checkpt['iter'] + 1
time_epoch_meter.set(checkpt['epoch_time_avg'])
time_meter.set(checkpt['time_train'])
loss_meter.set(checkpt['loss_train'])
nll_meter.set(checkpt['bits_per_dim_train'])
xent_meter.set(checkpt['xent_train'])
xent_color_meter.set(checkpt['xent_train_color'])
error_meter.set(checkpt['error_train'])
error_color_meter.set(checkpt['error_train_color'])
steps_meter.set(checkpt['nfe_train'])
grad_meter.set(checkpt['grad_train'])
tt_meter.set(checkpt['total_time_train'])
if torch.cuda.is_available():
model = torch.nn.DataParallel(model).cuda()
# For visualization.
if args.conditional:
fixed_y = torch.from_numpy(np.arange(model.module.y_class)).repeat(model.module.y_class).type(torch.long).to(device, non_blocking=True)
fixed_y_onehot = thops.onehot(fixed_y, num_classes=model.module.y_class)
fixed_y_color = torch.from_numpy(np.arange(model.module.y_color)).repeat(model.module.y_color).type(torch.long).to(device, non_blocking=True)
fixed_y_onehot_color = thops.onehot(fixed_y_color, num_classes=model.module.y_color)
with torch.no_grad():
mean, logs = model.module._prior(fixed_y_onehot)
mean_color, logs_color = model.module._prior_color(fixed_y_onehot_color)
fixed_z_sup = modules.GaussianDiag.sample(mean, logs)
fixed_z_color_sup = modules.GaussianDiag.sample(mean_color, logs_color)
dim_unsup = np.prod(data_shape) - np.prod(fixed_z_sup.shape[1:]) - np.prod(fixed_z_color_sup.shape[1:])
fixed_z_unsup = cvt(torch.randn(model.module.y_class**2, dim_unsup))
a_sup = fixed_z_sup.shape[1] // (2**(model.module.n_scale - 1))
a_color_sup = fixed_z_color_sup.shape[1] // (2**(model.module.n_scale - 1))
a_unsup = fixed_z_unsup.shape[1] // (2**(model.module.n_scale - 1))
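            # Rebuild the multiscale latent layout: for each scale, concatenate that
            # scale's class chunk, color chunk and unsupervised chunk in order, so the
            # fixed visualization codes follow the ordering the flow expects on reverse.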
fixed_z = []
start_sup = 0; start_color_sup = 0; start_unsup = 0
for ns in range(model.module.n_scale, 1, -1):
end_sup = start_sup + (2**(ns-2))*a_sup
end_color_sup = start_color_sup + (2**(ns-2))*a_color_sup
end_unsup = start_unsup + (2**(ns-2))*a_unsup
fixed_z.append(fixed_z_sup[:,start_sup:end_sup])
fixed_z.append(fixed_z_color_sup[:,start_color_sup:end_color_sup])
fixed_z.append(fixed_z_unsup[:,start_unsup:end_unsup])
start_sup = end_sup; start_color_sup = end_color_sup; start_unsup = end_unsup
end_sup = start_sup + a_sup
end_color_sup = start_color_sup + a_color_sup
end_unsup = start_unsup + a_unsup
fixed_z.append(fixed_z_sup[:,start_sup:end_sup])
fixed_z.append(fixed_z_color_sup[:,start_color_sup:end_color_sup])
fixed_z.append(fixed_z_unsup[:,start_unsup:end_unsup])
# for i_z in range(len(fixed_z)): print(fixed_z[i_z].shape)
fixed_z = torch.cat(fixed_z,1)
else:
fixed_z = cvt(torch.randn(100, *data_shape))
if args.spectral_norm and not args.resume: spectral_norm_power_iteration(model, 500)
best_loss_nll = float("inf")
best_error_score = float("inf")
best_error_score_color = float("inf")
for epoch in range(args.begin_epoch, args.num_epochs + 1):
start_epoch = time.time()
model.train()
if args.annealing_std:
update_scale_std(model.module, epoch)
train_loader = get_train_loader(train_set, epoch)
for _, (x, y_all) in enumerate(train_loader):
start = time.time()
y = y_all[0]
y_color = y_all[1]
update_lr(optimizer, itr)
optimizer.zero_grad()
if not args.conv:
x = x.view(x.shape[0], -1)
# cast data and move to device
x = cvt(x)
# compute loss
if args.conditional:
loss_nll, loss_xent, loss_xent_color, y_predicted, y_color_predicted, atol, rtol, logp_actions, nfe = compute_bits_per_dim_conditional(x, y, y_color, model)
if args.train_mode == "semisup":
loss = loss_nll + args.weight_y * 0.5 * (loss_xent + loss_xent_color)
elif args.train_mode == "sup":
loss = 0.5 * (loss_xent + loss_xent_color)
elif args.train_mode == "unsup":
loss = loss_nll
else:
raise ValueError('Choose supported train_mode: semisup, sup, unsup')
error_score = 1. - np.mean(y_predicted.astype(int) == y.numpy())
error_score_color = 1. - np.mean(y_color_predicted.astype(int) == y_color.numpy())
else:
loss, atol, rtol, logp_actions, nfe = compute_bits_per_dim(x, model)
loss_nll, loss_xent, loss_xent_color, error_score, error_score_color = loss, 0., 0., 0., 0.
if regularization_coeffs:
reg_states = get_regularization(model, regularization_coeffs)
reg_loss = sum(
reg_state * coeff for reg_state, coeff in zip(reg_states, regularization_coeffs) if coeff != 0
)
loss = loss + reg_loss
total_time = count_total_time(model)
loss = loss + total_time * args.time_penalty
# re-weight the gate rewards
normalized_eta = args.eta / len(logp_actions)
# collect cumulative future rewards
R = - loss
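            # Discounted return for the solver-gate policy: starting from R = -loss, each
            # step folds in its eta-weighted NFE cost, R_t = -normalized_eta * nfe_t + gamma * R_{t+1};
            # the REINFORCE term below weights -logp(a_t) by this return.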
cum_rewards = []
for r in nfe[::-1]:
R = -normalized_eta * r.view(-1,1) + args.gamma * R
cum_rewards.insert(0,R)
# apply REINFORCE
rl_loss = 0
for lpa, r in zip(logp_actions, cum_rewards):
rl_loss = rl_loss - lpa.view(-1,1) * args.rl_weight * r
loss = loss + rl_loss.mean()
loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
if args.spectral_norm: spectral_norm_power_iteration(model, args.spectral_norm_niter)
time_meter.update(time.time() - start)
loss_meter.update(loss.item())
nll_meter.update(loss_nll.item())
if args.conditional:
xent_meter.update(loss_xent.item())
xent_color_meter.update(loss_xent_color.item())
else:
xent_meter.update(loss_xent)
xent_color_meter.update(loss_xent_color)
error_meter.update(error_score)
error_color_meter.update(error_score_color)
steps_meter.update(count_nfe_gate(model))
grad_meter.update(grad_norm)
tt_meter.update(total_time)
for idx in range(len(model.module.transforms)):
for layer in model.module.transforms[idx].chain:
if hasattr(layer, 'atol'):
layer.odefunc.after_odeint()
# write to tensorboard
writer.add_scalars('time', {'train_iter': time_meter.val}, itr)
writer.add_scalars('loss', {'train_iter': loss_meter.val}, itr)
writer.add_scalars('bits_per_dim', {'train_iter': nll_meter.val}, itr)
writer.add_scalars('xent', {'train_iter': xent_meter.val}, itr)
writer.add_scalars('xent_color', {'train_iter': xent_color_meter.val}, itr)
writer.add_scalars('error', {'train_iter': error_meter.val}, itr)
writer.add_scalars('error_color', {'train_iter': error_color_meter.val}, itr)
writer.add_scalars('nfe', {'train_iter': steps_meter.val}, itr)
writer.add_scalars('grad', {'train_iter': grad_meter.val}, itr)
writer.add_scalars('total_time', {'train_iter': tt_meter.val}, itr)
if itr % args.log_freq == 0:
for tol_indx in range(len(atol)):
writer.add_scalars('atol_%i'%tol_indx, {'train': atol[tol_indx].mean()}, itr)
writer.add_scalars('rtol_%i'%tol_indx, {'train': rtol[tol_indx].mean()}, itr)
log_message = (
"Iter {:04d} | Time {:.4f}({:.4f}) | Bit/dim {:.4f}({:.4f}) | Xent {:.4f}({:.4f}) | Xent Color {:.4f}({:.4f}) | Loss {:.4f}({:.4f}) | Error {:.4f}({:.4f}) | Error Color {:.4f}({:.4f}) |"
"Steps {:.0f}({:.2f}) | Grad Norm {:.4f}({:.4f}) | Total Time {:.2f}({:.2f})".format(
itr, time_meter.val, time_meter.avg, nll_meter.val, nll_meter.avg, xent_meter.val, xent_meter.avg, xent_color_meter.val, xent_color_meter.avg, loss_meter.val, loss_meter.avg, error_meter.val, error_meter.avg, error_color_meter.val, error_color_meter.avg, steps_meter.val, steps_meter.avg, grad_meter.val, grad_meter.avg, tt_meter.val, tt_meter.avg
)
)
if regularization_coeffs:
log_message = append_regularization_to_log(log_message, regularization_fns, reg_states)
logger.info(log_message)
writer.add_text('info', log_message, itr)
itr += 1
# compute test loss
model.eval()
if epoch % args.val_freq == 0:
with torch.no_grad():
# write to tensorboard
writer.add_scalars('time', {'train_epoch': time_meter.avg}, epoch)
writer.add_scalars('loss', {'train_epoch': loss_meter.avg}, epoch)
writer.add_scalars('bits_per_dim', {'train_epoch': nll_meter.avg}, epoch)
writer.add_scalars('xent', {'train_epoch': xent_meter.avg}, epoch)
writer.add_scalars('xent_color', {'train_epoch': xent_color_meter.avg}, epoch)
writer.add_scalars('error', {'train_epoch': error_meter.avg}, epoch)
writer.add_scalars('error_color', {'train_epoch': error_color_meter.avg}, epoch)
writer.add_scalars('nfe', {'train_epoch': steps_meter.avg}, epoch)
writer.add_scalars('grad', {'train_epoch': grad_meter.avg}, epoch)
writer.add_scalars('total_time', {'train_epoch': tt_meter.avg}, epoch)
start = time.time()
logger.info("validating...")
writer.add_text('info', "validating...", epoch)
losses_nll = []; losses_xent = []; losses_xent_color = []; losses = []
total_correct = 0
total_correct_color = 0
for (x, y_all) in test_loader:
y = y_all[0]
y_color = y_all[1]
if not args.conv:
x = x.view(x.shape[0], -1)
x = cvt(x)
if args.conditional:
loss_nll, loss_xent, loss_xent_color, y_predicted, y_color_predicted, atol, rtol, logp_actions, nfe = compute_bits_per_dim_conditional(x, y, y_color, model)
if args.train_mode == "semisup":
loss = loss_nll + args.weight_y * 0.5 * (loss_xent + loss_xent_color)
elif args.train_mode == "sup":
loss = 0.5 * (loss_xent + loss_xent_color)
elif args.train_mode == "unsup":
loss = loss_nll
else:
raise ValueError('Choose supported train_mode: semisup, sup, unsup')
total_correct += np.sum(y_predicted.astype(int) == y.numpy())
total_correct_color += np.sum(y_color_predicted.astype(int) == y_color.numpy())
else:
loss, atol, rtol, logp_actions, nfe = compute_bits_per_dim(x, model)
loss_nll, loss_xent, loss_xent_color = loss, 0., 0.
losses_nll.append(loss_nll.cpu().numpy()); losses.append(loss.cpu().numpy())
if args.conditional:
losses_xent.append(loss_xent.cpu().numpy())
losses_xent_color.append(loss_xent_color.cpu().numpy())
else:
losses_xent.append(loss_xent)
losses_xent_color.append(loss_xent_color)
loss_nll = np.mean(losses_nll); loss_xent = np.mean(losses_xent); loss_xent_color = np.mean(losses_xent_color); loss = np.mean(losses)
error_score = 1. - total_correct / len(test_loader.dataset)
error_score_color = 1. - total_correct_color / len(test_loader.dataset)
time_epoch_meter.update(time.time() - start_epoch)
# write to tensorboard
test_time_spent = time.time() - start
writer.add_scalars('time', {'validation': test_time_spent}, epoch)
writer.add_scalars('epoch_time', {'validation': time_epoch_meter.val}, epoch)
writer.add_scalars('bits_per_dim', {'validation': loss_nll}, epoch)
writer.add_scalars('xent', {'validation': loss_xent}, epoch)
writer.add_scalars('xent_color', {'validation': loss_xent_color}, epoch)
writer.add_scalars('loss', {'validation': loss}, epoch)
writer.add_scalars('error', {'validation': error_score}, epoch)
writer.add_scalars('error_color', {'validation': error_score_color}, epoch)
for tol_indx in range(len(atol)):
writer.add_scalars('atol_%i'%tol_indx, {'validation': atol[tol_indx].mean()}, epoch)
writer.add_scalars('rtol_%i'%tol_indx, {'validation': rtol[tol_indx].mean()}, epoch)
log_message = "Epoch {:04d} | Time {:.4f}, Epoch Time {:.4f}({:.4f}), Bit/dim {:.4f}(best: {:.4f}), Xent {:.4f}, Xent Color {:.4f}. Loss {:.4f}, Error {:.4f}(best: {:.4f}), Error Color {:.4f}(best: {:.4f})".format(epoch, time.time() - start, time_epoch_meter.val, time_epoch_meter.avg, loss_nll, best_loss_nll, loss_xent, loss_xent_color, loss, error_score, best_error_score, error_score_color, best_error_score_color)
logger.info(log_message)
writer.add_text('info', log_message, epoch)
for name, param in model.named_parameters():
writer.add_histogram(name, param.clone().cpu().data.numpy(), epoch)
utils.makedirs(args.save)
torch.save({
"args": args,
"state_dict": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),
"optim_state_dict": optimizer.state_dict(),
"epoch": epoch,
"iter": itr-1,
"error": error_score,
"error_color": error_score_color,
"loss": loss,
"xent": loss_xent,
"xent_color": loss_xent_color,
"bits_per_dim": loss_nll,
"best_bits_per_dim": best_loss_nll,
"best_error_score": best_error_score,
"best_error_score_color": best_error_score_color,
"epoch_time": time_epoch_meter.val,
"epoch_time_avg": time_epoch_meter.avg,
"time": test_time_spent,
"error_train": error_meter.avg,
"error_train_color": error_color_meter.avg,
"loss_train": loss_meter.avg,
"xent_train": xent_meter.avg,
"xent_train_color": xent_color_meter.avg,
"bits_per_dim_train": nll_meter.avg,
"total_time_train": tt_meter.avg,
"time_train": time_meter.avg,
"nfe_train": steps_meter.avg,
"grad_train": grad_meter.avg,
}, os.path.join(args.save, "epoch_%i_checkpt.pth"%epoch))
torch.save({
"args": args,
"state_dict": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),
"optim_state_dict": optimizer.state_dict(),
"epoch": epoch,
"iter": itr-1,
"error": error_score,
"error_color": error_score_color,
"loss": loss,
"xent": loss_xent,
"xent_color": loss_xent_color,
"bits_per_dim": loss_nll,
"best_bits_per_dim": best_loss_nll,
"best_error_score": best_error_score,
"best_error_score_color": best_error_score_color,
"epoch_time": time_epoch_meter.val,
"epoch_time_avg": time_epoch_meter.avg,
"time": test_time_spent,
"error_train": error_meter.avg,
"error_train_color": error_color_meter.avg,
"loss_train": loss_meter.avg,
"xent_train": xent_meter.avg,
"xent_train_color": xent_color_meter.avg,
"bits_per_dim_train": nll_meter.avg,
"total_time_train": tt_meter.avg,
"time_train": time_meter.avg,
"nfe_train": steps_meter.avg,
"grad_train": grad_meter.avg,
}, os.path.join(args.save, "current_checkpt.pth"))
if loss_nll < best_loss_nll:
best_loss_nll = loss_nll
utils.makedirs(args.save)
torch.save({
"args": args,
"state_dict": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),
"optim_state_dict": optimizer.state_dict(),
"epoch": epoch,
"iter": itr-1,
"error": error_score,
"error_color": error_score_color,
"loss": loss,
"xent": loss_xent,
"xent_color": loss_xent_color,
"bits_per_dim": loss_nll,
"best_bits_per_dim": best_loss_nll,
"best_error_score": best_error_score,
"best_error_score_color": best_error_score_color,
"epoch_time": time_epoch_meter.val,
"epoch_time_avg": time_epoch_meter.avg,
"time": test_time_spent,
"error_train": error_meter.avg,
"error_train_color": error_color_meter.avg,
"loss_train": loss_meter.avg,
"xent_train": xent_meter.avg,
"xent_train_color": xent_color_meter.avg,
"bits_per_dim_train": nll_meter.avg,
"total_time_train": tt_meter.avg,
"time_train": time_meter.avg,
"nfe_train": steps_meter.avg,
"grad_train": grad_meter.avg,
}, os.path.join(args.save, "best_nll_checkpt.pth"))
if args.conditional:
if error_score < best_error_score:
best_error_score = error_score
utils.makedirs(args.save)
torch.save({
"args": args,
"state_dict": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),
"optim_state_dict": optimizer.state_dict(),
"epoch": epoch,
"iter": itr-1,
"error": error_score,
"error_color": error_score_color,
"loss": loss,
"xent": loss_xent,
"xent_color": loss_xent_color,
"bits_per_dim": loss_nll,
"best_bits_per_dim": best_loss_nll,
"best_error_score": best_error_score,
"best_error_score_color": best_error_score_color,
"epoch_time": time_epoch_meter.val,
"epoch_time_avg": time_epoch_meter.avg,
"time": test_time_spent,
"error_train": error_meter.avg,
"error_train_color": error_color_meter.avg,
"loss_train": loss_meter.avg,
"xent_train": xent_meter.avg,
"xent_train_color": xent_color_meter.avg,
"bits_per_dim_train": nll_meter.avg,
"total_time_train": tt_meter.avg,
"time_train": time_meter.avg,
"nfe_train": steps_meter.avg,
"grad_train": grad_meter.avg,
}, os.path.join(args.save, "best_error_checkpt.pth"))
if error_score_color < best_error_score_color:
best_error_score_color = error_score_color
utils.makedirs(args.save)
torch.save({
"args": args,
"state_dict": model.module.state_dict() if torch.cuda.is_available() else model.state_dict(),
"optim_state_dict": optimizer.state_dict(),
"epoch": epoch,
"iter": itr-1,
"error": error_score,
"error_color": error_score_color,
"loss": loss,
"xent": loss_xent,
"xent_color": loss_xent_color,
"bits_per_dim": loss_nll,
"best_bits_per_dim": best_loss_nll,
"best_error_score": best_error_score,
"best_error_score_color": best_error_score_color,
"epoch_time": time_epoch_meter.val,
"epoch_time_avg": time_epoch_meter.avg,
"time": test_time_spent,
"error_train": error_meter.avg,
"error_train_color": error_color_meter.avg,
"loss_train": loss_meter.avg,
"xent_train": xent_meter.avg,
"xent_train_color": xent_color_meter.avg,
"bits_per_dim_train": nll_meter.avg,
"total_time_train": tt_meter.avg,
"time_train": time_meter.avg,
"nfe_train": steps_meter.avg,
"grad_train": grad_meter.avg,
}, os.path.join(args.save, "best_error_color_checkpt.pth"))
# visualize samples and density
with torch.no_grad():
fig_filename = os.path.join(args.save, "figs", "{:04d}.jpg".format(epoch))
utils.makedirs(os.path.dirname(fig_filename))
generated_samples, atol, rtol, logp_actions, nfe = model(fixed_z, reverse=True)
generated_samples = generated_samples.view(-1, *data_shape)
for tol_indx in range(len(atol)):
writer.add_scalars('atol_gen_%i'%tol_indx, {'validation': atol[tol_indx].mean()}, epoch)
writer.add_scalars('rtol_gen_%i'%tol_indx, {'validation': rtol[tol_indx].mean()}, epoch)
save_image(generated_samples, fig_filename, nrow=10)
if args.data == "mnist":
writer.add_images('generated_images', generated_samples.repeat(1,3,1,1), epoch)
else:
writer.add_images('generated_images', generated_samples.repeat(1,1,1,1), epoch) |
the-stack_0_13858 | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import rospy
import math
import numpy as np
import matplotlib.pyplot as plt
from std_msgs.msg import Bool
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
class rplidarnav:
def __init__(self):
self.stop_flag = False
rospy.init_node('rplidar_navigation', anonymous=True)
rospy.Subscriber('/scan', LaserScan, self.callback)
self.pub = rospy.Publisher('rp_nav', Twist, queue_size=1)
        print ('rplidar navigation started')
plt.ion()
def road_detection_all(self,msg):
        # index 720 is straight ahead of the lidar; the lane lines lie in front
        left_dist = [] # distances of left-side laser returns
        left_angle = [] # angles of left-side laser returns
        left_x = [] # left-side x coordinates
        left_y = [] # left-side y coordinates
        right_dist = [] # distances of right-side laser returns
        right_angle = [] # angles of right-side laser returns
        right_x = [] # right-side x coordinates
        right_y = [] # right-side y coordinates
        break_point_index = 0 # index of the break point between the two lane lines
        # find the break point between the left and right lane lines
        # scan for obstacles within 10 m over the 180-degree field of view
last_dist = 100
for i in range(360,480):
if msg.ranges[i]<last_dist:
last_dist = msg.ranges[i]
else:
continue
for i in range (360,1080):
if msg.ranges[i]<10:
current_dist = msg.ranges[i]
                # a jump of more than 1 m between consecutive readings means the two points are not on the same lane line
if math.fabs(last_dist-current_dist)>1 and math.fabs(last_dist-current_dist)<50:
break_point_index = i
break
else:
last_dist = current_dist
continue
else:
continue
        # fit the left lane line
        for i in range (360,break_point_index):
            current_dist = msg.ranges[i]
            current_angle = (0 + (i - 360)/4)*math.pi/180 # in radians
            left_dist.append(current_dist)
left_angle.append(current_angle)
if current_angle <= math.pi/2:
current_x = -1*current_dist*math.cos(current_angle)
current_y = current_dist*math.sin(current_angle)
left_x.append(current_x)
left_y.append(current_y)
if current_angle > math.pi/2:
current_angle = math.pi - current_angle
current_x = current_dist*math.cos(current_angle)
current_y = current_dist*math.sin(current_angle)
left_x.append(current_x)
left_y.append(current_y)
        z1 = np.polyfit(left_y, left_x, 2) # quadratic fit
a1 = z1[0]
b1 = z1[1]
c1 = z1[2]
left_x_pred = []
x_left_base = c1
        # compute x for each y: x = a1*y^2 + b1*y + c1
for i in range (0,len(left_y),1):
left_x_pred.append(a1*left_y[i]**2+b1*left_y[i]+c1)
        # compute the tangent slope and the road heading angle
road_left_angle = math.atan(2 * a1 * c1 + b1)
        # fit the right lane line
        for i in range (break_point_index,1080):
            current_dist = msg.ranges[i]
            current_angle = (0 + (1080 - i)/4)*math.pi/180 # in radians
            right_dist.append(current_dist)
right_angle.append(current_angle)
if current_angle <= math.pi/2:
current_x = current_dist*math.cos(current_angle)
current_y = current_dist*math.sin(current_angle)
right_x.append(current_x)
right_y.append(current_y)
if current_angle > math.pi/2:
current_angle = math.pi - current_angle
current_x = -1*current_dist*math.cos(current_angle)
current_y = current_dist*math.sin(current_angle)
right_x.append(current_x)
right_y.append(current_y)
        z2 = np.polyfit(right_y, right_x, 2) # quadratic fit (three coefficients are read below)
a2 = z2[0]
b2 = z2[1]
c2 = z2[2]
x_right_base = c2
right_x_pred = []
for i in range (0,len(right_y),1):
right_x_pred.append(a2*right_y[i]**2+b2*right_y[i]+c2)
road_right_angle = math.atan(2 * a2 * c2 + b2)
        # compute the road heading angle
        road_angle = (road_left_angle + road_right_angle)/2
        # compute the lateral offset from the road center
left_near_dist = 0
right_near_dist = 0
left_near = []
right_near = []
for i in range(360,480):
if msg.ranges[i]<3:
left_near.append(msg.ranges[i])
else:
continue
left_near_dist = np.min(left_near)
for i in range(960,1080):
if msg.ranges[i]<3:
right_near.append(msg.ranges[i])
else:
continue
right_near_dist = np.min(right_near)
        x_offset = (right_near_dist-left_near_dist)/2 # positive means offset to the right, negative to the left
plt.clf()
plt.plot(left_x_pred, left_y, color='blue')
plt.plot(right_x_pred, right_y,color = 'red')
plt.axis('equal')
plt.show()
plt.pause(0.01)
return road_angle,x_offset
def road_detection(self, msg):
        # scan the left-side lidar data (60-degree sector on the left)
        # index 720 is straight ahead of the lidar
        left_dist = [] # distances of left-side laser returns
        left_angle = [] # angles of left-side laser returns
        left_x = [] # left-side x coordinates
        left_y = [] # left-side y coordinates
last_left_dist = 0
last_right_dist = 0
for i in range(480, 720):
            # filter discontinuities: readings beyond 1.5 m are treated as not belonging to this lane line
if msg.ranges[i] < 1.5:
current_dist = msg.ranges[i]
if math.fabs(last_left_dist-current_dist)<=0.05:
                    current_angle = (30 + (i - 480)/4)*math.pi/180 # in radians
left_dist.append(current_dist)
left_angle.append(current_angle)
current_x = -1*current_dist*math.cos(current_angle)
current_y = current_dist*math.sin(current_angle)
left_x.append(current_x)
left_y.append(current_y)
last_left_dist = current_dist
else:
last_left_dist = current_dist
else:
continue
        z1 = np.polyfit(left_y, left_x, 2) # quadratic fit
a1 = z1[0]
b1 = z1[1]
c1 = z1[2]
left_x_pred = []
x_left_base = c1
        # compute x for each y: x = a1*y^2 + b1*y + c1
for i in range (0,len(left_y),1):
left_x_pred.append(a1*left_y[i]**2+b1*left_y[i]+c1)
        # compute the tangent slope and the road heading angle
#y_eval = np.max(left_y)
road_left_angle = math.atan(2 * a1 * c1 + b1)
        # scan the right-side lidar data (60-degree sector on the right)
        right_dist = [] # distances of right-side laser returns
        right_angle = [] # angles of right-side laser returns
        right_x = [] # right-side x coordinates
        right_y = [] # right-side y coordinates
for i in range(960, 720,-1):
if msg.ranges[i] < 1.5:
current_dist = msg.ranges[i]
if math.fabs(last_right_dist-current_dist)<=0.05:
current_dist = msg.ranges[i]
                    current_angle = (30 + (960 - i)/4)*math.pi/180 # in radians
right_dist.append(current_dist)
right_angle.append(current_angle)
current_x = current_dist*math.cos(current_angle)
current_y = current_dist*math.sin(current_angle)
right_x.append(current_x)
right_y.append(current_y)
last_right_dist = current_dist
else:
last_right_dist = current_dist
else:
continue
        z2 = np.polyfit(right_y, right_x, 2) # quadratic fit (three coefficients are read below)
a2 = z2[0]
b2 = z2[1]
c2 = z2[2]
x_right_base = c2
right_x_pred = []
for i in range (0,len(right_y),1):
right_x_pred.append(a2*right_y[i]**2+b2*right_y[i]+c2)
road_right_angle = math.atan(2 * a2 * c2 + b2)
road_angle = (road_left_angle + road_right_angle)/2
        expect_angle = road_angle-math.pi/2 # positive means turn left, negative means turn right
x_offset = (x_right_base-x_left_base)/2
plt.clf()
plt.plot(left_x_pred, left_y)
plt.plot(right_x_pred, right_y)
plt.axis('equal')
plt.show()
plt.pause(0.01)
return expect_angle,x_offset
def callback(self, msg):
angle,offset = self.road_detection_all(msg)
if self.stop_flag == False:
print('angle')
print(angle)
print('offset')
print(offset)
twist = Twist()
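            # Simple proportional steering (both gains are effectively 1 here): steer by the
            # road heading error and counter-steer by the lateral offset from the lane center.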
twist.linear.x = 0.3
twist.angular.z = angle * 1 - offset
if self.stop_flag == True:
            print('obstacle detected, stopping')
twist = Twist()
twist.linear.x = 0
twist.angular.z = 0
self.pub.publish(twist)
if __name__ == '__main__':
try:
rp_nav = rplidarnav()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
the-stack_0_13859 | from kipoiseq.dataloaders import SingleVariantUTRDataLoader
class SingleVariantFramepoolDataloader(SingleVariantUTRDataLoader):
def __init__(
self,
gtf_file,
fasta_file,
vcf_file,
feature_type="5UTR",
vcf_file_tbi=None,
infer_from_cds=False,
on_error_warn=True,
**kwargs
):
super().__init__(
gtf_file=gtf_file,
fasta_file=fasta_file,
vcf_file=vcf_file,
feature_type=feature_type,
vcf_file_tbi=vcf_file_tbi,
infer_from_cds=infer_from_cds,
on_error_warn=on_error_warn,
**kwargs
)
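# A minimal usage sketch (hypothetical file paths; the keyword arguments simply mirror
# those of the parent SingleVariantUTRDataLoader):
#
# dl = SingleVariantFramepoolDataloader(
#     gtf_file="annotations.gtf",
#     fasta_file="genome.fa",
#     vcf_file="variants.vcf.gz",
#     vcf_file_tbi="variants.vcf.gz.tbi",
# )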
|
the-stack_0_13860 | #!/usr/bin/python
import math
import datetime
import dateutil.parser
import dateutil.tz
import csv
import json
DEBUG = True
def void():
pass
def log(x):
print(x)
debug_log = log if DEBUG else void
def avg(items):
return float(sum(items)) / max(len(items), 1)
ISO_8601_UTC_MEAN = dateutil.tz.tzoffset(None, 0)
# Convert the given ISO time string to timestamps in seconds.
def ISOTimeString2TimeStamp(timeStr):
time = dateutil.parser.parse(timeStr)
isoStartTime = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, ISO_8601_UTC_MEAN)
return int((time - isoStartTime).total_seconds())
def tempUnit2K(value, unit):
if unit == 'Deg C':
return value + 273.15
elif unit == 'Deg F':
return (value + 459.67) * 5 / 9
elif unit == 'Deg K':
return value
else:
raise ValueError('Unsupported unit "%s".' % unit)
def relHumidUnit2Percent(value, unit):
if unit == '%':
return value
else:
raise ValueError('Unsupported unit "%s".' % unit)
def speedUnit2MeterPerSecond(value, unit):
if unit == 'meters/second':
return value
else:
raise ValueError('Unsupported unit "%s".' % unit)
def extractXFactor(magnitude, degreeFromNorth):
return magnitude * math.sin(math.radians(degreeFromNorth));
def extractYFactor(magnitude, degreeFromNorth):
return magnitude * math.cos(math.radians(degreeFromNorth));
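# Wind decomposition convention: the direction is degrees clockwise from north, so a
# 10 m/s wind at 90 degrees gives an eastward component of 10 and a northward component
# of ~0, while 0 degrees puts the full magnitude on the northward axis.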
STATION_GEOMETRY = {
'type': 'Point',
'coordinates': [
# SW Corner.
# @see {@link https://github.com/terraref/extractors-metadata/blob/master/sensorposition/terra.sensorposition.py#L68}
33.0745666667,
-111.9750833333,
0
]
}
# 'AirTC': 'air_temperature',
# 'RH': 'relative_humidity',
# 'Pyro': 'surface_downwelling_shortwave_flux_in_air',
# 'PAR_ref': 'surface_downwelling_photosynthetic_photon_flux_in_air',
# 'WindDir': 'wind_to_direction',
# 'WS_ms': 'wind_speed',
# 'Rain_mm_Tot': 'precipitation_rate'
# Each mapping function can decide to return one or multiple tuples, so leave the list to them.
PROP_MAPPING = {
'AirTC': lambda d: [(
'air_temperature',
tempUnit2K(float(d['value']), d['meta']['unit'])
)],
'RH': lambda d: [(
'relative_humidity',
relHumidUnit2Percent(float(d['value']), d['meta']['unit'])
)],
'Pyro': lambda d: [(
'surface_downwelling_shortwave_flux_in_air',
float(d['value'])
)],
'PAR_ref': lambda d: [(
'surface_downwelling_photosynthetic_photon_flux_in_air',
float(d['value'])
)],
    # If Wind Direction is present, split the wind into eastward and northward components using Wind Speed.
'WindDir': lambda d: [
('eastward_wind', extractXFactor(float(d['record']['WS_ms']), float(d['value']))),
('northward_wind', extractYFactor(float(d['record']['WS_ms']), float(d['value'])))
],
# If Wind Speed is present, process it if we can find Wind Direction.
'WS_ms': lambda d: [(
'wind_speed',
speedUnit2MeterPerSecond(float(d['value']), d['meta']['unit'])
)],
'Rain_mm_Tot': lambda d: [(
'precipitation_rate',
float(d['value'])
)]
}
# Aggregation functions for each property.
PROP_AGGREGATE = {
'air_temperature': avg,
'relative_humidity': avg,
'surface_downwelling_shortwave_flux_in_air': avg,
'surface_downwelling_photosynthetic_photon_flux_in_air': avg,
'eastward_wind': avg,
'northward_wind': avg,
'wind_speed': avg,
'precipitation_rate': sum
}
def transformProps(propMetaDict, propValDict):
newProps = []
for propName in propValDict:
if propName in PROP_MAPPING:
newProps += PROP_MAPPING[propName]({
'meta': propMetaDict[propName],
'value': propValDict[propName],
'record': propValDict
})
return dict(newProps)
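# Illustrative example of transformProps: a raw record such as {'AirTC': '25.0', 'RH': '40', ...}
# whose metadata lists units {'AirTC': 'Deg C', 'RH': '%'} comes out as
# {'air_temperature': 298.15, 'relative_humidity': 40.0, ...}.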
def parse_file_header_line(linestr):
return [json.loads(x) for x in str(linestr).split(',')]
# ----------------------------------------------------------------------
# Parse the CSV file and return a list of dictionaries.
def parse_file(filepath, utc_offset = ISO_8601_UTC_MEAN):
results = []
with open(filepath) as csvfile:
# First line is always the header.
# @see {@link https://www.manualslib.com/manual/538296/Campbell-Cr9000.html?page=41#manual}
header_lines = [
csvfile.readline()
]
file_format, station_name, logger_model, logger_serial, os_version, dld_file, dld_sig, table_name = parse_file_header_line(header_lines[0])
if file_format != 'TOA5':
raise ValueError('Unsupported format "%s".' % file_format)
# For TOA5, there are in total 4 header lines.
# @see {@link https://www.manualslib.com/manual/538296/Campbell-Cr9000.html?page=43#manual}
while (len(header_lines) < 4):
header_lines.append(csvfile.readline())
prop_names = parse_file_header_line(header_lines[1])
prop_units = parse_file_header_line(header_lines[2])
prop_sample_method = parse_file_header_line(header_lines[3])
# Associate the above lists.
props = dict()
import logging
for x in range(0, len(prop_names)):
props[prop_names[x]] = {
'title': prop_names[x],
'unit': prop_units[x],
'sample_method': prop_sample_method[x]
}
# [DEBUG] Print the property details if needed.
#print json.dumps(props)
reader = csv.DictReader(csvfile, fieldnames=prop_names)
for row in reader:
timestamp = datetime.datetime.strptime(row['TIMESTAMP'], '%Y-%m-%d %H:%M:%S').isoformat() + utc_offset.tzname(None)
newResult = {
# @type {string}
'start_time': timestamp,
# @type {string}
'end_time': timestamp,
'properties': transformProps(props, row),
# @type {string}
'type': 'Feature',
'geometry': STATION_GEOMETRY
}
# Enable this if the raw data needs to be kept.
# newResult['properties']['_raw'] = {
# 'data': row,
# 'units': prop_units,
# 'sample_method': prop_sample_method
# }
results.append(newResult)
return results
# ----------------------------------------------------------------------
# Aggregate the list of parsed results.
# The aggregation starts with the input data and no state given.
# This function returns a list of aggregated data packages and a state package
# which should be fed back into the function to continue or end the aggregation.
# If there's no more data to input, provide None and the aggregation will stop.
# When aggregation has ended, the returned state package will be None to indicate that.
# Note: data has to be sorted by time.
# Note: cutoffSize is in seconds.
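# Illustrative example: with cutoffSize=300 (5 minutes), a record stamped 10:03:20 falls
# into the window that closes at 10:05:00; records beyond the last full window are kept
# in state['leftover'] until a later call supplies more data or None ends the run.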
def aggregate(cutoffSize, tz, inputData, state):
# This function should always return this complex package no matter what happens.
result = {
'packages': [],
# In case the input data does nothing, inherit the state first.
'state': None if state == None else dict(state)
}
# The aggregation ends when no more data is available. (inputData is None)
# In which case it needs to recover leftover data in the state package.
if inputData == None:
debug_log('Ending aggregation...')
# The aggregation is ending, try recover leftover data from the state.
if state == None:
# There is nothing to do.
pass
else:
# Recover leftover data from state.
data = state['leftover']
if len(data) == 0:
# There is nothing to recover.
pass
else:
# Aggregate leftover data.
# Assume leftover data never contain more data than the cutoff allows.
startTime = state['starttime']
# Use the latest date in the data entries.
# Assuming the data is always sorted, the last one should be the latest.
endTime = ISOTimeString2TimeStamp(data[-1]['end_time'])
newPackage = aggregate_chunk(data, tz, startTime, endTime)
if newPackage != None:
result['packages'].append(newPackage)
# Mark state with None to indicate the aggregation is done.
result['state'] = None
else:
debug_log('Aggregating...')
data = inputData
# More data is provided, continue aggregation.
if state == None:
debug_log('Fresh start...')
# There is no previous state, starting afresh.
# Use the earliest date in the input data entries.
# Assuming the input data is always sorted, the first one should be the earliest.
startTime = ISOTimeString2TimeStamp(data[0]['start_time'])
else:
debug_log('Continuing...')
# Resume aggregation from a previous state.
startTime = state['starttime']
# Left over data should be part of the data being processed.
data = state['leftover'] + inputData
startIndex = 0
# Keep aggregating until all the data is consumed.
while startIndex < len(data):
# Find the nearest cut-off point.
endTimeCutoff = startTime - startTime % cutoffSize + cutoffSize
# Scan the input data to find the portion that fits in the cutoff.
endIndex = startIndex
while endIndex < len(data) and ISOTimeString2TimeStamp(data[endIndex]['end_time']) < endTimeCutoff:
endIndex += 1
# If everything fits in the cutoff, there may be more data in the next run.
# Otherwise, these data should be aggregated.
if endIndex >= len(data):
# End of data reached, but cutoff is not.
# Save everything into state.
result['state'] = {
'starttime': startTime,
'leftover': data[startIndex:]
}
else:
# Cutoff reached.
# Aggregate this chunk.
newPackage = aggregate_chunk(data[startIndex:endIndex], tz, startTime, endTimeCutoff)
if newPackage != None:
result['packages'].append(newPackage)
# Update variables for the next loop.
startTime = endTimeCutoff
startIndex = endIndex
# The above loop should end with some chunks aggregated into result['packages'],
# with the last chunk saved in result['state']['leftover']
return result
# Helper function for aggregating a chunk of data.
# @param {timestamp} startTime
# @param {timestamp} endTime
def aggregate_chunk(dataChunk, tz, startTime, endTime):
if len(dataChunk) == 0:
# There is nothing to aggregate.
return None
else:
# Prepare the list of properties for aggregation.
propertiesList = [x['properties'] for x in dataChunk]
return {
'start_time': datetime.datetime.fromtimestamp(startTime, tz).isoformat(),
'end_time': datetime.datetime.fromtimestamp(endTime, tz).isoformat(),
'properties': aggregateProps(propertiesList),
'type': 'Point',
'geometry': STATION_GEOMETRY
}
def aggregateProps(propertiesList):
collection = {}
for properties in propertiesList:
for key in properties:
# Properties start with "_" shouldn't be processed.
if key.startswith('_'):
continue
value = properties[key]
# Collect property values and save them into collection
if key not in collection:
collection[key] = [value]
else:
collection[key].append(value)
result = {}
for key in properties:
# If there is no aggregation function, ignore the property.
if key not in PROP_AGGREGATE:
continue
func = PROP_AGGREGATE[key]
result[key] = func(collection[key])
return result
if __name__ == "__main__":
size = 5 * 60
tz = dateutil.tz.tzoffset("-07:00", -7 * 60 * 60)
packages = []
file = './test-input-1.dat'
parse = parse_file(file, tz)
result = aggregate(
cutoffSize=size,
tz=tz,
inputData=parse,
state=None
)
packages += result['packages']
print(json.dumps(result['state']))
file = './test-input-2.dat'
parse = parse_file(file, tz)
result = aggregate(
cutoffSize=size,
tz=tz,
inputData=parse,
state=result['state']
)
packages += result['packages']
print(json.dumps(result['state']))
result = aggregate(
cutoffSize=size,
tz=tz,
inputData=None,
state=result['state']
)
packages += result['packages']
print('Package Count: %s' % (len(packages)))
packages = []
file = './test-input-3.dat'
parse = parse_file(file, tz)
result = aggregate(
cutoffSize=size,
tz=tz,
inputData=parse,
state=result['state']
)
packages += result['packages']
result = aggregate(
cutoffSize=size,
tz=tz,
inputData=None,
state=result['state']
)
packages += result['packages']
print('Package Count: %s' % (len(packages)))
print(json.dumps(packages))
|
the-stack_0_13863 | import os
import errno
import fire
import json
import yaml
import shutil
from time import sleep
import logging
from boto3 import session
from botocore.exceptions import ClientError
logging.basicConfig(
format='%(asctime)s|%(name).10s|%(levelname).5s: %(message)s',
level=logging.WARNING)
log = logging.getLogger('greengo')
log.setLevel(logging.DEBUG)
DEFINITION_FILE = 'greengo.yaml'
MAGIC_DIR = '.gg'
STATE_FILE = os.path.join(MAGIC_DIR, 'gg_state.json')
DEPLOY_TIMEOUT = 45 # Timeout, seconds
class GroupCommands(object):
def __init__(self):
super(GroupCommands, self).__init__()
s = session.Session()
self._region = s.region_name
if not self._region:
log.error("AWS credentials and region must be setup. "
"Refer AWS docs at https://goo.gl/JDi5ie")
exit(-1)
log.info("AWS credentials found for region '{}'".format(self._region))
self._gg = s.client("greengrass")
self._iot = s.client("iot")
self._lambda = s.client("lambda")
self._iam = s.client("iam")
self._iot_endpoint = self._iot.describe_endpoint()['endpointAddress']
try:
with open(DEFINITION_FILE, 'r') as f:
                self.group = yaml.safe_load(f)
except IOError:
log.error("Group definition file `greengo.yaml` not found. "
"Create file, and define the group definition first. "
"See https://github.com/greengo for details.")
exit(-1)
self.name = self.group['Group']['name']
self._LAMBDA_ROLE_NAME = "{0}_Lambda_Role".format(self.name)
_mkdir(MAGIC_DIR)
self.state = _load_state()
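        # Typical lifecycle: `create` builds the group, cores, resources, lambdas and
        # subscriptions; `deploy` pushes the group version to the core device; `update`
        # re-creates the definitions; `remove` tears everything down. Progress is
        # persisted in .gg/gg_state.json so partially created groups can be cleaned up.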
def create(self):
if self.state:
log.error("Previously created group exists. Remove before creating!")
return False
log.info("[BEGIN] creating group {0}".format(self.group['Group']['name']))
# TODO: create_lambda handles self.state directly.
# _create_cores leaves it to a caller. Refactor?
# 1. Create group
# TODO: create group at the end, with "initial version"?
group = rinse(self._gg.create_group(Name=self.group['Group']['name']))
self.state['Group'] = group
_update_state(self.state)
# Must update state on every step, else how can I clean?
# Or on exception?
# 2. Create core a) thing b) attach to group
core_def, cores = self._create_cores()
self.state['Cores'] = cores
self.state['CoreDefinition'] = core_def
_update_state(self.state)
# 3. Create Resources - policies for local and ML resource access.
self.create_resources()
# 4. Create Lambda functions and function definitions
# Lambda may have dependencies on resources.
# TODO: refactor to take dependencies into account
self.create_lambdas(update_group_version=False)
# 5. Create devices (coming soon)
# 6. Create subscriptions
self.create_subscriptions()
# LAST. Add all the constituent parts to the Greengrass Group
self.create_group_version()
log.info("[END] creating group {0}".format(self.group['Group']['name']))
def deploy(self):
if not self.state:
log.info("There is nothing to deploy. Do create first.")
return
log.info("Deploying group '{0}'".format(self.state['Group']['Name']))
deployment = self._gg.create_deployment(
GroupId=self.state['Group']['Id'],
GroupVersionId=self.state['Group']['Version']['Version'],
DeploymentType="NewDeployment")
self.state['Deployment'] = rinse(deployment)
_update_state(self.state)
for i in range(DEPLOY_TIMEOUT / 2):
sleep(2)
deployment_status = self._gg.get_deployment_status(
GroupId=self.state['Group']['Id'],
DeploymentId=deployment['DeploymentId'])
status = deployment_status.get('DeploymentStatus')
log.debug("--- deploying... status: {0}".format(status))
# Known status values: ['Building | InProgress | Success | Failure']
if status == 'Success':
log.info("--- SUCCESS!")
self.state['Deployment']['Status'] = rinse(deployment_status)
_update_state(self.state)
return
elif status == 'Failure':
log.error("--- ERROR! {0}".format(deployment_status['ErrorMessage']))
self.state['Deployment']['Status'] = rinse(deployment_status)
_update_state(self.state)
return
log.warning(
"--- Gave up waiting for deployment. Please check the status later. "
"Make sure GreenGrass Core is running, connected to network, "
"and the certificates match.")
def create_group_version(self):
        # Create a copy so that referencing non-existent fields does not create them in self.state
state = State(self.state)
kwargs = dict(
GroupId=self.state['Group']['Id'],
CoreDefinitionVersionArn=state['CoreDefinition']['LatestVersionArn'],
DeviceDefinitionVersionArn="",
FunctionDefinitionVersionArn=state['FunctionDefinition']['LatestVersionArn'],
SubscriptionDefinitionVersionArn=state['Subscriptions']['LatestVersionArn'],
LoggerDefinitionVersionArn="",
ResourceDefinitionVersionArn=state['Resources']['LatestVersionArn'],
)
args = dict((k, v) for k, v in kwargs.iteritems() if v)
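        # Drop the empty ARNs so that only definitions that were actually created are
        # attached to this group version.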
log.debug("Creating group version with settings:\n{0}".format(pretty(args)))
group_ver = self._gg.create_group_version(**args)
self.state['Group']['Version'] = rinse(group_ver)
_update_state(self.state)
def remove(self):
if not self.state:
log.info("There seem to be nothing to remove.")
return
log.info("[BEGIN] removing group {0}".format(self.group['Group']['name']))
self.remove_subscriptions()
self._remove_cores()
self.remove_lambdas()
self.remove_resources()
log.info("Reseting deployments forcefully, if they exist")
self._gg.reset_deployments(GroupId=self.state['Group']['Id'], Force=True)
log.info("Deleting group '{0}'".format(self.state['Group']['Id']))
self._gg.delete_group(GroupId=self.state['Group']['Id'])
os.remove(STATE_FILE)
log.info("[END] removing group {0}".format(self.group['Group']['name']))
def _default_lambda_role_arn(self):
if 'LambdaRole' not in self.state:
log.info("Creating default lambda role '{0}'".format(self._LAMBDA_ROLE_NAME))
try:
role = self._create_default_lambda_role()
except ClientError as e:
if e.response['Error']['Code'] == 'EntityAlreadyExists':
role = self._iam.get_role(RoleName=self._LAMBDA_ROLE_NAME)
log.warning("Role {0} already exists, reusing.".format(self._LAMBDA_ROLE_NAME))
else:
raise e
self.state['LambdaRole'] = rinse(role)
_update_state(self.state)
return self.state['LambdaRole']['Role']['Arn']
def create_lambdas(self, update_group_version=True):
if not self.group.get('Lambdas'):
log.info("Lambdas not defined. Moving on...")
return
if self.state and self.state.get('Lambdas'):
log.warning("Previously created Lambdas exists. Remove before creating!")
return
functions = []
self.state['Lambdas'] = []
_update_state(self.state)
for l in self.group['Lambdas']:
log.info("Creating Lambda function '{0}'".format(l['name']))
role_arn = l['role'] if 'role' in l else self._default_lambda_role_arn()
log.info("Assuming role '{0}'".format(role_arn))
zf = shutil.make_archive(
os.path.join(MAGIC_DIR, l['name']), 'zip', l['package'])
log.debug("Lambda deployment Zipped to '{0}'".format(zf))
for retry in range(3):
try:
with open(zf, 'rb') as f:
lr = self._lambda.create_function(
FunctionName=l['name'],
Runtime='python2.7',
Role=role_arn,
Handler=l['handler'],
Code=dict(ZipFile=f.read()),
Environment=dict(Variables=l.get('environment', {})),
Publish=True
)
# Break from retry cycle if lambda is created
break
except ClientError as e: # Catch the right exception
if "The role defined for the function cannot be assumed by Lambda" in str(e):
# Function creation immediately after role creation fails with
# "The role defined for the function cannot be assumed by Lambda."
# See StackOverflow https://goo.gl/eTfqsS
log.warning("We hit AWS bug: the role is not yet propagated."
"Taking 10 sec nap")
sleep(10)
continue
else:
raise(e)
lr['ZipPath'] = zf
self.state['Lambdas'].append(rinse(lr))
_update_state(self.state)
log.info("Lambda function '{0}' created".format(lr['FunctionName']))
# Auto-created alias uses the version of just published function
alias = self._lambda.create_alias(
FunctionName=lr['FunctionName'],
Name=l.get('alias', 'default'),
FunctionVersion=lr['Version'],
Description='Created by greengo'
)
log.info("Lambda alias created. FunctionVersion:'{0}', Arn:'{1}'".format(
alias['FunctionVersion'], alias['AliasArn']))
functions.append({
'Id': l['name'],
'FunctionArn': alias['AliasArn'],
'FunctionConfiguration': l['greengrassConfig']
})
log.debug("Function definition list ready:\n{0}".format(pretty(functions)))
log.info("Creating function definition: '{0}'".format(self.name + '_func_def_1'))
fd = self._gg.create_function_definition(
Name=self.name + '_func_def_1',
InitialVersion={'Functions': functions}
)
self.state['FunctionDefinition'] = rinse(fd)
_update_state(self.state)
fd_ver = self._gg.get_function_definition_version(
FunctionDefinitionId=self.state['FunctionDefinition']['Id'],
FunctionDefinitionVersionId=self.state['FunctionDefinition']['LatestVersion'])
self.state['FunctionDefinition']['LatestVersionDetails'] = rinse(fd_ver)
_update_state(self.state)
if update_group_version:
log.info("Updating group version with new Lambdas...")
self.create_group_version()
log.info("Lambdas and function definition created OK!")
def remove_lambdas(self):
if not (self.state and self.state.get('Lambdas')):
log.info("There seem to be nothing to remove.")
return
if not self.state.get('FunctionDefinition'):
log.warning("Function definition was not created. Moving on...")
else:
log.info("Deleting function definition '{0}' Id='{1}".format(
self.state['FunctionDefinition']['Name'], self.state['FunctionDefinition']['Id']))
self._gg.delete_function_definition(
FunctionDefinitionId=self.state['FunctionDefinition']['Id'])
self.state.pop('FunctionDefinition')
_update_state(self.state)
log.info("Deleting default lambda role '{0}'".format(self._LAMBDA_ROLE_NAME))
self._remove_default_lambda_role()
self.state.pop('LambdaRole')
_update_state(self.state)
for l in self.state['Lambdas']:
log.info("Deleting Lambda function '{0}'".format(l['FunctionName']))
self._lambda.delete_function(FunctionName=l['FunctionName'])
os.remove(l['ZipPath'])
self.state.pop('Lambdas')
_update_state(self.state)
log.info("Lambdas and function definition deleted OK!")
def create_subscriptions(self):
if not self.group.get('Subscriptions'):
log.info("Subscriptions not defined. Moving on...")
return
if self.state and self.state.get('Subscriptions'):
log.warning("Previously created Subscriptions exists. Remove before creating!")
return
# MAYBE: don't create subscription before devices and lambdas?
log.debug("Preparing subscription list...")
subs = []
for i, s in enumerate(self.group['Subscriptions']):
log.debug("Subscription '{0}' - '{1}': {2}->{3}'".format(
i, s['Subject'], s['Source'], s['Target']))
subs.append({
'Id': str(i),
'Source': self._resolve_subscription_destination(s['Source']),
'Target': self._resolve_subscription_destination(s['Target']),
'Subject': s['Subject']
})
log.debug("Subscription list is ready:\n{0}".format(pretty(subs)))
log.info("Creating subscription definition: '{0}'".format(self.name + '_subscription'))
sub_def = self._gg.create_subscription_definition(
Name=self.name + '_subscription',
InitialVersion={'Subscriptions': subs}
)
self.state['Subscriptions'] = rinse(sub_def)
_update_state(self.state)
sub_def_ver = self._gg.get_subscription_definition_version(
SubscriptionDefinitionId=self.state['Subscriptions']['Id'],
SubscriptionDefinitionVersionId=self.state['Subscriptions']['LatestVersion'])
self.state['Subscriptions']['LatestVersionDetails'] = rinse(sub_def_ver)
_update_state(self.state)
log.info("Subscription definition created OK!")
def remove_subscriptions(self):
if not (self.state and self.state.get('Subscriptions')):
log.info("There seem to be nothing to remove.")
return
log.info("Deleting subscription definition '{0}' Id='{1}".format(
self.state['Subscriptions']['Name'], self.state['Subscriptions']['Id']))
self._gg.delete_subscription_definition(
SubscriptionDefinitionId=self.state['Subscriptions']['Id'])
self.state.pop('Subscriptions')
_update_state(self.state)
log.info("Subscription definition deleted OK!")
def _resolve_subscription_destination(self, d):
p = [x.strip() for x in d.split('::')]
if p[0] == 'cloud':
return p[0]
elif p[0] == 'Lambda':
return self._lookup_lambda_qualified_arn(p[1])
elif p[0] == 'Device':
return self._lookup_device_arn(p[1])
elif p[0] == 'GGShadowService':
return p[0]
else:
raise ValueError("Error parsing subscription destination '{0}'. "
"Allowed values: 'Lambda::', 'Device::', or 'cloud'.".format(d))
def _lookup_lambda_qualified_arn(self, name):
details = self.state['FunctionDefinition']['LatestVersionDetails']
for l in details['Definition']['Functions']:
if l['Id'] == name:
return l['FunctionArn']
log.error("Lambda '{0}' not found".format(name))
return None
def _lookup_device_arn(self, name):
raise NotImplementedError("WIP: Devices not implemented yet.")
def create_resources(self):
if not self.group.get('Resources'):
log.info("Resources not defined. Moving on...")
return
if self.state and self.state.get('Resources'):
log.warning("Previously created Resources exist. Remove before creating!")
return
log.debug("Preparing Resources ...")
res = []
for r in self.group['Resources']:
# Convert from a simplified form
resource = dict(Name=r.pop('Name'), Id=r.pop('Id'))
resource['ResourceDataContainer'] = r
res.append(resource)
log.debug("Resources list is ready:\n{0}".format(pretty(res)))
name = self.name + '_resources'
log.info("Creating resource definition: '{0}'".format(name))
res_def = self._gg.create_resource_definition(
Name=name,
InitialVersion={'Resources': res}
)
self.state['Resources'] = rinse(res_def)
_update_state(self.state)
res_def_ver = self._gg.get_resource_definition_version(
ResourceDefinitionId=self.state['Resources']['Id'],
ResourceDefinitionVersionId=self.state['Resources']['LatestVersion'])
self.state['Resources']['LatestVersionDetails'] = rinse(res_def_ver)
_update_state(self.state)
log.info("Resources definition created OK!")
def remove_resources(self):
if not (self.state and self.state.get('Resources')):
log.info("There seem to be nothing to remove.")
return
log.info("Deleting resources definition '{0}' Id='{1}".format(
self.state['Resources']['Name'], self.state['Resources']['Id']))
self._gg.delete_resource_definition(
ResourceDefinitionId=self.state['Resources']['Id'])
self.state.pop('Resources')
_update_state(self.state)
log.info("Resources definition deleted OK!")
def update(self):
self.remove_subscriptions()
self.remove_lambdas()
self.remove_resources()
self.create_resources()
self.create_lambdas()
self.create_subscriptions()
self.create_group_version()
log.info('Updated on Greengrass! Execute "greengo deploy" to apply')
def _create_cores(self):
# TODO: Refactor - handle state internally and make each step callable individually.
# Maybe reflect the dependency tree in self.group/greengo.yaml and traverse it.
self.state['Cores'] = []
cores = []
initial_version = {'Cores': []}
for core in self.group['Cores']:
try:
name = core['name']
log.info("Creating a thing for core {0}".format(name))
keys_cert = rinse(self._iot.create_keys_and_certificate(setAsActive=True))
core_thing = rinse(self._iot.create_thing(thingName=name))
# Attach the previously created Certificate to the created Thing
self._iot.attach_thing_principal(
thingName=name, principal=keys_cert['certificateArn'])
policy = self._create_and_attach_thing_policy(
thing_name=name,
policy_doc=self._create_core_policy(),
thing_cert_arn=keys_cert['certificateArn']
)
cores.append({
'name': name,
'thing': core_thing,
'keys': keys_cert,
'policy': policy
})
self.state['Cores'] = cores
_update_state(self.state)
initial_version['Cores'].append({
'Id': name,
'CertificateArn': keys_cert['certificateArn'],
'SyncShadow': core['SyncShadow'],
'ThingArn': core_thing['thingArn']
})
_save_keys(core['key_path'], name, keys_cert)
self._create_ggc_config_file(core['config_path'], "config.json", core_thing)
except Exception as e:
log.error("Error creating core {0}: {1}".format(name, str(e)))
# Continue with other cores if any
log.debug("Creating Core definition with InitialVersion={0}".format(
initial_version))
core_def = rinse(self._gg.create_core_definition(
Name="{0}_core_def".format(self.group['Group']['name']),
InitialVersion=initial_version
))
log.info("Created Core definition Arn:{0} Id:{1}".format(
core_def['Arn'], core_def['Id']))
return core_def, cores
def _remove_cores(self):
# TODO: protect with try/catch ClientError
for core in self.state['Cores']:
thing_name = core['thing']['thingName']
cert_id = core['keys']['certificateId']
log.info("Removing core thing '{0}'' from core '{1}'".format(
core['name'], thing_name))
log.debug("--- detaching policy: '{0}'".format(core['policy']['policyName']))
self._iot.detach_principal_policy(
policyName=core['policy']['policyName'], principal=core['keys']['certificateArn'])
log.debug("--- deleting policy: '{0}'".format(core['policy']['policyName']))
self._iot.delete_policy(policyName=core['policy']['policyName'])
log.debug("--- deactivating certificate: '{0}'".format(core['keys']['certificateId']))
self._iot.update_certificate(
certificateId=cert_id, newStatus='INACTIVE')
log.debug(
"--- detaching certificate '{0}' from thing '{1}'".format(cert_id, thing_name))
self._iot.detach_thing_principal(
thingName=thing_name, principal=core['keys']['certificateArn'])
sleep(1)
log.debug("--- deleting certificate: '{0}'".format(core['keys']['certificateId']))
self._iot.delete_certificate(certificateId=core['keys']['certificateId'])
log.debug("--- deleting thing: '{0}'".format(core['thing']['thingName']))
self._iot.delete_thing(thingName=core['thing']['thingName'])
core_def = self.state['CoreDefinition']
log.info("Removing core definition '{0}'".format(core_def['Name']))
self._gg.delete_core_definition(CoreDefinitionId=core_def['Id'])
def _create_and_attach_thing_policy(self, thing_name, policy_doc, thing_cert_arn):
try:
policy_name = "{0}-policy".format(thing_name)
policy = rinse(self._iot.create_policy(
policyName=policy_name,
policyDocument=policy_doc)
)
except ClientError as ce:
if ce.response['Error']['Code'] == 'EntityAlreadyExists':
log.warning(
"Policy '{0}' exists. Using existing Policy".format(policy_name))
else:
log.error("Unexpected Error: {0}".format(ce))
raise
self._iot.attach_principal_policy(
policyName=policy_name,
principal=thing_cert_arn
)
log.info("Created policy {0} for {1} and attached to certificate {2}".format(
policy_name, thing_name, thing_cert_arn))
return policy
def _create_core_policy(self):
# TODO: redo as template and read from definition file
core_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"iot:Publish",
"iot:Subscribe",
"iot:Connect",
"iot:Receive",
"iot:GetThingShadow",
"iot:DeleteThingShadow",
"iot:UpdateThingShadow"
],
"Resource": ["arn:aws:iot:" + self._region + ":*:*"]
},
{
"Effect": "Allow",
"Action": [
"greengrass:AssumeRoleForGroup",
"greengrass:CreateCertificate",
"greengrass:GetConnectivityInfo",
"greengrass:GetDeployment",
"greengrass:GetDeploymentArtifacts",
"greengrass:UpdateConnectivityInfo",
"greengrass:UpdateCoreDeploymentStatus"
],
"Resource": ["*"]
}
]
}
return json.dumps(core_policy)
def _create_ggc_config_file(self, path, name, core_thing):
log.info("Creating GGC config file with core {0} at {1}/{2}".format(
core_thing['thingName'], path, name))
config = {
"coreThing": {
"caPath": "root.ca.pem",
"certPath": core_thing['thingName'] + ".pem",
"keyPath": core_thing['thingName'] + ".key",
"thingArn": core_thing['thingArn'],
"iotHost": self._iot_endpoint,
"ggHost": "greengrass.iot." + self._region + ".amazonaws.com",
"keepAlive": 600
},
"runtime": {
"cgroup": {
"useSystemd": "yes"
}
},
"managedRespawn": False
}
_mkdir(path)
with open(path + '/' + name, 'w') as f:
json.dump(config, f, indent=4, separators=(',', ' : '))
def _create_default_lambda_role(self):
# TODO: redo as template and read from definition .yaml
role_policy_document = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Service": "lambda.amazonaws.com"
},
"Action": "sts:AssumeRole"
}
]
}
role = self._iam.create_role(
RoleName=self._LAMBDA_ROLE_NAME,
AssumeRolePolicyDocument=json.dumps(role_policy_document)
)
inline_policy = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:PutLogEvents"
],
"Resource": "arn:aws:logs:*:*:*"
}
]
}
self._iam.put_role_policy(
RoleName=self._LAMBDA_ROLE_NAME,
PolicyName=self._LAMBDA_ROLE_NAME + "-Policy",
PolicyDocument=json.dumps(inline_policy))
return role
def _remove_default_lambda_role(self):
for p in self._iam.list_role_policies(RoleName=self._LAMBDA_ROLE_NAME)['PolicyNames']:
self._iam.delete_role_policy(RoleName=self._LAMBDA_ROLE_NAME, PolicyName=p)
self._iam.delete_role(RoleName=self._LAMBDA_ROLE_NAME)
###############################################################################
# UTILITY FUNCTIONS
def rinse(boto_response):
boto_response.pop('ResponseMetadata')
return boto_response
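# Example (illustrative, values are hypothetical):
#   rinse({'Id': 'abc', 'ResponseMetadata': {'HTTPStatusCode': 200}})
#   -> {'Id': 'abc'}, i.e. the boto3 response without its metadata block.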
def pretty(d):
"""Pretty object as YAML."""
return yaml.safe_dump(d, default_flow_style=False)
def _update_state(group_state):
if not group_state:
os.remove(STATE_FILE)
log.debug("State is empty, removed state file '{0}'".format(STATE_FILE))
return
with open(STATE_FILE, 'w') as f:
json.dump(group_state, f, indent=2,
separators=(',', ': '), sort_keys=True, default=str)
log.debug("Updated group state in state file '{0}'".format(STATE_FILE))
class State(dict):
def __missing__(self, k): # noqa
v = self[k] = type(self)()
return v
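# Example of the auto-vivification above (illustrative, keys are hypothetical):
#   s = State()
#   s['Lambdas']['MyFunction']['arn'] = 'arn:...'
# No KeyError is raised; missing keys are created on access as empty nested
# State() dicts, which makes partially-populated state files easy to handle.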
def _state_exists():
return os.path.exists(STATE_FILE)
def _load_state():
if not _state_exists():
log.debug("Group state file {0} not found, assume new group.".format(STATE_FILE))
return {}
log.debug("Loading group state from {0}".format(STATE_FILE))
with open(STATE_FILE, 'r') as f:
return State(json.load(f))
def _mkdir(path):
try:
os.makedirs(path)
except OSError as exc:
if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
raise
def _save_keys(path, name, keys_cert):
try:
path = path + '/' if not path.endswith('/') else path
_mkdir(path)
certname = path + name + ".pem"
public_key_file = path + name + ".pub"
private_key_file = path + name + ".key"
with open(certname, "w") as pem_file:
pem = keys_cert['certificatePem']
pem_file.write(pem)
log.info("Thing Name: {0} and PEM file: {1}".format(name, certname))
with open(public_key_file, "w") as pub_file:
pub = keys_cert['keyPair']['PublicKey']
pub_file.write(pub)
log.info("Thing Name: {0} Public Key File: {1}".format(name, public_key_file))
with open(private_key_file, "w") as prv_file:
prv = keys_cert['keyPair']['PrivateKey']
prv_file.write(prv)
log.info("Thing Name: {0} Private Key File: {1}".format(name, private_key_file))
except OSError as e:
log.error('Error while writing certificate files. {0}'.format(e))
except KeyError as e:
log.error('Error while writing certificate files: {0}. '
'Check the keys: {1}'.format(e, keys_cert))
def main():
fire.Fire(GroupCommands)
if __name__ == '__main__':
main()
|
the-stack_0_13864 | """
Pontoon requires a very specific subset of functionality implemented in django allauth.
Because of security concerns it is better to keep only selected
views and not allow the user to tamper with the state of an account.
"""
import importlib
from django.urls import path
from django.conf import settings
from django.contrib.auth import views
from allauth.account import views as account_views
from allauth.socialaccount import views as socialaccount_views, providers
if settings.AUTHENTICATION_METHOD == "django":
urlpatterns = [
path("standalone-login/", views.LoginView.as_view(), name="standalone_login"),
path(
"standalone-logout/", views.LogoutView.as_view(), name="standalone_logout",
),
]
else:
urlpatterns = [
path("login/", account_views.login, name="account_login"),
path("logout/", account_views.logout, name="account_logout"),
path("inactive/", account_views.account_inactive, name="account_inactive"),
path(
"social/login/cancelled/",
socialaccount_views.login_cancelled,
name="socialaccount_login_cancelled",
),
path(
"social/login/error/",
socialaccount_views.login_error,
name="socialaccount_login_error",
),
]
for provider in providers.registry.get_list():
try:
prov_mod = importlib.import_module(provider.get_package() + ".urls")
except ImportError:
continue
prov_urlpatterns = getattr(prov_mod, "urlpatterns", None)
if prov_urlpatterns:
urlpatterns += prov_urlpatterns
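# Illustrative example of the loop above (assumed provider, not part of this
# module): if the GitHub provider is enabled, provider.get_package() returns
# "allauth.socialaccount.providers.github", so the module
# "allauth.socialaccount.providers.github.urls" is imported and its OAuth
# login/callback urlpatterns are appended to this app's urlpatterns.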
|
the-stack_0_13865 | # -*- coding: utf-8 -*-
'''
Support for YUM/DNF
.. important::
If you feel that Salt should be using this module to manage packages on a
minion, and it is using a different module (or gives an error similar to
*'pkg.install' is not available*), see :ref:`here
<module-provider-override>`.
.. note::
DNF is fully supported as of version 2015.5.10 and 2015.8.4 (partial
support for DNF was initially added in 2015.8.0), and DNF is used
automatically in place of YUM in Fedora 22 and newer.
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import contextlib
import datetime
import fnmatch
import itertools
import logging
import os
import re
import string
# pylint: disable=import-error,redefined-builtin
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import zip
try:
import yum
HAS_YUM = True
except ImportError:
HAS_YUM = False
from salt.ext.six.moves import configparser
# pylint: enable=import-error,redefined-builtin
# Import Salt libs
import salt.utils.args
import salt.utils.data
import salt.utils.decorators.path
import salt.utils.files
import salt.utils.functools
import salt.utils.itertools
import salt.utils.lazy
import salt.utils.path
import salt.utils.pkg
import salt.utils.pkg.rpm
import salt.utils.systemd
import salt.utils.versions
from salt.utils.versions import LooseVersion as _LooseVersion
from salt.exceptions import (
CommandExecutionError, MinionError, SaltInvocationError
)
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
__HOLD_PATTERN = r'[\w+]+(?:[.-][^-]+)*'
# Define the module's virtual name
__virtualname__ = 'pkg'
def __virtual__():
'''
Confine this module to yum based systems
'''
if __opts__.get('yum_provider') == 'yumpkg_api':
return (False, "Module yumpkg: yumpkg_api provider not available")
try:
os_grain = __grains__['os'].lower()
os_family = __grains__['os_family'].lower()
except Exception:
return (False, "Module yumpkg: no yum based system detected")
enabled = ('amazon', 'xcp', 'xenserver', 'virtuozzolinux', 'virtuozzo')
if os_family == 'redhat' or os_grain in enabled:
return __virtualname__
return (False, "Module yumpkg: no yum based system detected")
def _strip_headers(output, *args):
if not args:
args_lc = ('installed packages',
'available packages',
'updated packages',
'upgraded packages')
else:
args_lc = [x.lower() for x in args]
ret = ''
for line in salt.utils.itertools.split(output, '\n'):
if line.lower() not in args_lc:
ret += line + '\n'
return ret
def _get_hold(line, pattern=__HOLD_PATTERN, full=True):
'''
Resolve a package name from a line containing the hold expression. If the
regex is not matched, None is returned.
yum ==> 2:vim-enhanced-7.4.629-5.el6.*
dnf ==> vim-enhanced-2:7.4.827-1.fc22.*
'''
if full:
if _yum() == 'dnf':
lock_re = r'({0}-\S+)'.format(pattern)
else:
lock_re = r'(\d+:{0}-\S+)'.format(pattern)
else:
if _yum() == 'dnf':
lock_re = r'({0}-\S+)'.format(pattern)
else:
lock_re = r'\d+:({0}-\S+)'.format(pattern)
match = re.search(lock_re, line)
if match:
if not full:
woarch = match.group(1).rsplit('.', 1)[0]
worel = woarch.rsplit('-', 1)[0]
return worel.rsplit('-', 1)[0]
else:
return match.group(1)
return None
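# Illustrative examples for the docstring patterns above, assuming a yum
# (rather than dnf) based system:
#   _get_hold('2:vim-enhanced-7.4.629-5.el6.*')              -> '2:vim-enhanced-7.4.629-5.el6.*'
#   _get_hold('2:vim-enhanced-7.4.629-5.el6.*', full=False)  -> 'vim-enhanced'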
def _yum():
'''
return yum or dnf depending on version
'''
contextkey = 'yum_bin'
if contextkey not in __context__:
if 'fedora' in __grains__['os'].lower() \
and int(__grains__['osrelease']) >= 22:
__context__[contextkey] = 'dnf'
else:
__context__[contextkey] = 'yum'
return __context__[contextkey]
def _yum_pkginfo(output):
'''
Parse yum/dnf output (which could contain irregular line breaks if package
names are long) retrieving the name, version, etc., and return a list of
pkginfo namedtuples.
'''
cur = {}
keys = itertools.cycle(('name', 'version', 'repoid'))
values = salt.utils.itertools.split(_strip_headers(output))
osarch = __grains__['osarch']
for (key, value) in zip(keys, values):
if key == 'name':
try:
cur['name'], cur['arch'] = value.rsplit('.', 1)
except ValueError:
cur['name'] = value
cur['arch'] = osarch
cur['name'] = salt.utils.pkg.rpm.resolve_name(cur['name'],
cur['arch'],
osarch)
else:
if key == 'version':
# Support packages with no 'Release' parameter
value = value.rstrip('-')
elif key == 'repoid':
# Installed packages show a '@' at the beginning
value = value.lstrip('@')
cur[key] = value
if key == 'repoid':
# We're done with this package, create the pkginfo namedtuple
pkginfo = salt.utils.pkg.rpm.pkginfo(**cur)
# Clear the dict for the next package
cur = {}
# Yield the namedtuple
if pkginfo is not None:
yield pkginfo
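# Illustrative input/output for the parser above (assumed yum output format):
# a line such as "bash.x86_64  4.2.46-30.el7  @base" yields a pkginfo
# namedtuple with name='bash', arch='x86_64', version='4.2.46-30.el7' and
# repoid='base' (the leading '@' that marks installed packages is stripped).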
def _check_versionlock():
'''
Ensure that the appropriate versionlock plugin is present
'''
if _yum() == 'dnf':
if int(__grains__.get('osmajorrelease')) >= 26:
if six.PY3:
vl_plugin = 'python3-dnf-plugin-versionlock'
else:
vl_plugin = 'python2-dnf-plugin-versionlock'
else:
if six.PY3:
vl_plugin = 'python3-dnf-plugins-extras-versionlock'
else:
vl_plugin = 'python-dnf-plugins-extras-versionlock'
else:
vl_plugin = 'yum-versionlock' \
if __grains__.get('osmajorrelease') == '5' \
else 'yum-plugin-versionlock'
if vl_plugin not in list_pkgs():
raise SaltInvocationError(
'Cannot proceed, {0} is not installed.'.format(vl_plugin)
)
def _get_options(**kwargs):
'''
Returns a list of options to be used in the yum/dnf command, based on the
kwargs passed.
'''
# Get repo options from the kwargs
fromrepo = kwargs.pop('fromrepo', '')
repo = kwargs.pop('repo', '')
disablerepo = kwargs.pop('disablerepo', '')
enablerepo = kwargs.pop('enablerepo', '')
disableexcludes = kwargs.pop('disableexcludes', '')
branch = kwargs.pop('branch', '')
setopt = kwargs.pop('setopt', None)
if setopt is None:
setopt = []
else:
setopt = salt.utils.args.split_input(setopt)
get_extra_options = kwargs.pop('get_extra_options', False)
# Support old 'repo' argument
if repo and not fromrepo:
fromrepo = repo
ret = []
if fromrepo:
log.info('Restricting to repo \'%s\'', fromrepo)
ret.extend(['--disablerepo=*', '--enablerepo={0}'.format(fromrepo)])
else:
if disablerepo:
targets = [disablerepo] \
if not isinstance(disablerepo, list) \
else disablerepo
log.info('Disabling repo(s): %s', ', '.join(targets))
ret.extend(
['--disablerepo={0}'.format(x) for x in targets]
)
if enablerepo:
targets = [enablerepo] \
if not isinstance(enablerepo, list) \
else enablerepo
log.info('Enabling repo(s): %s', ', '.join(targets))
ret.extend(['--enablerepo={0}'.format(x) for x in targets])
if disableexcludes:
log.info('Disabling excludes for \'%s\'', disableexcludes)
ret.append('--disableexcludes={0}'.format(disableexcludes))
if branch:
log.info('Adding branch \'%s\'', branch)
ret.append('--branch={0}'.format(branch))
for item in setopt:
ret.extend(['--setopt', six.text_type(item)])
if get_extra_options:
# sorting here to make order uniform, makes unit testing more reliable
for key in sorted(kwargs):
if key.startswith('__'):
continue
value = kwargs[key]
if isinstance(value, six.string_types):
log.info('Found extra option --%s=%s', key, value)
ret.append('--{0}={1}'.format(key, value))
elif value is True:
log.info('Found extra option --%s', key)
ret.append('--{0}'.format(key))
if ret:
log.info('Adding extra options: %s', ret)
return ret
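# Illustrative example (hypothetical kwargs):
#   _get_options(fromrepo='epel', disableexcludes='main', branch='stable')
#   -> ['--disablerepo=*', '--enablerepo=epel',
#       '--disableexcludes=main', '--branch=stable']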
def _get_yum_config():
'''
Returns a dict representing the yum config options and values.
We try to pull all of the yum config options into a standard dict object.
This is currently only used to get the reposdir settings, but could be used
for other things if needed.
If the yum python library is available, use that, which will give us all of
the options, including all of the defaults not specified in the yum config.
Additionally, they will all be of the correct object type.
If the yum library is not available, we try to read the yum.conf
directly ourselves with a minimal set of "defaults".
'''
# in case of any non-fatal failures, these defaults will be used
conf = {
'reposdir': ['/etc/yum/repos.d', '/etc/yum.repos.d'],
}
if HAS_YUM:
try:
yb = yum.YumBase()
yb.preconf.init_plugins = False
for name, value in six.iteritems(yb.conf):
conf[name] = value
except (AttributeError, yum.Errors.ConfigError) as exc:
raise CommandExecutionError(
'Could not query yum config: {0}'.format(exc)
)
else:
# fall back to parsing the config ourselves
# Look for the config the same order yum does
fn = None
paths = ('/etc/yum/yum.conf', '/etc/yum.conf', '/etc/dnf/dnf.conf')
for path in paths:
if os.path.exists(path):
fn = path
break
if not fn:
raise CommandExecutionError(
'No suitable yum config file found in: {0}'.format(paths)
)
cp = configparser.ConfigParser()
try:
cp.read(fn)
except (IOError, OSError) as exc:
raise CommandExecutionError(
'Unable to read from {0}: {1}'.format(fn, exc)
)
if cp.has_section('main'):
for opt in cp.options('main'):
if opt in ('reposdir', 'commands', 'excludes'):
# these options are expected to be lists
conf[opt] = [x.strip()
for x in cp.get('main', opt).split(',')]
else:
conf[opt] = cp.get('main', opt)
else:
log.warning(
'Could not find [main] section in %s, using internal '
'defaults',
fn
)
return conf
def _get_yum_config_value(name):
'''
Look for a specific config variable and return its value
'''
conf = _get_yum_config()
if name in conf.keys():
return conf.get(name)
return None
def _normalize_basedir(basedir=None):
'''
Takes a basedir argument as a string or a list. If the string or list is
empty, then look up the default from the 'reposdir' option in the yum
config.
Returns a list of directories.
'''
# if we are passed a string (for backward compatibility), convert to a list
if isinstance(basedir, six.string_types):
basedir = [x.strip() for x in basedir.split(',')]
if basedir is None:
basedir = []
# nothing specified, so use the reposdir option as the default
if not basedir:
basedir = _get_yum_config_value('reposdir')
if not isinstance(basedir, list) or not basedir:
raise SaltInvocationError('Could not determine any repo directories')
return basedir
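# Illustrative behavior (paths are hypothetical):
#   _normalize_basedir('/etc/yum.repos.d, /opt/custom.repos.d')
#       -> ['/etc/yum.repos.d', '/opt/custom.repos.d']
#   _normalize_basedir(None) -> whatever 'reposdir' resolves to in the yum
#       config, e.g. the fallback ['/etc/yum/repos.d', '/etc/yum.repos.d']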
def normalize_name(name):
'''
Strips the architecture from the specified package name, if necessary.
Circumstances where this would be done include:
* If the arch is 32 bit and the package name ends in a 32-bit arch.
* If the arch matches the OS arch, or is ``noarch``.
CLI Example:
.. code-block:: bash
salt '*' pkg.normalize_name zsh.x86_64
'''
try:
arch = name.rsplit('.', 1)[-1]
if arch not in salt.utils.pkg.rpm.ARCHES + ('noarch',):
return name
except ValueError:
return name
if arch in (__grains__['osarch'], 'noarch') \
or salt.utils.pkg.rpm.check_32(arch, osarch=__grains__['osarch']):
return name[:-(len(arch) + 1)]
return name
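# Illustrative examples on an x86_64 minion (assumed osarch):
#   normalize_name('zsh.x86_64') -> 'zsh'       (arch matches osarch, stripped)
#   normalize_name('zsh.noarch') -> 'zsh'
#   normalize_name('zsh.i686')   -> 'zsh.i686'  (32-bit arch kept so the
#                                                package stays distinguishable)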
def latest_version(*names, **kwargs):
'''
Return the latest version of the named package available for upgrade or
installation. If more than one package name is specified, a dict of
name/version pairs is returned.
If the latest version of a given package is already installed, an empty
string will be returned for that package.
A specific repo can be requested using the ``fromrepo`` keyword argument,
and the ``disableexcludes`` option is also supported.
.. versionadded:: 2014.7.0
Support for the ``disableexcludes`` option
CLI Example:
.. code-block:: bash
salt '*' pkg.latest_version <package name>
salt '*' pkg.latest_version <package name> fromrepo=epel-testing
salt '*' pkg.latest_version <package name> disableexcludes=main
salt '*' pkg.latest_version <package1> <package2> <package3> ...
'''
refresh = salt.utils.data.is_true(kwargs.pop('refresh', True))
if len(names) == 0:
return ''
options = _get_options(**kwargs)
# Refresh before looking for the latest version available
if refresh:
refresh_db(**kwargs)
cur_pkgs = list_pkgs(versions_as_list=True)
# Get available versions for specified package(s)
cmd = [_yum(), '--quiet']
cmd.extend(options)
cmd.extend(['list', 'available'])
cmd.extend(names)
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
if out['retcode'] != 0:
if out['stderr']:
# Check first if this is just a matter of the packages being
# up-to-date.
if not all([x in cur_pkgs for x in names]):
log.error(
'Problem encountered getting latest version for the '
'following package(s): %s. Stderr follows: \n%s',
', '.join(names),
out['stderr']
)
updates = []
else:
# Sort by version number (highest to lowest) for loop below
updates = sorted(
_yum_pkginfo(out['stdout']),
key=lambda pkginfo: _LooseVersion(pkginfo.version),
reverse=True
)
def _check_cur(pkg):
if pkg.name in cur_pkgs:
for installed_version in cur_pkgs[pkg.name]:
# If any installed version is greater than (or equal to) the
# one found by yum/dnf list available, then it is not an
# upgrade.
if salt.utils.versions.compare(ver1=installed_version,
oper='>=',
ver2=pkg.version,
cmp_func=version_cmp):
return False
# pkg.version is greater than all installed versions
return True
else:
# Package is not installed
return True
ret = {}
for name in names:
# Derive desired pkg arch (for arch-specific packages) based on the
# package name(s) passed to the function. On a 64-bit OS, "pkgname"
# would be assumed to match the osarch, while "pkgname.i686" would
# have an arch of "i686". This desired arch is then compared against
# the updates derived from _yum_pkginfo() above, so that we can
# distinguish an update for a 32-bit version of a package from its
# 64-bit counterpart.
try:
arch = name.rsplit('.', 1)[-1]
if arch not in salt.utils.pkg.rpm.ARCHES:
arch = __grains__['osarch']
except ValueError:
arch = __grains__['osarch']
# This loop will iterate over the updates derived by _yum_pkginfo()
# above, which have been sorted descendingly by version number,
# ensuring that the latest available version for the named package is
# examined first. The call to _check_cur() will ensure that a package
# seen by yum as "available" will only be detected as an upgrade if it
# has a version higher than all currently-installed versions of the
# package.
for pkg in (x for x in updates if x.name == name):
# This if/or statement makes sure that we account for noarch
# packages as well as arch-specific packages.
if pkg.arch == 'noarch' or pkg.arch == arch \
or salt.utils.pkg.rpm.check_32(pkg.arch):
if _check_cur(pkg):
ret[name] = pkg.version
# no need to check another match, if there was one
break
else:
ret[name] = ''
# Return a string if only one package name passed
if len(names) == 1:
return ret[names[0]]
return ret
# available_version is being deprecated
available_version = salt.utils.functools.alias_function(latest_version, 'available_version')
def upgrade_available(name, **kwargs):
'''
Check whether or not an upgrade is available for a given package
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade_available <package name>
'''
return latest_version(name, **kwargs) != ''
def version(*names, **kwargs):
'''
Returns a string representing the package version or an empty string if not
installed. If more than one package name is specified, a dict of
name/version pairs is returned.
CLI Example:
.. code-block:: bash
salt '*' pkg.version <package name>
salt '*' pkg.version <package1> <package2> <package3> ...
'''
return __salt__['pkg_resource.version'](*names, **kwargs)
def version_cmp(pkg1, pkg2, ignore_epoch=False):
'''
.. versionadded:: 2015.5.4
Do a cmp-style comparison on two packages. Return -1 if pkg1 < pkg2, 0 if
pkg1 == pkg2, and 1 if pkg1 > pkg2. Return None if there was a problem
making the comparison.
ignore_epoch : False
Set to ``True`` to ignore the epoch when comparing versions
.. versionadded:: 2015.8.10,2016.3.2
CLI Example:
.. code-block:: bash
salt '*' pkg.version_cmp '0.2-001' '0.2.0.1-002'
'''
return __salt__['lowpkg.version_cmp'](pkg1, pkg2, ignore_epoch=ignore_epoch)
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed as a dict. By default, the dict
contains versions as a comma separated string::
{'<package_name>': '<version>[,<version>...]'}
versions_as_list:
If set to true, the versions are provided as a list
{'<package_name>': ['<version>', '<version>']}
attr:
If a list of package attributes is specified, returned value will
contain them in addition to version, eg.::
{'<package_name>': [{'version' : 'version', 'arch' : 'arch'}]}
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
``install_date``, ``install_date_time_t``.
If ``all`` is specified, all valid attributes will be returned.
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
salt '*' pkg.list_pkgs attr=version,arch
salt '*' pkg.list_pkgs attr='["version", "arch"]'
'''
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.data.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
attr = kwargs.get('attr')
if attr is not None:
attr = salt.utils.args.split_input(attr)
contextkey = 'pkg.list_pkgs'
if contextkey not in __context__:
ret = {}
cmd = ['rpm', '-qa', '--queryformat',
salt.utils.pkg.rpm.QUERYFORMAT.replace('%{REPOID}', '(none)') + '\n']
output = __salt__['cmd.run'](cmd,
python_shell=False,
output_loglevel='trace')
for line in output.splitlines():
pkginfo = salt.utils.pkg.rpm.parse_pkginfo(
line,
osarch=__grains__['osarch']
)
if pkginfo is not None:
# see rpm version string rules available at https://goo.gl/UGKPNd
pkgver = pkginfo.version
epoch = ''
release = ''
if ':' in pkgver:
epoch, pkgver = pkgver.split(":", 1)
if '-' in pkgver:
pkgver, release = pkgver.split("-", 1)
all_attr = {
'epoch': epoch,
'version': pkgver,
'release': release,
'arch': pkginfo.arch,
'install_date': pkginfo.install_date,
'install_date_time_t': pkginfo.install_date_time_t
}
__salt__['pkg_resource.add_pkg'](ret, pkginfo.name, all_attr)
for pkgname in ret:
ret[pkgname] = sorted(ret[pkgname], key=lambda d: d['version'])
__context__[contextkey] = ret
return __salt__['pkg_resource.format_pkg_list'](
__context__[contextkey],
versions_as_list,
attr)
def list_repo_pkgs(*args, **kwargs):
'''
.. versionadded:: 2014.1.0
.. versionchanged:: 2014.7.0
All available versions of each package are now returned. This required
a slight modification to the structure of the return dict. The return
data shown below reflects the updated return dict structure. Note that
packages which are version-locked using :py:mod:`pkg.hold
<salt.modules.yumpkg.hold>` will only show the currently-installed
version, as locking a package will make other versions appear
unavailable to yum/dnf.
.. versionchanged:: 2017.7.0
By default, the versions for each package are no longer organized by
repository. To get results organized by repository, use
``byrepo=True``.
Returns all available packages. Optionally, package names (and name globs)
can be passed and the results will be filtered to packages matching those
names. This is recommended as it speeds up the function considerably.
.. warning::
Running this function on RHEL/CentOS 6 and earlier will be more
resource-intensive, as the version of yum that ships with older
RHEL/CentOS has no yum subcommand for listing packages from a
repository. Thus, a ``yum list installed`` and ``yum list available``
are run, which generates a lot of output, which must then be analyzed
to determine which package information to include in the return data.
This function can be helpful in discovering the version or repo to specify
in a :mod:`pkg.installed <salt.states.pkg.installed>` state.
The return data will be a dictionary mapping package names to a list of
version numbers, ordered from newest to oldest. If ``byrepo`` is set to
``True``, then the return dictionary will contain repository names at the
top level, and each repository will map packages to lists of version
numbers. For example:
.. code-block:: python
# With byrepo=False (default)
{
'bash': ['4.1.2-15.el6_5.2',
'4.1.2-15.el6_5.1',
'4.1.2-15.el6_4'],
'kernel': ['2.6.32-431.29.2.el6',
'2.6.32-431.23.3.el6',
'2.6.32-431.20.5.el6',
'2.6.32-431.20.3.el6',
'2.6.32-431.17.1.el6',
'2.6.32-431.11.2.el6',
'2.6.32-431.5.1.el6',
'2.6.32-431.3.1.el6',
'2.6.32-431.1.2.0.1.el6',
'2.6.32-431.el6']
}
# With byrepo=True
{
'base': {
'bash': ['4.1.2-15.el6_4'],
'kernel': ['2.6.32-431.el6']
},
'updates': {
'bash': ['4.1.2-15.el6_5.2', '4.1.2-15.el6_5.1'],
'kernel': ['2.6.32-431.29.2.el6',
'2.6.32-431.23.3.el6',
'2.6.32-431.20.5.el6',
'2.6.32-431.20.3.el6',
'2.6.32-431.17.1.el6',
'2.6.32-431.11.2.el6',
'2.6.32-431.5.1.el6',
'2.6.32-431.3.1.el6',
'2.6.32-431.1.2.0.1.el6']
}
}
fromrepo : None
Only include results from the specified repo(s). Multiple repos can be
specified, comma-separated.
enablerepo (ignored if ``fromrepo`` is specified)
Specify a disabled package repository (or repositories) to enable.
(e.g., ``yum --enablerepo='somerepo'``)
.. versionadded:: 2017.7.0
disablerepo (ignored if ``fromrepo`` is specified)
Specify an enabled package repository (or repositories) to disable.
(e.g., ``yum --disablerepo='somerepo'``)
.. versionadded:: 2017.7.0
byrepo : False
When ``True``, the return data for each package will be organized by
repository.
.. versionadded:: 2017.7.0
cacheonly : False
When ``True``, the repo information will be retrieved from the cached
repo metadata. This is equivalent to passing the ``-C`` option to
yum/dnf.
.. versionadded:: 2017.7.0
setopt
A comma-separated or Python list of key=value options. This list will
be expanded and ``--setopt`` prepended to each in the yum/dnf command
that is run.
.. versionadded:: Fluorine
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_repo_pkgs
salt '*' pkg.list_repo_pkgs foo bar baz
salt '*' pkg.list_repo_pkgs 'samba4*' fromrepo=base,updates
salt '*' pkg.list_repo_pkgs 'python2-*' byrepo=True
'''
byrepo = kwargs.pop('byrepo', False)
cacheonly = kwargs.pop('cacheonly', False)
fromrepo = kwargs.pop('fromrepo', '') or ''
disablerepo = kwargs.pop('disablerepo', '') or ''
enablerepo = kwargs.pop('enablerepo', '') or ''
repo_arg = _get_options(fromrepo=fromrepo, **kwargs)
if fromrepo and not isinstance(fromrepo, list):
try:
fromrepo = [x.strip() for x in fromrepo.split(',')]
except AttributeError:
fromrepo = [x.strip() for x in six.text_type(fromrepo).split(',')]
if disablerepo and not isinstance(disablerepo, list):
try:
disablerepo = [x.strip() for x in disablerepo.split(',')
if x != '*']
except AttributeError:
disablerepo = [x.strip() for x in six.text_type(disablerepo).split(',')
if x != '*']
if enablerepo and not isinstance(enablerepo, list):
try:
enablerepo = [x.strip() for x in enablerepo.split(',')
if x != '*']
except AttributeError:
enablerepo = [x.strip() for x in six.text_type(enablerepo).split(',')
if x != '*']
if fromrepo:
repos = fromrepo
else:
repos = [
repo_name for repo_name, repo_info in six.iteritems(list_repos())
if repo_name in enablerepo
or (repo_name not in disablerepo
and six.text_type(repo_info.get('enabled', '1')) == '1')
]
ret = {}
def _check_args(args, name):
'''
Do glob matching on args and return True if a match was found.
Otherwise, return False
'''
for arg in args:
if fnmatch.fnmatch(name, arg):
return True
return False
def _parse_output(output, strict=False):
for pkg in _yum_pkginfo(output):
if strict and (pkg.repoid not in repos
or not _check_args(args, pkg.name)):
continue
repo_dict = ret.setdefault(pkg.repoid, {})
version_list = repo_dict.setdefault(pkg.name, set())
version_list.add(pkg.version)
yum_version = None if _yum() != 'yum' else _LooseVersion(
__salt__['cmd.run'](
['yum', '--version'],
python_shell=False
).splitlines()[0].strip()
)
# Really old version of yum; does not even have --showduplicates option
if yum_version and yum_version < _LooseVersion('3.2.13'):
cmd_prefix = ['yum', '--quiet']
if cacheonly:
cmd_prefix.append('-C')
cmd_prefix.append('list')
for pkg_src in ('installed', 'available'):
# Check installed packages first
out = __salt__['cmd.run_all'](
cmd_prefix + [pkg_src],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False
)
if out['retcode'] == 0:
_parse_output(out['stdout'], strict=True)
# The --showduplicates option is added in 3.2.13, but the
# repository-packages subcommand is only in 3.4.3 and newer
elif yum_version and yum_version < _LooseVersion('3.4.3'):
cmd_prefix = ['yum', '--quiet', '--showduplicates']
if cacheonly:
cmd_prefix.append('-C')
cmd_prefix.append('list')
for pkg_src in ('installed', 'available'):
# Check installed packages first
out = __salt__['cmd.run_all'](
cmd_prefix + [pkg_src],
output_loglevel='trace',
ignore_retcode=True,
python_shell=False
)
if out['retcode'] == 0:
_parse_output(out['stdout'], strict=True)
else:
for repo in repos:
cmd = [_yum(), '--quiet', '--showduplicates',
'repository-packages', repo, 'list']
if cacheonly:
cmd.append('-C')
# Can't concatenate because args is a tuple, using list.extend()
cmd.extend(args)
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
if out['retcode'] != 0 and 'Error:' in out['stdout']:
continue
_parse_output(out['stdout'])
if byrepo:
for reponame in ret:
# Sort versions newest to oldest
for pkgname in ret[reponame]:
sorted_versions = sorted(
[_LooseVersion(x) for x in ret[reponame][pkgname]],
reverse=True
)
ret[reponame][pkgname] = [x.vstring for x in sorted_versions]
return ret
else:
byrepo_ret = {}
for reponame in ret:
for pkgname in ret[reponame]:
byrepo_ret.setdefault(pkgname, []).extend(ret[reponame][pkgname])
for pkgname in byrepo_ret:
sorted_versions = sorted(
[_LooseVersion(x) for x in byrepo_ret[pkgname]],
reverse=True
)
byrepo_ret[pkgname] = [x.vstring for x in sorted_versions]
return byrepo_ret
def list_upgrades(refresh=True, **kwargs):
'''
Check whether or not an upgrade is available for all packages
The ``fromrepo``, ``enablerepo``, and ``disablerepo`` arguments are
supported, as used in pkg states, and the ``disableexcludes`` option is
also supported.
.. versionadded:: 2014.7.0
Support for the ``disableexcludes`` option
CLI Example:
.. code-block:: bash
salt '*' pkg.list_upgrades
'''
options = _get_options(**kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(check_update=False, **kwargs)
cmd = [_yum(), '--quiet']
cmd.extend(options)
cmd.extend(['list', 'upgrades' if _yum() == 'dnf' else 'updates'])
out = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
if out['retcode'] != 0 and 'Error:' in out['stdout']:
return {}
return dict([(x.name, x.version) for x in _yum_pkginfo(out['stdout'])])
# Preserve expected CLI usage (yum list updates)
list_updates = salt.utils.functools.alias_function(list_upgrades, 'list_updates')
def list_downloaded():
'''
.. versionadded:: 2017.7.0
List prefetched packages downloaded by Yum in the local disk.
CLI example:
.. code-block:: bash
salt '*' pkg.list_downloaded
'''
CACHE_DIR = os.path.join('/var/cache/', _yum())
ret = {}
for root, dirnames, filenames in salt.utils.path.os_walk(CACHE_DIR):
for filename in fnmatch.filter(filenames, '*.rpm'):
package_path = os.path.join(root, filename)
pkg_info = __salt__['lowpkg.bin_pkg_info'](package_path)
pkg_timestamp = int(os.path.getctime(package_path))
ret.setdefault(pkg_info['name'], {})[pkg_info['version']] = {
'path': package_path,
'size': os.path.getsize(package_path),
'creation_date_time_t': pkg_timestamp,
'creation_date_time': datetime.datetime.fromtimestamp(pkg_timestamp).isoformat(),
}
return ret
def info_installed(*names, **kwargs):
'''
.. versionadded:: 2015.8.1
Return the information of the named package(s), installed on the system.
:param all_versions:
Include information for all versions of the packages installed on the minion.
CLI example:
.. code-block:: bash
salt '*' pkg.info_installed <package1>
salt '*' pkg.info_installed <package1> <package2> <package3> ...
salt '*' pkg.info_installed <package1> <package2> <package3> all_versions=True
'''
all_versions = kwargs.get('all_versions', False)
ret = dict()
for pkg_name, pkgs_nfo in __salt__['lowpkg.info'](*names, **kwargs).items():
pkg_nfo = pkgs_nfo if all_versions else [pkgs_nfo]
for _nfo in pkg_nfo:
t_nfo = dict()
# Translate dpkg-specific keys to a common structure
for key, value in _nfo.items():
if key == 'source_rpm':
t_nfo['source'] = value
else:
t_nfo[key] = value
if not all_versions:
ret[pkg_name] = t_nfo
else:
ret.setdefault(pkg_name, []).append(t_nfo)
return ret
def refresh_db(**kwargs):
'''
Check the yum repos for updated packages
Returns:
- ``True``: Updates are available
- ``False``: An error occurred
- ``None``: No updates are available
repo
Refresh just the specified repo
disablerepo
Do not refresh the specified repo
enablerepo
Refresh a disabled repo using this option
branch
Add the specified branch when refreshing
disableexcludes
Disable the excludes defined in your config files. Takes one of three
options:
- ``all`` - disable all excludes
- ``main`` - disable excludes defined in [main] in yum.conf
- ``repoid`` - disable excludes defined for that repo
setopt
A comma-separated or Python list of key=value options. This list will
be expanded and ``--setopt`` prepended to each in the yum/dnf command
that is run.
.. versionadded:: Fluorine
CLI Example:
.. code-block:: bash
salt '*' pkg.refresh_db
'''
# Remove rtag file to keep multiple refreshes from happening in pkg states
salt.utils.pkg.clear_rtag(__opts__)
retcodes = {
100: True,
0: None,
1: False,
}
check_update_ = kwargs.pop('check_update', True)
options = _get_options(**kwargs)
clean_cmd = [_yum(), '--quiet', '--assumeyes', 'clean', 'expire-cache']
update_cmd = [_yum(), '--quiet', '--assumeyes', 'check-update']
if __grains__.get('os_family') == 'RedHat' \
and __grains__.get('osmajorrelease') == 7:
# This feature is disabled because it is not used by Salt and adds a
# lot of extra time to the command with large repos like EPEL
update_cmd.append('--setopt=autocheck_running_kernel=false')
clean_cmd.extend(options)
update_cmd.extend(options)
__salt__['cmd.run'](clean_cmd, python_shell=False)
if check_update_:
result = __salt__['cmd.retcode'](update_cmd,
output_loglevel='trace',
ignore_retcode=True,
python_shell=False)
return retcodes.get(result, False)
return True
def clean_metadata(**kwargs):
'''
.. versionadded:: 2014.1.0
Cleans local yum metadata. Functionally identical to :mod:`refresh_db()
<salt.modules.yumpkg.refresh_db>`.
CLI Example:
.. code-block:: bash
salt '*' pkg.clean_metadata
'''
return refresh_db(**kwargs)
class AvailablePackages(salt.utils.lazy.LazyDict):
def __init__(self, *args, **kwargs):
super(AvailablePackages, self).__init__()
self._args = args
self._kwargs = kwargs
def _load(self, key):
self._load_all()
return True
def _load_all(self):
self._dict = list_repo_pkgs(*self._args, **self._kwargs)
self.loaded = True
def install(name=None,
refresh=False,
skip_verify=False,
pkgs=None,
sources=None,
downloadonly=False,
reinstall=False,
normalize=True,
update_holds=False,
saltenv='base',
ignore_epoch=False,
**kwargs):
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Install the passed package(s), add refresh=True to clean the yum database
before package is installed.
name
The name of the package to be installed. Note that this parameter is
ignored if either "pkgs" or "sources" is passed. Additionally, please
note that this option can only be used to install packages from a
software repository. To install a package file manually, use the
"sources" option.
32-bit packages can be installed on 64-bit systems by appending the
architecture designation (``.i686``, ``.i586``, etc.) to the end of the
package name.
CLI Example:
.. code-block:: bash
salt '*' pkg.install <package name>
refresh
Whether or not to update the yum database before executing.
reinstall
Specifying reinstall=True will use ``yum reinstall`` rather than
``yum install`` for requested packages that are already installed.
If a version is specified with the requested package, then
``yum reinstall`` will only be used if the installed version
matches the requested version.
Works with ``sources`` when the package header of the source can be
matched to the name and version of an installed package.
.. versionadded:: 2014.7.0
skip_verify
Skip the GPG verification check (e.g., ``--nogpgcheck``)
downloadonly
Only download the packages, do not install.
version
Install a specific version of the package, e.g. 1.2.3-4.el5. Ignored
if "pkgs" or "sources" is passed.
.. versionchanged:: 2018.3.0
version can now contain comparison operators (e.g. ``>1.2.3``,
``<=2.0``, etc.)
update_holds : False
If ``True``, and this function would update the package version, any
packages held using the yum/dnf "versionlock" plugin will be unheld so
that they can be updated. Otherwise, if this function attempts to
update a held package, the held package(s) will be skipped and an
error will be raised.
.. versionadded:: 2016.11.0
setopt
A comma-separated or Python list of key=value options. This list will
be expanded and ``--setopt`` prepended to each in the yum/dnf command
that is run.
CLI Example:
.. code-block:: bash
salt '*' pkg.install foo setopt='obsoletes=0,plugins=0'
.. versionadded:: Fluorine
Repository Options:
fromrepo
Specify a package repository (or repositories) from which to install.
(e.g., ``yum --disablerepo='*' --enablerepo='somerepo'``)
enablerepo (ignored if ``fromrepo`` is specified)
Specify a disabled package repository (or repositories) to enable.
(e.g., ``yum --enablerepo='somerepo'``)
disablerepo (ignored if ``fromrepo`` is specified)
Specify an enabled package repository (or repositories) to disable.
(e.g., ``yum --disablerepo='somerepo'``)
disableexcludes
Disable exclude from main, for a repo or for everything.
(e.g., ``yum --disableexcludes='main'``)
.. versionadded:: 2014.7.0
ignore_epoch : False
Only used when the version of a package is specified using a comparison
operator (e.g. ``>4.1``). If set to ``True``, then the epoch will be
ignored when comparing the currently-installed version to the desired
version.
.. versionadded:: 2018.3.0
Multiple Package Installation Options:
pkgs
A list of packages to install from a software repository. Must be
passed as a python list. A specific version number can be specified
by using a single-element dict representing the package and its
version.
CLI Examples:
.. code-block:: bash
salt '*' pkg.install pkgs='["foo", "bar"]'
salt '*' pkg.install pkgs='["foo", {"bar": "1.2.3-4.el5"}]'
sources
A list of RPM packages to install. Must be passed as a list of dicts,
with the keys being package names, and the values being the source URI
or local path to the package.
CLI Example:
.. code-block:: bash
salt '*' pkg.install sources='[{"foo": "salt://foo.rpm"}, {"bar": "salt://bar.rpm"}]'
normalize : True
Normalize the package name by removing the architecture. This is useful
for poorly created packages which might include the architecture as an
actual part of the name such as kernel modules which match a specific
kernel version.
.. code-block:: bash
salt -G role:nsd pkg.install gpfs.gplbin-2.6.32-279.31.1.el6.x86_64 normalize=False
.. versionadded:: 2014.7.0
diff_attr:
If a list of package attributes is specified, returned value will
contain them, eg.::
{'<package>': {
'old': {
'version': '<old-version>',
'arch': '<old-arch>'},
'new': {
'version': '<new-version>',
'arch': '<new-arch>'}}}
Valid attributes are: ``epoch``, ``version``, ``release``, ``arch``,
``install_date``, ``install_date_time_t``.
If ``all`` is specified, all valid attributes will be returned.
.. versionadded:: 2018.3.0
Returns a dict containing the new package names and versions::
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
If an attribute list in diff_attr is specified, the dict will also contain
any specified attribute, eg.::
{'<package>': {
'old': {
'version': '<old-version>',
'arch': '<old-arch>'},
'new': {
'version': '<new-version>',
'arch': '<new-arch>'}}}
'''
options = _get_options(**kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(**kwargs)
reinstall = salt.utils.data.is_true(reinstall)
try:
pkg_params, pkg_type = __salt__['pkg_resource.parse_targets'](
name, pkgs, sources, saltenv=saltenv, normalize=normalize, **kwargs
)
except MinionError as exc:
raise CommandExecutionError(exc)
if pkg_params is None or len(pkg_params) == 0:
return {}
version_num = kwargs.get('version')
diff_attr = kwargs.get('diff_attr')
old = list_pkgs(versions_as_list=False, attr=diff_attr) if not downloadonly else list_downloaded()
# Use of __context__ means no duplicate work here, just accessing
# information already in __context__ from the previous call to list_pkgs()
old_as_list = list_pkgs(versions_as_list=True) if not downloadonly else list_downloaded()
to_install = []
to_downgrade = []
to_reinstall = []
_available = {}
# The above three lists will be populated with tuples containing the
# package name and the string being used for this particular package
# modification. The reason for this method is that the string we use for
# installation, downgrading, or reinstallation will be different than the
# package name in a couple cases:
#
# 1) A specific version is being targeted. In this case the string being
# passed to install/downgrade/reinstall will contain the version
# information after the package name.
# 2) A binary package is being installed via the "sources" param. In this
# case the string being passed will be the path to the local copy of
# the package in the minion cachedir.
#
# The reason that we need both items is to be able to modify the installed
# version of held packages.
if pkg_type == 'repository':
has_wildcards = []
has_comparison = []
for pkgname, pkgver in six.iteritems(pkg_params):
try:
if '*' in pkgver:
has_wildcards.append(pkgname)
elif pkgver.startswith('<') or pkgver.startswith('>'):
has_comparison.append(pkgname)
except (TypeError, ValueError):
continue
_available = AvailablePackages(
*has_wildcards + has_comparison,
byrepo=False,
**kwargs)
pkg_params_items = six.iteritems(pkg_params)
elif pkg_type == 'advisory':
pkg_params_items = []
cur_patches = list_patches()
for advisory_id in pkg_params:
if advisory_id not in cur_patches:
raise CommandExecutionError(
'Advisory id "{0}" not found'.format(advisory_id)
)
else:
pkg_params_items.append(advisory_id)
else:
pkg_params_items = []
for pkg_source in pkg_params:
if 'lowpkg.bin_pkg_info' in __salt__:
rpm_info = __salt__['lowpkg.bin_pkg_info'](pkg_source)
else:
rpm_info = None
if rpm_info is None:
log.error(
'pkg.install: Unable to get rpm information for %s. '
'Version comparisons will be unavailable, and return '
'data may be inaccurate if reinstall=True.', pkg_source
)
pkg_params_items.append([pkg_source])
else:
pkg_params_items.append(
[rpm_info['name'], pkg_source, rpm_info['version']]
)
errors = []
for pkg_item_list in pkg_params_items:
if pkg_type == 'repository':
pkgname, version_num = pkg_item_list
elif pkg_type == 'advisory':
pkgname = pkg_item_list
version_num = None
else:
try:
pkgname, pkgpath, version_num = pkg_item_list
except ValueError:
pkgname = None
pkgpath = pkg_item_list[0]
version_num = None
if version_num is None:
if pkg_type == 'repository':
if reinstall and pkgname in old:
to_reinstall.append((pkgname, pkgname))
else:
to_install.append((pkgname, pkgname))
elif pkg_type == 'advisory':
to_install.append((pkgname, pkgname))
else:
to_install.append((pkgname, pkgpath))
else:
# If we are installing a package file and not one from the repo,
# and version_num is not None, then we can assume that pkgname is
# not None, since the only way version_num is not None is if RPM
# metadata parsing was successful.
if pkg_type == 'repository':
# yum/dnf does not support comparison operators. If the version
# starts with an equals sign, ignore it.
version_num = version_num.lstrip('=')
if pkgname in has_comparison:
candidates = _available.get(pkgname, [])
target = salt.utils.pkg.match_version(
version_num,
candidates,
cmp_func=version_cmp,
ignore_epoch=ignore_epoch,
)
if target is None:
errors.append(
'No version matching \'{0}{1}\' could be found '
'(available: {2})'.format(
pkgname,
version_num,
', '.join(candidates) if candidates else None
)
)
continue
else:
version_num = target
if _yum() == 'yum':
# yum install does not support epoch without the arch, and
# we won't know what the arch will be when it's not
# provided. It could either be the OS architecture, or
# 'noarch', and we don't make that distinction in the
# pkg.list_pkgs return data.
version_num = version_num.split(':', 1)[-1]
arch = ''
try:
namepart, archpart = pkgname.rsplit('.', 1)
except ValueError:
pass
else:
if archpart in salt.utils.pkg.rpm.ARCHES:
arch = '.' + archpart
pkgname = namepart
if '*' in version_num:
# Resolve wildcard matches
candidates = _available.get(pkgname, [])
match = salt.utils.itertools.fnmatch_multiple(candidates, version_num)
if match is not None:
version_num = match
else:
errors.append(
'No version matching \'{0}\' found for package '
'\'{1}\' (available: {2})'.format(
version_num,
pkgname,
', '.join(candidates) if candidates else 'none'
)
)
continue
pkgstr = '{0}-{1}{2}'.format(pkgname, version_num, arch)
else:
pkgstr = pkgpath
# Lambda to trim the epoch from the currently-installed version if
# no epoch is specified in the requested version
norm_epoch = lambda x, y: x.split(':', 1)[-1] \
if ':' not in y \
else x
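# e.g. (illustrative versions): norm_epoch('1:2.4.6-40.el7', '2.4.6-40.el7')
# -> '2.4.6-40.el7', while norm_epoch('1:2.4.6-40.el7', '1:2.4.6-40.el7')
# -> '1:2.4.6-40.el7' (the epoch is kept because the requested version has one).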
cver = old_as_list.get(pkgname, [])
if reinstall and cver:
for ver in cver:
ver = norm_epoch(ver, version_num)
if salt.utils.versions.compare(ver1=version_num,
oper='==',
ver2=ver,
cmp_func=version_cmp):
# This version is already installed, so we need to
# reinstall.
to_reinstall.append((pkgname, pkgstr))
break
else:
if not cver:
to_install.append((pkgname, pkgstr))
else:
for ver in cver:
ver = norm_epoch(ver, version_num)
if salt.utils.versions.compare(ver1=version_num,
oper='>=',
ver2=ver,
cmp_func=version_cmp):
to_install.append((pkgname, pkgstr))
break
else:
if pkgname is not None:
if re.match('^kernel(|-devel)$', pkgname):
# kernel and kernel-devel support multiple
# installs as their paths do not conflict.
# Performing a yum/dnf downgrade will be a
# no-op so just do an install instead. It will
# fail if there are other interdependencies
# that have conflicts, and that's OK. We don't
# want to force anything, we just want to
# properly handle it if someone tries to
# install a kernel/kernel-devel of a lower
# version than the currently-installed one.
# TODO: find a better way to determine if a
# package supports multiple installs.
to_install.append((pkgname, pkgstr))
else:
# None of the currently-installed versions are
# greater than the specified version, so this
# is a downgrade.
to_downgrade.append((pkgname, pkgstr))
def _add_common_args(cmd):
'''
DRY function to add args common to all yum/dnf commands
'''
cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
if downloadonly:
cmd.append('--downloadonly')
try:
holds = list_holds(full=False)
except SaltInvocationError:
holds = []
log.debug(
'Failed to get holds, versionlock plugin is probably not '
'installed'
)
unhold_prevented = []
@contextlib.contextmanager
def _temporarily_unhold(pkgs, targets):
'''
Temporarily unhold packages that need to be updated. Add any
successfully-removed ones (and any packages not in the list of current
holds) to the list of targets.
'''
to_unhold = {}
for pkgname, pkgstr in pkgs:
if pkgname in holds:
if update_holds:
to_unhold[pkgname] = pkgstr
else:
unhold_prevented.append(pkgname)
else:
targets.append(pkgstr)
if not to_unhold:
yield
else:
log.debug('Unholding packages: %s', ', '.join(to_unhold))
try:
# Using list() here for python3 compatibility, dict.keys() no
# longer returns a list in python3.
unhold_names = list(to_unhold.keys())
for unheld_pkg, outcome in \
six.iteritems(unhold(pkgs=unhold_names)):
if outcome['result']:
# Package was successfully unheld, add to targets
targets.append(to_unhold[unheld_pkg])
else:
# Failed to unhold package
errors.append(unheld_pkg)
yield
except Exception as exc:
errors.append(
'Error encountered unholding packages {0}: {1}'
.format(', '.join(to_unhold), exc)
)
finally:
hold(pkgs=unhold_names)
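# The three blocks below run the actual transactions: an install (or 'update'
# for advisory ids), then a downgrade pass, then a reinstall pass. Each one
# temporarily unholds any versionlocked targets for the duration of its
# yum/dnf call and re-holds them afterwards.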
targets = []
with _temporarily_unhold(to_install, targets):
if targets:
if pkg_type == 'advisory':
targets = ["--advisory={0}".format(t) for t in targets]
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y'])
if _yum() == 'dnf':
cmd.extend(['--best', '--allowerasing'])
_add_common_args(cmd)
cmd.append('install' if pkg_type != 'advisory' else 'update')
cmd.extend(targets)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True
)
if out['retcode'] != 0:
errors.append(out['stdout'])
targets = []
with _temporarily_unhold(to_downgrade, targets):
if targets:
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y'])
_add_common_args(cmd)
cmd.append('downgrade')
cmd.extend(targets)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True
)
if out['retcode'] != 0:
errors.append(out['stdout'])
targets = []
with _temporarily_unhold(to_reinstall, targets):
if targets:
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y'])
_add_common_args(cmd)
cmd.append('reinstall')
cmd.extend(targets)
out = __salt__['cmd.run_all'](
cmd,
output_loglevel='trace',
python_shell=False,
redirect_stderr=True
)
if out['retcode'] != 0:
errors.append(out['stdout'])
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs(versions_as_list=False, attr=diff_attr) if not downloadonly else list_downloaded()
ret = salt.utils.data.compare_dicts(old, new)
for pkgname, _ in to_reinstall:
if pkgname not in ret or pkgname in old:
ret.update({pkgname: {'old': old.get(pkgname, ''),
'new': new.get(pkgname, '')}})
if unhold_prevented:
errors.append(
'The following package(s) could not be updated because they are '
'being held: {0}. Set \'update_holds\' to True to temporarily '
'unhold these packages so that they can be updated.'.format(
', '.join(unhold_prevented)
)
)
if errors:
raise CommandExecutionError(
'Error occurred installing{0} package(s)'.format(
'/reinstalling' if to_reinstall else ''
),
info={'errors': errors, 'changes': ret}
)
return ret
def upgrade(name=None,
pkgs=None,
refresh=True,
skip_verify=False,
normalize=True,
**kwargs):
'''
Run a full system upgrade (a ``yum upgrade`` or ``dnf upgrade``), or
upgrade specified packages. If the packages aren't installed, they will
not be installed.
.. versionchanged:: 2014.7.0
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Run a full system upgrade, a yum upgrade
Returns a dictionary containing the changes:
.. code-block:: python
{'<package>': {'old': '<old-version>',
'new': '<new-version>'}}
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade
salt '*' pkg.upgrade name=openssl
Repository Options:
fromrepo
Specify a package repository (or repositories) from which to install.
(e.g., ``yum --disablerepo='*' --enablerepo='somerepo'``)
enablerepo (ignored if ``fromrepo`` is specified)
Specify a disabled package repository (or repositories) to enable.
(e.g., ``yum --enablerepo='somerepo'``)
disablerepo (ignored if ``fromrepo`` is specified)
Specify an enabled package repository (or repositories) to disable.
(e.g., ``yum --disablerepo='somerepo'``)
disableexcludes
Disable exclude from main, for a repo or for everything.
(e.g., ``yum --disableexcludes='main'``)
.. versionadded:: 2014.7
name
The name of the package to be upgraded. Note that this parameter is
ignored if "pkgs" is passed.
32-bit packages can be upgraded on 64-bit systems by appending the
architecture designation (``.i686``, ``.i586``, etc.) to the end of the
package name.
        Warning: if you forget 'name=' and run pkg.upgrade openssl, ALL packages
        are upgraded. This will be addressed in a future release.
CLI Example:
.. code-block:: bash
salt '*' pkg.upgrade name=openssl
.. versionadded:: 2016.3.0
pkgs
A list of packages to upgrade from a software repository. Must be
passed as a python list. A specific version number can be specified
by using a single-element dict representing the package and its
version. If the package was not already installed on the system,
it will not be installed.
CLI Examples:
.. code-block:: bash
salt '*' pkg.upgrade pkgs='["foo", "bar"]'
salt '*' pkg.upgrade pkgs='["foo", {"bar": "1.2.3-4.el5"}]'
.. versionadded:: 2016.3.0
normalize : True
Normalize the package name by removing the architecture. This is useful
for poorly created packages which might include the architecture as an
actual part of the name such as kernel modules which match a specific
kernel version.
.. code-block:: bash
salt -G role:nsd pkg.upgrade gpfs.gplbin-2.6.32-279.31.1.el6.x86_64 normalize=False
.. versionadded:: 2016.3.0
setopt
A comma-separated or Python list of key=value options. This list will
be expanded and ``--setopt`` prepended to each in the yum/dnf command
that is run.
.. versionadded:: Fluorine
.. note::
To add extra arguments to the ``yum upgrade`` command, pass them as key
word arguments. For arguments without assignments, pass ``True``
.. code-block:: bash
salt '*' pkg.upgrade security=True exclude='kernel*'
'''
options = _get_options(get_extra_options=True, **kwargs)
if salt.utils.data.is_true(refresh):
refresh_db(**kwargs)
old = list_pkgs()
targets = []
if name or pkgs:
try:
pkg_params = __salt__['pkg_resource.parse_targets'](
name=name,
pkgs=pkgs,
sources=None,
normalize=normalize,
**kwargs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
if pkg_params:
# Calling list.extend() on a dict will extend it using the
# dictionary's keys.
targets.extend(pkg_params)
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '--quiet', '-y'])
cmd.extend(options)
if skip_verify:
cmd.append('--nogpgcheck')
cmd.append('upgrade')
cmd.extend(targets)
result = __salt__['cmd.run_all'](cmd,
output_loglevel='trace',
python_shell=False)
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if result['retcode'] != 0:
raise CommandExecutionError(
'Problem encountered upgrading packages',
info={'changes': ret, 'result': result}
)
return ret
def remove(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Remove packages
name
The name of the package to be removed
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.remove <package name>
salt '*' pkg.remove <package1>,<package2>,<package3>
salt '*' pkg.remove pkgs='["foo", "bar"]'
'''
try:
pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
except MinionError as exc:
raise CommandExecutionError(exc)
old = list_pkgs()
targets = []
for target in pkg_params:
# Check if package version set to be removed is actually installed:
# old[target] contains a comma-separated list of installed versions
if target in old and not pkg_params[target]:
targets.append(target)
elif target in old and pkg_params[target] in old[target].split(','):
arch = ''
pkgname = target
try:
namepart, archpart = target.rsplit('.', 1)
except ValueError:
pass
else:
if archpart in salt.utils.pkg.rpm.ARCHES:
arch = '.' + archpart
pkgname = namepart
targets.append('{0}-{1}{2}'.format(pkgname, pkg_params[target], arch))
if not targets:
return {}
cmd = []
if salt.utils.systemd.has_scope(__context__) \
and __salt__['config.get']('systemd.scope', True):
cmd.extend(['systemd-run', '--scope'])
cmd.extend([_yum(), '-y', 'remove'] + targets)
    out = __salt__['cmd.run_all'](
        cmd,
        output_loglevel='trace',
        python_shell=False
    )
if out['retcode'] != 0 and out['stderr']:
errors = [out['stderr']]
else:
errors = []
__context__.pop('pkg.list_pkgs', None)
new = list_pkgs()
ret = salt.utils.data.compare_dicts(old, new)
if errors:
raise CommandExecutionError(
'Error occurred removing package(s)',
info={'errors': errors, 'changes': ret}
)
return ret
def purge(name=None, pkgs=None, **kwargs): # pylint: disable=W0613
'''
.. versionchanged:: 2015.8.12,2016.3.3,2016.11.0
On minions running systemd>=205, `systemd-run(1)`_ is now used to
isolate commands which modify installed packages from the
``salt-minion`` daemon's control group. This is done to keep systemd
from killing any yum/dnf commands spawned by Salt when the
``salt-minion`` service is restarted. (see ``KillMode`` in the
`systemd.kill(5)`_ manpage for more information). If desired, usage of
`systemd-run(1)`_ can be suppressed by setting a :mod:`config option
<salt.modules.config.get>` called ``systemd.scope``, with a value of
``False`` (no quotes).
.. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html
.. _`systemd.kill(5)`: https://www.freedesktop.org/software/systemd/man/systemd.kill.html
Package purges are not supported by yum, this function is identical to
:mod:`pkg.remove <salt.modules.yumpkg.remove>`.
name
The name of the package to be purged
Multiple Package Options:
pkgs
A list of packages to delete. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
.. versionadded:: 0.16.0
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.purge <package name>
salt '*' pkg.purge <package1>,<package2>,<package3>
salt '*' pkg.purge pkgs='["foo", "bar"]'
'''
return remove(name=name, pkgs=pkgs)
def hold(name=None, pkgs=None, sources=None, normalize=True, **kwargs): # pylint: disable=W0613
'''
.. versionadded:: 2014.7.0
Version-lock packages
.. note::
Requires the appropriate ``versionlock`` plugin package to be installed:
- On RHEL 5: ``yum-versionlock``
- On RHEL 6 & 7: ``yum-plugin-versionlock``
- On Fedora: ``python-dnf-plugins-extras-versionlock``
name
The name of the package to be held.
Multiple Package Options:
pkgs
A list of packages to hold. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.hold <package name>
salt '*' pkg.hold pkgs='["foo", "bar"]'
'''
_check_versionlock()
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
targets.extend(pkgs)
elif sources:
for source in sources:
targets.append(next(six.iterkeys(source)))
else:
targets.append(name)
current_locks = list_holds(full=False)
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(six.iterkeys(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
if target not in current_locks:
if 'test' in __opts__ and __opts__['test']:
ret[target].update(result=None)
ret[target]['comment'] = ('Package {0} is set to be held.'
.format(target))
else:
out = __salt__['cmd.run_all'](
[_yum(), 'versionlock', target],
python_shell=False
)
if out['retcode'] == 0:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is now being held.'
.format(target))
ret[target]['changes']['new'] = 'hold'
ret[target]['changes']['old'] = ''
else:
ret[target]['comment'] = ('Package {0} was unable to be held.'
.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is already set to be held.'
.format(target))
return ret
def unhold(name=None, pkgs=None, sources=None, **kwargs): # pylint: disable=W0613
'''
.. versionadded:: 2014.7.0
Remove version locks
.. note::
Requires the appropriate ``versionlock`` plugin package to be installed:
- On RHEL 5: ``yum-versionlock``
- On RHEL 6 & 7: ``yum-plugin-versionlock``
- On Fedora: ``python-dnf-plugins-extras-versionlock``
name
The name of the package to be unheld
Multiple Package Options:
pkgs
A list of packages to unhold. Must be passed as a python list. The
``name`` parameter will be ignored if this option is passed.
Returns a dict containing the changes.
CLI Example:
.. code-block:: bash
salt '*' pkg.unhold <package name>
salt '*' pkg.unhold pkgs='["foo", "bar"]'
'''
_check_versionlock()
if not name and not pkgs and not sources:
raise SaltInvocationError(
'One of name, pkgs, or sources must be specified.'
)
if pkgs and sources:
raise SaltInvocationError(
'Only one of pkgs or sources can be specified.'
)
targets = []
if pkgs:
for pkg in salt.utils.data.repack_dictlist(pkgs):
targets.append(pkg)
elif sources:
for source in sources:
targets.append(next(iter(source)))
else:
targets.append(name)
# Yum's versionlock plugin doesn't support passing just the package name
# when removing a lock, so we need to get the full list and then use
# fnmatch below to find the match.
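    # For reference, a yum versionlock entry is a full
    # epoch:name-version-release.arch pattern, e.g. (hypothetical example,
    # editor's note): 0:bash-4.2.46-30.el7.*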
current_locks = list_holds(full=_yum() == 'yum')
ret = {}
for target in targets:
if isinstance(target, dict):
target = next(six.iterkeys(target))
ret[target] = {'name': target,
'changes': {},
'result': False,
'comment': ''}
if _yum() == 'dnf':
search_locks = [x for x in current_locks if x == target]
else:
# To accommodate yum versionlock's lack of support for removing
# locks using just the package name, we have to use fnmatch to do
# glob matching on the target name, and then for each matching
# expression double-check that the package name (obtained via
# _get_hold()) matches the targeted package.
search_locks = [
x for x in current_locks
if fnmatch.fnmatch(x, '*{0}*'.format(target))
and target == _get_hold(x, full=False)
]
if search_locks:
if __opts__['test']:
ret[target].update(result=None)
ret[target]['comment'] = ('Package {0} is set to be unheld.'
.format(target))
else:
out = __salt__['cmd.run_all'](
[_yum(), 'versionlock', 'delete'] + search_locks,
python_shell=False
)
if out['retcode'] == 0:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is no longer held.'
.format(target))
ret[target]['changes']['new'] = ''
ret[target]['changes']['old'] = 'hold'
else:
ret[target]['comment'] = ('Package {0} was unable to be '
'unheld.'.format(target))
else:
ret[target].update(result=True)
ret[target]['comment'] = ('Package {0} is not being held.'
.format(target))
return ret
def list_holds(pattern=__HOLD_PATTERN, full=True):
r'''
.. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
Function renamed from ``pkg.get_locked_pkgs`` to ``pkg.list_holds``.
List information on locked packages
.. note::
Requires the appropriate ``versionlock`` plugin package to be installed:
- On RHEL 5: ``yum-versionlock``
- On RHEL 6 & 7: ``yum-plugin-versionlock``
- On Fedora: ``python-dnf-plugins-extras-versionlock``
pattern : \w+(?:[.-][^-]+)*
Regular expression used to match the package name
full : True
Show the full hold definition including version and epoch. Set to
``False`` to return just the name of the package(s) being held.
CLI Example:
.. code-block:: bash
salt '*' pkg.list_holds
salt '*' pkg.list_holds full=False
'''
_check_versionlock()
out = __salt__['cmd.run']([_yum(), 'versionlock', 'list'],
python_shell=False)
ret = []
for line in salt.utils.itertools.split(out, '\n'):
match = _get_hold(line, pattern=pattern, full=full)
if match is not None:
ret.append(match)
return ret
get_locked_packages = salt.utils.functools.alias_function(list_holds, 'get_locked_packages')
def verify(*names, **kwargs):
'''
.. versionadded:: 2014.1.0
Runs an rpm -Va on a system, and returns the results in a dict
Pass options to modify rpm verify behavior using the ``verify_options``
keyword argument
Files with an attribute of config, doc, ghost, license or readme in the
package header can be ignored using the ``ignore_types`` keyword argument
CLI Example:
.. code-block:: bash
salt '*' pkg.verify
salt '*' pkg.verify httpd
salt '*' pkg.verify 'httpd postfix'
salt '*' pkg.verify 'httpd postfix' ignore_types=['config','doc']
salt '*' pkg.verify 'httpd postfix' verify_options=['nodeps','nosize']
'''
return __salt__['lowpkg.verify'](*names, **kwargs)
def group_list():
'''
.. versionadded:: 2014.1.0
Lists all groups known by yum on this system
CLI Example:
.. code-block:: bash
salt '*' pkg.group_list
'''
ret = {'installed': [],
'available': [],
'installed environments': [],
'available environments': [],
'available languages': {}}
section_map = {
'installed groups:': 'installed',
'available groups:': 'available',
'installed environment groups:': 'installed environments',
'available environment groups:': 'available environments',
'available language groups:': 'available languages',
}
out = __salt__['cmd.run_stdout'](
[_yum(), 'grouplist', 'hidden'],
output_loglevel='trace',
python_shell=False
)
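    # The output parsed below looks roughly like this hypothetical
    # illustration (editor's note, not part of the original module):
    #   Installed Groups:
    #      Base
    #   Available Groups:
    #      Perl Support
    #   Available Language Groups:
    #      Afrikaans Support [af]
    #   Done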
key = None
for line in salt.utils.itertools.split(out, '\n'):
line_lc = line.lower()
if line_lc == 'done':
break
section_lookup = section_map.get(line_lc)
if section_lookup is not None and section_lookup != key:
key = section_lookup
continue
# Ignore any administrative comments (plugin info, repo info, etc.)
if key is None:
continue
line = line.strip()
if key != 'available languages':
ret[key].append(line)
else:
match = re.match(r'(.+) \[(.+)\]', line)
if match:
name, lang = match.groups()
ret[key][line] = {'name': name, 'language': lang}
return ret
def group_info(name, expand=False):
'''
.. versionadded:: 2014.1.0
.. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
The return data has changed. A new key ``type`` has been added to
distinguish environment groups from package groups. Also, keys for the
group name and group ID have been added. The ``mandatory packages``,
``optional packages``, and ``default packages`` keys have been renamed
to ``mandatory``, ``optional``, and ``default`` for accuracy, as
environment groups include other groups, and not packages. Finally,
this function now properly identifies conditional packages.
Lists packages belonging to a certain group
name
Name of the group to query
expand : False
If the specified group is an environment group, then the group will be
expanded and the return data will include package names instead of
group names.
.. versionadded:: 2016.3.0
CLI Example:
.. code-block:: bash
salt '*' pkg.group_info 'Perl Support'
'''
pkgtypes = ('mandatory', 'optional', 'default', 'conditional')
ret = {}
for pkgtype in pkgtypes:
ret[pkgtype] = set()
cmd = [_yum(), '--quiet', 'groupinfo', name]
out = __salt__['cmd.run_stdout'](
cmd,
output_loglevel='trace',
python_shell=False
)
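    # The parsing below expects `yum groupinfo` output roughly like this
    # hypothetical illustration (editor's note, not part of the original module):
    #   Group: Perl Support
    #    Group-Id: perl-runtime
    #    Description: Perl interfaces to common libraries.
    #    Mandatory Packages:
    #      =perl
    #    Default Packages:
    #      +perl-core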
g_info = {}
for line in salt.utils.itertools.split(out, '\n'):
try:
key, value = [x.strip() for x in line.split(':')]
g_info[key.lower()] = value
except ValueError:
continue
if 'environment group' in g_info:
ret['type'] = 'environment group'
elif 'group' in g_info:
ret['type'] = 'package group'
ret['group'] = g_info.get('environment group') or g_info.get('group')
ret['id'] = g_info.get('environment-id') or g_info.get('group-id')
if not ret['group'] and not ret['id']:
raise CommandExecutionError('Group \'{0}\' not found'.format(name))
ret['description'] = g_info.get('description', '')
pkgtypes_capturegroup = '(' + '|'.join(pkgtypes) + ')'
for pkgtype in pkgtypes:
target_found = False
for line in salt.utils.itertools.split(out, '\n'):
line = line.strip().lstrip(string.punctuation)
match = re.match(
pkgtypes_capturegroup + r' (?:groups|packages):\s*$',
line.lower()
)
if match:
if target_found:
# We've reached a new section, break from loop
break
else:
if match.group(1) == pkgtype:
# We've reached the targeted section
target_found = True
continue
if target_found:
if expand and ret['type'] == 'environment group':
expanded = group_info(line, expand=True)
# Don't shadow the pkgtype variable from the outer loop
for p_type in pkgtypes:
ret[p_type].update(set(expanded[p_type]))
else:
ret[pkgtype].add(line)
for pkgtype in pkgtypes:
ret[pkgtype] = sorted(ret[pkgtype])
return ret
def group_diff(name):
'''
.. versionadded:: 2014.1.0
.. versionchanged:: 2016.3.0,2015.8.4,2015.5.10
Environment groups are now supported. The key names have been renamed,
similar to the changes made in :py:func:`pkg.group_info
<salt.modules.yumpkg.group_info>`.
Lists which of a group's packages are installed and which are not
installed
CLI Example:
.. code-block:: bash
salt '*' pkg.group_diff 'Perl Support'
'''
pkgtypes = ('mandatory', 'optional', 'default', 'conditional')
ret = {}
for pkgtype in pkgtypes:
ret[pkgtype] = {'installed': [], 'not installed': []}
pkgs = list_pkgs()
group_pkgs = group_info(name, expand=True)
for pkgtype in pkgtypes:
for member in group_pkgs.get(pkgtype, []):
if member in pkgs:
ret[pkgtype]['installed'].append(member)
else:
ret[pkgtype]['not installed'].append(member)
return ret
def group_install(name,
skip=(),
include=(),
**kwargs):
'''
.. versionadded:: 2014.1.0
Install the passed package group(s). This is basically a wrapper around
:py:func:`pkg.install <salt.modules.yumpkg.install>`, which performs
package group resolution for the user. This function is currently
considered experimental, and should be expected to undergo changes.
name
Package group to install. To install more than one group, either use a
comma-separated list or pass the value as a python list.
CLI Examples:
.. code-block:: bash
salt '*' pkg.group_install 'Group 1'
salt '*' pkg.group_install 'Group 1,Group 2'
salt '*' pkg.group_install '["Group 1", "Group 2"]'
skip
Packages that would normally be installed by the package group
("default" packages), which should not be installed. Can be passed
either as a comma-separated list or a python list.
CLI Examples:
.. code-block:: bash
salt '*' pkg.group_install 'My Group' skip='foo,bar'
salt '*' pkg.group_install 'My Group' skip='["foo", "bar"]'
include
Packages which are included in a group, which would not normally be
installed by a ``yum groupinstall`` ("optional" packages). Note that
this will not enforce group membership; if you include packages which
are not members of the specified groups, they will still be installed.
Can be passed either as a comma-separated list or a python list.
CLI Examples:
.. code-block:: bash
salt '*' pkg.group_install 'My Group' include='foo,bar'
salt '*' pkg.group_install 'My Group' include='["foo", "bar"]'
.. note::
Because this is essentially a wrapper around pkg.install, any argument
which can be passed to pkg.install may also be included here, and it
will be passed along wholesale.
'''
groups = name.split(',') if isinstance(name, six.string_types) else name
if not groups:
raise SaltInvocationError('no groups specified')
elif not isinstance(groups, list):
raise SaltInvocationError('\'groups\' must be a list')
# pylint: disable=maybe-no-member
if isinstance(skip, six.string_types):
skip = skip.split(',')
if not isinstance(skip, (list, tuple)):
raise SaltInvocationError('\'skip\' must be a list')
if isinstance(include, six.string_types):
include = include.split(',')
if not isinstance(include, (list, tuple)):
raise SaltInvocationError('\'include\' must be a list')
# pylint: enable=maybe-no-member
targets = []
for group in groups:
group_detail = group_info(group)
        targets.extend(group_detail.get('mandatory', []))
        targets.extend(
            [pkg for pkg in group_detail.get('default', [])
             if pkg not in skip]
        )
if include:
targets.extend(include)
# Don't install packages that are already installed, install() isn't smart
# enough to make this distinction.
pkgs = [x for x in targets if x not in list_pkgs()]
if not pkgs:
return {}
return install(pkgs=pkgs, **kwargs)
groupinstall = salt.utils.functools.alias_function(group_install, 'groupinstall')
def list_repos(basedir=None):
'''
Lists all repos in <basedir> (default: all dirs in `reposdir` yum option).
CLI Example:
.. code-block:: bash
salt '*' pkg.list_repos
salt '*' pkg.list_repos basedir=/path/to/dir
salt '*' pkg.list_repos basedir=/path/to/dir,/path/to/another/dir
'''
basedirs = _normalize_basedir(basedir)
repos = {}
log.debug('Searching for repos in %s', basedirs)
for bdir in basedirs:
if not os.path.exists(bdir):
continue
for repofile in os.listdir(bdir):
repopath = '{0}/{1}'.format(bdir, repofile)
if not repofile.endswith('.repo'):
continue
filerepos = _parse_repo_file(repopath)[1]
for reponame in filerepos:
repo = filerepos[reponame]
repo['file'] = repopath
repos[reponame] = repo
return repos
def get_repo(name, basedir=None, **kwargs): # pylint: disable=W0613
'''
Display a repo from <basedir> (default basedir: all dirs in ``reposdir``
yum option).
CLI Examples:
.. code-block:: bash
salt '*' pkg.get_repo myrepo
salt '*' pkg.get_repo myrepo basedir=/path/to/dir
salt '*' pkg.get_repo myrepo basedir=/path/to/dir,/path/to/another/dir
'''
repos = list_repos(basedir)
# Find out what file the repo lives in
repofile = ''
for repo in repos:
if repo == name:
repofile = repos[repo]['file']
if repofile:
# Return just one repo
filerepos = _parse_repo_file(repofile)[1]
return filerepos[name]
return {}
def del_repo(repo, basedir=None, **kwargs): # pylint: disable=W0613
'''
Delete a repo from <basedir> (default basedir: all dirs in `reposdir` yum
option).
If the .repo file in which the repo exists does not contain any other repo
configuration, the file itself will be deleted.
CLI Examples:
.. code-block:: bash
salt '*' pkg.del_repo myrepo
salt '*' pkg.del_repo myrepo basedir=/path/to/dir
salt '*' pkg.del_repo myrepo basedir=/path/to/dir,/path/to/another/dir
'''
# this is so we know which dirs are searched for our error messages below
basedirs = _normalize_basedir(basedir)
repos = list_repos(basedirs)
if repo not in repos:
return 'Error: the {0} repo does not exist in {1}'.format(
repo, basedirs)
# Find out what file the repo lives in
repofile = ''
for arepo in repos:
if arepo == repo:
repofile = repos[arepo]['file']
# See if the repo is the only one in the file
onlyrepo = True
for arepo in six.iterkeys(repos):
if arepo == repo:
continue
if repos[arepo]['file'] == repofile:
onlyrepo = False
# If this is the only repo in the file, delete the file itself
if onlyrepo:
os.remove(repofile)
return 'File {0} containing repo {1} has been removed'.format(
repofile, repo)
# There must be other repos in this file, write the file with them
header, filerepos = _parse_repo_file(repofile)
content = header
for stanza in six.iterkeys(filerepos):
if stanza == repo:
continue
comments = ''
if 'comments' in six.iterkeys(filerepos[stanza]):
comments = salt.utils.pkg.rpm.combine_comments(
filerepos[stanza]['comments'])
del filerepos[stanza]['comments']
content += '\n[{0}]'.format(stanza)
for line in filerepos[stanza]:
content += '\n{0}={1}'.format(line, filerepos[stanza][line])
content += '\n{0}\n'.format(comments)
with salt.utils.files.fopen(repofile, 'w') as fileout:
fileout.write(salt.utils.stringutils.to_str(content))
return 'Repo {0} has been removed from {1}'.format(repo, repofile)
def mod_repo(repo, basedir=None, **kwargs):
'''
Modify one or more values for a repo. If the repo does not exist, it will
be created, so long as the following values are specified:
repo
        name by which yum refers to the repo
name
a human-readable name for the repo
baseurl
the URL for yum to reference
mirrorlist
the URL for yum to reference
Key/Value pairs may also be removed from a repo's configuration by setting
a key to a blank value. Bear in mind that a name cannot be deleted, and a
baseurl can only be deleted if a mirrorlist is specified (or vice versa).
CLI Examples:
.. code-block:: bash
salt '*' pkg.mod_repo reponame enabled=1 gpgcheck=1
salt '*' pkg.mod_repo reponame basedir=/path/to/dir enabled=1
salt '*' pkg.mod_repo reponame baseurl= mirrorlist=http://host.com/
'''
# Filter out '__pub' arguments, as well as saltenv
repo_opts = dict(
(x, kwargs[x]) for x in kwargs
if not x.startswith('__') and x not in ('saltenv',)
)
if all(x in repo_opts for x in ('mirrorlist', 'baseurl')):
raise SaltInvocationError(
'Only one of \'mirrorlist\' and \'baseurl\' can be specified'
)
# Build a list of keys to be deleted
todelete = []
# list() of keys because the dict could be shrinking in the for loop.
for key in list(repo_opts):
if repo_opts[key] != 0 and not repo_opts[key]:
del repo_opts[key]
todelete.append(key)
# Add baseurl or mirrorlist to the 'todelete' list if the other was
# specified in the repo_opts
if 'mirrorlist' in repo_opts:
todelete.append('baseurl')
elif 'baseurl' in repo_opts:
todelete.append('mirrorlist')
# Fail if the user tried to delete the name
if 'name' in todelete:
raise SaltInvocationError('The repo name cannot be deleted')
# Give the user the ability to change the basedir
repos = {}
basedirs = _normalize_basedir(basedir)
repos = list_repos(basedirs)
repofile = ''
header = ''
filerepos = {}
if repo not in repos:
# If the repo doesn't exist, create it in a new file in the first
# repo directory that exists
newdir = None
for d in basedirs:
if os.path.exists(d):
newdir = d
break
if not newdir:
raise SaltInvocationError(
'The repo does not exist and needs to be created, but none '
'of the following basedir directories exist: {0}'.format(basedirs)
)
repofile = '{0}/{1}.repo'.format(newdir, repo)
if 'name' not in repo_opts:
raise SaltInvocationError(
'The repo does not exist and needs to be created, but a name '
'was not given'
)
if 'baseurl' not in repo_opts and 'mirrorlist' not in repo_opts:
raise SaltInvocationError(
'The repo does not exist and needs to be created, but either '
'a baseurl or a mirrorlist needs to be given'
)
filerepos[repo] = {}
else:
# The repo does exist, open its file
repofile = repos[repo]['file']
header, filerepos = _parse_repo_file(repofile)
# Error out if they tried to delete baseurl or mirrorlist improperly
if 'baseurl' in todelete:
if 'mirrorlist' not in repo_opts and 'mirrorlist' \
not in filerepos[repo]:
raise SaltInvocationError(
'Cannot delete baseurl without specifying mirrorlist'
)
if 'mirrorlist' in todelete:
if 'baseurl' not in repo_opts and 'baseurl' \
not in filerepos[repo]:
raise SaltInvocationError(
'Cannot delete mirrorlist without specifying baseurl'
)
# Delete anything in the todelete list
for key in todelete:
if key in six.iterkeys(filerepos[repo].copy()):
del filerepos[repo][key]
_bool_to_str = lambda x: '1' if x else '0'
# Old file or new, write out the repos(s)
filerepos[repo].update(repo_opts)
content = header
for stanza in six.iterkeys(filerepos):
comments = salt.utils.pkg.rpm.combine_comments(
filerepos[stanza].pop('comments', [])
)
content += '[{0}]\n'.format(stanza)
for line in six.iterkeys(filerepos[stanza]):
content += '{0}={1}\n'.format(
line,
filerepos[stanza][line]
if not isinstance(filerepos[stanza][line], bool)
else _bool_to_str(filerepos[stanza][line])
)
content += comments + '\n'
with salt.utils.files.fopen(repofile, 'w') as fileout:
fileout.write(salt.utils.stringutils.to_str(content))
return {repofile: filerepos}
def _parse_repo_file(filename):
'''
Turn a single repo file into a dict
'''
parsed = configparser.ConfigParser()
config = {}
try:
parsed.read(filename)
except configparser.MissingSectionHeaderError as err:
log.error(
'Failed to parse file %s, error: %s',
filename, err.message
)
return ('', {})
for section in parsed._sections:
section_dict = dict(parsed._sections[section])
section_dict.pop('__name__', None)
config[section] = section_dict
# Try to extract header comments, as well as comments for each repo. Read
# from the beginning of the file and assume any leading comments are
# header comments. Continue to read each section header and then find the
# comments for each repo.
headers = ''
section = None
with salt.utils.files.fopen(filename, 'r') as repofile:
for line in repofile:
line = salt.utils.stringutils.to_unicode(line)
line = line.strip()
if line.startswith('#'):
if section is None:
headers += line + '\n'
else:
try:
comments = config[section].setdefault('comments', [])
comments.append(line[1:].lstrip())
except KeyError:
log.debug(
'Found comment in %s which does not appear to '
'belong to any repo section: %s', filename, line
)
elif line.startswith('[') and line.endswith(']'):
section = line[1:-1]
return (headers, salt.utils.data.decode(config))
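# Illustrative note on _parse_repo_file() above (editor's sketch, not part of
# the original module): given a hypothetical file containing
#
#   # managed by salt
#   [example]
#   name=Example Repo
#   baseurl=http://mirror.example.com/el7
#   enabled=1
#
# it would return roughly
#   ('# managed by salt\n',
#    {'example': {'name': 'Example Repo',
#                 'baseurl': 'http://mirror.example.com/el7',
#                 'enabled': '1'}})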
def file_list(*packages):
'''
.. versionadded:: 2014.1.0
List the files that belong to a package. Not specifying any packages will
return a list of *every* file on the system's rpm database (not generally
recommended).
CLI Examples:
.. code-block:: bash
salt '*' pkg.file_list httpd
salt '*' pkg.file_list httpd postfix
salt '*' pkg.file_list
'''
return __salt__['lowpkg.file_list'](*packages)
def file_dict(*packages):
'''
.. versionadded:: 2014.1.0
List the files that belong to a package, grouped by package. Not
specifying any packages will return a list of *every* file on the system's
rpm database (not generally recommended).
CLI Examples:
.. code-block:: bash
        salt '*' pkg.file_dict httpd
        salt '*' pkg.file_dict httpd postfix
        salt '*' pkg.file_dict
'''
return __salt__['lowpkg.file_dict'](*packages)
def owner(*paths):
'''
.. versionadded:: 2014.7.0
Return the name of the package that owns the file. Multiple file paths can
    be passed. Like :mod:`pkg.version <salt.modules.yumpkg.version>`, if a
single path is passed, a string will be returned, and if multiple paths are
passed, a dictionary of file/package name pairs will be returned.
If the file is not owned by a package, or is not present on the minion,
then an empty string will be returned for that path.
CLI Examples:
.. code-block:: bash
salt '*' pkg.owner /usr/bin/apachectl
salt '*' pkg.owner /usr/bin/apachectl /etc/httpd/conf/httpd.conf
'''
if not paths:
return ''
ret = {}
cmd_prefix = ['rpm', '-qf', '--queryformat', '%{name}']
for path in paths:
ret[path] = __salt__['cmd.run_stdout'](
cmd_prefix + [path],
output_loglevel='trace',
python_shell=False
)
if 'not owned' in ret[path].lower():
ret[path] = ''
if len(ret) == 1:
return next(six.itervalues(ret))
return ret
def modified(*packages, **flags):
'''
List the modified files that belong to a package. Not specifying any packages
will return a list of _all_ modified files on the system's RPM database.
.. versionadded:: 2015.5.0
Filtering by flags (True or False):
    size
        Include only files whose size has changed.
    mode
        Include only files whose mode has changed.
    checksum
        Include only files whose MD5 checksum has changed.
    device
        Include only files whose major and minor device numbers have changed.
    symlink
        Include only files whose symbolic link contents have changed.
    owner
        Include only files whose owner has changed.
    group
        Include only files whose group has changed.
    time
        Include only files whose modification time has changed.
    capabilities
        Include only files whose capabilities have changed. Note: this is
        supported only on newer RPM versions.
CLI Examples:
.. code-block:: bash
salt '*' pkg.modified
salt '*' pkg.modified httpd
salt '*' pkg.modified httpd postfix
salt '*' pkg.modified httpd owner=True group=False
'''
return __salt__['lowpkg.modified'](*packages, **flags)
@salt.utils.decorators.path.which('yumdownloader')
def download(*packages):
'''
.. versionadded:: 2015.5.0
Download packages to the local disk. Requires ``yumdownloader`` from
``yum-utils`` package.
.. note::
``yum-utils`` will already be installed on the minion if the package
was installed from the Fedora / EPEL repositories.
CLI example:
.. code-block:: bash
salt '*' pkg.download httpd
salt '*' pkg.download httpd postfix
'''
if not packages:
raise SaltInvocationError('No packages were specified')
CACHE_DIR = '/var/cache/yum/packages'
if not os.path.exists(CACHE_DIR):
os.makedirs(CACHE_DIR)
cached_pkgs = os.listdir(CACHE_DIR)
to_purge = []
for pkg in packages:
to_purge.extend([os.path.join(CACHE_DIR, x)
for x in cached_pkgs
if x.startswith('{0}-'.format(pkg))])
for purge_target in set(to_purge):
log.debug('Removing cached package %s', purge_target)
try:
os.unlink(purge_target)
except OSError as exc:
log.error('Unable to remove %s: %s', purge_target, exc)
cmd = ['yumdownloader', '-q', '--destdir={0}'.format(CACHE_DIR)]
cmd.extend(packages)
__salt__['cmd.run'](
cmd,
output_loglevel='trace',
python_shell=False
)
ret = {}
for dld_result in os.listdir(CACHE_DIR):
if not dld_result.endswith('.rpm'):
continue
pkg_name = None
pkg_file = None
for query_pkg in packages:
if dld_result.startswith('{0}-'.format(query_pkg)):
pkg_name = query_pkg
pkg_file = dld_result
break
if pkg_file is not None:
ret[pkg_name] = os.path.join(CACHE_DIR, pkg_file)
if not ret:
raise CommandExecutionError(
'Unable to download any of the following packages: {0}'
.format(', '.join(packages))
)
failed = [x for x in packages if x not in ret]
if failed:
ret['_error'] = ('The following package(s) failed to download: {0}'
.format(', '.join(failed)))
return ret
def diff(*paths):
'''
Return a formatted diff between current files and original in a package.
NOTE: this function includes all files (configuration and not), but does
not work on binary content.
    :param paths: Full path(s) to the installed file(s)
    :return: Difference string, or raises an exception if an examined file is binary.
CLI example:
.. code-block:: bash
salt '*' pkg.diff /etc/apache2/httpd.conf /etc/sudoers
'''
ret = {}
pkg_to_paths = {}
for pth in paths:
pth_pkg = __salt__['lowpkg.owner'](pth)
if not pth_pkg:
ret[pth] = os.path.exists(pth) and 'Not managed' or 'N/A'
else:
if pkg_to_paths.get(pth_pkg) is None:
pkg_to_paths[pth_pkg] = []
pkg_to_paths[pth_pkg].append(pth)
if pkg_to_paths:
local_pkgs = __salt__['pkg.download'](*pkg_to_paths.keys())
for pkg, files in pkg_to_paths.items():
for path in files:
                ret[path] = __salt__['lowpkg.diff'](
                    local_pkgs[pkg], path) or 'Unchanged'
return ret
def _get_patches(installed_only=False):
'''
List all known patches in repos.
'''
patches = {}
cmd = [_yum(), '--quiet', 'updateinfo', 'list', 'all']
ret = __salt__['cmd.run_stdout'](
cmd,
python_shell=False
)
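    # Each line of `yum updateinfo list all` output looks roughly like this
    # hypothetical example (editor's note, not part of the original module):
    #   i FEDORA-2018-abcdef1234 security bash-4.4.23-1.fc28.x86_64
    # where a leading 'i' marks an advisory that is already installed.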
for line in salt.utils.itertools.split(ret, os.linesep):
inst, advisory_id, sev, pkg = re.match(r'([i|\s]) ([^\s]+) +([^\s]+) +([^\s]+)',
line).groups()
if inst != 'i' and installed_only:
continue
patches[advisory_id] = {
'installed': True if inst == 'i' else False,
'summary': pkg
}
return patches
def list_patches(refresh=False):
'''
.. versionadded:: 2017.7.0
List all known advisory patches from available repos.
refresh
force a refresh if set to True.
If set to False (default) it depends on yum if a refresh is
executed.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_patches
'''
if refresh:
refresh_db()
return _get_patches()
def list_installed_patches():
'''
.. versionadded:: 2017.7.0
List installed advisory patches on the system.
CLI Examples:
.. code-block:: bash
salt '*' pkg.list_installed_patches
'''
return _get_patches(installed_only=True)
|
the-stack_0_13866 | """
calc.py
Core calculation logic for the runoff calculator.
"""
import math
from collections import OrderedDict
# dependencies (numpy included with ArcPy)
import numpy
# dependencies (3rd party)
import petl as etl
from .utils import msg
QP_HEADER=['Y1','Y2','Y5','Y10','Y25','Y50','Y100','Y200','Y500','Y1000']
def calculate_tc(
max_flow_length, #units of meters
mean_slope, # percent slope
const_a=0.000325,
const_b=0.77,
const_c=-0.385
):
"""
calculate time of concentration (hourly)
Inputs:
- max_flow_length: maximum flow length of a catchment area, derived
from the DEM for the catchment area.
- mean_slope: average slope, from the DEM *for just the catchment area*. This must be
percent slope, provided as an integer (e.g., 23, not 0.23)
Outputs:
tc_hr: time of concentration (hourly)
"""
if not mean_slope:
mean_slope = 0.00001
return (
const_a
* math.pow(max_flow_length, const_b)
* math.pow((mean_slope / 100), const_c)
)
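# Worked example for calculate_tc() above (editor's sketch, not part of the
# original module): a catchment with a 1200 m maximum flow length and a 5 %
# mean slope gives
#   calculate_tc(1200, 5)
#   = 0.000325 * 1200**0.77 * (5 / 100)**-0.385
#   ~= 0.24 hours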
def calculate_peak_flow(
catchment_area_sqkm,
tc_hr,
avg_cn,
precip_table,
qp_header=QP_HEADER
):
"""Calculate peak runoff statistics at a "pour point" (e.g., a stormwater
inlet, a culvert, or otherwise a basin's outlet of some sort) using
    parameters derived from prior analysis of that pour point's catchment area
    (i.e., its watershed or contributing area) and *24-hour* precipitation estimates.
Note that the TR-55 methodology is designed around a 24-hour storm *duration*. YMMV
if providing rainfall estimates (via the precip_table parameter) for other storm durations.
This calculator by default returns peak flow for storm *frequencies* ranging from 1 to 1000 year events.
Inputs:
- catchment_area_sqkm: area measurement of catchment in *square kilometers*
- tc_hr: hourly time of concentration number for the catchment area
- avg_cn: average curve number for the catchment area
        - precip_table: precipitation estimates, in centimeters, as a 1D array (a list)
          derived from standard NOAA Precipitation Frequency Estimates tables
          (the `precip_table_etl()` function can automatically prep this)
Outputs:
- runoff: a dictionary indicating peak runoff at the pour point for
storm events by frequency
"""
# reference some variables:
# time of concentration in hours
tc = tc_hr
# average curve number, area-weighted
cn = avg_cn
# Skip calculation altogether if curve number or time of concentration are 0.
# (this indicates invalid data)
if cn in [0,'',None] or tc in [0,'',None]:
qp_data = [0 for i in range(0,len(qp_header))]
return OrderedDict(zip(qp_header, qp_data))
# array for storing peak flows
Qp = []
# calculate storage, S in cm
# NOTE: THIS ASSUMES THE CURVE NUMBER RASTER IS IN METERS
Storage = 0.1 * ((25400.0 / cn) - 254.0) #cn is the average curve number of the catchment area
#msg("Storage: {0}".format(Storage))
Ia = 0.2 * Storage #inital abstraction, amount of precip that never has a chance to become runoff
#msg("Ia: {0}".format(Ia))
# setup precip list for the correct watershed from dictionary
P = numpy.array(precip_table) #P in cm
#msg("P: {0}".format(P))
#calculate depth of runoff from each storm
#if P < Ia NO runoff is produced
Pe = (P - Ia)
Pe = numpy.array([0 if i < 0 else i for i in Pe]) # get rid of negative Pe's
#msg("Pe: {0}".format(Pe))
Q = (Pe**2) / (P + (Storage - Ia))
#msg("Q: {0}".format(Q))
# calculate q_peak, cubic meters per second
# q_u is an adjustment because these watersheds are very small. It is a function of tc,
# and constants Const0, Const1, and Const2 which are in turn functions of Ia/P (rain_ratio) and rainfall type
# We are using rainfall Type II because that is applicable to most of New York State
# rain_ratio is a vector with one element per input return period
rain_ratio = Ia/P
rain_ratio = numpy.array([.1 if i < .1 else .5 if i > .5 else i for i in rain_ratio]) # keep rain ratio within limits set by TR55
#msg("Rain Ratio: {0}".format(rain_ratio))
# TODO: expose these as parameters; document here what they are (referencing the TR-55 documentation)
# TODO: some of these are geographically-derived; use geodata to pull the correct/suggested ones in (possibly
# in a function that precedes this)
Const0 = (rain_ratio**2) * -2.2349 + (rain_ratio * 0.4759) + 2.5273
Const1 = (rain_ratio**2) * 1.5555 - (rain_ratio * 0.7081) - 0.5584
Const2 = (rain_ratio**2) * 0.6041 + (rain_ratio * 0.0437) - 0.1761
#qu has weird units which take care of the difference between Q in cm and area in km2 (m^3 s^-1 km^-2 cm^-1)
qu = 10 ** (Const0 + Const1 * numpy.log10(tc) + Const2 * (numpy.log10(tc))**2 - 2.366)
#msg("qu: {0}".format(qu))
q_peak = Q * qu * catchment_area_sqkm # m^3 s^-1
#msg("q_peak: {0}".format(q_peak.tolist()))
# TODO: better parameterize the header here (goes all the way back to how NOAA csv is ingested)
results = OrderedDict(zip(qp_header,q_peak))
#msg("Results:")
# for i in results.items():
#msg("%-5s: %s" % (i[0], i[1]))
return results
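# Minimal usage sketch for calculate_peak_flow() above (editor's addition; the
# precipitation depths below are hypothetical values in centimeters, ordered to
# match QP_HEADER):
#
#   precip_cm = [5.0, 6.1, 7.6, 8.9, 10.7, 12.2, 13.7, 15.2, 17.3, 19.1]
#   qp = calculate_peak_flow(
#       catchment_area_sqkm=2.5,
#       tc_hr=calculate_tc(1200, 5),
#       avg_cn=75,
#       precip_table=precip_cm,
#   )
#   # qp is an OrderedDict keyed 'Y1'...'Y1000' with peak flows in m^3/s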
def peak_flow_core(
mean_slope_pct,
max_flow_length_m,
rainfall_cm,
basin_area_sqkm,
avg_cn
):
"""This is a bare bones version of the two functions above and represents
the core TR-55 logic originally developed by the Cornell Soil and Water lab.
Numbers go in, numbers come out.
TODO: expose hardcoded constants as parameters, and document them.
:param mean_slope_pct: average slope in the basin, as percent rise
:type mean_slope_pct: float
:param max_flow_length_m: maximum flow length, in meters
:type max_flow_length_m: float
:param rainfall_cm: rainfall for a 24 hour event, in centimeters
:type rainfall_cm: float
:param basin_area_sqkm: area of the basin, in square kilometers
:type basin_area_sqkm: float
:param avg_cn: average curve number of the basin, area-weighted
:type avg_cn: float
:return: peak flow, in cubic meters / second
:rtype: float
"""
# INIITAL CHECKS ------------------------------------------
    # Skip calculation altogether if the curve number is 0
    # (this indicates invalid data). The time of concentration is derived
    # further down, so it cannot be checked here.
    if avg_cn in [0, '', None]:
        return None
# -------------------------------------------
# TIME OF CONCENTRATION
TC_CONST_A = 0.000325
TC_CONST_B = 0.77
TC_CONST_C = -0.385
if not mean_slope_pct:
mean_slope_pct = 0.00001
# time of concentration in hours
    tc_hr = TC_CONST_A * math.pow(max_flow_length_m, TC_CONST_B) * math.pow((mean_slope_pct / 100), TC_CONST_C)
# -------------------------------------------
# STORAGE
# calculate storage, S in cm
# NOTE: THIS ASSUMES THE CURVE NUMBER RASTER IS IN METERS
storage = 0.1 * ((25400.0 / avg_cn) - 254.0)
# inital abstraction, amount of precip that never has a chance to become runoff
init_abstraction = 0.2 * storage
# -------------------------------------------
# RUNOFF DEPTH
# calculate depth of runoff from each storm
# if P < Ia NO runoff is produced
Pe = (rainfall_cm - init_abstraction)
if Pe < 0:
return None
Q = (Pe**2) / (rainfall_cm + (storage - init_abstraction))
# -------------------------------------------
# RAIN RATIO AND PEAK FLOW
# calculate q_peak, cubic meters per second
# q_u is an adjustment because these watersheds are very small. It is a function of tc,
# and constants Const0, Const1, and Const2 which are in turn functions of Ia/P (rain_ratio) and rainfall type
# We are using rainfall Type II because that is applicable to most of New York State
# rain_ratio is a vector with one element per input return period
    rain_ratio = init_abstraction / rainfall_cm
rain_ratio = [.1 if i < .1 else .5 if i > .5 else i for i in [rain_ratio]][0] # keep rain ratio within limits set by TR55
CONST_0 = (rain_ratio**2) * -2.2349 + (rain_ratio * 0.4759) + 2.5273
CONST_1 = (rain_ratio**2) * 1.5555 - (rain_ratio * 0.7081) - 0.5584
CONST_2 = (rain_ratio**2) * 0.6041 + (rain_ratio * 0.0437) - 0.1761
# qu has weird units which take care of the difference between Q in cm and area in km2
# qu is in m^3 s^-1 km^-2 cm^-1
qu = 10 ** (CONST_0 + CONST_1 * numpy.log10(tc_hr) + CONST_2 * (numpy.log10(tc_hr))**2 - 2.366)
    q_peak = Q * qu * basin_area_sqkm  # m^3 s^-1
    return q_peak
|
the-stack_0_13867 |
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
from google.protobuf.message import DecodeError
from sawtooth_validator.consensus.proxy import UnknownBlock
from sawtooth_validator.protobuf import consensus_pb2
from sawtooth_validator.protobuf import validator_pb2
from sawtooth_validator.networking.dispatch import Handler
from sawtooth_validator.networking.dispatch import HandlerResult
from sawtooth_validator.networking.dispatch import HandlerStatus
from sawtooth_validator.journal.block_wrapper import BlockStatus
from sawtooth_validator.journal.publisher import BlockEmpty
from sawtooth_validator.journal.publisher import BlockInProgress
from sawtooth_validator.journal.publisher import BlockNotInitialized
from sawtooth_validator.journal.publisher import MissingPredecessor
from sawtooth_validator.protobuf.block_pb2 import BlockHeader
from sawtooth_validator.protobuf.consensus_pb2 import ConsensusSettingsEntry
from sawtooth_validator.protobuf.consensus_pb2 import ConsensusStateEntry
LOGGER = logging.getLogger(__name__)
class ConsensusServiceHandler(Handler):
def __init__(
self,
request_class,
request_type,
response_class,
response_type
):
self._request_class = request_class
self._request_type = request_type
self._response_class = response_class
self._response_type = response_type
def handle_request(self, request, response, connection_id):
raise NotImplementedError()
@property
def request_class(self):
return self._request_class
@property
def response_class(self):
return self._response_class
@property
def response_type(self):
return self._response_type
@property
def request_type(self):
return self._request_type
def handle(self, connection_id, message_content):
request = self._request_class()
response = self._response_class()
response.status = response.OK
if not (
self._request_type
== validator_pb2.Message.CONSENSUS_REGISTER_REQUEST
or self._proxy.is_active_engine_id(connection_id)
):
response.status = response.NOT_ACTIVE_ENGINE
return HandlerResult(
status=HandlerStatus.RETURN,
message_out=response,
message_type=self._response_type)
try:
request.ParseFromString(message_content)
except DecodeError:
response.status = response.BAD_REQUEST
handler_status = HandlerStatus.RETURN
else:
handler_status = self.handle_request(
request, response, connection_id)
return HandlerResult(
status=handler_status,
message_out=response,
message_type=self._response_type)
class ConsensusRegisterHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusRegisterRequest,
validator_pb2.Message.CONSENSUS_REGISTER_REQUEST,
consensus_pb2.ConsensusRegisterResponse,
validator_pb2.Message.CONSENSUS_REGISTER_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
if request.additional_protocols is not None:
additional_protocols = \
[(p.name, p.version) for p in request.additional_protocols]
else:
additional_protocols = []
self._proxy.register(
request.name, request.version, additional_protocols, connection_id)
LOGGER.info(
"Consensus engine registered: %s %s (additional protocols: %s)",
request.name,
request.version,
request.additional_protocols)
return HandlerStatus.RETURN_AND_PASS
class ConsensusRegisterActivateHandler(Handler):
def __init__(self, proxy):
self._proxy = proxy
self._request_type = validator_pb2.Message.CONSENSUS_REGISTER_REQUEST
@property
def request_type(self):
return self._request_type
def handle(self, connection_id, message_content):
# If this is the configured consensus engine, make it active. This is
# necessary for setting the active engine when the configured engine is
# changed to an engine that is not registered yet
request = consensus_pb2.ConsensusRegisterRequest()
try:
request.ParseFromString(message_content)
except DecodeError:
LOGGER.exception("Unable to decode ConsensusRegisterRequest")
            return HandlerResult(status=HandlerStatus.DROP)
if request.additional_protocols is not None:
additional_protocols = \
[(p.name, p.version) for p in request.additional_protocols]
else:
additional_protocols = []
self._proxy.activate_if_configured(
request.name, request.version, additional_protocols)
return HandlerResult(status=HandlerStatus.PASS)
class ConsensusSendToHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusSendToRequest,
validator_pb2.Message.CONSENSUS_SEND_TO_REQUEST,
consensus_pb2.ConsensusSendToResponse,
validator_pb2.Message.CONSENSUS_SEND_TO_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.send_to(
request.receiver_id,
request.message_type,
request.content,
connection_id)
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusSendTo")
response.status =\
consensus_pb2.ConsensusSendToResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusBroadcastHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusBroadcastRequest,
validator_pb2.Message.CONSENSUS_BROADCAST_REQUEST,
consensus_pb2.ConsensusBroadcastResponse,
validator_pb2.Message.CONSENSUS_BROADCAST_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.broadcast(
request.message_type,
request.content,
connection_id)
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusBroadcast")
response.status =\
consensus_pb2.ConsensusBroadcastResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusInitializeBlockHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusInitializeBlockRequest,
validator_pb2.Message.CONSENSUS_INITIALIZE_BLOCK_REQUEST,
consensus_pb2.ConsensusInitializeBlockResponse,
validator_pb2.Message.CONSENSUS_INITIALIZE_BLOCK_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.initialize_block(request.previous_id)
except MissingPredecessor:
response.status =\
consensus_pb2.ConsensusInitializeBlockResponse.UNKNOWN_BLOCK
except BlockInProgress:
response.status =\
consensus_pb2.ConsensusInitializeBlockResponse.INVALID_STATE
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusInitializeBlock")
response.status =\
consensus_pb2.ConsensusInitializeBlockResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusSummarizeBlockHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusSummarizeBlockRequest,
validator_pb2.Message.CONSENSUS_SUMMARIZE_BLOCK_REQUEST,
consensus_pb2.ConsensusSummarizeBlockResponse,
validator_pb2.Message.CONSENSUS_SUMMARIZE_BLOCK_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
summary = self._proxy.summarize_block()
response.summary = summary
except BlockNotInitialized:
response.status =\
consensus_pb2.ConsensusSummarizeBlockResponse.INVALID_STATE
except BlockEmpty:
response.status =\
consensus_pb2.ConsensusSummarizeBlockResponse.BLOCK_NOT_READY
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusSummarizeBlock")
response.status =\
consensus_pb2.ConsensusSummarizeBlockResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusFinalizeBlockHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusFinalizeBlockRequest,
validator_pb2.Message.CONSENSUS_FINALIZE_BLOCK_REQUEST,
consensus_pb2.ConsensusFinalizeBlockResponse,
validator_pb2.Message.CONSENSUS_FINALIZE_BLOCK_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
response.block_id = self._proxy.finalize_block(request.data)
except BlockNotInitialized:
response.status =\
consensus_pb2.ConsensusFinalizeBlockResponse.INVALID_STATE
except BlockEmpty:
response.status =\
consensus_pb2.ConsensusFinalizeBlockResponse.BLOCK_NOT_READY
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusFinalizeBlock")
response.status =\
consensus_pb2.ConsensusFinalizeBlockResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusCancelBlockHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusCancelBlockRequest,
validator_pb2.Message.CONSENSUS_CANCEL_BLOCK_REQUEST,
consensus_pb2.ConsensusCancelBlockResponse,
validator_pb2.Message.CONSENSUS_CANCEL_BLOCK_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.cancel_block()
except BlockNotInitialized:
response.status =\
consensus_pb2.ConsensusCancelBlockResponse.INVALID_STATE
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusCancelBlock")
response.status =\
consensus_pb2.ConsensusCancelBlockResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusCheckBlocksHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusCheckBlocksRequest,
validator_pb2.Message.CONSENSUS_CHECK_BLOCKS_REQUEST,
consensus_pb2.ConsensusCheckBlocksResponse,
validator_pb2.Message.CONSENSUS_CHECK_BLOCKS_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.check_blocks(request.block_ids)
except UnknownBlock:
response.status =\
consensus_pb2.ConsensusCheckBlocksResponse.UNKNOWN_BLOCK
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusCheckBlocks")
response.status =\
consensus_pb2.ConsensusCheckBlocksResponse.SERVICE_ERROR
return HandlerStatus.RETURN_AND_PASS
class ConsensusCheckBlocksNotifier(Handler):
def __init__(self, proxy, consensus_notifier):
self._proxy = proxy
self._consensus_notifier = consensus_notifier
self._request_type = \
validator_pb2.Message.CONSENSUS_CHECK_BLOCKS_REQUEST
@property
def request_type(self):
return self._request_type
def handle(self, connection_id, message_content):
        # No need to verify this is a valid consensus engine; the previous
        # handler, ConsensusCheckBlocksHandler, has already verified it.
request = consensus_pb2.ConsensusCheckBlocksRequest()
try:
request.ParseFromString(message_content)
except DecodeError:
LOGGER.exception("Unable to decode ConsensusCheckBlocksRequest")
            return HandlerResult(status=HandlerStatus.DROP)
block_statuses = self._proxy.get_block_statuses(request.block_ids)
for (block_id, block_status) in block_statuses:
if block_status == BlockStatus.Valid:
self._consensus_notifier.notify_block_valid(block_id)
elif block_status == BlockStatus.Invalid:
self._consensus_notifier.notify_block_invalid(block_id)
elif block_status == BlockStatus.Unknown:
# No need to worry about unknown block, this is checked in the
# previous handler.
self._proxy.validate_block(block_id)
elif block_status == BlockStatus.Missing:
LOGGER.error("Missing block: %s", block_id)
elif block_status == BlockStatus.InValidation:
# Block is already being validated, notification will be sent
# when it's complete
pass
return HandlerResult(status=HandlerStatus.PASS)
class ConsensusCommitBlockHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusCommitBlockRequest,
validator_pb2.Message.CONSENSUS_COMMIT_BLOCK_REQUEST,
consensus_pb2.ConsensusCommitBlockResponse,
validator_pb2.Message.CONSENSUS_COMMIT_BLOCK_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.commit_block(request.block_id)
except UnknownBlock:
response.status =\
consensus_pb2.ConsensusCommitBlockResponse.UNKNOWN_BLOCK
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusCommitBlock")
response.status =\
consensus_pb2.ConsensusCommitBlockResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusIgnoreBlockHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusIgnoreBlockRequest,
validator_pb2.Message.CONSENSUS_IGNORE_BLOCK_REQUEST,
consensus_pb2.ConsensusIgnoreBlockResponse,
validator_pb2.Message.CONSENSUS_IGNORE_BLOCK_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.ignore_block(request.block_id)
except UnknownBlock:
response.status =\
consensus_pb2.ConsensusIgnoreBlockResponse.UNKNOWN_BLOCK
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusIgnoreBlock")
response.status =\
consensus_pb2.ConsensusIgnoreBlockResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusFailBlockHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusFailBlockRequest,
validator_pb2.Message.CONSENSUS_FAIL_BLOCK_REQUEST,
consensus_pb2.ConsensusFailBlockResponse,
validator_pb2.Message.CONSENSUS_FAIL_BLOCK_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
self._proxy.fail_block(request.block_id)
except UnknownBlock:
response.status =\
consensus_pb2.ConsensusFailBlockResponse.UNKNOWN_BLOCK
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusFailBlock")
response.status =\
consensus_pb2.ConsensusFailBlockResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusBlocksGetHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusBlocksGetRequest,
validator_pb2.Message.CONSENSUS_BLOCKS_GET_REQUEST,
consensus_pb2.ConsensusBlocksGetResponse,
validator_pb2.Message.CONSENSUS_BLOCKS_GET_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
blocks = []
for block in self._proxy.blocks_get(request.block_ids):
block_header = BlockHeader()
block_header.ParseFromString(block.header)
blocks.append(consensus_pb2.ConsensusBlock(
block_id=bytes.fromhex(block.header_signature),
previous_id=bytes.fromhex(block_header.previous_block_id),
signer_id=bytes.fromhex(block_header.signer_public_key),
block_num=block_header.block_num,
payload=block_header.consensus))
response.blocks.extend(blocks)
except UnknownBlock:
response.status =\
consensus_pb2.ConsensusBlocksGetResponse.UNKNOWN_BLOCK
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusBlocksGet")
response.status =\
consensus_pb2.ConsensusBlocksGetResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusChainHeadGetHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusChainHeadGetRequest,
validator_pb2.Message.CONSENSUS_CHAIN_HEAD_GET_REQUEST,
consensus_pb2.ConsensusChainHeadGetResponse,
validator_pb2.Message.CONSENSUS_CHAIN_HEAD_GET_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
chain_head = self._proxy.chain_head_get()
block_header = BlockHeader()
block_header.ParseFromString(chain_head.header)
response.block.block_id = bytes.fromhex(
chain_head.header_signature)
response.block.previous_id =\
bytes.fromhex(block_header.previous_block_id)
response.block.signer_id =\
bytes.fromhex(block_header.signer_public_key)
response.block.block_num = block_header.block_num
response.block.payload = block_header.consensus
except UnknownBlock:
response.status =\
consensus_pb2.ConsensusChainHeadGetResponse.NO_CHAIN_HEAD
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusChainHeadGet")
response.status =\
consensus_pb2.ConsensusChainHeadGetResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusSettingsGetHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusSettingsGetRequest,
validator_pb2.Message.CONSENSUS_SETTINGS_GET_REQUEST,
consensus_pb2.ConsensusSettingsGetResponse,
validator_pb2.Message.CONSENSUS_SETTINGS_GET_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
response.entries.extend([
ConsensusSettingsEntry(
key=key,
value=value)
for key, value in self._proxy.settings_get(
request.block_id, request.keys)
])
except UnknownBlock:
response.status = \
consensus_pb2.ConsensusSettingsGetResponse.UNKNOWN_BLOCK
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusSettingsGet")
response.status =\
consensus_pb2.ConsensusSettingsGetResponse.SERVICE_ERROR
return HandlerStatus.RETURN
class ConsensusStateGetHandler(ConsensusServiceHandler):
def __init__(self, proxy):
super().__init__(
consensus_pb2.ConsensusStateGetRequest,
validator_pb2.Message.CONSENSUS_STATE_GET_REQUEST,
consensus_pb2.ConsensusStateGetResponse,
validator_pb2.Message.CONSENSUS_STATE_GET_RESPONSE)
self._proxy = proxy
def handle_request(self, request, response, connection_id):
try:
response.entries.extend([
ConsensusStateEntry(
address=address,
data=data)
for address, data in self._proxy.state_get(
request.block_id, request.addresses)
])
except UnknownBlock:
response.status = \
consensus_pb2.ConsensusStateGetResponse.UNKNOWN_BLOCK
except Exception: # pylint: disable=broad-except
LOGGER.exception("ConsensusStateGet")
response.status =\
consensus_pb2.ConsensusStateGetResponse.SERVICE_ERROR
return HandlerStatus.RETURN
|
the-stack_0_13868 | import os
import sys
import json
def read():
config = None
default_config = {
"DEFAULT": {
"NETWORK_API_URL": "https://network.satnogs.org/api/",
"DB_API_URL": "https://db.satnogs.org/api/",
"DB_API_KEY": "",
"HTTPS_PROXY": "",
"HTTP_PROXY": ""
},
"MODULES": {
"FOR_EACH": {
"ARCHIVE": [],
"WATERFALL": [],
"DEMODDATA": [],
"FRAME": [],
"FOR_ALL_OBSERVATION": []
},
"END": {
"ARCHIVE": [],
"WATERFALL": [],
"DEMODDATA": [],
"FRAME": [],
"FOR_ALL_OBSERVATION": []
}
},
"LOGFILE": "glouton.log"
}
try:
if os.path.exists(os.path.dirname(sys.argv[0]) + "/../glouton/config.json"):
with open(os.path.dirname(sys.argv[0]) + "/../glouton/config.json", 'r') as f:
config = json.load(f)
else:
config = default_config
except Exception as eee:
print("error: ", eee)
config = default_config
return config
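# Usage sketch (illustrative, not part of the original module): callers obtain the
# merged settings dict from read() and index into the sections defined above.
#   cfg = read()
#   api_url = cfg["DEFAULT"]["NETWORK_API_URL"]
#   waterfall_mods = cfg["MODULES"]["FOR_EACH"]["WATERFALL"]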
|
the-stack_0_13869 | # Copyright (c) 2018 Pablo Moreno-Munoz
# Universidad Carlos III de Madrid and University of Sheffield
from GPy import kern
from GPy.util import linalg
import random
import warnings
import numpy as np
import climin
from functools import partial
import matplotlib.pyplot as plt
import VariationalOptimization as vo
def get_batch_scales(X_all, X):
batch_scales = []
for t, X_all_task in enumerate(X_all):
batch_scales.append(float(X_all_task.shape[0]) / float(X[t].shape[0]))
return batch_scales
def true_u_functions(X_list, Q):
u_functions = []
amplitude = (1.5-0.5)*np.random.rand(Q,3) + 0.5
freq = (3-1)*np.random.rand(Q,3) + 1
shift = 2*np.random.rand(Q,3)
for X in X_list:
u_task = np.empty((X.shape[0],Q))
for q in range(Q):
u_task[:,q,None] = 3*amplitude[q,0]*np.cos(freq[q,0]*np.pi*X + shift[q,0]*np.pi) - \
2*amplitude[q,1]*np.sin(2*freq[q,1]*np.pi*X + shift[q,1]*np.pi) + \
amplitude[q,2] * np.cos(4*freq[q, 2] * np.pi * X + shift[q, 2] * np.pi)
u_functions.append(u_task)
return u_functions
def true_f_functions(true_u, W_list, D, likelihood_list, Y_metadata):
true_f = []
f_index = Y_metadata['function_index'].flatten()
d_index = Y_metadata['d_index'].flatten()
for t, u_task in enumerate(true_u):
Ntask = u_task.shape[0]
_, num_f_task, _ = likelihood_list[t].get_metadata()
F = np.zeros((Ntask, num_f_task))
for q, W in enumerate(W_list):
for d in range(D):
if f_index[d] == t:
F[:,d_index[d],None] += np.tile(W[d].T, (Ntask, 1)) * u_task[:, q, None]
true_f.append(F)
return true_f
def mini_slices(n_samples, batch_size):
"""Yield slices of size `batch_size` that work with a container of length
`n_samples`."""
n_batches, rest = divmod(n_samples, batch_size)
if rest != 0:
n_batches += 1
return [slice(i * batch_size, (i + 1) * batch_size) for i in range(n_batches)]
def draw_mini_slices(n_samples, batch_size, with_replacement=False):
slices = mini_slices(n_samples, batch_size)
    idxs = list(range(len(slices)))
if with_replacement:
yield random.choice(slices)
else:
while True:
            random.shuffle(idxs)  # shuffle in place; shuffling a throwaway copy has no effect
for i in idxs:
yield slices[i]
def latent_functions_prior(Q, lenghtscale=None, variance=None, input_dim=None):
if lenghtscale is None:
lenghtscale = np.random.rand(Q)
else:
lenghtscale = lenghtscale
if variance is None:
variance = np.random.rand(Q)
else:
variance = variance
kern_list = []
for q in range(Q):
        kern_q = kern.RBF(input_dim=input_dim, lengthscale=lenghtscale[q], variance=variance[q], name='rbf') + kern.White(input_dim)
kern_q.name = 'kern_q'+str(q)
kern_list.append(kern_q)
return kern_list
def random_W_kappas(Q,D,rank, experiment=False):
W_list = []
kappa_list = []
for q in range(Q):
p = np.random.binomial(n=1, p=0.5*np.ones((D,1)))
Ws = p*np.random.normal(loc=0.5, scale=0.5, size=(D,1)) - (p-1)*np.random.normal(loc=-0.5, scale=0.5, size=(D,1))
        W_list.append(Ws / np.sqrt(rank))  # weights should take both positive and negative values
if experiment:
kappa_list.append(np.zeros(D))
else:
kappa_list.append(np.zeros(D))
return W_list, kappa_list
def ICM(input_dim, output_dim, kernel, rank, W=None, kappa=None, name='ICM'):
"""
Builds a kernel for an Intrinsic Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
:param W_rank: number tuples of the corregionalization parameters 'W'
:type W_rank: integer
"""
kern_q = kernel.copy()
if kernel.input_dim != input_dim:
kernel.input_dim = input_dim
warnings.warn("kernel's input dimension overwritten to fit input_dim parameter.")
B = kern.Coregionalize(input_dim=input_dim, output_dim=output_dim, rank=rank, W=W, kappa=kappa)
B.name = name
K = kern_q.prod(B, name=name)
return K, B
def LCM(input_dim, output_dim, kernels_list, W_list, kappa_list, rank, name='B_q'):
"""
Builds a kernel for an Linear Coregionalization Model
:input_dim: Input dimensionality (does not include dimension of indices)
:num_outputs: Number of outputs
:param kernel: kernel that will be multiplied by the coregionalize kernel (matrix B).
:type kernel: a GPy kernel
:param W_rank: number tuples of the corregionalization parameters 'W'
:type W_rank: integer
"""
B_q = []
K, B = ICM(input_dim, output_dim, kernels_list[0], W=W_list[0], kappa=kappa_list[0], rank=rank, name='%s%s' %(name,0))
B_q.append(B)
for q, kernel in enumerate(kernels_list[1:]):
Kq, Bq = ICM(input_dim, output_dim, kernel, W=W_list[q+1], kappa=kappa_list[q+1], rank=rank, name='%s%s' %(name,q+1))
B_q.append(Bq)
K += Kq
return K, B_q
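# Usage sketch (illustrative, not from the original file): assemble an LCM kernel
# for D=3 outputs from Q=2 latent RBF+White kernels; the values are arbitrary
# assumptions chosen only to show the call pattern of the helpers above.
#   Q, D, rank, input_dim = 2, 3, 1, 1
#   kern_list = latent_functions_prior(Q, lenghtscale=np.array([0.1, 0.5]),
#                                      variance=np.ones(Q), input_dim=input_dim)
#   W_list, kappa_list = random_W_kappas(Q, D, rank)
#   K, B_q = LCM(input_dim, D, kern_list, W_list, kappa_list, rank)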
def cross_covariance(X, Z, B, kernel_list, d):
"""
Builds the cross-covariance cov[f_d(x),u(z)] of a Multi-output GP
:param X: Input data
:param Z: Inducing Points
:param B: Coregionalization matric
:param kernel_list: Kernels of u_q functions
:param d: output function f_d index
:return: Kfdu
"""
N,_ = X.shape
M,Dz = Z.shape
Q = len(B)
Xdim = int(Dz/Q)
Kfdu = np.empty([N,M*Q])
for q, B_q in enumerate(B):
Kfdu[:, q * M:(q * M) + M] = B_q.W[d] * kernel_list[q].K(X, Z[:, q*Xdim:q*Xdim+Xdim])
#Kfdu[:,q*M:(q*M)+M] = B_q.W[d]*kernel_list[q].K(X,Z[:,q,None])
#Kfdu[:, q * M:(q * M) + M] = B_q.B[d,d] * kernel_list[q].K(X, Z[:,q,None])
return Kfdu
def function_covariance(X, B, kernel_list, d):
"""
Builds the cross-covariance Kfdfd = cov[f_d(x),f_d(x)] of a Multi-output GP
:param X: Input data
:param B: Coregionalization matrix
:param kernel_list: Kernels of u_q functions
:param d: output function f_d index
:return: Kfdfd
"""
N,_ = X.shape
Kfdfd = np.zeros((N, N))
for q, B_q in enumerate(B):
Kfdfd += B_q.B[d,d]*kernel_list[q].K(X,X)
return Kfdfd
def latent_funs_cov(Z, kernel_list):
"""
Builds the full-covariance cov[u(z),u(z)] of a Multi-output GP
for a Sparse approximation
:param Z: Inducing Points
:param kernel_list: Kernels of u_q functions priors
:return: Kuu
"""
Q = len(kernel_list)
M,Dz = Z.shape
Xdim = int(Dz/Q)
#Kuu = np.zeros([Q*M,Q*M])
Kuu = np.empty((Q, M, M))
Luu = np.empty((Q, M, M))
Kuui = np.empty((Q, M, M))
for q, kern in enumerate(kernel_list):
Kuu[q, :, :] = kern.K(Z[:,q*Xdim:q*Xdim+Xdim],Z[:,q*Xdim:q*Xdim+Xdim])
Luu[q, :, :] = linalg.jitchol(Kuu[q, :, :],maxtries=10)
Kuui[q, :, :], _ = linalg.dpotri(np.asfortranarray(Luu[q, :, :]))
return Kuu, Luu, Kuui
def generate_toy_U(X,Q):
arg = np.tile(X, (1,Q))
rnd = np.tile(np.random.rand(1,Q), (X.shape))
U = 2*rnd*np.sin(10*rnd*arg + np.random.randn(1)) + 2*rnd*np.cos(20*rnd*arg + np.random.randn(1))
return U
def _gradient_reduce_numpy(coreg, dL_dK, index, index2):
index, index2 = index[:,0], index2[:,0]
dL_dK_small = np.zeros_like(coreg.B)
for i in range(coreg.output_dim):
tmp1 = dL_dK[index==i]
for j in range(coreg.output_dim):
dL_dK_small[j,i] = tmp1[:,index2==j].sum()
return dL_dK_small
def _gradient_B(coreg, dL_dK, index, index2):
index, index2 = index[:,0], index2[:,0]
B = coreg.B
isqrtB = 1 / np.sqrt(B)
dL_dK_small = np.zeros_like(B)
for i in range(coreg.output_dim):
tmp1 = dL_dK[index==i]
for j in range(coreg.output_dim):
dL_dK_small[j,i] = (0.5 * isqrtB[i,j] * tmp1[:,index2==j]).sum()
return dL_dK_small
def update_gradients_diag(coreg, dL_dKdiag):
dL_dKdiag_small = np.array([dL_dKdiag_task.sum() for dL_dKdiag_task in dL_dKdiag])
    coreg.W.gradient = 2.*coreg.W*dL_dKdiag_small[:, None]  # factor of 2 from differentiating the W**2 term on the diagonal of B
coreg.kappa.gradient = dL_dKdiag_small
def update_gradients_full(coreg, dL_dK, X, X2=None):
index = np.asarray(X, dtype=np.int)
if X2 is None:
index2 = index
else:
index2 = np.asarray(X2, dtype=np.int)
dL_dK_small = _gradient_reduce_numpy(coreg, dL_dK, index, index2)
dkappa = np.diag(dL_dK_small).copy()
dL_dK_small += dL_dK_small.T
dW = (coreg.W[:, None, :]*dL_dK_small[:, :, None]).sum(0)
coreg.W.gradient = dW
coreg.kappa.gradient = dkappa
def update_gradients_Kmn(coreg, dL_dK, D):
dW = np.zeros((D,1))
dkappa = np.zeros((D)) # not used
for d in range(D):
dW[d,:] = dL_dK[d].sum()
coreg.W.gradient = dW
coreg.kappa.gradient = dkappa
def gradients_coreg(coreg, dL_dK, X, X2=None):
index = np.asarray(X, dtype=np.int)
if X2 is None:
index2 = index
else:
index2 = np.asarray(X2, dtype=np.int)
dK_dB = _gradient_B(coreg, dL_dK, index, index2)
dkappa = np.diag(dK_dB).copy()
dK_dB += dK_dB.T
dW = (coreg.W[:, None, :]*dK_dB[:, :, None]).sum(0)
coreg.W.gradient = dW
coreg.kappa.gradient = dkappa
def gradients_coreg_diag(coreg, dL_dKdiag, kern_q, X, X2=None):
# dL_dKdiag is (NxD)
if X2 is None:
X2 = X
N,D = dL_dKdiag.shape
matrix_sum = np.zeros((D,1))
for d in range(D):
matrix_sum[d,0] = np.sum(np.diag(kern_q.K(X, X2)) * dL_dKdiag[:,d,None])
dW = 2 * coreg.W * matrix_sum
dkappa = matrix_sum
return dW, dkappa
def vem_algorithm(model, vem_iters=None, maxIter_perVEM = None, step_rate=None ,verbose=False, optZ=True, verbose_plot=False, non_chained=True):
if vem_iters is None:
vem_iters = 5
if maxIter_perVEM is None:
maxIter_perVEM = 100
model['.*.kappa'].fix() # must be always fixed
#model.elbo = np.empty((vem_iters,1))
if model.batch_size is None:
for i in range(vem_iters):
# VARIATIONAL E-STEP
model['.*.lengthscale'].fix()
model['.*.variance'].fix()
model.Z.fix()
model['.*.W'].fix()
model.q_u_means.unfix()
model.q_u_chols.unfix()
model.optimize(messages=verbose, max_iters=maxIter_perVEM)
print('iteration ('+str(i+1)+') VE step, log_likelihood='+str(model.log_likelihood().flatten()))
# VARIATIONAL M-STEP
model['.*.lengthscale'].unfix()
model['.*.variance'].unfix()
if optZ:
model.Z.unfix()
if non_chained:
model['.*.W'].unfix()
model.q_u_means.fix()
model.q_u_chols.fix()
model.optimize(messages=verbose, max_iters=maxIter_perVEM)
print('iteration (' + str(i+1) + ') VM step, log_likelihood=' + str(model.log_likelihood().flatten()))
else:
if step_rate is None:
step_rate = 0.01
# Here the E step has maxIter_perVEM (100 by default) and
# the M step has also maxIter_perVEM (100 by default)
model.elbo = np.empty((2*maxIter_perVEM*vem_iters+2, 1))
model.elbo[0,0]=model.log_likelihood()
c_full = partial(model.callback, max_iter=maxIter_perVEM, verbose=verbose, verbose_plot=verbose_plot)
for i in range(vem_iters):
# VARIATIONAL E-STEP
model['.*.lengthscale'].fix()
model['.*.variance'].fix()
model.Z.fix()
model['.*.W'].fix()
model.q_u_means.unfix()
model.q_u_chols.unfix()
# optimizer = climin.Adam(model.optimizer_array, model.stochastic_grad, step_rate=step_rate,
# decay_mom1=1 - 0.9, decay_mom2=1 - 0.999)
# model.index_VEM = 2*(i) * maxIter_perVEM
# optimizer.minimize_until(c_full)
vo.variational_opt_HetMOGP(model=model, max_iters=maxIter_perVEM, step_size=step_rate, momentum=0.0,prior_lambda=1.0e-1,MC=1)
print('iteration (' + str(i + 1) + ') VE step, mini-batch log_likelihood=' + str(
model.log_likelihood().flatten()))
#
# # VARIATIONAL M-STEP
model['.*.lengthscale'].unfix()
model['.*.variance'].unfix()
if optZ:
model.Z.unfix()
if non_chained:
model['.*.W'].unfix()
model.q_u_means.fix()
model.q_u_chols.fix()
# optimizer = climin.Adam(model.optimizer_array, model.stochastic_grad, step_rate=step_rate,decay_mom1=1 - 0.9, decay_mom2=1 - 0.999)
# model.index_VEM = 2*(i) * maxIter_perVEM +maxIter_perVEM
# optimizer.minimize_until(c_full)
vo.variational_opt_HetMOGP(model=model, max_iters=maxIter_perVEM, step_size=step_rate, momentum=0.0,prior_lambda=1.0e-1,MC=1)
print('iteration (' + str(i + 1) + ') VM step, mini-batch log_likelihood=' + str(
model.log_likelihood().flatten()))
return model
|
the-stack_0_13870 | #! /usr/bin/python3
import sys
def resolve_overlap(o, n):
if (o.x0 > n.x1 or o.x1 < n.x0 or
o.y0 > n.y1 or o.y1 < n.y0 or
o.z0 > n.z1 or o.z1 < n.z0):
return {o, n} # No overlap
if o.x0 < n.x0:
if o.x1 > n.x1: # n sits fully within o on the x axis
oa = Region(o.state, o.x0, n.x0 - 1, o.y0, o.y1, o.z0, o.z1)
ob = Region(o.state, n.x0, n.x1, o.y0, o.y1, o.z0, o.z1)
oc = Region(o.state, n.x1 + 1, o.x1, o.y0, o.y1, o.z0, o.z1)
return {oa, oc} | resolve_overlap(ob, n)
else: # right side of o overlaps left side of n
oa = Region(o.state, o.x0, n.x0 - 1, o.y0, o.y1, o.z0, o.z1)
ob = Region(o.state, n.x0, o.x1, o.y0, o.y1, o.z0, o.z1)
return {oa} | resolve_overlap(ob, n)
elif o.x1 > n.x1: # left side of o overlaps right side of n
oa = Region(o.state, o.x0, n.x1, o.y0, o.y1, o.z0, o.z1)
ob = Region(o.state, n.x1 + 1, o.x1, o.y0, o.y1, o.z0, o.z1)
return {ob} | resolve_overlap(oa, n)
elif o.y0 < n.y0:
if o.y1 > n.y1: # n sits fully within o on the y axis
oa = Region(o.state, o.x0, o.x1, o.y0, n.y0 - 1, o.z0, o.z1)
ob = Region(o.state, o.x0, o.x1, n.y0, n.y1, o.z0, o.z1)
oc = Region(o.state, o.x0, o.x1, n.y1 + 1, o.y1, o.z0, o.z1)
return {oa, oc} | resolve_overlap(ob, n)
else: # top of o overlaps bottom of n
oa = Region(o.state, o.x0, o.x1, o.y0, n.y0 - 1, o.z0, o.z1)
ob = Region(o.state, o.x0, o.x1, n.y0, o.y1, o.z0, o.z1)
return {oa} | resolve_overlap(ob, n)
elif o.y1 > n.y1: # bottom of o overlaps top of n
oa = Region(o.state, o.x0, o.x1, o.y0, n.y1, o.z0, o.z1)
ob = Region(o.state, o.x0, o.x1, n.y1 + 1, o.y1, o.z0, o.z1)
return {ob} | resolve_overlap(oa, n)
elif o.z0 < n.z0:
if o.z1 > n.z1: # n sits fully within o on the z axis
oa = Region(o.state, o.x0, o.x1, o.y0, o.y1, o.z0, n.z0 - 1)
ob = Region(o.state, o.x0, o.x1, o.y0, o.y1, n.z0, n.z1)
oc = Region(o.state, o.x0, o.x1, o.y0, o.y1, n.z1 + 1, o.z1)
return {oa, oc} | resolve_overlap(ob, n)
else: # far side of o overlaps near side of n
oa = Region(o.state, o.x0, o.x1, o.y0, o.y1, o.z0, n.z0 - 1)
ob = Region(o.state, o.x0, o.x1, o.y0, o.y1, n.z0, o.z1)
return {oa} | resolve_overlap(ob, n)
elif o.z1 > n.z1: # near side of o overlaps far side of n
oa = Region(o.state, o.x0, o.x1, o.y0, o.y1, o.z0, n.z1)
ob = Region(o.state, o.x0, o.x1, o.y0, o.y1, n.z1 + 1, o.z1)
return {ob} | resolve_overlap(oa, n)
return {n} # o sits fully within n, so can discard it
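# Illustrative example (not part of the original solution): two 'on' cuboids that
# overlap on the x=2 slice are decomposed into disjoint regions, so their volumes
# can simply be summed afterwards.
#   a = Region(1, 0, 2, 0, 2, 0, 2)   # x, y, z each span 0..2 -> volume 27
#   b = Region(1, 2, 3, 0, 2, 0, 2)   # shares the x=2 slice with a
#   parts = resolve_overlap(a, b)
#   # parts holds Region(1, 0, 1, 0, 2, 0, 2) plus b itself: 18 + 18 = 36 cubes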
class Region:
def __init__(self, state, x0, x1, y0, y1, z0, z1):
assert x0 <= x1 and y0 <= y1 and z0 <= z1, (x0, x1, y0, y1, z0, z1)
self.x0 = x0
self.x1 = x1
self.y0 = y0
self.y1 = y1
self.z0 = z0
self.z1 = z1
self.state = state
def volume(self):
return (self.x1 + 1 - self.x0) * (self.y1 + 1 - self.y0) * (self.z1 + 1 - self.z0)
def run_steps(steps):
regions = {steps[0]}
for step in steps[1:]:
new_regions = set()
for region in regions:
new_regions |= resolve_overlap(region, step)
regions = {region for region in new_regions if region.state}
return sum(region.volume() for region in regions)
def parse_input(path):
steps = []
for line in open(path):
if line.strip():
state, coords = line.split()
region = [1 if state == 'on' else 0]
axes = coords.split(',')
for axis in axes:
region += [int(x) for x in axis[2:].split('..')]
steps.append(Region(*region))
return steps
def main(input_file):
steps = parse_input(input_file)
part1_steps = [s for s in steps if (-50 <= s.x0 <= s.x1 <= 50 and
-50 <= s.y0 <= s.y1 <= 50 and
-50 <= s.z0 <= s.z1 <= 50)]
print("Part 1:", run_steps(part1_steps))
print("Part 2:", run_steps(steps))
if __name__ == '__main__':
main(sys.argv[1])
|
the-stack_0_13871 | import os
import json
if __name__ == "__main__":
total = 0
for data_file in os.listdir("./data/raw"):
path = os.path.join("./data/raw", data_file)
with open(path, "r") as f:
data = json.loads(f.read())
total += len(data)
print(total)
|
the-stack_0_13873 | import Adafruit_GPIO.SPI as SPI
import Adafruit_MCP3008
import RPi.GPIO as GPIO
from time import sleep
import urllib.request
import time
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO_TRIGGER = 14
GPIO_ECHO = 15
ir = 16
CLK = 11
MISO = 9
MOSI = 10
CS = 8
GPIO.setup(GPIO_TRIGGER, GPIO.OUT)
GPIO.setup(GPIO_ECHO, GPIO.IN)
GPIO.setup(ir, GPIO.IN)
AcsValue=0.0
AcsValue1=0.0
Samples=0.0
Samples1=0.0
AvgAcs=0.0
AvgAcs1=0.0
Current1=0.0
Current2=0.0
current = 0.0
ir_read = 0
distance = 0
c=0.0
timeout = time.time() + 60*5
mcp = Adafruit_MCP3008.MCP3008(clk=CLK, cs=CS, miso=MISO, mosi=MOSI)
def fun_I():
try:
values = [0]*2
global current
Samples = 0
Samples1 = 0
for x in range (150):
for i in range(2):
values[i] = mcp.read_adc(i)
AcsValue = values[0]
AcsValue1 = values[1]
Samples = Samples + AcsValue
Samples1 = Samples1 + AcsValue1
sleep(0.01)
AvgAcs = Samples/150.0;
AvgAcs1 = Samples1/150.0;
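        # Convert averaged ADC counts to amps, assuming an ACS712-style hall
        # sensor: ~2.5 V output at zero current and ~66 mV per amp, read through
        # the 10-bit MCP3008 (0-1023) against a 5 V reference.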
Current1 = (2.5 - (AvgAcs * (5.0 / 1024.0)) )/0.066;
Current2 = (2.5 - (AvgAcs1 * (5.0 / 1024.0)) )/0.066;
print("Current 1 = ", round(Current1,2))
print("Current 2 = ", round(Current2, 2))
current = Current1-Current2
if current < 0:
current = current * (-1)
print('Current Diff = ',round(current,2))
except:
print("\n Something Went wrong (fun_I)")
def fun_ir():
try:
global ir_read
ir_read = GPIO.input(ir)
if ir_read == False:
ir_read = 1
else:
ir_read = 0
print("IR detect = ",ir_read)
except:
print("\n Something Went wrong(fun_ir)")
def fun_distance():
try:
global distance
GPIO.output(GPIO_TRIGGER, True)
time.sleep(0.00001)
GPIO.output(GPIO_TRIGGER, False)
StartTime = time.time()
StopTime = time.time()
while GPIO.input(GPIO_ECHO) == 0:
StartTime = time.time()
while GPIO.input(GPIO_ECHO) == 1:
StopTime = time.time()
TimeElapsed = StopTime - StartTime
distance = (TimeElapsed * 34300) / 2
print ("Distance = ",round(distance,2))
except:
print("\n Something Went wrong(fun_distance)")
def IOT():
try:
request = urllib.request.Request("https://api.thingspeak.com/update?api_key=Z6D6Z9I4653LTKQ3"+"&field1=%s"%(current)+"&field2=%s"%(distance)+"&field3=%s"%(ir_read))
urllib.request.urlopen(request)
except:
print("\n Check Network Connection (IOT)")
while True:
try:
a=time.time()
fun_I()
fun_ir()
fun_distance()
IOT()
sleep(1)
b=time.time()
c=(c+(b-a))
if c>15:
c = 0.0
IOT()
except KeyboardInterrupt:
print("Stopped by User")
GPIO.cleanup()
|
the-stack_0_13876 | from datetime import timedelta
from django.core.urlresolvers import reverse
from django.utils.timezone import now
import mock
from funfactory.helpers import urlparams
from nose.tools import eq_, ok_
from remo.base.tests import RemoTestCase
from remo.base.utils import month2number
from remo.profiles.tests import UserFactory
from remo.reports.tests import NGReportFactory
from remo.reports.utils import count_user_ng_reports, get_last_report
class TestUserCommitedReports(RemoTestCase):
"""Tests for count_user_ng_reports utility."""
def test_current_streak(self):
user = UserFactory.create()
        # Add a report each day for the last 4 days (4 reports)
for i in range(0, 4):
NGReportFactory.create(user=user,
report_date=(now().date() -
timedelta(days=i)))
eq_(count_user_ng_reports(user, current_streak=True), 4)
def test_longest_streak(self):
user = UserFactory.create()
past_day = now().date() - timedelta(days=30)
# Add 7 continuous reports somewhere in the past
for i in range(0, 7):
NGReportFactory.create(user=user,
report_date=(past_day - timedelta(days=i)))
        # Add a report, one each day for the last 3 days (3 reports)
for i in range(0, 3):
NGReportFactory.create(user=user,
report_date=(now().date() -
timedelta(days=i)))
eq_(count_user_ng_reports(user, longest_streak=True), 7)
def test_get_last_two_weeks_reports(self):
user = UserFactory.create()
# Add 4 reports more than a day apart
for i in range(8, 0, -2):
NGReportFactory.create(user=user,
report_date=(now().date() -
timedelta(days=i)))
# Get the reports added in the last two weeks
eq_(count_user_ng_reports(user, period=2), 4)
def test_get_last_ten_weeks_reports(self):
user = UserFactory.create()
# Add 4 reports more than a day apart
for i in range(8, 0, -2):
NGReportFactory.create(user=user,
report_date=(now().date() -
timedelta(days=i)))
# Get the reports added in the last 10 weeks
eq_(count_user_ng_reports(user, period=10), 4)
class Month2NumberTest(RemoTestCase):
@mock.patch('remo.reports.views.month2number', wraps=month2number)
def test_base(self, mocked_month2number):
user = UserFactory.create(groups='Rep')
reports_url = reverse('list_ng_reports_rep',
args=(user.userprofile.display_name,))
reports_url = urlparams(reports_url, year='2014', month='Apri')
response = self.client.get(reports_url, follow=True)
mocked_month2number.assert_called_once_with(u'Apri')
eq_(response.status_code, 404)
class GetUserLastReportTest(RemoTestCase):
"""Test get last report date helper."""
def test_get_last_report_past(self):
report_date = now().date() - timedelta(weeks=5)
user = UserFactory.create(groups=['Rep'])
NGReportFactory.create(user=user, report_date=report_date)
eq_(get_last_report(user).report_date, report_date)
def test_get_last_report_future(self):
past_date = now().date() - timedelta(weeks=5)
future_date = now().date() + timedelta(weeks=2)
user = UserFactory.create(groups=['Rep'])
NGReportFactory.create(user=user, report_date=past_date)
NGReportFactory.create(user=user, report_date=future_date)
eq_(get_last_report(user).report_date, past_date)
def test_last_report_date_none(self):
user = UserFactory.create(groups=['Rep'])
ok_(not get_last_report(user))
future_date = now().date() + timedelta(weeks=2)
NGReportFactory.create(user=user, report_date=future_date)
ok_(not get_last_report(user))
|
the-stack_0_13877 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICBazel()
common = gcp.CommonTemplates()
for version in ['v1', 'v1beta1']:
library = gapic.php_library(
service='billingbudgets',
version=version,
bazel_target=f'//google/cloud/billing/budgets/{version}:google-cloud-billing-budgets-{version}-php',
)
# copy all src including partial veneer classes
s.move(library / 'src')
# copy proto files to src also
s.move(library / 'proto/src/Google/Cloud/Billing/Budgets', 'src/')
s.move(library / 'tests/')
# copy GPBMetadata file to metadata
s.move(library / 'proto/src/GPBMetadata/Google/Cloud/Billing/Budgets', 'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# fix year
s.replace(
'**/*Client.php',
r'Copyright \d{4}',
'Copyright 2021')
s.replace(
'tests/**/*Test.php',
r'Copyright \d{4}',
'Copyright 2021')
# Change the wording for the deprecation warning.
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/**/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/**/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/**/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
# fix relative cloud.google.com links
s.replace(
"src/**/V*/**/*.php",
r"(.{0,})\]\((/.{0,})\)",
r"\1](https://cloud.google.com\2)"
)
|
the-stack_0_13879 | #!/usr/bin/env python
"""
Add face index attribute to mesh.
"""
import argparse
import pymesh
def parse_args():
parser = argparse.ArgumentParser(
description=__doc__);
parser.add_argument("input_mesh", help="input mesh");
parser.add_argument("output_mesh", help="output mesh");
return parser.parse_args();
def main():
args = parse_args();
mesh = pymesh.load_mesh(args.input_mesh);
mesh.add_attribute("vertex_index");
mesh.add_attribute("face_index");
mesh.add_attribute("voxel_index");
pymesh.save_mesh(args.output_mesh, mesh, *mesh.attribute_names);
if __name__ == "__main__":
main();
|
the-stack_0_13880 | import argparse
import os
from datetime import datetime
from ais import stream
from pymongo import MongoClient, errors
from src.functions import more_processing
client = MongoClient('mongodb://useradmin:[email protected]:27011/ais')
# save to mongo
def save_mongo(result):
try:
client.ais.ais_data.insert_many(result)
for message in result:
more_processing(message)
except errors.PyMongoError as e:
print("save_mongo(): ", str(e))
# get time from str to object
def utc_time(date_obj):
try:
return datetime.strptime(date_obj, "%Y-%m-%d %H:%M:%S.%f")
except Exception as e:
print("utc_time:", str(e))
return False
# take a file path, read it and parse message,
def read_file_decode(file_path):
client = MongoClient('mongodb://useradmin:[email protected]:27011/ais')
lst = []
print("FileName:", file_path)
counter = 0
with open(file_path) as msg_file:
for msg in stream.decode(msg_file):
msg['batch'] = True
time = utc_time(msg_file.readline().strip('\n'))
if time:
msg['event_time'] = time
try:
x = msg.get('x', None)
y = msg.get('y', None)
if x is not None and y is not None and abs(x) <= 180 and abs(y) < 90:
msg['location'] = {'type': 'Point', 'coordinates': [x, y]}
except Exception as e:
print("read_file_decode():", str(e))
if len(lst) != 1000:
lst.append(msg)
else:
save_mongo(lst)
lst.clear()
lst.append(msg)
if lst:
save_mongo(lst)
lst.clear()
print("Finished Decoding", file_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Decode files in a directory')
parser.add_argument('-dir', help='Directory to Index', type=str)
parser.add_argument('-file', help='file to parse and decode', )
args = parser.parse_args()
if args.dir:
FileDirectory = args.dir
files = os.listdir(FileDirectory)
files = sorted(files)
for file in files:
path = FileDirectory + file
read_file_decode(path)
elif args.file:
filename = args.file
read_file_decode(filename)
|
the-stack_0_13883 |
__author__ = "Radical.Utils Development Team"
__copyright__ = "Copyright 2016, RADICAL@Rutgers"
__license__ = "MIT"
# ------------------------------------------------------------------------------
#
# We provide a json based config file parser with following properties
#
# - system config files will be merged with user configs (if those exist)
# - python style comments are filtered out before parsing
# - after parsing, `${ABC:default}`-style values are set or expanded via
# `os.environ`
#
#
# Config Names and Locations
# --------------------------
#
# We assume two basic locations for config files: one is installed within the
# scope of a Python module, the other one is under user control, and usually in
# the users home directory. The config reader accepts the following parameters
# to derive the exact locations:
#
# - module: name of module under which the config is installed
# - path : config file path relative to the module home
# - name : config file name relative to the module home
#
# The `module` string is interpreted as follows:
#
# m = __import__('module')
# sys_config_dir = "%s/configs" % os.path.dirname(m.__file__)
# usr_config_dir = "%s/.%s/" % (os.environ['HOME'], m.replace('.', '/'))
#
# so the location of the module's `__init__.py` is used to derive the location
# of the installed system config files, and the module name is used to derive
# the location of the user provided config files.
#
# For example, the module `radical.utils` will have the following config dirs:
#
# sys_config_dir = /tmp/ve/lib/python2.7/site-packages/radical/utils/configs/
# usr_config_dir = /home/merzky/.radical/utils/
#
# The remaining two arguments are exclusive (exactly *one* must be specified).
# If `path` is given, it is interpreted as a path under those locations.
# If `name` is given, then the same `. -> /` replacement as on the module name
# is performed, and the result is interpreted like `path` again.
#
# In both cases, we add the file extension `.json` if no match is found without
# it. It is not an error if the so specified config files do not exist -- in
# that case, the config is considered empty.
#
# After loading the system level config file, any existing user level config
# file is merged into it, via
#
# radical.utils.dict_merge(user_cgf, system_cfg, mode='overwrite')
#
# so that the user config settings supercede the system config settings.
#
# Both path and name specifiers can contain `*` as wildcard, which is then
# interpreted as by `glob()`. If that wirldcard exist, then all matching config
# files are read into *one* configuration dict, where each root key is set to
# the value the '*' expands to (minus the `.json` extension).
#
# For example, the name `radical.pilot.resource_*` with the following config
# files:
#
# /tmp/ve/[...]/radical/pilot/configs/resource_xsede.json
# /tmp/ve/[...]/radical/pilot/configs/resource_ncsa.json
#
# will result in a config dict like:
#
# {
# 'xsede' : { 'foo' : 'bar' },
# 'ncsa' : { 'fiz' : 'baz' }
# }
#
#
# Queries
# -------
#
# We support two types of queries on the resulting parsed configs:
#
# - dict like queries (via `ru.DictMixin`)
# - the `query(key)` method returns a single value, or 'None' if not found.
#
# In the latter `query()` case, the `key` can be specified as dot-separated
# path, so that the following two snippets are equivalent (assuming that a
# `foo.bar` section exists):
#
# val = cfg['foo']['bar'].get('baz')
# val = cfg.query('foo.bar.baz')
#
#
# Environment
# -----------
#
# Towards `os.environ` completion, we support the following syntax in all string
# *values* (not keys):
#
# '${RADICAL_UTILS_ENV:default_value}
#
# which will be replaced by
#
# `os.environ.get('RADICAL_UTILS_ENV', 'default_value')`
#
# The default value is optional, an empty string is used if no default value is
# given. Env evaluation is only performed at time of parsing, not at time of
# query.
#
#
# Validation
# ----------
#
# It probably makes sense to switch to a json schema validator at some point,
# see for example https://pypi.python.org/pypi/json-schema-validator. For now
# this implementation remains schema-less, and will thus, in a very pythonesque
# way, only fail once values are queried or used.
#
# ------------------------------------------------------------------------------
import glob
import os
from .misc import find_module, is_str
from .misc import expand_env as ru_expand_env
from .read_json import read_json
from .dict_mixin import dict_merge, DictMixin
from .singleton import Singleton
# ------------------------------------------------------------------------------
#
class Config(object, DictMixin):
    # FIXME: we should do some magic on values, like converting into int, float,
    #        bool, or lists of those, after env expansion.  For now, typing is
    #        the responsibility of the consumer.
# FIXME: we should cache config files after reading, so that repeated
# instance creations do not trigger a new (identical) round of
# parsing.
# FIXME: ensure that deepcopy is working (or add `from_dict` c'tor)
# identify as dictionary
# FIXME: why is this not inherited from DictMixin?
# FIXME: we also want to identify as ru.Config!
@property
def __class__(self):
return dict
# --------------------------------------------------------------------------
#
def __init__(self, module, path=None, name=None, cfg=None,
expand=True, env=None):
'''
expand: enable / disable environment var expansion. When disabled, the
consumer should expand manually upon use of config entries.
env: environment dictionary to be used for expansion
defaults to `os.environ`
'''
modpath = find_module(module)
if not modpath:
raise ValueError("Cannot find module %s" % module)
home = os.environ.get('HOME', '/tmp')
home = os.environ.get('RADICAL_CONFIG_USER_DIR', home)
sys_dir = "%s/configs" % (modpath)
usr_dir = "%s/.%s" % (home, module.replace('.', '/'))
if path and name:
raise ValueError("'path' and 'name' parameters are exclusive")
# if a name starts with a module prefix, strip that prefix
if name and name.startswith('%s.' % module):
name = name[len(module) + 1:]
# if a path starts with a module prefix, strip that prefix
if path and path.startswith('%s/' % module.replace('.', '/')):
path = path[len(module) + 1:]
if not path and not name:
# Default to `name='*.json'`
name = '*.json'
if not cfg:
cfg = dict()
if path: path = path
else : path = name.replace('.', '/')
if '*' in path: starred = True
else : starred = False
if starred and path.count('*') > 1:
raise ValueError('only one wildcard allowed in config path')
if path.startswith('/'):
sys_fspec = path
usr_fspec = None
else:
sys_fspec = '%s/%s' % (sys_dir, path)
usr_fspec = '%s/%s' % (usr_dir, path)
app_cfg = cfg
sys_cfg = dict()
usr_cfg = dict()
if not starred:
if sys_fspec:
sys_fname = sys_fspec
if not os.path.isfile(sys_fname): sys_fname += '.json'
if os.path.isfile(sys_fname): sys_cfg = read_json(sys_fname)
if usr_fspec:
usr_fname = usr_fspec
if not os.path.isfile(usr_fname): usr_fname += '.json'
if os.path.isfile(usr_fname): usr_cfg = read_json(usr_fname)
else:
# wildcard mode: whatever the '*' expands into is used as root dict
# entry, and the respective content of the config file is stored
# underneath it.
if sys_fspec:
prefix_len = sys_fspec.find('*')
postfix_len = len(sys_fspec) - prefix_len - 1
for sys_fname in glob.glob(sys_fspec):
if postfix_len: base = sys_fname[prefix_len:-postfix_len]
else : base = sys_fname[prefix_len:]
scfg = read_json(sys_fname)
sys_cfg[base] = scfg
if usr_fspec:
prefix_len = usr_fspec.find('*')
postfix_len = len(usr_fspec) - prefix_len - 1
for usr_fname in glob.glob(usr_fspec):
base = usr_fname[prefix_len:-postfix_len]
ucfg = read_json(usr_fname)
usr_cfg[base] = ucfg
# merge sys, app, and user cfg before expansion
self._cfg = dict()
self._cfg = dict_merge(self._cfg, sys_cfg, policy='overwrite')
self._cfg = dict_merge(self._cfg, app_cfg, policy='overwrite')
self._cfg = dict_merge(self._cfg, usr_cfg, policy='overwrite')
if expand:
self.expand_env(env)
# --------------------------------------------------------------------------
#
def expand_env(self, env):
# expand environment
def _expand_env(d):
if isinstance(d, dict):
for k,v in d.iteritems():
d[k] = _expand_env(v)
elif isinstance(d, list):
for i,v in enumerate(d):
d[i] = _expand_env(v)
elif isinstance(d, basestring):
d = ru_expand_env(d, env)
return d
_expand_env(self._cfg)
# --------------------------------------------------------------------------
#
def __repr__(self):
import pprint
return pprint.pformat(self._cfg)
# --------------------------------------------------------------------------
#
def as_dict(self):
return self._cfg
# --------------------------------------------------------------------------
#
# first level definitions should be implemented for the dict mixin
#
def __getitem__(self, key):
if key not in self._cfg:
raise KeyError('no such key [%s]' % key)
return self._cfg[key]
def __setitem__(self, key, value):
self._cfg[key] = value
def __delitem__(self, key):
del(self._cfg[key])
def keys(self):
return self._cfg.keys()
# --------------------------------------------------------------------------
#
def query(self, key, default=None):
'''
For a query like
config.query('some.path.to.key', 'foo')
this method behaves like:
config['some']['path']['to'].get('key', default='foo')
'''
if is_str(key): elems = key.split('.')
else : elems = key
if not elems:
raise ValueError('empty key on query')
pos = self._cfg
path = list()
for elem in elems:
if not isinstance(pos, dict):
raise KeyError('no such key [%s]' % '.'.join(path))
if elem in pos: pos = pos[elem]
else : pos = default
path.append(elem)
return pos
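# Usage sketch (illustrative, not part of the module): seed a Config with an
# application-level dict, as DefaultConfig below does, then query it with
# dotted paths; a missing top-level key falls back to the given default.
#   cfg = Config(module='radical.utils',
#                cfg={'log_lvl': '${RADICAL_DEFAULT_LOG_LVL:ERROR}'})
#   cfg.query('log_lvl')                 # env var value, or 'ERROR'
#   cfg.query('verbose', default=False)  # key not set -> False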
# ------------------------------------------------------------------------------
#
class DefaultConfig(Config):
'''
The settings in this default config are, unsurprisingly, used as default
values for various RU classes and methods, as for example for log file
locations, log levels, profile locations, etc.
'''
__metaclass__ = Singleton
def __init__(self):
pwd = os.getcwd()
cfg = {'log_lvl' : '${RADICAL_DEFAULT_LOG_LVL:ERROR}',
'log_tgt' : '${RADICAL_DEFAULT_LOG_TGT:.}',
'log_dir' : '${RADICAL_DEFAULT_LOG_DIR:%s}' % pwd,
'report' : '${RADICAL_DEFAULT_REPORT:TRUE}',
'report_tgt' : '${RADICAL_DEFAULT_REPORT_TGT:stderr}',
'report_dir' : '${RADICAL_DEFAULT_REPORT_DIR:%s}' % pwd,
'profile' : '${RADICAL_DEFAULT_PROFILE:TRUE}',
'profile_dir': '${RADICAL_DEFAULT_PROFILE_DIR:%s}' % pwd,
}
super(DefaultConfig, self).__init__(module='radical.utils', cfg=cfg)
# ------------------------------------------------------------------------------
|
the-stack_0_13884 | # -*- coding: utf-8 -*-
'''
Operations on regular files, special files, directories, and symlinks
=====================================================================
Salt States can aggressively manipulate files on a system. There are a number
of ways in which files can be managed.
Regular files can be enforced with the :mod:`file.managed
<salt.states.file.managed>` state. This state downloads files from the salt
master and places them on the target system. Managed files can be rendered as a
jinja, mako, or wempy template, adding a dynamic component to file management.
An example of :mod:`file.managed <salt.states.file.managed>` which makes use of
the jinja templating system would look like this:
.. code-block:: jinja
/etc/http/conf/http.conf:
file.managed:
- source: salt://apache/http.conf
- user: root
- group: root
- mode: 644
- attrs: ai
- template: jinja
- defaults:
custom_var: "default value"
other_var: 123
{% if grains['os'] == 'Ubuntu' %}
- context:
custom_var: "override"
{% endif %}
It is also possible to use the :mod:`py renderer <salt.renderers.py>` as a
templating option. The template would be a Python script which would need to
contain a function called ``run()``, which returns a string. All arguments
to the state will be made available to the Python script as globals. The
returned string will be the contents of the managed file. For example:
.. code-block:: python
def run():
lines = ['foo', 'bar', 'baz']
lines.extend([source, name, user, context]) # Arguments as globals
return '\\n\\n'.join(lines)
.. note::
The ``defaults`` and ``context`` arguments require extra indentation (four
spaces instead of the normal two) in order to create a nested dictionary.
:ref:`More information <nested-dict-indentation>`.
If using a template, any user-defined template variables in the file defined in
``source`` must be passed in using the ``defaults`` and/or ``context``
arguments. The general best practice is to place default values in
``defaults``, with conditional overrides going into ``context``, as seen above.
The template will receive a variable ``custom_var``, which would be accessed in
the template using ``{{ custom_var }}``. If the operating system is Ubuntu, the
value of the variable ``custom_var`` would be *override*, otherwise it is the
default *default value*
The ``source`` parameter can be specified as a list. If this is done, then the
first file to be matched will be the one that is used. This allows you to have
a default file on which to fall back if the desired file does not exist on the
salt fileserver. Here's an example:
.. code-block:: jinja
/etc/foo.conf:
file.managed:
- source:
- salt://foo.conf.{{ grains['fqdn'] }}
- salt://foo.conf.fallback
- user: foo
- group: users
- mode: 644
- attrs: i
- backup: minion
.. note::
Salt supports backing up managed files via the backup option. For more
details on this functionality please review the
:ref:`backup_mode documentation <file-state-backups>`.
The ``source`` parameter can also specify a file in another Salt environment.
In this example ``foo.conf`` in the ``dev`` environment will be used instead.
.. code-block:: yaml
/etc/foo.conf:
file.managed:
- source:
- 'salt://foo.conf?saltenv=dev'
- user: foo
- group: users
- mode: '0644'
- attrs: i
.. warning::
When using a mode that includes a leading zero you must wrap the
value in single quotes. If the value is not wrapped in quotes it
will be read by YAML as an integer and evaluated as an octal.
The ``names`` parameter, which is part of the state compiler, can be used to
expand the contents of a single state declaration into multiple, single state
declarations. Each item in the ``names`` list receives its own individual state
``name`` and is converted into its own low-data structure. This is a convenient
way to manage several files with similar attributes.
There is more documentation about this feature in the :ref:`Names declaration
<names-declaration>` section of the :ref:`Highstate docs <states-highstate>`.
Special files can be managed via the ``mknod`` function. This function will
create and enforce the permissions on a special file. The function supports the
creation of character devices, block devices, and FIFO pipes. The function will
create the directory structure up to the special file if it is needed on the
minion. The function will not overwrite or operate on (change major/minor
numbers) existing special files with the exception of user, group, and
permissions. In most cases the creation of some special files require root
permissions on the minion. This would require that the minion to be run as the
root user. Here is an example of a character device:
.. code-block:: yaml
/var/named/chroot/dev/random:
file.mknod:
- ntype: c
- major: 1
- minor: 8
- user: named
- group: named
- mode: 660
Here is an example of a block device:
.. code-block:: yaml
/var/named/chroot/dev/loop0:
file.mknod:
- ntype: b
- major: 7
- minor: 0
- user: named
- group: named
- mode: 660
Here is an example of a fifo pipe:
.. code-block:: yaml
/var/named/chroot/var/log/logfifo:
file.mknod:
- ntype: p
- user: named
- group: named
- mode: 660
Directories can be managed via the ``directory`` function. This function can
create and enforce the permissions on a directory. A directory statement will
look like this:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- mode: 755
- makedirs: True
If you need to enforce user and/or group ownership or permissions recursively
on the directory's contents, you can do so by adding a ``recurse`` directive:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- mode: 755
- makedirs: True
- recurse:
- user
- group
- mode
As a default, ``mode`` will resolve to ``dir_mode`` and ``file_mode``, to
specify both directory and file permissions, use this form:
.. code-block:: yaml
/srv/stuff/substuf:
file.directory:
- user: fred
- group: users
- file_mode: 744
- dir_mode: 755
- makedirs: True
- recurse:
- user
- group
- mode
Symlinks can be easily created; the symlink function is very simple and only
takes a few arguments:
.. code-block:: yaml
/etc/grub.conf:
file.symlink:
- target: /boot/grub/grub.conf
Recursive directory management can also be set via the ``recurse``
function. Recursive directory management allows for a directory on the salt
master to be recursively copied down to the minion. This is a great tool for
deploying large code and configuration systems. A state using ``recurse``
would look something like this:
.. code-block:: yaml
/opt/code/flask:
file.recurse:
- source: salt://code/flask
- include_empty: True
A more complex ``recurse`` example:
.. code-block:: jinja
{% set site_user = 'testuser' %}
{% set site_name = 'test_site' %}
{% set project_name = 'test_proj' %}
{% set sites_dir = 'test_dir' %}
django-project:
file.recurse:
- name: {{ sites_dir }}/{{ site_name }}/{{ project_name }}
- user: {{ site_user }}
- dir_mode: 2775
- file_mode: '0644'
- template: jinja
- source: salt://project/templates_dir
- include_empty: True
Retention scheduling can be applied to manage contents of backup directories.
For example:
.. code-block:: yaml
/var/backups/example_directory:
file.retention_schedule:
- strptime_format: example_name_%Y%m%dT%H%M%S.tar.bz2
- retain:
most_recent: 5
first_of_hour: 4
first_of_day: 14
first_of_week: 6
first_of_month: 6
first_of_year: all
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import difflib
import itertools
import logging
import os
import posixpath
import re
import shutil
import sys
import time
import traceback
from collections import Iterable, Mapping, defaultdict
from datetime import datetime # python3 problem in the making?
# Import salt libs
import salt.loader
import salt.payload
import salt.utils.data
import salt.utils.dateutils
import salt.utils.dictupdate
import salt.utils.files
import salt.utils.hashutils
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.versions
from salt.exceptions import CommandExecutionError
from salt.state import get_accumulator_dir as _get_accumulator_dir
if salt.utils.platform.is_windows():
import salt.utils.win_dacl
import salt.utils.win_functions
# Import 3rd-party libs
from salt.ext import six
from salt.ext.six.moves import zip_longest
from salt.ext.six.moves.urllib.parse import urlparse as _urlparse # pylint: disable=no-name-in-module
if salt.utils.platform.is_windows():
import pywintypes
import win32com.client
log = logging.getLogger(__name__)
COMMENT_REGEX = r'^([[:space:]]*){0}[[:space:]]?'
__NOT_FOUND = object()
__func_alias__ = {
'copy_': 'copy',
}
def _get_accumulator_filepath():
'''
Return accumulator data path.
'''
return os.path.join(
_get_accumulator_dir(__opts__['cachedir']),
__instance_id__
)
def _load_accumulators():
def _deserialize(path):
serial = salt.payload.Serial(__opts__)
ret = {'accumulators': {}, 'accumulators_deps': {}}
try:
with salt.utils.files.fopen(path, 'rb') as f:
loaded = serial.load(f)
return loaded if loaded else ret
except (IOError, NameError):
# NameError is a msgpack error from salt-ssh
return ret
loaded = _deserialize(_get_accumulator_filepath())
return loaded['accumulators'], loaded['accumulators_deps']
def _persist_accummulators(accumulators, accumulators_deps):
accumm_data = {'accumulators': accumulators,
'accumulators_deps': accumulators_deps}
serial = salt.payload.Serial(__opts__)
try:
with salt.utils.files.fopen(_get_accumulator_filepath(), 'w+b') as f:
serial.dump(accumm_data, f)
except NameError:
# msgpack error from salt-ssh
pass
def _check_user(user, group):
'''
Checks if the named user and group are present on the minion
'''
err = ''
if user:
uid = __salt__['file.user_to_uid'](user)
if uid == '':
err += 'User {0} is not available '.format(user)
if group:
gid = __salt__['file.group_to_gid'](group)
if gid == '':
err += 'Group {0} is not available'.format(group)
return err
def _is_valid_relpath(
relpath,
maxdepth=None):
'''
Performs basic sanity checks on a relative path.
Requires POSIX-compatible paths (i.e. the kind obtained through
cp.list_master or other such calls).
Ensures that the path does not contain directory transversal, and
that it does not exceed a stated maximum depth (if specified).
'''
# Check relpath surrounded by slashes, so that `..` can be caught as
# a path component at the start, end, and in the middle of the path.
sep, pardir = posixpath.sep, posixpath.pardir
if sep + pardir + sep in sep + relpath + sep:
return False
# Check that the relative path's depth does not exceed maxdepth
if maxdepth is not None:
path_depth = relpath.strip(sep).count(sep)
if path_depth > maxdepth:
return False
return True
def _salt_to_os_path(path):
'''
Converts a path from the form received via salt master to the OS's native
path format.
'''
return os.path.normpath(path.replace(posixpath.sep, os.path.sep))
def _gen_recurse_managed_files(
name,
source,
keep_symlinks=False,
include_pat=None,
exclude_pat=None,
maxdepth=None,
include_empty=False,
**kwargs):
'''
Generate the list of files managed by a recurse state
'''
# Convert a relative path generated from salt master paths to an OS path
# using "name" as the base directory
def full_path(master_relpath):
return os.path.join(name, _salt_to_os_path(master_relpath))
# Process symlinks and return the updated filenames list
def process_symlinks(filenames, symlinks):
for lname, ltarget in six.iteritems(symlinks):
srelpath = posixpath.relpath(lname, srcpath)
if not _is_valid_relpath(srelpath, maxdepth=maxdepth):
continue
if not salt.utils.stringutils.check_include_exclude(
srelpath, include_pat, exclude_pat):
continue
# Check for all paths that begin with the symlink
# and axe it leaving only the dirs/files below it.
# This needs to use list() otherwise they reference
# the same list.
_filenames = list(filenames)
for filename in _filenames:
if filename.startswith(lname):
log.debug('** skipping file ** {0}, it intersects a '
'symlink'.format(filename))
filenames.remove(filename)
# Create the symlink along with the necessary dirs.
# The dir perms/ownership will be adjusted later
# if needed
managed_symlinks.add((srelpath, ltarget))
# Add the path to the keep set in case clean is set to True
keep.add(full_path(srelpath))
vdir.update(keep)
return filenames
managed_files = set()
managed_directories = set()
managed_symlinks = set()
keep = set()
vdir = set()
srcpath, senv = salt.utils.url.parse(source)
if senv is None:
senv = __env__
if not srcpath.endswith(posixpath.sep):
# we're searching for things that start with this *directory*.
srcpath = srcpath + posixpath.sep
fns_ = __salt__['cp.list_master'](senv, srcpath)
# If we are instructed to keep symlinks, then process them.
if keep_symlinks:
# Make this global so that emptydirs can use it if needed.
symlinks = __salt__['cp.list_master_symlinks'](senv, srcpath)
fns_ = process_symlinks(fns_, symlinks)
for fn_ in fns_:
if not fn_.strip():
continue
# fn_ here is the absolute (from file_roots) source path of
# the file to copy from; it is either a normal file or an
# empty dir (if include_empty is True).
relname = salt.utils.data.decode(posixpath.relpath(fn_, srcpath))
if not _is_valid_relpath(relname, maxdepth=maxdepth):
continue
# Check if it is to be excluded. Match only part of the path
# relative to the target directory
if not salt.utils.stringutils.check_include_exclude(
relname, include_pat, exclude_pat):
continue
dest = full_path(relname)
dirname = os.path.dirname(dest)
keep.add(dest)
if dirname not in vdir:
# verify the directory perms if they are set
managed_directories.add(dirname)
vdir.add(dirname)
src = salt.utils.url.create(fn_, saltenv=senv)
managed_files.add((dest, src))
if include_empty:
mdirs = __salt__['cp.list_master_dirs'](senv, srcpath)
for mdir in mdirs:
relname = posixpath.relpath(mdir, srcpath)
if not _is_valid_relpath(relname, maxdepth=maxdepth):
continue
if not salt.utils.stringutils.check_include_exclude(
relname, include_pat, exclude_pat):
continue
mdest = full_path(relname)
# Check for symlinks that happen to point to an empty dir.
if keep_symlinks:
islink = False
for link in symlinks:
if mdir.startswith(link, 0):
log.debug('** skipping empty dir ** {0}, it intersects'
' a symlink'.format(mdir))
islink = True
break
if islink:
continue
managed_directories.add(mdest)
keep.add(mdest)
return managed_files, managed_directories, managed_symlinks, keep
def _gen_keep_files(name, require, walk_d=None):
'''
Generate the list of files that need to be kept when a dir based function
like directory or recurse has a clean.
'''
def _is_child(path, directory):
'''
Check whether ``path`` is child of ``directory``
'''
path = os.path.abspath(path)
directory = os.path.abspath(directory)
relative = os.path.relpath(path, directory)
return not relative.startswith(os.pardir)
def _add_current_path(path):
_ret = set()
if os.path.isdir(path):
dirs, files = walk_d.get(path, ((), ()))
_ret.add(path)
for _name in files:
_ret.add(os.path.join(path, _name))
for _name in dirs:
_ret.add(os.path.join(path, _name))
return _ret
def _process_by_walk_d(name, ret):
if os.path.isdir(name):
walk_ret.update(_add_current_path(name))
dirs, _ = walk_d.get(name, ((), ()))
for _d in dirs:
p = os.path.join(name, _d)
walk_ret.update(_add_current_path(p))
_process_by_walk_d(p, ret)
def _process(name):
ret = set()
if os.path.isdir(name):
for root, dirs, files in salt.utils.path.os_walk(name):
ret.add(name)
for name in files:
ret.add(os.path.join(root, name))
for name in dirs:
ret.add(os.path.join(root, name))
return ret
keep = set()
if isinstance(require, list):
required_files = [comp for comp in require if 'file' in comp]
for comp in required_files:
for low in __lowstate__:
# A requirement should match either the ID and the name of
# another state.
if low['name'] == comp['file'] or low['__id__'] == comp['file']:
fn = low['name']
fun = low['fun']
if os.path.isdir(fn):
if _is_child(fn, name):
if fun == 'recurse':
fkeep = _gen_recurse_managed_files(**low)[3]
log.debug('Keep from {0}: {1}'.format(fn, fkeep))
keep.update(fkeep)
elif walk_d:
walk_ret = set()
_process_by_walk_d(fn, walk_ret)
keep.update(walk_ret)
else:
keep.update(_process(fn))
else:
keep.add(fn)
log.debug('Files to keep from required states: {0}'.format(list(keep)))
return list(keep)
def _check_file(name):
ret = True
msg = ''
if not os.path.isabs(name):
ret = False
msg = 'Specified file {0} is not an absolute path'.format(name)
elif not os.path.exists(name):
ret = False
msg = '{0}: file not found'.format(name)
return ret, msg
def _find_keep_files(root, keep):
'''
Compile a list of valid keep files (and directories).
'''
real_keep = set()
real_keep.add(root)
if isinstance(keep, list):
for fn_ in keep:
if not os.path.isabs(fn_):
continue
fn_ = os.path.normcase(os.path.abspath(fn_))
real_keep.add(fn_)
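# Also keep every parent directory of the kept path, up to the
# drive/filesystem root, so that _clean_dir does not remove the
# directories that contain kept files.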
while True:
fn_ = os.path.abspath(os.path.dirname(fn_))
real_keep.add(fn_)
drive, path = os.path.splitdrive(fn_)
if not path.lstrip(os.sep):
break
return real_keep
def _clean_dir(root, keep, exclude_pat):
'''
Clean out all of the files and directories in a directory (root) while
preserving the files in a list (keep) and part of exclude_pat
'''
real_keep = _find_keep_files(root, keep)
removed = set()
def _delete_not_kept(nfn):
if nfn not in real_keep:
# -- only check whether this path matches exclude_pat; there is
# no need to check include_pat here
if not salt.utils.stringutils.check_include_exclude(
os.path.relpath(nfn, root), None, exclude_pat):
return
removed.add(nfn)
if not __opts__['test']:
try:
os.remove(nfn)
except OSError:
__salt__['file.remove'](nfn)
for roots, dirs, files in salt.utils.path.os_walk(root):
for name in itertools.chain(dirs, files):
_delete_not_kept(os.path.join(roots, name))
return list(removed)
def _error(ret, err_msg):
ret['result'] = False
ret['comment'] = err_msg
return ret
def _check_directory(name,
user,
group,
recurse,
mode,
clean,
require,
exclude_pat,
max_depth=None,
follow_symlinks=False):
'''
Check what changes need to be made on a directory
'''
changes = {}
if recurse or clean:
assert max_depth is None or not clean
# walk path only once and store the result
walk_l = list(_depth_limited_walk(name, max_depth))
# root: (dirs, files) structure, compatible for python2.6
walk_d = {}
for i in walk_l:
walk_d[i[0]] = (i[1], i[2])
if recurse:
try:
recurse_set = _get_recurse_set(recurse)
except (TypeError, ValueError) as exc:
return False, '{0}'.format(exc), changes
if 'user' not in recurse_set:
user = None
if 'group' not in recurse_set:
group = None
if 'mode' not in recurse_set:
mode = None
check_files = 'ignore_files' not in recurse_set
check_dirs = 'ignore_dirs' not in recurse_set
for root, dirs, files in walk_l:
if check_files:
for fname in files:
fchange = {}
path = os.path.join(root, fname)
stats = __salt__['file.stats'](
path, None, follow_symlinks
)
if user is not None and user != stats.get('user'):
fchange['user'] = user
if group is not None and group != stats.get('group'):
fchange['group'] = group
if fchange:
changes[path] = fchange
if check_dirs:
for name_ in dirs:
path = os.path.join(root, name_)
fchange = _check_dir_meta(path, user, group, mode, follow_symlinks)
if fchange:
changes[path] = fchange
# Recurse skips root (we always do dirs, not root), so always check root:
fchange = _check_dir_meta(name, user, group, mode, follow_symlinks)
if fchange:
changes[name] = fchange
if clean:
keep = _gen_keep_files(name, require, walk_d)
def _check_changes(fname):
path = os.path.join(root, fname)
if path in keep:
return {}
else:
if not salt.utils.stringutils.check_include_exclude(
os.path.relpath(path, name), None, exclude_pat):
return {}
else:
return {path: {'removed': 'Removed due to clean'}}
for root, dirs, files in walk_l:
for fname in files:
changes.update(_check_changes(fname))
for name_ in dirs:
changes.update(_check_changes(name_))
if not os.path.isdir(name):
changes[name] = {'directory': 'new'}
if changes:
comments = ['The following files will be changed:\n']
for fn_ in changes:
for key, val in six.iteritems(changes[fn_]):
comments.append('{0}: {1} - {2}\n'.format(fn_, key, val))
return None, ''.join(comments), changes
return True, 'The directory {0} is in the correct state'.format(name), changes
def _check_directory_win(name,
win_owner,
win_perms=None,
win_deny_perms=None,
win_inheritance=None,
win_perms_reset=None):
'''
Check what changes need to be made on a directory
'''
changes = {}
if not os.path.isdir(name):
changes = {name: {'directory': 'new'}}
else:
# Check owner
owner = salt.utils.win_dacl.get_owner(name)
if not owner.lower() == win_owner.lower():
changes['owner'] = win_owner
# Check perms
perms = salt.utils.win_dacl.get_permissions(name)
# Verify Permissions
if win_perms is not None:
for user in win_perms:
# Check that user exists:
try:
salt.utils.win_dacl.get_name(user)
except CommandExecutionError:
continue
grant_perms = []
# Check for permissions
if isinstance(win_perms[user]['perms'], six.string_types):
if not salt.utils.win_dacl.has_permission(
name, user, win_perms[user]['perms']):
grant_perms = win_perms[user]['perms']
else:
for perm in win_perms[user]['perms']:
if not salt.utils.win_dacl.has_permission(
name, user, perm, exact=False):
grant_perms.append(win_perms[user]['perms'])
if grant_perms:
if 'grant_perms' not in changes:
changes['grant_perms'] = {}
if user not in changes['grant_perms']:
changes['grant_perms'][user] = {}
changes['grant_perms'][user]['perms'] = grant_perms
# Check Applies to
if 'applies_to' not in win_perms[user]:
applies_to = 'this_folder_subfolders_files'
else:
applies_to = win_perms[user]['applies_to']
if user in perms:
user = salt.utils.win_dacl.get_name(user)
# Get the proper applies_to text
at_flag = salt.utils.win_dacl.flags().ace_prop['file'][applies_to]
applies_to_text = salt.utils.win_dacl.flags().ace_prop['file'][at_flag]
if 'grant' in perms[user]:
if not perms[user]['grant']['applies to'] == applies_to_text:
if 'grant_perms' not in changes:
changes['grant_perms'] = {}
if user not in changes['grant_perms']:
changes['grant_perms'][user] = {}
changes['grant_perms'][user]['applies_to'] = applies_to
# Verify Deny Permissions
if win_deny_perms is not None:
for user in win_deny_perms:
# Check that user exists:
try:
salt.utils.win_dacl.get_name(user)
except CommandExecutionError:
continue
deny_perms = []
# Check for permissions
if isinstance(win_deny_perms[user]['perms'], six.string_types):
if not salt.utils.win_dacl.has_permission(
name, user, win_deny_perms[user]['perms'], 'deny'):
deny_perms = win_deny_perms[user]['perms']
else:
for perm in win_deny_perms[user]['perms']:
if not salt.utils.win_dacl.has_permission(
name, user, perm, 'deny', exact=False):
deny_perms.append(win_deny_perms[user]['perms'])
if deny_perms:
if 'deny_perms' not in changes:
changes['deny_perms'] = {}
if user not in changes['deny_perms']:
changes['deny_perms'][user] = {}
changes['deny_perms'][user]['perms'] = deny_perms
# Check Applies to
if 'applies_to' not in win_deny_perms[user]:
applies_to = 'this_folder_subfolders_files'
else:
applies_to = win_deny_perms[user]['applies_to']
if user in perms:
user = salt.utils.win_dacl.get_name(user)
# Get the proper applies_to text
at_flag = salt.utils.win_dacl.flags().ace_prop['file'][applies_to]
applies_to_text = salt.utils.win_dacl.flags().ace_prop['file'][at_flag]
if 'deny' in perms[user]:
if not perms[user]['deny']['applies to'] == applies_to_text:
if 'deny_perms' not in changes:
changes['deny_perms'] = {}
if user not in changes['deny_perms']:
changes['deny_perms'][user] = {}
changes['deny_perms'][user]['applies_to'] = applies_to
# Check inheritance
if win_inheritance is not None:
if not win_inheritance == salt.utils.win_dacl.get_inheritance(name):
changes['inheritance'] = win_inheritance
# Check reset
if win_perms_reset:
for user_name in perms:
if user_name not in win_perms:
if 'grant' in perms[user_name] and not perms[user_name]['grant']['inherited']:
if 'remove_perms' not in changes:
changes['remove_perms'] = {}
changes['remove_perms'].update({user_name: perms[user_name]})
if user_name not in win_deny_perms:
if 'deny' in perms[user_name] and not perms[user_name]['deny']['inherited']:
if 'remove_perms' not in changes:
changes['remove_perms'] = {}
changes['remove_perms'].update({user_name: perms[user_name]})
if changes:
return None, 'The directory "{0}" will be changed'.format(name), changes
return True, 'The directory {0} is in the correct state'.format(name), changes
def _check_dir_meta(name,
user,
group,
mode,
follow_symlinks=False):
'''
Check the changes in directory metadata
'''
try:
stats = __salt__['file.stats'](name, None, follow_symlinks)
except CommandExecutionError:
stats = {}
changes = {}
if not stats:
changes['directory'] = 'new'
return changes
if (user is not None
and user != stats['user']
and user != stats.get('uid')):
changes['user'] = user
if (group is not None
and group != stats['group']
and group != stats.get('gid')):
changes['group'] = group
# Normalize the dir mode
smode = salt.utils.files.normalize_mode(stats['mode'])
mode = salt.utils.files.normalize_mode(mode)
if mode is not None and mode != smode:
changes['mode'] = mode
return changes
def _check_touch(name, atime, mtime):
'''
Check to see if a file needs to be updated or created
'''
if not os.path.exists(name):
return None, 'File {0} is set to be created'.format(name)
stats = __salt__['file.stats'](name, follow_symlinks=False)
if atime is not None:
if six.text_type(atime) != six.text_type(stats['atime']):
return None, 'Times set to be updated on file {0}'.format(name)
if mtime is not None:
if six.text_type(mtime) != six.text_type(stats['mtime']):
return None, 'Times set to be updated on file {0}'.format(name)
return True, 'File {0} exists and has the correct times'.format(name)
def _get_symlink_ownership(path):
return (
__salt__['file.get_user'](path, follow_symlinks=False),
__salt__['file.get_group'](path, follow_symlinks=False)
)
def _check_symlink_ownership(path, user, group):
'''
Check if the symlink ownership matches the specified user and group
'''
cur_user, cur_group = _get_symlink_ownership(path)
return (cur_user == user) and (cur_group == group)
def _set_symlink_ownership(path, user, group):
'''
Set the ownership of a symlink and return a boolean indicating
success/failure
'''
try:
__salt__['file.lchown'](path, user, group)
except OSError:
pass
return _check_symlink_ownership(path, user, group)
def _symlink_check(name, target, force, user, group):
'''
Check the symlink function
'''
pchanges = {}
if not os.path.exists(name) and not __salt__['file.is_link'](name):
pchanges['new'] = name
return None, 'Symlink {0} to {1} is set for creation'.format(
name, target
), pchanges
if __salt__['file.is_link'](name):
if __salt__['file.readlink'](name) != target:
pchanges['change'] = name
return None, 'Link {0} target is set to be changed to {1}'.format(
name, target
), pchanges
else:
result = True
msg = 'The symlink {0} is present'.format(name)
if not _check_symlink_ownership(name, user, group):
result = None
pchanges['ownership'] = '{0}:{1}'.format(*_get_symlink_ownership(name))
msg += (
', but the ownership of the symlink would be changed '
'from {2}:{3} to {0}:{1}'
).format(user, group, *_get_symlink_ownership(name))
return result, msg, pchanges
else:
if force:
return None, ('The file or directory {0} is set for removal to '
'make way for a new symlink targeting {1}'
.format(name, target)), pchanges
return False, ('File or directory exists where the symlink {0} '
'should be. Did you mean to use force?'.format(name)), pchanges
def _test_owner(kwargs, user=None):
'''
Convert owner to user, since other config management tools use owner,
no need to punish people coming from other systems.
PLEASE DO NOT DOCUMENT THIS! WE USE USER, NOT OWNER!!!!
'''
if user:
return user
if 'owner' in kwargs:
log.warning(
'Use of argument owner found, "owner" is invalid, please '
'use "user"'
)
return kwargs['owner']
return user
def _unify_sources_and_hashes(source=None, source_hash=None,
sources=None, source_hashes=None):
'''
Silly little function to give us a standard tuple list for sources and
source_hashes
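For example, ``sources=['a', 'b']`` with ``source_hashes=['h1']`` yields
``(True, '', [('a', 'h1'), ('b', None)])``.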
'''
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
if source and sources:
return (False,
"source and sources are mutually exclusive", [])
if source_hash and source_hashes:
return (False,
"source_hash and source_hashes are mutually exclusive", [])
if source:
return (True, '', [(source, source_hash)])
# Make a nice neat list of tuples exactly len(sources) long..
return True, '', list(zip_longest(sources, source_hashes[:len(sources)]))
def _get_template_texts(source_list=None,
template='jinja',
defaults=None,
context=None,
**kwargs):
'''
Iterate a list of sources and process them as templates.
Returns a list of 'chunks' containing the rendered templates.
'''
ret = {'name': '_get_template_texts',
'changes': {},
'result': True,
'comment': '',
'data': []}
if source_list is None:
return _error(ret,
'_get_template_texts called with empty source_list')
txtl = []
for (source, source_hash) in source_list:
tmpctx = defaults if defaults else {}
if context:
tmpctx.update(context)
rndrd_templ_fn = __salt__['cp.get_template'](
source,
'',
template=template,
saltenv=__env__,
context=tmpctx,
**kwargs
)
msg = 'cp.get_template returned {0} (Called with: {1})'
log.debug(msg.format(rndrd_templ_fn, source))
if rndrd_templ_fn:
tmplines = None
with salt.utils.files.fopen(rndrd_templ_fn, 'rb') as fp_:
tmplines = fp_.read()
if six.PY3:
tmplines = tmplines.decode(__salt_system_encoding__)
tmplines = tmplines.splitlines(True)
if not tmplines:
msg = 'Failed to read rendered template file {0} ({1})'
log.debug(msg.format(rndrd_templ_fn, source))
ret['name'] = source
return _error(ret, msg.format(rndrd_templ_fn, source))
txtl.append(''.join(tmplines))
else:
msg = 'Failed to load template file {0}'.format(source)
log.debug(msg)
ret['name'] = source
return _error(ret, msg)
ret['data'] = txtl
return ret
def _validate_str_list(arg):
'''
ensure ``arg`` is a list of strings
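For example, ``'foo'`` becomes ``['foo']`` and ``['foo', 1]`` becomes
``['foo', '1']``.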
'''
if isinstance(arg, six.string_types):
ret = [arg]
elif isinstance(arg, Iterable) and not isinstance(arg, Mapping):
ret = []
for item in arg:
if isinstance(item, six.string_types):
ret.append(item)
else:
ret.append(six.text_type(item))
else:
ret = [six.text_type(arg)]
return ret
def _get_shortcut_ownership(path):
return __salt__['file.get_user'](path, follow_symlinks=False)
def _check_shortcut_ownership(path, user):
'''
Check if the shortcut ownership matches the specified user
'''
cur_user = _get_shortcut_ownership(path)
return cur_user == user
def _set_shortcut_ownership(path, user):
'''
Set the ownership of a shortcut and return a boolean indicating
success/failure
'''
try:
__salt__['file.lchown'](path, user)
except OSError:
pass
return _check_shortcut_ownership(path, user)
def _shortcut_check(name,
target,
arguments,
working_dir,
description,
icon_location,
force,
user):
'''
Check the shortcut function
'''
pchanges = {}
if not os.path.exists(name):
pchanges['new'] = name
return None, 'Shortcut "{0}" to "{1}" is set for creation'.format(
name, target
), pchanges
if os.path.isfile(name):
shell = win32com.client.Dispatch("WScript.Shell")
scut = shell.CreateShortcut(name)
state_checks = [scut.TargetPath.lower() == target.lower()]
if arguments is not None:
state_checks.append(scut.Arguments == arguments)
if working_dir is not None:
state_checks.append(
scut.WorkingDirectory.lower() == working_dir.lower()
)
if description is not None:
state_checks.append(scut.Description == description)
if icon_location is not None:
state_checks.append(
scut.IconLocation.lower() == icon_location.lower()
)
if not all(state_checks):
pchanges['change'] = name
return None, 'Shortcut "{0}" target is set to be changed to "{1}"'.format(
name, target
), pchanges
else:
result = True
msg = 'The shortcut "{0}" is present'.format(name)
if not _check_shortcut_ownership(name, user):
result = None
pchanges['ownership'] = '{0}'.format(_get_shortcut_ownership(name))
msg += (
', but the ownership of the shortcut would be changed '
'from {1} to {0}'
).format(user, _get_shortcut_ownership(name))
return result, msg, pchanges
else:
if force:
return None, ('The link or directory "{0}" is set for removal to '
'make way for a new shortcut targeting "{1}"'
.format(name, target)), pchanges
return False, ('Link or directory exists where the shortcut "{0}" '
'should be. Did you mean to use force?'.format(name)), pchanges
def symlink(
name,
target,
force=False,
backupname=None,
makedirs=False,
user=None,
group=None,
mode=None,
**kwargs):
'''
Create a symbolic link (symlink, soft link)
If the file already exists and is a symlink pointing to any location other
than the specified target, the symlink will be replaced. If the path is a
regular file or directory then the state will return False. To replace the
regular file or directory with a symlink, pass ``force: True``; to have it
renamed out of the way instead, pass a ``backupname``.
name
The location of the symlink to create
target
The location that the symlink points to
force
If the name of the symlink exists and is not a symlink and
force is set to False, the state will fail. If force is set to
True, the file or directory in the way of the symlink file
will be deleted to make room for the symlink, unless
backupname is set, when it will be renamed
backupname
If the name of the symlink exists and is not a symlink, it will be
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
An absolute path OR a basename file/directory name must be provided.
The latter will be placed relative to the symlink destination's parent
directory.
makedirs
If the location of the symlink does not already have a parent directory
then the state will fail, setting makedirs to True will allow Salt to
create the parent directory
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, e.g. ``644``, ``0775``, or ``4664``.
Not supported on Windows.
The default mode for new files and directories corresponds to the umask of
the salt process. For existing files and directories it is not enforced.
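A minimal usage example (the paths shown are illustrative):
.. code-block:: yaml
    /tmp/my-link:
      file.symlink:
        - target: /opt/app/current
        - user: root
        - group: root
        - makedirs: True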
'''
name = os.path.expanduser(name)
# Make sure that leading zeros stripped by YAML loader are added back
mode = salt.utils.files.normalize_mode(mode)
user = _test_owner(kwargs, user=user)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.symlink')
if user is None:
user = __opts__['user']
if salt.utils.platform.is_windows():
# Make sure the user exists in Windows
# Salt default is 'root'
if not __salt__['user.info'](user):
# User not found, use the account salt is running under
# If username not found, use System
user = __salt__['user.current']()
if not user:
user = 'SYSTEM'
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
preflight_errors = []
uid = __salt__['file.user_to_uid'](user)
gid = __salt__['file.group_to_gid'](group)
if uid == '':
preflight_errors.append('User {0} does not exist'.format(user))
if gid == '':
preflight_errors.append('Group {0} does not exist'.format(group))
if not os.path.isabs(name):
preflight_errors.append(
'Specified file {0} is not an absolute path'.format(name)
)
if preflight_errors:
msg = '. '.join(preflight_errors)
if len(preflight_errors) > 1:
msg += '.'
return _error(ret, msg)
presult, pcomment, ret['pchanges'] = _symlink_check(name,
target,
force,
user,
group)
if __opts__['test']:
ret['result'] = presult
ret['comment'] = pcomment
return ret
if not os.path.isdir(os.path.dirname(name)):
if makedirs:
__salt__['file.makedirs'](
name,
user=user,
group=group,
mode=mode)
else:
return _error(
ret,
'Directory {0} for symlink is not present'.format(
os.path.dirname(name)
)
)
if __salt__['file.is_link'](name):
# The link exists, verify that it matches the target
if os.path.normpath(__salt__['file.readlink'](name)) != os.path.normpath(target):
# The target is wrong, delete the link
os.remove(name)
else:
if _check_symlink_ownership(name, user, group):
# The link looks good!
ret['comment'] = ('Symlink {0} is present and owned by '
'{1}:{2}'.format(name, user, group))
else:
if _set_symlink_ownership(name, user, group):
ret['comment'] = ('Set ownership of symlink {0} to '
'{1}:{2}'.format(name, user, group))
ret['changes']['ownership'] = '{0}:{1}'.format(user, group)
else:
ret['result'] = False
ret['comment'] += (
'Failed to set ownership of symlink {0} to '
'{1}:{2}'.format(name, user, group)
)
return ret
elif os.path.isfile(name) or os.path.isdir(name):
# It is not a link, but a file or dir
if backupname is not None:
if not os.path.isabs(backupname):
if backupname == os.path.basename(backupname):
backupname = os.path.join(
os.path.dirname(os.path.normpath(name)),
backupname)
else:
return _error(ret, (('Backupname must be an absolute path '
'or a file name: {0}').format(backupname)))
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, (('Symlink & backup dest exists and Force not set.'
' {0} -> {1} - backup: {2}').format(
name, target, backupname)))
else:
__salt__['file.remove'](backupname)
try:
__salt__['file.move'](name, backupname)
except Exception as exc:
ret['changes'] = {}
log.debug(
'Encountered error renaming %s to %s',
name, backupname, exc_info=True
)
return _error(ret, ('Unable to rename {0} to backup {1} -> '
': {2}'.format(name, backupname, exc)))
elif force:
# Remove whatever is in the way
if __salt__['file.is_link'](name):
__salt__['file.remove'](name)
ret['changes']['forced'] = 'Symlink was forcibly replaced'
else:
__salt__['file.remove'](name)
else:
# Otherwise throw an error
if os.path.isfile(name):
return _error(ret,
('File exists where the symlink {0} should be'
.format(name)))
else:
return _error(ret, ((
'Directory exists where the symlink {0} should be'
).format(name)))
if not os.path.exists(name):
# The link is not present, make it
try:
__salt__['file.symlink'](target, name)
except OSError as exc:
ret['result'] = False
ret['comment'] = ('Unable to create new symlink {0} -> '
'{1}: {2}'.format(name, target, exc))
return ret
else:
ret['comment'] = ('Created new symlink {0} -> '
'{1}'.format(name, target))
ret['changes']['new'] = name
if not _check_symlink_ownership(name, user, group):
if not _set_symlink_ownership(name, user, group):
ret['result'] = False
ret['comment'] += (', but was unable to set ownership to '
'{0}:{1}'.format(user, group))
return ret
def absent(name,
**kwargs):
'''
Make sure that the named file or directory is absent. If it exists, it will
be deleted. This will work to reverse any of the functions in the file
state module. If a directory is supplied, it will be recursively deleted.
name
The path which should be deleted
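A minimal usage example (the path shown is illustrative):
.. code-block:: yaml
    /tmp/obsolete.conf:
      file.absent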
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.absent')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name)
)
if name == '/':
return _error(ret, 'Refusing to make "/" absent')
if os.path.isfile(name) or os.path.islink(name):
ret['pchanges']['removed'] = name
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'File {0} is set for removal'.format(name)
return ret
try:
if salt.utils.platform.is_windows():
__salt__['file.remove'](name, force=True)
else:
__salt__['file.remove'](name)
ret['comment'] = 'Removed file {0}'.format(name)
ret['changes']['removed'] = name
return ret
except CommandExecutionError as exc:
return _error(ret, '{0}'.format(exc))
elif os.path.isdir(name):
ret['pchanges']['removed'] = name
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Directory {0} is set for removal'.format(name)
return ret
try:
if salt.utils.platform.is_windows():
__salt__['file.remove'](name, force=True)
else:
__salt__['file.remove'](name)
ret['comment'] = 'Removed directory {0}'.format(name)
ret['changes']['removed'] = name
return ret
except (OSError, IOError):
return _error(ret, 'Failed to remove directory {0}'.format(name))
ret['comment'] = 'File {0} is not present'.format(name)
return ret
def exists(name,
**kwargs):
'''
Verify that the named file or directory is present or exists.
Ensures pre-requisites outside of Salt's purview
(e.g., keytabs, private keys, etc.) have been previously satisfied before
deployment.
This function does not create the file if it doesn't exist; instead, it
returns an error.
name
Absolute path which must exist
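A minimal usage example (the path shown is illustrative):
.. code-block:: yaml
    /etc/ssl/private/service.key:
      file.exists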
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.exists')
if not os.path.exists(name):
return _error(ret, 'Specified path {0} does not exist'.format(name))
ret['comment'] = 'Path {0} exists'.format(name)
return ret
def missing(name,
**kwargs):
'''
Verify that the named file or directory is missing. This returns True only
if the named path is missing; it does not remove the file if it is present.
name
Absolute path which must NOT exist
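A minimal usage example (the path shown is illustrative):
.. code-block:: yaml
    /etc/httpd/conf.d/welcome.conf:
      file.missing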
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.missing')
if os.path.exists(name):
return _error(ret, 'Specified path {0} exists'.format(name))
ret['comment'] = 'Path {0} is missing'.format(name)
return ret
def managed(name,
source=None,
source_hash='',
source_hash_name=None,
keep_source=True,
user=None,
group=None,
mode=None,
attrs=None,
template=None,
makedirs=False,
dir_mode=None,
context=None,
replace=True,
defaults=None,
backup='',
show_changes=True,
create=True,
contents=None,
tmp_dir='',
tmp_ext='',
contents_pillar=None,
contents_grains=None,
contents_newline=True,
contents_delimiter=':',
encoding=None,
encoding_errors='strict',
allow_empty=True,
follow_symlinks=True,
check_cmd=None,
skip_verify=False,
win_owner=None,
win_perms=None,
win_deny_perms=None,
win_inheritance=True,
win_perms_reset=False,
**kwargs):
r'''
Manage a given file, this function allows for a file to be downloaded from
the salt master and potentially run through a templating system.
name
The location of the file to manage, as an absolute path.
source
The source file to download to the minion, this source file can be
hosted on either the salt master server (``salt://``), the salt minion
local file system (``/``), or on an HTTP or FTP server (``http(s)://``,
``ftp://``).
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed. This is also the case when a file
already exists and the source is undefined; the contents of the file
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required.
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored. Source
list functionality only supports local files and remote files hosted on
the salt master server or retrievable via HTTP, HTTPS, or FTP.
.. code-block:: yaml
file_override_example:
file.managed:
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
**Using a Source Hash File**
The file can contain several checksums for several files. Each line
must contain both the file name and the hash. If no file name is
matched, the first hash encountered will be used, otherwise the most
secure hash with the correct source file name will be used.
When using a source hash file the source_hash argument needs to be a
url, the standard download urls are supported, ftp, http, salt etc:
Example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.hash
The following lines are all supported formats:
.. code-block:: text
/etc/rc.conf ef6e82e4006dee563d98ada2a2a80a27
sha254c8525aee419eb649f0233be91c151178b30f0dff8ebbdcc8de71b1d5c8bcc06a /etc/resolv.conf
ead48423703509d37c4a90e6a0d53e143b6fc268
Debian file type ``*.dsc`` files are also supported.
**Inserting the Source Hash in the SLS Data**
The source_hash can be specified as a simple checksum, like so:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: 79eef25f9b0b2c642c62b7f737d4f53f
.. note::
Releases prior to 2016.11.0 must also include the hash type, like
in the below example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: md5=79eef25f9b0b2c642c62b7f737d4f53f
Known issues:
If the remote server URL has the hash file as an apparent
sub-directory of the source file, the module will discover that it
has already cached a directory where a file should be cached. For
example:
.. code-block:: yaml
tomdroid-src-0.7.3.tar.gz:
file.managed:
- name: /tmp/tomdroid-src-0.7.3.tar.gz
- source: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz
- source_hash: https://launchpad.net/tomdroid/beta/0.7.3/+download/tomdroid-src-0.7.3.tar.gz/+md5
source_hash_name
When ``source_hash`` refers to a hash file, Salt will try to find the
correct hash by matching the filename/URI associated with that hash. By
default, Salt will look for the filename being managed. When managing a
file at path ``/tmp/foo.txt``, then the following line in a hash file
would match:
.. code-block:: text
acbd18db4cc2f85cedef654fccc4a4d8 foo.txt
However, sometimes a hash file will include multiple similar paths:
.. code-block:: text
37b51d194a7513e45b56f6524f2d51f2 ./dir1/foo.txt
acbd18db4cc2f85cedef654fccc4a4d8 ./dir2/foo.txt
73feffa4b7f6bb68e44cf984c85f6e88 ./dir3/foo.txt
In cases like this, Salt may match the incorrect hash. This argument
can be used to tell Salt which filename to match, to ensure that the
correct hash is identified. For example:
.. code-block:: yaml
/tmp/foo.txt:
file.managed:
- source: https://mydomain.tld/dir2/foo.txt
- source_hash: https://mydomain.tld/hashes
- source_hash_name: ./dir2/foo.txt
.. note::
This argument must contain the full filename entry from the
checksum file, as this argument is meant to disambiguate matches
for multiple files that have the same basename. So, in the
example above, simply using ``foo.txt`` would not match.
.. versionadded:: 2016.3.5
keep_source : True
Set to ``False`` to discard the cached copy of the source file once the
state completes. This can be useful for larger files to keep them from
taking up space in minion cache. However, keep in mind that discarding
the source file will result in the state needing to re-download the
source file if the state is run again.
.. versionadded:: 2017.7.3
user
The user to own the file, this defaults to the user salt is running as
on the minion
group
The group ownership set for the file, this defaults to the group salt
is running as on the minion. On Windows, this is ignored
mode
The permissions to set on this file, e.g. ``644``, ``0775``, or
``4664``.
The default mode for new files and directories corresponds to the
umask of the salt process. The mode of existing files and directories
will only be changed if ``mode`` is specified.
.. note::
This option is **not** supported on Windows.
.. versionchanged:: 2016.11.0
This option can be set to ``keep``, and Salt will keep the mode
from the Salt fileserver. This is only supported when the
``source`` URL begins with ``salt://``, or for files local to the
minion. Because the ``source`` option cannot be used with any of
the ``contents`` options, setting the ``mode`` to ``keep`` is also
incompatible with the ``contents`` options.
.. note:: keep does not work with salt-ssh.
As a consequence of how the files are transferred to the minion, and
the inability to connect back to the master with salt-ssh, salt is
unable to stat the file as it exists on the fileserver and thus
cannot mirror the mode on the salt-ssh minion
attrs
The attributes to have on this file, e.g. ``a``, ``i``. The attributes
can be any or a combination of the following characters:
``acdijstuADST``.
.. note::
This option is **not** supported on Windows.
.. versionadded:: 2018.3.0
template
If this setting is applied, the named templating engine will be used to
render the downloaded file. The following templates are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
makedirs : False
If set to ``True``, then the parent directories will be created to
facilitate the creation of the named file. If ``False``, and the parent
directory of the destination file doesn't exist, the state will fail.
dir_mode
If directories are to be created, passing this option specifies the
permissions for those directories. If this is not set, directories
will be assigned permissions by adding the execute bit to the mode of
the files.
The default mode for new files and directories corresponds to the umask of
the salt process. For existing files and directories it is not enforced.
replace : True
If set to ``False`` and the file already exists, the file will not be
modified even if changes would otherwise be made. Permissions and
ownership will still be enforced, however.
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
backup
Overrides the default backup mode for this specific file. See
:ref:`backup_mode documentation <file-state-backups>` for more details.
show_changes
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made.
create : True
If set to ``False``, then the file will only be managed if the file
already exists on the system.
contents
Specify the contents of the file. Cannot be used in combination with
``source``. Ignores hashes and does not use a templating engine.
This value can be either a single string, a multiline YAML string or a
list of strings. If a list of strings, then the strings will be joined
together with newlines in the resulting file. For example, the below
two example states would result in identical file contents:
.. code-block:: yaml
/path/to/file1:
file.managed:
- contents:
- This is line 1
- This is line 2
/path/to/file2:
file.managed:
- contents: |
This is line 1
This is line 2
contents_pillar
.. versionadded:: 0.17.0
.. versionchanged:: 2016.11.0
contents_pillar can also be a list, and the pillars will be
concatenated together to form one file.
Operates like ``contents``, but draws from a value stored in pillar,
using the pillar path syntax used in :mod:`pillar.get
<salt.modules.pillar.get>`. This is useful when the pillar value
contains newlines, as referencing a pillar variable using a jinja/mako
template can result in YAML formatting issues due to the newlines
causing indentation mismatches.
For example, the following could be used to deploy an SSH private key:
.. code-block:: yaml
/home/deployer/.ssh/id_rsa:
file.managed:
- user: deployer
- group: deployer
- mode: 600
- attrs: a
- contents_pillar: userdata:deployer:id_rsa
This would populate ``/home/deployer/.ssh/id_rsa`` with the contents of
``pillar['userdata']['deployer']['id_rsa']``. An example of this pillar
setup would be like so:
.. code-block:: yaml
userdata:
deployer:
id_rsa: |
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAoQiwO3JhBquPAalQF9qP1lLZNXVjYMIswrMe2HcWUVBgh+vY
U7sCwx/dH6+VvNwmCoqmNnP+8gTPKGl1vgAObJAnMT623dMXjVKwnEagZPRJIxDy
B/HaAre9euNiY3LvIzBTWRSeMfT+rWvIKVBpvwlgGrfgz70m0pqxu+UyFbAGLin+
GpxzZAMaFpZw4sSbIlRuissXZj/sHpQb8p9M5IeO4Z3rjkCP1cxI
-----END RSA PRIVATE KEY-----
.. note::
The private key above is shortened to keep the example brief, but
shows how to do multiline string in YAML. The key is followed by a
pipe character, and the multiline string is indented two more
spaces.
To avoid the hassle of creating an indented multiline YAML string,
the :mod:`file_tree external pillar <salt.pillar.file_tree>` can
be used instead. However, this will not work for binary files in
Salt releases before 2015.8.4.
contents_grains
.. versionadded:: 2014.7.0
Operates like ``contents``, but draws from a value stored in grains,
using the grains path syntax used in :mod:`grains.get
<salt.modules.grains.get>`. This functionality works similarly to
``contents_pillar``, but with grains.
For example, the following could be used to deploy a "message of the day"
file:
.. code-block:: yaml
write_motd:
file.managed:
- name: /etc/motd
- contents_grains: motd
This would populate ``/etc/motd`` file with the contents of the ``motd``
grain. The ``motd`` grain is not a default grain, and would need to be
set prior to running the state:
.. code-block:: bash
salt '*' grains.set motd 'Welcome! This system is managed by Salt.'
contents_newline : True
.. versionadded:: 2014.7.0
.. versionchanged:: 2015.8.4
This option is now ignored if the contents being deployed contain
binary data.
If ``True``, files managed using ``contents``, ``contents_pillar``, or
``contents_grains`` will have a newline added to the end of the file if
one is not present. Setting this option to ``False`` will omit this
final newline.
contents_delimiter
.. versionadded:: 2015.8.4
Can be used to specify an alternate delimiter for ``contents_pillar``
or ``contents_grains``. This delimiter will be passed through to
:py:func:`pillar.get <salt.modules.pillar.get>` or :py:func:`grains.get
<salt.modules.grains.get>` when retrieving the contents.
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
Error handling scheme for encoding errors. Default is ``'strict'``.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the list of available schemes.
.. versionadded:: 2017.7.0
allow_empty : True
.. versionadded:: 2015.8.4
If set to ``False``, then the state will fail if the contents specified
by ``contents_pillar`` or ``contents_grains`` are empty.
follow_symlinks : True
.. versionadded:: 2014.7.0
If the desired path is a symlink follow it and make changes to the
file to which the symlink points.
check_cmd
.. versionadded:: 2014.7.0
The specified command will be run with an appended argument of a
*temporary* file containing the new managed contents. If the command
exits with a zero status the new managed contents will be written to
the managed destination. If the command exits with a nonzero exit
code, the state will fail and no changes will be made to the file.
For example, the following could be used to verify sudoers before making
changes:
.. code-block:: yaml
/etc/sudoers:
file.managed:
- user: root
- group: root
- mode: 0440
- attrs: i
- source: salt://sudoers/files/sudoers.jinja
- template: jinja
- check_cmd: /usr/sbin/visudo -c -f
**NOTE**: This ``check_cmd`` functions differently from the requisite
``check_cmd``.
tmp_dir
Directory for temp file created by ``check_cmd``. Useful for checkers
dependent on config file location (e.g. daemons restricted to their
own config directories by an apparmor profile).
.. code-block:: yaml
/etc/dhcp/dhcpd.conf:
file.managed:
- user: root
- group: root
- mode: 0755
- tmp_dir: '/etc/dhcp'
- contents: "# Managed by Salt"
- check_cmd: dhcpd -t -cf
tmp_ext
Suffix for temp file created by ``check_cmd``. Useful for checkers
dependent on config file extension (e.g. the init-checkconf upstart
config checker).
.. code-block:: yaml
/etc/init/test.conf:
file.managed:
- user: root
- group: root
- mode: 0440
- tmp_ext: '.conf'
- contents:
- 'description "Salt Minion"'
- 'start on started mountall'
- 'stop on shutdown'
- 'respawn'
- 'exec salt-minion'
- check_cmd: init-checkconf -f
skip_verify : False
If ``True``, hash verification of remote file sources (``http://``,
``https://``, ``ftp://``) will be skipped, and the ``source_hash``
argument will be ignored.
.. versionadded:: 2016.3.0
win_owner : None
The owner of the directory. If this is not passed, user will be used. If
user is not passed, the account under which Salt is running will be
used.
.. versionadded:: 2017.7.0
win_perms : None
A dictionary containing permissions to grant and their propagation. For
example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
single basic perm or a list of advanced perms. ``perms`` must be
specified. ``applies_to`` does not apply to file objects.
.. versionadded:: 2017.7.0
win_deny_perms : None
A dictionary containing permissions to deny and their propagation. For
example: ``{'Administrators': {'perms': 'full_control'}}`` Can be a
single basic perm or a list of advanced perms. ``perms`` must be
specified. ``applies_to`` does not apply to file objects.
.. versionadded:: 2017.7.0
win_inheritance : True
True to inherit permissions from the parent directory, False not to
inherit permission.
.. versionadded:: 2017.7.0
win_perms_reset : False
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``.
.. versionadded:: 2018.3.0
Here's an example using the above ``win_*`` parameters:
.. code-block:: yaml
create_config_file:
file.managed:
- name: C:\config\settings.cfg
- source: salt://settings.cfg
- win_owner: Administrators
- win_perms:
# Basic Permissions
dev_ops:
perms: full_control
# List of advanced permissions
appuser:
perms:
- read_attributes
- read_ea
- create_folders
- read_permissions
joe_snuffy:
perms: read
- win_deny_perms:
fred_snuffy:
perms: full_control
- win_inheritance: False
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
name = os.path.expanduser(name)
ret = {'changes': {},
'pchanges': {},
'comment': '',
'name': name,
'result': True}
if not name:
return _error(ret, 'Destination file name is required')
if mode is not None and salt.utils.platform.is_windows():
return _error(ret, 'The \'mode\' option is not supported on Windows')
if attrs is not None and salt.utils.platform.is_windows():
return _error(ret, 'The \'attrs\' option is not supported on Windows')
try:
keep_mode = mode.lower() == 'keep'
if keep_mode:
# We're not hard-coding the mode, so set it to None
mode = None
except AttributeError:
keep_mode = False
# Make sure that any leading zeros stripped by YAML loader are added back
mode = salt.utils.files.normalize_mode(mode)
contents_count = len(
[x for x in (contents, contents_pillar, contents_grains)
if x is not None]
)
if source and contents_count > 0:
return _error(
ret,
'\'source\' cannot be used in combination with \'contents\', '
'\'contents_pillar\', or \'contents_grains\''
)
elif keep_mode and contents_count > 0:
return _error(
ret,
'Mode preservation cannot be used in combination with \'contents\', '
'\'contents_pillar\', or \'contents_grains\''
)
elif contents_count > 1:
return _error(
ret,
'Only one of \'contents\', \'contents_pillar\', and '
'\'contents_grains\' is permitted'
)
# If no source is specified, set replace to False, as there is nothing
# with which to replace the file.
if not source and contents_count == 0 and replace:
replace = False
log.warning(
'State for file: {0} - Neither \'source\' nor \'contents\' nor '
'\'contents_pillar\' nor \'contents_grains\' was defined, yet '
'\'replace\' was set to \'True\'. As there is no source to '
'replace the file with, \'replace\' has been set to \'False\' to '
'avoid reading the file unnecessarily.'.format(name)
)
if 'file_mode' in kwargs:
ret.setdefault('warnings', []).append(
'The \'file_mode\' argument will be ignored. '
'Please use \'mode\' instead to set file permissions.'
)
# Use this below to avoid multiple '\0' checks and save some CPU cycles
if contents_pillar is not None:
if isinstance(contents_pillar, list):
list_contents = []
for nextp in contents_pillar:
nextc = __salt__['pillar.get'](nextp, __NOT_FOUND,
delimiter=contents_delimiter)
if nextc is __NOT_FOUND:
return _error(
ret,
'Pillar {0} does not exist'.format(nextp)
)
list_contents.append(nextc)
use_contents = os.linesep.join(list_contents)
else:
use_contents = __salt__['pillar.get'](contents_pillar, __NOT_FOUND,
delimiter=contents_delimiter)
if use_contents is __NOT_FOUND:
return _error(
ret,
'Pillar {0} does not exist'.format(contents_pillar)
)
elif contents_grains is not None:
if isinstance(contents_grains, list):
list_contents = []
for nextg in contents_grains:
nextc = __salt__['grains.get'](nextg, __NOT_FOUND,
delimiter=contents_delimiter)
if nextc is __NOT_FOUND:
return _error(
ret,
'Grain {0} does not exist'.format(nextc)
)
list_contents.append(nextc)
use_contents = os.linesep.join(list_contents)
else:
use_contents = __salt__['grains.get'](contents_grains, __NOT_FOUND,
delimiter=contents_delimiter)
if use_contents is __NOT_FOUND:
return _error(
ret,
'Grain {0} does not exist'.format(contents_grains)
)
elif contents is not None:
use_contents = contents
else:
use_contents = None
if use_contents is not None:
if not allow_empty and not use_contents:
if contents_pillar:
contents_id = 'contents_pillar {0}'.format(contents_pillar)
elif contents_grains:
contents_id = 'contents_grains {0}'.format(contents_grains)
else:
contents_id = '\'contents\''
return _error(
ret,
'{0} value would result in empty contents. Set allow_empty '
'to True to allow the managed file to be empty.'
.format(contents_id)
)
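# Contents containing a NUL byte are treated as binary data and passed
# through unchanged; everything else is validated as a string (or list
# of strings) and joined with the OS line separator below.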
if isinstance(use_contents, bytes) and b'\0' in use_contents:
contents = use_contents
elif isinstance(use_contents, six.string_types) and str('\0') in use_contents:
contents = use_contents
else:
validated_contents = _validate_str_list(use_contents)
if not validated_contents:
return _error(
ret,
'Contents specified by contents/contents_pillar/'
'contents_grains is not a string or list of strings, and '
'is not binary data. SLS is likely malformed.'
)
contents = os.linesep.join(validated_contents)
if contents_newline and not contents.endswith(os.linesep):
contents += os.linesep
if template:
contents = __salt__['file.apply_template_on_contents'](
contents,
template=template,
context=context,
defaults=defaults,
saltenv=__env__)
if not isinstance(contents, six.string_types):
if 'result' in contents:
ret['result'] = contents['result']
else:
ret['result'] = False
if 'comment' in contents:
ret['comment'] = contents['comment']
else:
ret['comment'] = 'Error while applying template on contents'
return ret
user = _test_owner(kwargs, user=user)
if salt.utils.platform.is_windows():
# If win_owner not passed, use user
if win_owner is None:
win_owner = user if user else None
# Group isn't relevant to Windows, use win_perms/win_deny_perms
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this is '
'a Windows system. Please use the `win_*` parameters to set '
'permissions in Windows.'.format(name)
)
group = user
if not create:
if not os.path.isfile(name):
# Don't create a file that is not already present
ret['comment'] = ('File {0} is not present and is not set for '
'creation').format(name)
return ret
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if os.path.isdir(name):
ret['comment'] = 'Specified target {0} is a directory'.format(name)
ret['result'] = False
return ret
if context is None:
context = {}
elif not isinstance(context, dict):
return _error(
ret, 'Context must be formed as a dict')
if defaults and not isinstance(defaults, dict):
return _error(
ret, 'Defaults must be formed as a dict')
if not replace and os.path.exists(name):
# Check and set the permissions if necessary
if salt.utils.platform.is_windows():
ret = __salt__['file.check_perms'](
path=name,
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
else:
ret, _ = __salt__['file.check_perms'](
name, ret, user, group, mode, attrs, follow_symlinks)
if __opts__['test']:
ret['comment'] = 'File {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
ret['comment'] = ('File {0} exists with proper permissions. '
'No changes made.'.format(name))
return ret
accum_data, _ = _load_accumulators()
if name in accum_data:
if not context:
context = {}
context['accumulator'] = accum_data[name]
try:
if __opts__['test']:
if 'file.check_managed_changes' in __salt__:
ret['pchanges'] = __salt__['file.check_managed_changes'](
name,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
template,
context,
defaults,
__env__,
contents,
skip_verify,
keep_mode,
**kwargs
)
if salt.utils.platform.is_windows():
try:
ret = __salt__['file.check_perms'](
path=name,
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
except CommandExecutionError as exc:
if exc.strerror.startswith('Path not found'):
ret['pchanges'] = '{0} will be created'.format(name)
if isinstance(ret['pchanges'], tuple):
ret['result'], ret['comment'] = ret['pchanges']
elif ret['pchanges']:
ret['result'] = None
ret['comment'] = 'The file {0} is set to be changed'.format(name)
if 'diff' in ret['pchanges'] and not show_changes:
ret['pchanges']['diff'] = '<show_changes=False>'
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
# If the source is a list then find which file exists
source, source_hash = __salt__['file.source_list'](
source,
source_hash,
__env__
)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = 'Unable to manage file: {0}'.format(exc)
return ret
# Gather the source file from the server
try:
sfn, source_sum, comment_ = __salt__['file.get_managed'](
name,
template,
source,
source_hash,
source_hash_name,
user,
group,
mode,
attrs,
__env__,
context,
defaults,
skip_verify,
**kwargs
)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
return _error(ret, 'Unable to manage file: {0}'.format(exc))
tmp_filename = None
if check_cmd:
tmp_filename = salt.utils.files.mkstemp(suffix=tmp_ext, dir=tmp_dir)
# if exists copy existing file to tmp to compare
if __salt__['file.file_exists'](name):
try:
__salt__['file.copy'](name, tmp_filename)
except Exception as exc:
return _error(
ret,
'Unable to copy file {0} to {1}: {2}'.format(
name, tmp_filename, exc
)
)
try:
ret = __salt__['file.manage_file'](
tmp_filename,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
__env__,
backup,
makedirs,
template,
show_changes,
contents,
dir_mode,
follow_symlinks,
skip_verify,
keep_mode,
win_owner=win_owner,
win_perms=win_perms,
win_deny_perms=win_deny_perms,
win_inheritance=win_inheritance,
win_perms_reset=win_perms_reset,
encoding=encoding,
encoding_errors=encoding_errors,
**kwargs)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
salt.utils.files.remove(tmp_filename)
if not keep_source and sfn:
salt.utils.files.remove(sfn)
return _error(ret, 'Unable to check_cmd file: {0}'.format(exc))
# The tempfile would be changed; verify it with check_cmd before
# committing the change to the real file
if ret['changes']:
# Reset ret
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
check_cmd_opts = {}
if 'shell' in __grains__:
check_cmd_opts['shell'] = __grains__['shell']
cret = mod_run_check_cmd(check_cmd, tmp_filename, **check_cmd_opts)
if isinstance(cret, dict):
ret.update(cret)
salt.utils.files.remove(tmp_filename)
return ret
# Since we generated a new tempfile and we are not returning here,
# point sfn at the new tempfile, otherwise the final manage_file
# call would fail with a file-not-found error
sfn = tmp_filename
else:
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if comment_ and contents is None:
return _error(ret, comment_)
else:
try:
return __salt__['file.manage_file'](
name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
__env__,
backup,
makedirs,
template,
show_changes,
contents,
dir_mode,
follow_symlinks,
skip_verify,
keep_mode,
win_owner=win_owner,
win_perms=win_perms,
win_deny_perms=win_deny_perms,
win_inheritance=win_inheritance,
win_perms_reset=win_perms_reset,
encoding=encoding,
encoding_errors=encoding_errors,
**kwargs)
except Exception as exc:
ret['changes'] = {}
log.debug(traceback.format_exc())
return _error(ret, 'Unable to manage file: {0}'.format(exc))
finally:
if tmp_filename:
salt.utils.files.remove(tmp_filename)
if not keep_source and sfn:
salt.utils.files.remove(sfn)
_RECURSE_TYPES = ['user', 'group', 'mode', 'ignore_files', 'ignore_dirs']
def _get_recurse_set(recurse):
'''
    Convert a *recurse* definition to a set of strings.
    Raises TypeError or ValueError when *recurse* has the wrong structure.
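
    For example (illustrative)::

        >>> sorted(_get_recurse_set(['user', 'mode']))
        ['mode', 'user']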
'''
if not recurse:
return set()
if not isinstance(recurse, list):
raise TypeError('"recurse" must be formed as a list of strings')
try:
recurse_set = set(recurse)
except TypeError: # non-hashable elements
recurse_set = None
if recurse_set is None or not set(_RECURSE_TYPES) >= recurse_set:
raise ValueError('Types for "recurse" limited to {0}.'.format(
', '.join('"{0}"'.format(rtype) for rtype in _RECURSE_TYPES)))
if 'ignore_files' in recurse_set and 'ignore_dirs' in recurse_set:
raise ValueError('Must not specify "recurse" options "ignore_files"'
' and "ignore_dirs" at the same time.')
return recurse_set
def _depth_limited_walk(top, max_depth=None):
'''
    Walk the directory tree under root, up to a depth of max_depth.
With max_depth=None (default), do not limit depth.
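
    For example (illustrative)::

        for root, dirs, files in _depth_limited_walk('/var/log', max_depth=1):
            ...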
'''
for root, dirs, files in salt.utils.path.os_walk(top):
if max_depth is not None:
rel_depth = root.count(os.path.sep) - top.count(os.path.sep)
if rel_depth >= max_depth:
del dirs[:]
yield (six.text_type(root), list(dirs), list(files))
def directory(name,
user=None,
group=None,
recurse=None,
max_depth=None,
dir_mode=None,
file_mode=None,
makedirs=False,
clean=False,
require=None,
exclude_pat=None,
follow_symlinks=False,
force=False,
backupname=None,
allow_symlink=True,
children_only=False,
win_owner=None,
win_perms=None,
win_deny_perms=None,
win_inheritance=True,
win_perms_reset=False,
**kwargs):
r'''
Ensure that a named directory is present and has the right perms
name
The location to create or manage a directory, as an absolute path
user
The user to own the directory; this defaults to the user salt is
running as on the minion
group
The group ownership set for the directory; this defaults to the group
salt is running as on the minion. On Windows, this is ignored
recurse
Enforce user/group ownership and mode of directory recursively. Accepts
        a list of strings representing what you would like to recurse. If
        ``mode`` is included, both ``file_mode`` and ``dir_mode`` will be
        enforced, if they are defined. If ``ignore_files`` or ``ignore_dirs``
        is included, files or directories will be left unchanged,
        respectively.
Example:
.. code-block:: yaml
/var/log/httpd:
file.directory:
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
- recurse:
- user
- group
- mode
Leave files or directories unchanged:
.. code-block:: yaml
/var/log/httpd:
file.directory:
- user: root
- group: root
- dir_mode: 755
- file_mode: 644
- recurse:
- user
- group
- mode
- ignore_dirs
.. versionadded:: 2015.5.0
max_depth
        Limit the recursion depth. The default is no limit (``None``).
'max_depth' and 'clean' are mutually exclusive.
.. versionadded:: 2016.11.0
dir_mode / mode
        The permissions mode to set on any directories created. Not supported
        on Windows.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it is
        not enforced.
file_mode
        The permissions mode to set on any files created if 'mode' is run in
        'recurse'. This defaults to dir_mode. Not supported on Windows.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it is
        not enforced.
makedirs
If the directory is located in a path without a parent directory, then
the state will fail. If makedirs is set to True, then the parent
directories will be created to facilitate the creation of the named
file.
clean
Make sure that only files that are set up by salt and required by this
function are kept. If this option is set then everything in this
directory will be deleted unless it is required.
'clean' and 'max_depth' are mutually exclusive.
require
Require other resources such as packages or files
exclude_pat
When 'clean' is set to True, exclude this pattern from removal list
and preserve in the destination.
follow_symlinks : False
If the desired path is a symlink (or ``recurse`` is defined and a
symlink is encountered while recursing), follow it and check the
permissions of the directory/file to which the symlink points.
.. versionadded:: 2014.1.4
force
If the name of the directory exists and is not a directory and
force is set to False, the state will fail. If force is set to
True, the file in the way of the directory will be deleted to
make room for the directory, unless backupname is set,
then it will be renamed.
.. versionadded:: 2014.7.0
backupname
If the name of the directory exists and is not a directory, it will be
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
.. versionadded:: 2014.7.0
allow_symlink : True
If allow_symlink is True and the specified path is a symlink, it will be
allowed to remain if it points to a directory. If allow_symlink is False
then the state will fail, unless force is also set to True, in which case
it will be removed or renamed, depending on the value of the backupname
argument.
.. versionadded:: 2014.7.0
children_only : False
If children_only is True the base of a path is excluded when performing
a recursive operation. In case of /path/to/base, base will be ignored
while all of /path/to/base/* are still operated on.
win_owner : None
The owner of the directory. If this is not passed, user will be used. If
user is not passed, the account under which Salt is running will be
used.
.. versionadded:: 2017.7.0
win_perms : None
A dictionary containing permissions to grant and their propagation. For
example: ``{'Administrators': {'perms': 'full_control', 'applies_to':
'this_folder_only'}}`` Can be a single basic perm or a list of advanced
perms. ``perms`` must be specified. ``applies_to`` is optional and
        defaults to ``this_folder_subfolder_files``.
.. versionadded:: 2017.7.0
win_deny_perms : None
A dictionary containing permissions to deny and their propagation. For
example: ``{'Administrators': {'perms': 'full_control', 'applies_to':
'this_folder_only'}}`` Can be a single basic perm or a list of advanced
perms.
.. versionadded:: 2017.7.0
win_inheritance : True
True to inherit permissions from the parent directory, False not to
inherit permission.
.. versionadded:: 2017.7.0
win_perms_reset : False
If ``True`` the existing DACL will be cleared and replaced with the
settings defined in this function. If ``False``, new entries will be
appended to the existing DACL. Default is ``False``.
.. versionadded:: 2018.3.0
Here's an example using the above ``win_*`` parameters:
.. code-block:: yaml
create_config_dir:
file.directory:
- name: 'C:\config\'
- win_owner: Administrators
- win_perms:
# Basic Permissions
dev_ops:
perms: full_control
# List of advanced permissions
appuser:
perms:
- read_attributes
- read_ea
- create_folders
- read_permissions
applies_to: this_folder_only
joe_snuffy:
perms: read
applies_to: this_folder_files
- win_deny_perms:
fred_snuffy:
perms: full_control
- win_inheritance: False
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.directory')
# Remove trailing slash, if present and we're not working on "/" itself
if name[-1] == '/' and name != '/':
name = name[:-1]
if max_depth is not None and clean:
return _error(ret, 'Cannot specify both max_depth and clean')
user = _test_owner(kwargs, user=user)
if salt.utils.platform.is_windows():
# If win_owner not passed, use user
if win_owner is None:
win_owner = user if user else salt.utils.win_functions.get_current_user()
# Group isn't relevant to Windows, use win_perms/win_deny_perms
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this is '
'a Windows system. Please use the `win_*` parameters to set '
'permissions in Windows.'.format(name)
)
group = user
if 'mode' in kwargs and not dir_mode:
dir_mode = kwargs.get('mode', [])
if not file_mode:
file_mode = dir_mode
# Make sure that leading zeros stripped by YAML loader are added back
dir_mode = salt.utils.files.normalize_mode(dir_mode)
file_mode = salt.utils.files.normalize_mode(file_mode)
if salt.utils.platform.is_windows():
# Verify win_owner is valid on the target system
try:
salt.utils.win_dacl.get_sid(win_owner)
except CommandExecutionError as exc:
return _error(ret, exc)
else:
# Verify user and group are valid
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
# Must be an absolute path
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
# Check for existing file or symlink
if os.path.isfile(name) or (not allow_symlink and os.path.islink(name)) \
or (force and os.path.islink(name)):
# Was a backupname specified
if backupname is not None:
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
else:
__salt__['file.remove'](backupname)
os.rename(name, backupname)
elif force:
# Remove whatever is in the way
if os.path.isfile(name):
if __opts__['test']:
ret['pchanges']['forced'] = 'File was forcibly replaced'
else:
os.remove(name)
ret['changes']['forced'] = 'File was forcibly replaced'
elif __salt__['file.is_link'](name):
if __opts__['test']:
ret['pchanges']['forced'] = 'Symlink was forcibly replaced'
else:
__salt__['file.remove'](name)
ret['changes']['forced'] = 'Symlink was forcibly replaced'
else:
if __opts__['test']:
ret['pchanges']['forced'] = 'Directory was forcibly replaced'
else:
__salt__['file.remove'](name)
ret['changes']['forced'] = 'Directory was forcibly replaced'
else:
if os.path.isfile(name):
return _error(
ret,
'Specified location {0} exists and is a file'.format(name))
elif os.path.islink(name):
return _error(
ret,
'Specified location {0} exists and is a symlink'.format(name))
# Check directory?
if salt.utils.platform.is_windows():
presult, pcomment, pchanges = _check_directory_win(
name=name,
win_owner=win_owner,
win_perms=win_perms,
win_deny_perms=win_deny_perms,
win_inheritance=win_inheritance,
win_perms_reset=win_perms_reset)
else:
presult, pcomment, pchanges = _check_directory(
name, user, group, recurse or [], dir_mode, clean, require,
exclude_pat, max_depth, follow_symlinks)
if pchanges:
ret['pchanges'].update(pchanges)
    # Don't run through the rest of the function if there are no changes to be
# made
if not ret['pchanges'] or __opts__['test']:
ret['result'] = presult
ret['comment'] = pcomment
return ret
if not os.path.isdir(name):
# The dir does not exist, make it
if not os.path.isdir(os.path.dirname(name)):
# The parent directory does not exist, create them
if makedirs:
# Everything's good, create the parent Dirs
if salt.utils.platform.is_windows():
# Make sure the drive is mapped before trying to create the
# path in windows
drive, path = os.path.splitdrive(name)
if not os.path.isdir(drive):
return _error(
ret, 'Drive {0} is not mapped'.format(drive))
__salt__['file.makedirs'](
path=name,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
else:
__salt__['file.makedirs'](name, user=user, group=group,
mode=dir_mode)
else:
return _error(
ret, 'No directory to create {0} in'.format(name))
if salt.utils.platform.is_windows():
__salt__['file.mkdir'](
path=name,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
else:
__salt__['file.mkdir'](name, user=user, group=group, mode=dir_mode)
ret['changes'][name] = 'New Dir'
if not os.path.isdir(name):
return _error(ret, 'Failed to create directory {0}'.format(name))
# issue 32707: skip this __salt__['file.check_perms'] call if children_only == True
# Check permissions
if not children_only:
if salt.utils.platform.is_windows():
ret = __salt__['file.check_perms'](
path=name,
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
else:
ret, perms = __salt__['file.check_perms'](
name, ret, user, group, dir_mode, None, follow_symlinks)
errors = []
if recurse or clean:
# walk path only once and store the result
walk_l = list(_depth_limited_walk(name, max_depth))
        # root: (dirs, files) structure, compatible with python2.6
walk_d = {}
for i in walk_l:
walk_d[i[0]] = (i[1], i[2])
recurse_set = None
if recurse:
try:
recurse_set = _get_recurse_set(recurse)
except (TypeError, ValueError) as exc:
ret['result'] = False
ret['comment'] = '{0}'.format(exc)
# NOTE: Should this be enough to stop the whole check altogether?
if recurse_set:
if 'user' in recurse_set:
if user:
uid = __salt__['file.user_to_uid'](user)
# file.user_to_uid returns '' if user does not exist. Above
# check for user is not fatal, so we need to be sure user
# exists.
if isinstance(uid, six.string_types):
ret['result'] = False
ret['comment'] = 'Failed to enforce ownership for ' \
'user {0} (user does not ' \
'exist)'.format(user)
else:
ret['result'] = False
ret['comment'] = 'user not specified, but configured as ' \
'a target for recursive ownership ' \
'management'
else:
user = None
if 'group' in recurse_set:
if group:
gid = __salt__['file.group_to_gid'](group)
# As above with user, we need to make sure group exists.
if isinstance(gid, six.string_types):
ret['result'] = False
ret['comment'] = 'Failed to enforce group ownership ' \
'for group {0}'.format(group)
else:
ret['result'] = False
ret['comment'] = 'group not specified, but configured ' \
'as a target for recursive ownership ' \
'management'
else:
group = None
if 'mode' not in recurse_set:
file_mode = None
dir_mode = None
check_files = 'ignore_files' not in recurse_set
check_dirs = 'ignore_dirs' not in recurse_set
for root, dirs, files in walk_l:
if check_files:
for fn_ in files:
full = os.path.join(root, fn_)
try:
if salt.utils.platform.is_windows():
ret = __salt__['file.check_perms'](
path=full,
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
else:
ret, _ = __salt__['file.check_perms'](
full, ret, user, group, file_mode, None, follow_symlinks)
except CommandExecutionError as exc:
if not exc.strerror.endswith('does not exist'):
errors.append(exc.strerror)
if check_dirs:
for dir_ in dirs:
full = os.path.join(root, dir_)
try:
if salt.utils.platform.is_windows():
ret = __salt__['file.check_perms'](
path=full,
ret=ret,
owner=win_owner,
grant_perms=win_perms,
deny_perms=win_deny_perms,
inheritance=win_inheritance,
reset=win_perms_reset)
else:
ret, _ = __salt__['file.check_perms'](
full, ret, user, group, dir_mode, None, follow_symlinks)
except CommandExecutionError as exc:
if not exc.strerror.startswith('Path not found'):
errors.append(exc.strerror)
if clean:
keep = _gen_keep_files(name, require, walk_d)
        log.debug('List of kept files when using file.directory with clean: %s',
keep)
removed = _clean_dir(name, list(keep), exclude_pat)
if removed:
ret['changes']['removed'] = removed
ret['comment'] = 'Files cleaned from directory {0}'.format(name)
# issue 32707: reflect children_only selection in comments
if not ret['comment']:
if children_only:
ret['comment'] = 'Directory {0}/* updated'.format(name)
else:
if ret['changes']:
ret['comment'] = 'Directory {0} updated'.format(name)
if __opts__['test']:
ret['comment'] = 'Directory {0} not updated'.format(name)
elif not ret['changes'] and ret['result']:
orig_comment = None
if ret['comment']:
orig_comment = ret['comment']
ret['comment'] = 'Directory {0} is in the correct state'.format(name)
if orig_comment:
ret['comment'] = '\n'.join([ret['comment'], orig_comment])
if errors:
ret['result'] = False
ret['comment'] += '\n\nThe following errors were encountered:\n'
for error in errors:
ret['comment'] += '\n- {0}'.format(error)
return ret
def recurse(name,
source,
keep_source=True,
clean=False,
require=None,
user=None,
group=None,
dir_mode=None,
file_mode=None,
sym_mode=None,
template=None,
context=None,
replace=True,
defaults=None,
include_empty=False,
backup='',
include_pat=None,
exclude_pat=None,
maxdepth=None,
keep_symlinks=False,
force_symlinks=False,
**kwargs):
'''
Recurse through a subdirectory on the master and copy said subdirectory
over to the specified path.
name
The directory to set the recursion in
source
The source directory, this directory is located on the salt master file
server and is specified with the salt:// protocol. If the directory is
located on the master in the directory named spam, and is called eggs,
the source string is salt://spam/eggs
keep_source : True
Set to ``False`` to discard the cached copy of the source file once the
state completes. This can be useful for larger files to keep them from
taking up space in minion cache. However, keep in mind that discarding
the source file will result in the state needing to re-download the
source file if the state is run again.
.. versionadded:: 2017.7.3
clean
Make sure that only files that are set up by salt and required by this
function are kept. If this option is set then everything in this
directory will be deleted unless it is required.
require
Require other resources such as packages or files
user
The user to own the directory. This defaults to the user salt is
running as on the minion
group
The group ownership set for the directory. This defaults to the group
salt is running as on the minion. On Windows, this is ignored
dir_mode
The permissions mode to set on any directories created.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it is
        not enforced.
.. note::
This option is **not** supported on Windows.
file_mode
The permissions mode to set on any files created.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it is
        not enforced.
.. note::
This option is **not** supported on Windows.
.. versionchanged:: 2016.11.0
This option can be set to ``keep``, and Salt will keep the mode
from the Salt fileserver. This is only supported when the
``source`` URL begins with ``salt://``, or for files local to the
minion. Because the ``source`` option cannot be used with any of
the ``contents`` options, setting the ``mode`` to ``keep`` is also
incompatible with the ``contents`` options.
sym_mode
The permissions mode to set on any symlink created.
        The default mode for new files and directories corresponds to the
        umask of the salt process. For existing files and directories it is
        not enforced.
.. note::
This option is **not** supported on Windows.
template
If this setting is applied, the named templating engine will be used to
render the downloaded file. The following templates are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
.. note::
The template option is required when recursively applying templates.
replace : True
If set to ``False`` and the file already exists, the file will not be
modified even if changes would otherwise be made. Permissions and
ownership will still be enforced, however.
context
Overrides default context variables passed to the template.
defaults
Default context passed to the template.
include_empty
Set this to True if empty directories should also be created
(default is False)
backup
Overrides the default backup mode for all replaced files. See
:ref:`backup_mode documentation <file-state-backups>` for more details.
include_pat
When copying, include only this pattern from the source. Default
is glob match; if prefixed with 'E@', then regexp match.
Example:
.. code-block:: text
- include_pat: hello* :: glob matches 'hello01', 'hello02'
... but not 'otherhello'
- include_pat: E@hello :: regexp matches 'otherhello',
'hello01' ...
exclude_pat
Exclude this pattern from the source when copying. If both
`include_pat` and `exclude_pat` are supplied, then it will apply
conditions cumulatively. i.e. first select based on include_pat, and
then within that result apply exclude_pat.
Also, when 'clean=True', exclude this pattern from the removal
list and preserve in the destination.
Example:
.. code-block:: text
- exclude_pat: APPDATA* :: glob matches APPDATA.01,
APPDATA.02,.. for exclusion
- exclude_pat: E@(APPDATA)|(TEMPDATA) :: regexp matches APPDATA
or TEMPDATA for exclusion
maxdepth
When copying, only copy paths which are of depth `maxdepth` from the
source path.
Example:
.. code-block:: text
- maxdepth: 0 :: Only include files located in the source
directory
- maxdepth: 1 :: Only include files located in the source
or immediate subdirectories
keep_symlinks
Keep symlinks when copying from the source. This option will cause
        the copy operation to terminate at the symlink. If behavior similar
        to rsync is desired, set this to True.
force_symlinks
        Force symlink creation.
If a file or directory is obstructing symlink creation it will be
recursively removed so that symlink creation can proceed. This
option is usually not needed except in special circumstances.
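
    A minimal usage example (the target path and source directory shown are
    illustrative):

    .. code-block:: yaml

        /opt/code/flask:
          file.recurse:
            - source: salt://code/flask
            - include_empty: True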
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
name = salt.utils.data.decode(os.path.expanduser(name))
user = _test_owner(kwargs, user=user)
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this '
'is a Windows system.'.format(name)
)
group = user
ret = {
'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': {} # { path: [comment, ...] }
}
if 'mode' in kwargs:
ret['result'] = False
ret['comment'] = (
'\'mode\' is not allowed in \'file.recurse\'. Please use '
'\'file_mode\' and \'dir_mode\'.'
)
return ret
if any([x is not None for x in (dir_mode, file_mode, sym_mode)]) \
and salt.utils.platform.is_windows():
return _error(ret, 'mode management is not supported on Windows')
# Make sure that leading zeros stripped by YAML loader are added back
dir_mode = salt.utils.files.normalize_mode(dir_mode)
try:
keep_mode = file_mode.lower() == 'keep'
if keep_mode:
# We're not hard-coding the mode, so set it to None
file_mode = None
except AttributeError:
keep_mode = False
file_mode = salt.utils.files.normalize_mode(file_mode)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
# expand source into source_list
source_list = _validate_str_list(source)
for idx, val in enumerate(source_list):
source_list[idx] = val.rstrip('/')
for precheck in source_list:
if not precheck.startswith('salt://'):
return _error(ret, ('Invalid source \'{0}\' '
'(must be a salt:// URI)'.format(precheck)))
# Select the first source in source_list that exists
try:
source, source_hash = __salt__['file.source_list'](source_list, '', __env__)
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = 'Recurse failed: {0}'.format(exc)
return ret
# Check source path relative to fileserver root, make sure it is a
# directory
srcpath, senv = salt.utils.url.parse(source)
if senv is None:
senv = __env__
master_dirs = __salt__['cp.list_master_dirs'](saltenv=senv)
if srcpath not in master_dirs \
and not any((x for x in master_dirs
if x.startswith(srcpath + '/'))):
ret['result'] = False
ret['comment'] = (
'The directory \'{0}\' does not exist on the salt fileserver '
'in saltenv \'{1}\''.format(srcpath, senv)
)
return ret
# Verify the target directory
if not os.path.isdir(name):
if os.path.exists(name):
# it is not a dir, but it exists - fail out
return _error(
ret, 'The path {0} exists and is not a directory'.format(name))
if not __opts__['test']:
__salt__['file.makedirs_perms'](name, user, group, dir_mode)
def add_comment(path, comment):
comments = ret['comment'].setdefault(path, [])
if isinstance(comment, six.string_types):
comments.append(comment)
else:
comments.extend(comment)
def merge_ret(path, _ret):
# Use the most "negative" result code (out of True, None, False)
if _ret['result'] is False or ret['result'] is True:
ret['result'] = _ret['result']
# Only include comments about files that changed
if _ret['result'] is not True and _ret['comment']:
add_comment(path, _ret['comment'])
if _ret['changes']:
ret['changes'][path] = _ret['changes']
def manage_file(path, source, replace):
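        # Manage a single destination file by delegating to the ``managed``
        # state; when ``clean`` is set and replacement is requested, an
        # obstructing directory is removed first so the file can replace it.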
if clean and os.path.exists(path) and os.path.isdir(path) and replace:
_ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if __opts__['test']:
_ret['comment'] = 'Replacing directory {0} with a ' \
'file'.format(path)
_ret['result'] = None
merge_ret(path, _ret)
return
else:
__salt__['file.remove'](path)
_ret['changes'] = {'diff': 'Replaced directory with a '
'new file'}
merge_ret(path, _ret)
# Conflicts can occur if some kwargs are passed in here
pass_kwargs = {}
faults = ['mode', 'makedirs']
for key in kwargs:
if key not in faults:
pass_kwargs[key] = kwargs[key]
_ret = managed(
path,
source=source,
keep_source=keep_source,
user=user,
group=group,
mode='keep' if keep_mode else file_mode,
attrs=None,
template=template,
makedirs=True,
replace=replace,
context=context,
defaults=defaults,
backup=backup,
**pass_kwargs)
merge_ret(path, _ret)
def manage_directory(path):
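        # Ensure a directory exists at ``path`` via the ``directory`` state;
        # when ``clean`` is set, an obstructing non-directory path is removed
        # first.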
if os.path.basename(path) == '..':
return
if clean and os.path.exists(path) and not os.path.isdir(path):
_ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
if __opts__['test']:
_ret['comment'] = 'Replacing {0} with a directory'.format(path)
_ret['result'] = None
merge_ret(path, _ret)
return
else:
__salt__['file.remove'](path)
_ret['changes'] = {'diff': 'Replaced file with a directory'}
merge_ret(path, _ret)
_ret = directory(
path,
user=user,
group=group,
recurse=[],
dir_mode=dir_mode,
file_mode=None,
makedirs=True,
clean=False,
require=None)
merge_ret(path, _ret)
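    # Determine which files, directories and symlinks need to be managed from
    # the fileserver listing, honoring include/exclude patterns and maxdepth.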
mng_files, mng_dirs, mng_symlinks, keep = _gen_recurse_managed_files(
name,
source,
keep_symlinks,
include_pat,
exclude_pat,
maxdepth,
include_empty)
for srelpath, ltarget in mng_symlinks:
_ret = symlink(os.path.join(name, srelpath),
ltarget,
makedirs=True,
force=force_symlinks,
user=user,
group=group,
mode=sym_mode)
if not _ret:
continue
merge_ret(os.path.join(name, srelpath), _ret)
for dirname in mng_dirs:
manage_directory(dirname)
for dest, src in mng_files:
manage_file(dest, src, replace)
if clean:
# TODO: Use directory(clean=True) instead
keep.update(_gen_keep_files(name, require))
removed = _clean_dir(name, list(keep), exclude_pat)
if removed:
if __opts__['test']:
if ret['result']:
ret['result'] = None
add_comment('removed', removed)
else:
ret['changes']['removed'] = removed
# Flatten comments until salt command line client learns
# to display structured comments in a readable fashion
ret['comment'] = '\n'.join('\n#### {0} ####\n{1}'.format(
k, v if isinstance(v, six.string_types) else '\n'.join(v)
) for (k, v) in six.iteritems(ret['comment'])).strip()
if not ret['comment']:
ret['comment'] = 'Recursively updated {0}'.format(name)
if not ret['changes'] and ret['result']:
ret['comment'] = 'The directory {0} is in the correct state'.format(
name
)
return ret
def retention_schedule(name, retain, strptime_format=None, timezone=None):
'''
Apply retention scheduling to backup storage directory.
.. versionadded:: 2016.11.0
:param name:
The filesystem path to the directory containing backups to be managed.
:param retain:
Delete the backups, except for the ones we want to keep.
The N below should be an integer but may also be the special value of ``all``,
which keeps all files matching the criteria.
All of the retain options default to None,
        which means not to keep files based on that criterion.
:most_recent N:
Keep the most recent N files.
:first_of_hour N:
For the last N hours from now, keep the first file after the hour.
:first_of_day N:
For the last N days from now, keep the first file after midnight.
See also ``timezone``.
:first_of_week N:
For the last N weeks from now, keep the first file after Sunday midnight.
:first_of_month N:
For the last N months from now, keep the first file after the start of the month.
:first_of_year N:
For the last N years from now, keep the first file after the start of the year.
:param strptime_format:
A python strptime format string used to first match the filenames of backups
and then parse the filename to determine the datetime of the file.
https://docs.python.org/2/library/datetime.html#datetime.datetime.strptime
Defaults to None, which considers all files in the directory to be backups eligible for deletion
and uses ``os.path.getmtime()`` to determine the datetime.
:param timezone:
The timezone to use when determining midnight.
This is only used when datetime is pulled from ``os.path.getmtime()``.
Defaults to ``None`` which uses the timezone from the locale.
Usage example:
.. code-block:: yaml
/var/backups/example_directory:
file.retention_schedule:
- retain:
most_recent: 5
first_of_hour: 4
first_of_day: 7
first_of_week: 6 # NotImplemented yet.
first_of_month: 6
first_of_year: all
- strptime_format: example_name_%Y%m%dT%H%M%S.tar.bz2
- timezone: None
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {'retained': [], 'deleted': [], 'ignored': []},
'pchanges': {'retained': [], 'deleted': [], 'ignored': []},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.retention_schedule')
if not os.path.isdir(name):
return _error(ret, 'Name provided to file.retention must be a directory')
# get list of files in directory
all_files = __salt__['file.readdir'](name)
# if strptime_format is set, filter through the list to find names which parse and get their datetimes.
beginning_of_unix_time = datetime(1970, 1, 1)
def get_file_time_from_strptime(f):
try:
ts = datetime.strptime(f, strptime_format)
ts_epoch = salt.utils.dateutils.total_seconds(ts - beginning_of_unix_time)
return (ts, ts_epoch)
except ValueError:
# Files which don't match the pattern are not relevant files.
return (None, None)
def get_file_time_from_mtime(f):
lstat = __salt__['file.lstat'](os.path.join(name, f))
if lstat:
mtime = lstat['st_mtime']
return (datetime.fromtimestamp(mtime, timezone), mtime)
else: # maybe it was deleted since we did the readdir?
return (None, None)
get_file_time = get_file_time_from_strptime if strptime_format else get_file_time_from_mtime
# data structures are nested dicts:
# files_by_ymd = year.month.day.hour.unixtime: filename
# files_by_y_week_dow = year.week_of_year.day_of_week.unixtime: filename
# http://the.randomengineer.com/2015/04/28/python-recursive-defaultdict/
# TODO: move to an ordered dict model and reduce the number of sorts in the rest of the code?
def dict_maker():
return defaultdict(dict_maker)
files_by_ymd = dict_maker()
files_by_y_week_dow = dict_maker()
relevant_files = set()
ignored_files = set()
for f in all_files:
ts, ts_epoch = get_file_time(f)
if ts:
files_by_ymd[ts.year][ts.month][ts.day][ts.hour][ts_epoch] = f
week_of_year = ts.isocalendar()[1]
files_by_y_week_dow[ts.year][week_of_year][ts.weekday()][ts_epoch] = f
relevant_files.add(f)
else:
ignored_files.add(f)
# This is tightly coupled with the file_with_times data-structure above.
RETAIN_TO_DEPTH = {
'first_of_year': 1,
'first_of_month': 2,
'first_of_day': 3,
'first_of_hour': 4,
'most_recent': 5,
}
def get_first(fwt):
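        # Descend through the nested dict by the smallest (earliest) key at
        # each level and return the single earliest file as a one-element set.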
if isinstance(fwt, dict):
first_sub_key = sorted(fwt.keys())[0]
return get_first(fwt[first_sub_key])
else:
return set([fwt, ])
def get_first_n_at_depth(fwt, depth, n):
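        # Walk the most recent keys first (reverse-sorted) down ``depth``
        # levels, keeping the earliest file from each bucket until ``n``
        # files have been collected.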
if depth <= 0:
return get_first(fwt)
else:
result_set = set()
for k in sorted(fwt.keys(), reverse=True):
needed = n - len(result_set)
if needed < 1:
break
result_set |= get_first_n_at_depth(fwt[k], depth - 1, needed)
return result_set
# for each retain criteria, add filenames which match the criteria to the retain set.
retained_files = set()
for retention_rule, keep_count in retain.items():
# This is kind of a hack, since 'all' should really mean all,
# but I think it's a large enough number that even modern filesystems would
# choke if they had this many files in a single directory.
keep_count = sys.maxsize if 'all' == keep_count else int(keep_count)
if 'first_of_week' == retention_rule:
first_of_week_depth = 2 # year + week_of_year = 2
# I'm adding 1 to keep_count below because it fixed an off-by one
# issue in the tests. I don't understand why, and that bothers me.
retained_files |= get_first_n_at_depth(files_by_y_week_dow,
first_of_week_depth,
keep_count + 1)
else:
retained_files |= get_first_n_at_depth(files_by_ymd,
RETAIN_TO_DEPTH[retention_rule],
keep_count)
deletable_files = list(relevant_files - retained_files)
deletable_files.sort(reverse=True)
changes = {
'retained': sorted(list(retained_files), reverse=True),
'deleted': deletable_files,
'ignored': sorted(list(ignored_files), reverse=True),
}
ret['pchanges'] = changes
# TODO: track and report how much space was / would be reclaimed
if __opts__['test']:
ret['comment'] = '{0} backups would have been removed from {1}.\n'.format(len(deletable_files), name)
if deletable_files:
ret['result'] = None
else:
for f in deletable_files:
__salt__['file.remove'](os.path.join(name, f))
ret['comment'] = '{0} backups were removed from {1}.\n'.format(len(deletable_files), name)
ret['changes'] = changes
return ret
def line(name, content=None, match=None, mode=None, location=None,
before=None, after=None, show_changes=True, backup=False,
quiet=False, indent=True, create=False, user=None,
group=None, file_mode=None):
'''
Line-based editing of a file.
.. versionadded:: 2015.8.0
:param name:
Filesystem path to the file to be edited.
:param content:
Content of the line. Allowed to be empty if mode=delete.
:param match:
Match the target line for an action by
a fragment of a string or regular expression.
If neither ``before`` nor ``after`` are provided, and ``match``
is also ``None``, match becomes the ``content`` value.
:param mode:
Defines how to edit a line. One of the following options is
required:
- ensure
If line does not exist, it will be added.
- replace
If line already exists, it will be replaced.
- delete
Delete the line, once found.
- insert
Insert a line.
.. note::
If ``mode=insert`` is used, at least one of the following
options must also be defined: ``location``, ``before``, or
``after``. If ``location`` is used, it takes precedence
over the other two options.
:param location:
        Defines where to place the content in the file. Note this option is only
used when ``mode=insert`` is specified. If a location is passed in, it
takes precedence over both the ``before`` and ``after`` kwargs. Valid
locations are:
- start
Place the content at the beginning of the file.
- end
Place the content at the end of the file.
:param before:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param after:
Regular expression or an exact case-sensitive fragment of the string.
This option is only used when either the ``ensure`` or ``insert`` mode
is defined.
:param show_changes:
Output a unified diff of the old file and the new file.
If ``False`` return a boolean if any changes were made.
Default is ``True``
.. note::
Using this option will store two copies of the file in-memory
(the original version and the edited version) in order to generate the diff.
:param backup:
Create a backup of the original file with the extension:
"Year-Month-Day-Hour-Minutes-Seconds".
:param quiet:
        Do not raise any exceptions, e.g. ignore the fact that the file to be
        edited does not exist and that nothing really happened.
:param indent:
Keep indentation with the previous line. This option is not considered when
the ``delete`` mode is specified.
:param create:
        Create an empty file if it doesn't exist.
.. versionadded:: 2016.11.0
:param user:
The user to own the file, this defaults to the user salt is running as
on the minion.
.. versionadded:: 2016.11.0
:param group:
The group ownership set for the file, this defaults to the group salt
        is running as on the minion. On Windows, this is ignored.
.. versionadded:: 2016.11.0
:param file_mode:
The permissions to set on this file, aka 644, 0775, 4664. Not supported
on Windows.
.. versionadded:: 2016.11.0
If an equal sign (``=``) appears in an argument to a Salt command, it is
interpreted as a keyword argument in the format of ``key=val``. That
processing can be bypassed in order to pass an equal sign through to the
remote shell command by manually specifying the kwarg:
.. code-block:: yaml
update_config:
file.line:
- name: /etc/myconfig.conf
- mode: ensure
- content: my key = my value
- before: somekey.*?
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.line')
managed(
name,
create=create,
user=user,
group=group,
mode=file_mode,
replace=False)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# We've set the content to be empty in the function params but we want to make sure
# it gets passed when needed. Feature #37092
mode = mode and mode.lower() or mode
if mode is None:
return _error(ret, 'Mode was not defined. How to process the file?')
modeswithemptycontent = ['delete']
if mode not in modeswithemptycontent and content is None:
return _error(ret, 'Content can only be empty if mode is {0}'.format(modeswithemptycontent))
del modeswithemptycontent
changes = __salt__['file.line'](
name, content, match=match, mode=mode, location=location,
before=before, after=after, show_changes=show_changes,
backup=backup, quiet=quiet, indent=indent)
if changes:
ret['pchanges']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would be made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def replace(name,
pattern,
repl,
count=0,
flags=8,
bufsize=1,
append_if_not_found=False,
prepend_if_not_found=False,
not_found_content=None,
backup='.bak',
show_changes=True,
ignore_if_missing=False,
backslash_literal=False):
r'''
Maintain an edit in a file.
.. versionadded:: 0.17.0
name
Filesystem path to the file to be edited. If a symlink is specified, it
will be resolved to its target.
pattern
A regular expression, to be matched using Python's
:py:func:`~re.search`.
.. note::
If you need to match a literal string that contains regex special
characters, you may want to use salt's custom Jinja filter,
``regex_escape``.
.. code-block:: jinja
{{ 'http://example.com?foo=bar%20baz' | regex_escape }}
repl
The replacement text
count
Maximum number of pattern occurrences to be replaced. Defaults to 0.
If count is a positive integer n, no more than n occurrences will be
replaced, otherwise all occurrences will be replaced.
flags
A list of flags defined in the ``re`` module documentation from the
Python standard library. Each list item should be a string that will
correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
corresponding to the XOR (``|``) of all the desired flags. Defaults to
``8`` (which equates to ``['MULTILINE']``).
.. note::
``file.replace`` reads the entire file as a string to support
multiline regex patterns. Therefore, when using anchors such as
``^`` or ``$`` in the pattern, those anchors may be relative to
the line OR relative to the file. The default for ``file.replace``
is to treat anchors as relative to the line, which is implemented
by setting the default value of ``flags`` to ``['MULTILINE']``.
When overriding the default value for ``flags``, if
``'MULTILINE'`` is not present then anchors will be relative to
the file. If the desired behavior is for anchors to be relative to
the line, then simply add ``'MULTILINE'`` to the list of flags.
bufsize
How much of the file to buffer into memory at once. The default value
``1`` processes one line at a time. The special value ``file`` may be
specified which will read the entire file into memory before
processing.
append_if_not_found : False
If set to ``True``, and pattern is not found, then the content will be
appended to the file.
.. versionadded:: 2014.7.0
prepend_if_not_found : False
If set to ``True`` and pattern is not found, then the content will be
prepended to the file.
.. versionadded:: 2014.7.0
not_found_content
Content to use for append/prepend if not found. If ``None`` (default),
uses ``repl``. Useful when ``repl`` uses references to group in
pattern.
.. versionadded:: 2014.7.0
backup
The file extension to use for a backup of the file before editing. Set
to ``False`` to skip making a backup.
show_changes : True
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made. Returns a boolean or a
string.
        .. note::
Using this option will store two copies of the file in memory (the
original version and the edited version) in order to generate the
diff. This may not normally be a concern, but could impact
performance if used with large files.
ignore_if_missing : False
.. versionadded:: 2016.3.4
Controls what to do if the file is missing. If set to ``False``, the
state will display an error raised by the execution module. If set to
``True``, the state will simply report no changes.
backslash_literal : False
.. versionadded:: 2016.11.7
Interpret backslashes as literal backslashes for the repl and not
escape characters. This will help when using append/prepend so that
the backslashes are not interpreted for the repl on the second run of
the state.
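
    A simple usage example (the path, pattern, and replacement are
    illustrative):

    .. code-block:: yaml

        update_ssh_port:
          file.replace:
            - name: /etc/ssh/sshd_config
            - pattern: '^#?Port .*'
            - repl: 'Port 2222'
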
For complex regex patterns, it can be useful to avoid the need for complex
quoting and escape sequences by making use of YAML's multiline string
syntax.
.. code-block:: yaml
complex_search_and_replace:
file.replace:
# <...snip...>
- pattern: |
CentOS \(2.6.32[^\\n]+\\n\s+root[^\\n]+\\n\)+
.. note::
When using YAML multiline string syntax in ``pattern:``, make sure to
        also use that syntax in the ``repl:`` part, or you might lose line
feeds.
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': True,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.replace')
check_res, check_msg = _check_file(name)
if not check_res:
if ignore_if_missing and 'file not found' in check_msg:
ret['comment'] = 'No changes needed to be made'
return ret
else:
return _error(ret, check_msg)
changes = __salt__['file.replace'](name,
pattern,
repl,
count=count,
flags=flags,
bufsize=bufsize,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
not_found_content=not_found_content,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes,
ignore_if_missing=ignore_if_missing,
backslash_literal=backslash_literal)
if changes:
ret['pchanges']['diff'] = changes
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'Changes would have been made:\ndiff:\n{0}'.format(changes)
else:
ret['result'] = True
ret['comment'] = 'Changes were made'
ret['changes'] = {'diff': changes}
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def blockreplace(
name,
marker_start='#-- start managed zone --',
marker_end='#-- end managed zone --',
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
content='',
append_if_not_found=False,
prepend_if_not_found=False,
backup='.bak',
show_changes=True,
append_newline=None):
'''
Maintain an edit in a file in a zone delimited by two line markers
.. versionadded:: 2014.1.0
.. versionchanged:: 2017.7.5,2018.3.1
``append_newline`` argument added. Additionally, to improve
idempotence, if the string represented by ``marker_end`` is found in
the middle of the line, the content preceding the marker will be
removed when the block is replaced. This allows one to remove
``append_newline: False`` from the SLS and have the block properly
replaced if the end of the content block is immediately followed by the
``marker_end`` (i.e. no newline before the marker).
    A block of content delimited by comments can help you manage several line
    entries without worrying about removing old entries. This can help you
    maintain an otherwise unmanaged file that contains manual edits.
Note: this function will store two copies of the file in-memory
(the original version and the edited version) in order to detect changes
and only edit the targeted file if necessary.
name
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying a line as the end of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output. Note: you can use file.accumulated and target this state.
All accumulated data dictionaries content will be added as new lines in
the content
content
The content to be used between the two lines identified by
``marker_start`` and ``marker_end``
source
The source file to download to the minion, this source file can be
hosted on either the salt master server, or on an HTTP or FTP server.
Both HTTPS and HTTP are supported as well as downloading directly
from Amazon S3 compatible URLs with both pre-configured and automatic
IAM credentials. (see s3.get state documentation)
File retrieval from Openstack Swift object storage is supported via
swift://container/object_path URLs, see swift.get documentation.
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs. If source is left blank or None
(use ~ in YAML), the file will be created as an empty file and
the content will not be managed. This is also the case when a file
already exists and the source is undefined; the contents of the file
will not be changed or managed.
If the file is hosted on a HTTP or FTP server then the source_hash
argument is also required.
A list of sources can also be passed in to provide a default source and
a set of fallbacks. The first source in the list that is found to exist
will be used and subsequent entries in the list will be ignored.
.. code-block:: yaml
file_override_example:
file.blockreplace:
- name: /etc/example.conf
- source:
- salt://file_that_does_not_exist
- salt://file_that_exists
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
template : jinja
Templating engine to be used to render the downloaded file. The
following engines are supported:
- :mod:`cheetah <salt.renderers.cheetah>`
- :mod:`genshi <salt.renderers.genshi>`
- :mod:`jinja <salt.renderers.jinja>`
- :mod:`mako <salt.renderers.mako>`
- :mod:`py <salt.renderers.py>`
- :mod:`wempy <salt.renderers.wempy>`
context
Overrides default context variables passed to the template
defaults
Default context passed to the template
append_if_not_found : False
If markers are not found and this option is set to ``True``, the
content block will be appended to the file.
prepend_if_not_found : False
If markers are not found and this option is set to ``True``, the
content block will be prepended to the file.
backup
The file extension to use for a backup of the file if any edit is made.
Set this to ``False`` to skip making a backup.
dry_run : False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes : True
Controls how changes are presented. If ``True``, the ``Changes``
section of the state return will contain a unified diff of the changes
made. If False, then it will contain a boolean (``True`` if any changes
were made, otherwise ``False``).
append_newline
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is unspecified, then a newline will
only be added to the content block if it does not already end in a
newline.
.. versionadded:: 2017.7.5,2018.3.1
Example of usage with an accumulator and with a variable:
.. code-block:: jinja
{% set myvar = 42 %}
hosts-config-block-{{ myvar }}:
file.blockreplace:
- name: /etc/hosts
- marker_start: "# START managed zone {{ myvar }} -DO-NOT-EDIT-"
- marker_end: "# END managed zone {{ myvar }} --"
- content: 'First line of content'
- append_if_not_found: True
- backup: '.bak'
- show_changes: True
hosts-config-block-{{ myvar }}-accumulated1:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: "text 2"
- require_in:
- file: hosts-config-block-{{ myvar }}
hosts-config-block-{{ myvar }}-accumulated2:
file.accumulated:
- filename: /etc/hosts
- name: my-accumulator-{{ myvar }}
- text: |
text 3
text 4
- require_in:
- file: hosts-config-block-{{ myvar }}
will generate and maintain a block of content in ``/etc/hosts``:
.. code-block:: text
# START managed zone 42 -DO-NOT-EDIT-
First line of content
text 2
text 3
text 4
# END managed zone 42 --
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.blockreplace')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
accum_data, accum_deps = _load_accumulators()
if name in accum_data:
accumulator = accum_data[name]
# if we have multiple accumulators for a file, only apply the one
# required at a time
deps = accum_deps.get(name, [])
filtered = [a for a in deps if
__low__['__id__'] in deps[a] and a in accumulator]
if not filtered:
filtered = [a for a in accumulator]
for acc in filtered:
acc_content = accumulator[acc]
for line in acc_content:
if content == '':
content = line
else:
content += "\n" + line
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
for index, item in enumerate(text):
content += six.text_type(item)
try:
changes = __salt__['file.blockreplace'](
name,
marker_start,
marker_end,
content=content,
append_if_not_found=append_if_not_found,
prepend_if_not_found=prepend_if_not_found,
backup=backup,
dry_run=__opts__['test'],
show_changes=show_changes,
append_newline=append_newline)
except Exception as exc:
log.exception('Encountered error managing block')
ret['comment'] = (
'Encountered error managing block: {0}. '
'See the log for details.'.format(exc)
)
return ret
if changes:
ret['pchanges'] = {'diff': changes}
if __opts__['test']:
ret['changes']['diff'] = ret['pchanges']['diff']
ret['result'] = None
ret['comment'] = 'Changes would be made'
else:
ret['changes']['diff'] = ret['pchanges']['diff']
ret['result'] = True
ret['comment'] = 'Changes were made'
else:
ret['result'] = True
ret['comment'] = 'No changes needed to be made'
return ret
def comment(name, regex, char='#', backup='.bak'):
'''
Comment out specified lines in a file.
name
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be commented;
        this pattern will be wrapped in parentheses and any preceding/trailing
        ``^`` or ``$`` characters will be moved outside the parentheses
(e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)
Note that you _need_ the leading ^, otherwise each time you run
highstate, another comment char will be inserted.
char : ``#``
The character to be inserted at the beginning of a line in order to
comment it out
backup : ``.bak``
The file will be backed up before edit with this file extension
.. warning::
This backup will be overwritten each time ``sed`` / ``comment`` /
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
Set to False/None to not keep a backup.
Usage:
.. code-block:: yaml
/etc/fstab:
file.comment:
- regex: ^bind 127.0.0.1
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.comment')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# remove (?i)-like flags, ^ and $
unanchor_regex = re.sub(r'^(\(\?[iLmsux]\))?\^?(.*?)\$?$', r'\2', regex)
comment_regex = char + unanchor_regex
# Check if the line is already commented
if __salt__['file.search'](name, comment_regex, multiline=True):
commented = True
else:
commented = False
# Make sure the pattern appears in the file before continuing
if commented or not __salt__['file.search'](name, regex, multiline=True):
if __salt__['file.search'](name, unanchor_regex, multiline=True):
ret['comment'] = 'Pattern already commented'
ret['result'] = True
return ret
else:
return _error(ret, '{0}: Pattern not found'.format(unanchor_regex))
ret['pchanges'][name] = 'updated'
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
return ret
with salt.utils.files.fopen(name, 'rb') as fp_:
slines = fp_.read()
if six.PY3:
slines = slines.decode(__salt_system_encoding__)
slines = slines.splitlines(True)
# Perform the edit
__salt__['file.comment_line'](name, regex, char, True, backup)
with salt.utils.files.fopen(name, 'rb') as fp_:
nlines = fp_.read()
if six.PY3:
nlines = nlines.decode(__salt_system_encoding__)
nlines = nlines.splitlines(True)
# Check the result
ret['result'] = __salt__['file.search'](name, unanchor_regex, multiline=True)
if slines != nlines:
if not __utils__['files.is_text'](name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if ret['result']:
ret['comment'] = 'Commented lines successfully'
else:
ret['comment'] = 'Expected commented lines not found'
return ret
def uncomment(name, regex, char='#', backup='.bak'):
'''
Uncomment specified commented lines in a file
name
The full path to the file to be edited
regex
A regular expression used to find the lines that are to be uncommented.
This regex should not include the comment character. A leading ``^``
character will be stripped for convenience (for easily switching
between comment() and uncomment()). The regex will be searched for
from the beginning of the line, ignoring leading spaces (we prepend
'^[ \\t]*')
char : ``#``
The character to remove in order to uncomment a line
backup : ``.bak``
The file will be backed up before edit with this file extension;
.. warning::
This backup will be overwritten each time ``sed`` / ``comment`` /
``uncomment`` is called. Meaning the backup will only be useful
after the first invocation.
Set to False/None to not keep a backup.
Usage:
.. code-block:: yaml
/etc/adduser.conf:
file.uncomment:
- regex: EXTRA_GROUPS
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.uncomment')
check_res, check_msg = _check_file(name)
if not check_res:
return _error(ret, check_msg)
# Make sure the pattern appears in the file
if __salt__['file.search'](
name,
'^[ \t]*{0}'.format(regex.lstrip('^')),
multiline=True):
ret['comment'] = 'Pattern already uncommented'
ret['result'] = True
return ret
elif __salt__['file.search'](
name,
'{0}[ \t]*{1}'.format(char, regex.lstrip('^')),
multiline=True):
# Line exists and is commented
pass
else:
return _error(ret, '{0}: Pattern not found'.format(regex))
ret['pchanges'][name] = 'updated'
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
return ret
with salt.utils.files.fopen(name, 'rb') as fp_:
slines = fp_.read()
if six.PY3:
slines = slines.decode(__salt_system_encoding__)
slines = slines.splitlines(True)
# Perform the edit
__salt__['file.comment_line'](name, regex, char, False, backup)
with salt.utils.files.fopen(name, 'rb') as fp_:
nlines = fp_.read()
if six.PY3:
nlines = nlines.decode(__salt_system_encoding__)
nlines = nlines.splitlines(True)
# Check the result
ret['result'] = __salt__['file.search'](
name,
'^[ \t]*{0}'.format(regex.lstrip('^')),
multiline=True
)
if slines != nlines:
if not __utils__['files.is_text'](name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if ret['result']:
ret['comment'] = 'Uncommented lines successfully'
else:
ret['comment'] = 'Expected uncommented lines not found'
return ret
def append(name,
text=None,
makedirs=False,
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
ignore_whitespace=True):
'''
Ensure that some text appears at the end of a file.
The text will not be appended if it already exists in the file.
A single string of text or a list of strings may be appended.
name
The location of the file to append to.
text
The text to be appended, which can be a single string or a list
of strings.
makedirs
If the parent directory of the file does not exist, the state will
fail. If makedirs is set to True, the parent directories will be
created to facilitate the creation of the named file. Defaults to
False.
source
A single source file to append. This source file can be hosted on either
the salt master server, or on an HTTP or FTP server. Both HTTPS and
HTTP are supported as well as downloading directly from Amazon S3
compatible URLs with both pre-configured and automatic IAM credentials
(see s3.get state documentation). File retrieval from Openstack Swift
object storage is supported via swift://container/object_path URLs
(see swift.get documentation).
For files hosted on the salt file server, if the file is located on
the master in the directory named spam, and is called eggs, the source
string is salt://spam/eggs.
If the file is hosted on an HTTP or FTP server, the source_hash argument
is also required.
source_hash
This can be one of the following:
1. a source hash string
2. the URI of a file that contains source hash strings
The function accepts the first encountered long unbroken alphanumeric
string of correct length as a valid hash, in order from most secure to
least secure:
.. code-block:: text
Type Length
====== ======
sha512 128
sha384 96
sha256 64
sha224 56
sha1 40
md5 32
See the ``source_hash`` parameter description for :mod:`file.managed
<salt.states.file.managed>` function for more details and examples.
template
The named templating engine will be used to render the source file(s) being appended.
Defaults to ``jinja``. The following templates are supported:
- :mod:`cheetah<salt.renderers.cheetah>`
- :mod:`genshi<salt.renderers.genshi>`
- :mod:`jinja<salt.renderers.jinja>`
- :mod:`mako<salt.renderers.mako>`
- :mod:`py<salt.renderers.py>`
- :mod:`wempy<salt.renderers.wempy>`
sources
A list of source files to append. If the files are hosted on an HTTP or
FTP server, the source_hashes argument is also required.
source_hashes
A list of source_hashes corresponding to the sources list specified in
the sources argument.
defaults
Default context passed to the template.
context
Overrides default context variables passed to the template.
ignore_whitespace
.. versionadded:: 2015.8.4
Spaces and tabs in the text are ignored by default when searching for
the content to append; one space or multiple tabs are treated the same
by Salt. Set this option to ``False`` to require an exact whitespace
match, as in the example below.
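For example (an illustrative sketch; the path and text are hypothetical), to
require an exact whitespace match:
.. code-block:: yaml
    /etc/motd:
      file.append:
        - text: 'exact    spacing preserved'
        - ignore_whitespace: False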
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.append:
- text: |
Thou hadst better eat salt with the Philosophers of Greece,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.append:
- text:
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
- append
- template: jinja
- sources:
- salt://motd/devops-messages.tmpl
- salt://motd/hr-messages.tmpl
- salt://motd/general-messages.tmpl
.. versionadded:: 0.9.5
'''
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.append')
name = os.path.expanduser(name)
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
# Add sources and source_hashes with template support
    # NOTE: 'text' and any 'source' are mutuallyally exclusive, as 'text' is
    # re-assigned below when source(s) are supplied.
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
if makedirs is True:
dirname = os.path.dirname(name)
if not __salt__['file.directory_exists'](dirname):
__salt__['file.makedirs'](name)
check_res, check_msg, ret['pchanges'] = _check_directory(
dirname, None, None, False, None, False, False, None
)
if not check_res:
return _error(ret, check_msg)
check_res, check_msg = _check_file(name)
if not check_res:
# Try to create the file
touch(name, makedirs=makedirs)
retry_res, retry_msg = _check_file(name)
if not retry_res:
return _error(ret, check_msg)
# Follow the original logic and re-assign 'text' if using source(s)...
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
text = _validate_str_list(text)
with salt.utils.files.fopen(name, 'rb') as fp_:
slines = fp_.read()
if six.PY3:
slines = slines.decode(__salt_system_encoding__)
slines = slines.splitlines()
append_lines = []
try:
for chunk in text:
if ignore_whitespace:
if __salt__['file.search'](
name,
salt.utils.stringutils.build_whitespace_split_regex(chunk),
multiline=True):
continue
elif __salt__['file.search'](
name,
chunk,
multiline=True):
continue
for line_item in chunk.splitlines():
append_lines.append('{0}'.format(line_item))
except TypeError:
return _error(ret, 'No text found to append. Nothing appended')
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
nlines = list(slines)
nlines.extend(append_lines)
if slines != nlines:
if not __utils__['files.is_text'](name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
'\n'.join(difflib.unified_diff(slines, nlines))
)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
if append_lines:
__salt__['file.append'](name, args=append_lines)
ret['comment'] = 'Appended {0} lines'.format(len(append_lines))
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
with salt.utils.files.fopen(name, 'rb') as fp_:
nlines = fp_.read()
if six.PY3:
nlines = nlines.decode(__salt_system_encoding__)
nlines = nlines.splitlines()
if slines != nlines:
if not __utils__['files.is_text'](name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
'\n'.join(difflib.unified_diff(slines, nlines)))
ret['result'] = True
return ret
def prepend(name,
text=None,
makedirs=False,
source=None,
source_hash=None,
template='jinja',
sources=None,
source_hashes=None,
defaults=None,
context=None,
header=None):
'''
Ensure that some text appears at the beginning of a file
The text will not be prepended again if it already exists in the file. You
may specify a single line of text or a list of lines to prepend.
Multi-line example:
.. code-block:: yaml
/etc/motd:
file.prepend:
- text: |
Thou hadst better eat salt with the Philosophers of Greece,
than sugar with the Courtiers of Italy.
- Benjamin Franklin
Multiple lines of text:
.. code-block:: yaml
/etc/motd:
file.prepend:
- text:
- Trust no one unless you have eaten much salt with him.
- "Salt is born of the purest of parents: the sun and the sea."
Optionally, require the text to appear exactly as specified
(order and position). Combine with multi-line or multiple lines of input.
.. code-block:: yaml
/etc/motd:
file.prepend:
- header: True
- text:
- This will be the very first line in the file.
- The 2nd line, regardless of duplicates elsewhere in the file.
- These will be written anew if they do not appear verbatim.
Gather text from multiple template files:
.. code-block:: yaml
/etc/motd:
file:
- prepend
- template: jinja
- sources:
- salt://motd/devops-messages.tmpl
- salt://motd/hr-messages.tmpl
- salt://motd/general-messages.tmpl
.. versionadded:: 2014.7.0
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'pchanges': {},
'result': False,
'comment': ''}
if not name:
return _error(ret, 'Must provide name to file.prepend')
if sources is None:
sources = []
if source_hashes is None:
source_hashes = []
# Add sources and source_hashes with template support
    # NOTE: 'text' and any 'source' are mutually exclusive, as 'text' is
    # re-assigned below when source(s) are supplied.
(ok_, err, sl_) = _unify_sources_and_hashes(source=source,
source_hash=source_hash,
sources=sources,
source_hashes=source_hashes)
if not ok_:
return _error(ret, err)
if makedirs is True:
dirname = os.path.dirname(name)
if not __salt__['file.directory_exists'](dirname):
__salt__['file.makedirs'](name)
check_res, check_msg, ret['pchanges'] = _check_directory(
dirname, None, None, False, None, False, False, None
)
if not check_res:
return _error(ret, check_msg)
check_res, check_msg = _check_file(name)
if not check_res:
# Try to create the file
touch(name, makedirs=makedirs)
retry_res, retry_msg = _check_file(name)
if not retry_res:
return _error(ret, check_msg)
# Follow the original logic and re-assign 'text' if using source(s)...
if sl_:
tmpret = _get_template_texts(source_list=sl_,
template=template,
defaults=defaults,
context=context)
if not tmpret['result']:
return tmpret
text = tmpret['data']
text = _validate_str_list(text)
with salt.utils.files.fopen(name, 'rb') as fp_:
slines = fp_.read()
if six.PY3:
slines = slines.decode(__salt_system_encoding__)
slines = slines.splitlines(True)
count = 0
test_lines = []
preface = []
for chunk in text:
# if header kwarg is unset or False, use regex search
if not header:
if __salt__['file.search'](
name,
salt.utils.stringutils.build_whitespace_split_regex(chunk),
multiline=True):
continue
lines = chunk.splitlines()
for line in lines:
if __opts__['test']:
ret['comment'] = 'File {0} is set to be updated'.format(name)
ret['result'] = None
test_lines.append('{0}\n'.format(line))
else:
preface.append(line)
count += 1
if __opts__['test']:
nlines = test_lines + slines
if slines != nlines:
if not __utils__['files.is_text'](name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
ret['result'] = None
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
# if header kwarg is True, use verbatim compare
if header:
with salt.utils.files.fopen(name, 'rb') as fp_:
# read as many lines of target file as length of user input
contents = fp_.read()
if six.PY3:
contents = contents.decode(__salt_system_encoding__)
contents = contents.splitlines(True)
target_head = contents[0:len(preface)]
target_lines = []
# strip newline chars from list entries
for chunk in target_head:
target_lines += chunk.splitlines()
# compare current top lines in target file with user input
# and write user input if they differ
if target_lines != preface:
__salt__['file.prepend'](name, *preface)
else:
# clear changed lines counter if target file not modified
count = 0
else:
__salt__['file.prepend'](name, *preface)
with salt.utils.files.fopen(name, 'rb') as fp_:
nlines = fp_.read()
if six.PY3:
nlines = nlines.decode(__salt_system_encoding__)
nlines = nlines.splitlines(True)
if slines != nlines:
if not __utils__['files.is_text'](name):
ret['changes']['diff'] = 'Replace binary file'
else:
# Changes happened, add them
ret['changes']['diff'] = (
''.join(difflib.unified_diff(slines, nlines))
)
if count:
ret['comment'] = 'Prepended {0} lines'.format(count)
else:
ret['comment'] = 'File {0} is in correct state'.format(name)
ret['result'] = True
return ret
def patch(name,
source=None,
source_hash=None,
source_hash_name=None,
skip_verify=False,
template=None,
context=None,
defaults=None,
options='',
reject_file=None,
strip=None,
saltenv=None,
**kwargs):
'''
Ensure that a patch has been applied to the specified file or directory
.. versionchanged:: Fluorine
The ``hash`` and ``dry_run_first`` options are now ignored, as the
logic which determines whether or not the patch has already been
applied no longer requires them. Additionally, this state now supports
patch files that modify more than one file. To use these sort of
patches, specify a directory (and, if necessary, the ``strip`` option)
instead of a file.
.. note::
A suitable ``patch`` executable must be available on the minion. Also,
keep in mind that the pre-check this state does to determine whether or
not changes need to be made will create a temp file and send all patch
output to that file. This means that, in the event that the patch would
not have applied cleanly, the comment included in the state results will
reference a temp file that will no longer exist once the state finishes
running.
name
The file or directory to which the patch should be applied
source
The patch file to apply
.. versionchanged:: Fluorine
The source can now be from any file source supported by Salt
(``salt://``, ``http://``, ``https://``, ``ftp://``, etc.).
Templating is also now supported.
source_hash
Works the same way as in :py:func:`file.managed
<salt.states.file.managed>`.
.. versionadded:: Fluorine
source_hash_name
Works the same way as in :py:func:`file.managed
<salt.states.file.managed>`
.. versionadded:: Fluorine
skip_verify
Works the same way as in :py:func:`file.managed
<salt.states.file.managed>`
.. versionadded:: Fluorine
template
Works the same way as in :py:func:`file.managed
<salt.states.file.managed>`
.. versionadded:: Fluorine
context
Works the same way as in :py:func:`file.managed
<salt.states.file.managed>`
.. versionadded:: Fluorine
defaults
Works the same way as in :py:func:`file.managed
<salt.states.file.managed>`
.. versionadded:: Fluorine
options
Extra options to pass to patch. This should not be necessary in most
cases.
.. note::
For best results, short opts should be separate from one another.
The ``-N``, ``-r``, and ``-o`` options are used internally by
this state and cannot be used here. Additionally, instead of using
``-pN`` or ``--strip=N``, use the ``strip`` option documented
below.
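For example (an illustrative sketch; the extra flags shown are ordinary GNU
``patch`` options and are not required by this state):
.. code-block:: yaml
    /opt/myfile.txt:
      file.patch:
        - source: salt://myfile.patch
        - options: '--binary -F 3'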
reject_file
If specified, any rejected hunks will be written to this file. If not
specified, then they will be written to a temp file which will be
deleted when the state finishes running.
.. important::
The parent directory must exist. Also, this will overwrite the file
if it is already present.
.. versionadded:: Fluorine
strip
Number of directories to strip from paths in the patch file. For
example, using the below SLS would instruct Salt to use ``-p1`` when
applying the patch:
.. code-block:: yaml
/etc/myfile.conf:
file.patch:
- source: salt://myfile.patch
- strip: 1
.. versionadded:: Fluorine
In previous versions, ``-p1`` would need to be passed as part of
the ``options`` value.
saltenv
Specify the environment from which to retrieve the patch file indicated
by the ``source`` parameter. If not provided, this defaults to the
environment from which the state is being executed.
.. note::
Ignored when the patch file is from a non-``salt://`` source.
**Usage:**
.. code-block:: yaml
# Equivalent to ``patch --forward /opt/myfile.txt myfile.patch``
/opt/myfile.txt:
file.patch:
- source: salt://myfile.patch
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not salt.utils.path.which('patch'):
ret['comment'] = 'patch executable not found on minion'
return ret
# is_dir should be defined if we proceed past the if/else block below, but
# just in case, avoid a NameError.
is_dir = False
if not name:
ret['comment'] = 'A file/directory to be patched is required'
return ret
else:
try:
name = os.path.expanduser(name)
except Exception:
ret['comment'] = 'Invalid path \'{0}\''.format(name)
return ret
else:
if not os.path.isabs(name):
ret['comment'] = '{0} is not an absolute path'.format(name)
return ret
elif not os.path.exists(name):
ret['comment'] = '{0} does not exist'.format(name)
return ret
else:
is_dir = os.path.isdir(name)
for deprecated_arg in ('hash', 'dry_run_first'):
if deprecated_arg in kwargs:
ret.setdefault('warnings', []).append(
'The \'{0}\' argument is no longer used and has been '
'ignored.'.format(deprecated_arg)
)
if reject_file is not None:
try:
reject_file_parent = os.path.dirname(reject_file)
except Exception:
ret['comment'] = 'Invalid path \'{0}\' for reject_file'.format(
reject_file
)
return ret
else:
if not os.path.isabs(reject_file_parent):
ret['comment'] = '\'{0}\' is not an absolute path'.format(
reject_file
)
return ret
elif not os.path.isdir(reject_file_parent):
ret['comment'] = (
'Parent directory for reject_file \'{0}\' either does '
'not exist, or is not a directory'.format(reject_file)
)
return ret
sanitized_options = []
options = salt.utils.args.shlex_split(options)
index = 0
max_index = len(options) - 1
# Not using enumerate here because we may need to consume more than one
# option if --strip is used.
blacklisted_options = []
while index <= max_index:
option = options[index]
if not isinstance(option, six.string_types):
option = six.text_type(option)
for item in ('-N', '--forward', '-r', '--reject-file', '-o', '--output'):
if option.startswith(item):
blacklisted = option
break
else:
blacklisted = None
if blacklisted is not None:
blacklisted_options.append(blacklisted)
if option.startswith('-p'):
try:
strip = int(option[2:])
except Exception:
ret['comment'] = (
'Invalid format for \'-p\' CLI option. Consider using '
'the \'strip\' option for this state.'
)
return ret
elif option.startswith('--strip'):
if '=' in option:
# Assume --strip=N
try:
strip = int(option.rsplit('=', 1)[-1])
except Exception:
ret['comment'] = (
'Invalid format for \'--strip\' CLI option. Consider '
'using the \'strip\' option for this state.'
)
return ret
else:
# Assume --strip N and grab the next option in the list
try:
strip = int(options[index + 1])
except Exception:
ret['comment'] = (
'Invalid format for \'--strip\' CLI option. Consider '
'using the \'strip\' option for this state.'
)
return ret
else:
# We need to increment again because we grabbed the next
# option in the list.
index += 1
else:
sanitized_options.append(option)
# Increment the index
index += 1
if blacklisted_options:
ret['comment'] = (
'The following CLI options are not allowed: {0}'.format(
', '.join(blacklisted_options)
)
)
return ret
options = sanitized_options
try:
source_match = __salt__['file.source_list'](source,
source_hash,
__env__)[0]
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
else:
# Passing the saltenv to file.managed to pull down the patch file is
# not supported, because the saltenv is already being passed via the
# state compiler and this would result in two values for that argument
# (and a traceback). Therefore, we will add the saltenv to the source
# URL to ensure we pull the file from the correct environment.
if saltenv is not None:
source_match_url, source_match_saltenv = \
salt.utils.url.parse(source_match)
if source_match_url.startswith('salt://'):
if source_match_saltenv is not None \
and source_match_saltenv != saltenv:
ret.setdefault('warnings', []).append(
'Ignoring \'saltenv\' option in favor of saltenv '
'included in the source URL.'
)
else:
source_match += '?saltenv={0}'.format(saltenv)
cleanup = []
try:
patch_file = salt.utils.files.mkstemp()
cleanup.append(patch_file)
try:
orig_test = __opts__['test']
__opts__['test'] = False
sys.modules[__salt__['test.ping'].__module__].__opts__['test'] = False
result = managed(patch_file,
source=source_match,
source_hash=source_hash,
source_hash_name=source_hash_name,
skip_verify=skip_verify,
template=template,
context=context,
defaults=defaults)
except Exception as exc:
msg = 'Failed to cache patch file {0}: {1}'.format(
salt.utils.url.redact_http_basic_auth(source_match),
exc
)
log.exception(msg)
ret['comment'] = msg
return ret
else:
log.debug('file.managed: %s', result)
finally:
__opts__['test'] = orig_test
sys.modules[__salt__['test.ping'].__module__].__opts__['test'] = orig_test
if not result['result']:
log.debug(
'failed to download %s',
salt.utils.url.redact_http_basic_auth(source_match)
)
return result
def _patch(patch_file, options=None, dry_run=False):
patch_opts = copy.copy(sanitized_options)
if options is not None:
patch_opts.extend(options)
return __salt__['file.patch'](
name,
patch_file,
options=patch_opts,
dry_run=dry_run)
if reject_file is not None:
patch_rejects = reject_file
else:
# No rejects file specified, create a temp file
patch_rejects = salt.utils.files.mkstemp()
cleanup.append(patch_rejects)
patch_output = salt.utils.files.mkstemp()
cleanup.append(patch_output)
# Older patch releases can only write patch output to regular files,
# meaning that /dev/null can't be relied on. Also, if we ever want this
# to work on Windows with patch.exe, /dev/null is a non-starter.
# Therefore, redirect all patch output to a temp file, which we will
# then remove.
patch_opts = ['-N', '-r', patch_rejects, '-o', patch_output]
if is_dir and strip is not None:
patch_opts.append('-p{0}'.format(strip))
pre_check = _patch(patch_file, patch_opts)
if pre_check['retcode'] != 0:
# Try to reverse-apply hunks from rejects file using a dry-run.
# If this returns a retcode of 0, we know that the patch was
# already applied. Rejects are written from the base of the
# directory, so the strip option doesn't apply here.
reverse_pass = _patch(patch_rejects, ['-R', '-f'], dry_run=True)
already_applied = reverse_pass['retcode'] == 0
if already_applied:
ret['comment'] = 'Patch was already applied'
ret['result'] = True
return ret
else:
ret['comment'] = (
'Patch would not apply cleanly, no changes made. Results '
'of dry-run are below.'
)
if reject_file is None:
ret['comment'] += (
' Run state again using the reject_file option to '
'save rejects to a persistent file.'
)
opts = copy.copy(__opts__)
opts['color'] = False
ret['comment'] += '\n\n' + salt.output.out_format(
pre_check,
'nested',
opts,
nested_indent=14)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'The patch would be applied'
ret['changes'] = pre_check
return ret
# If we've made it here, the patch should apply cleanly
patch_opts = []
if is_dir and strip is not None:
patch_opts.append('-p{0}'.format(strip))
ret['changes'] = _patch(patch_file, patch_opts)
if ret['changes']['retcode'] == 0:
ret['comment'] = 'Patch successfully applied'
ret['result'] = True
else:
ret['comment'] = 'Failed to apply patch'
return ret
finally:
# Clean up any temp files
for path in cleanup:
try:
os.remove(path)
except OSError as exc:
if exc.errno != os.errno.ENOENT:
log.error(
'file.patch: Failed to remove temp file %s: %s',
path, exc
)
def touch(name, atime=None, mtime=None, makedirs=False):
'''
Replicate the 'nix "touch" command to create a new empty
file or update the atime and mtime of an existing file.
Note that if you just want to create a file and don't care about atime or
mtime, you should use ``file.managed`` instead, as it is more
feature-complete. (Just leave out the ``source``/``template``/``contents``
arguments, and it will just create the file and/or check its permissions,
without messing with contents)
name
name of the file
atime
atime of the file
mtime
mtime of the file
makedirs
whether we should create the parent directory/directories in order to
touch the file
Usage:
.. code-block:: yaml
/var/log/httpd/logrotate.empty:
file.touch
.. versionadded:: 0.9.5
'''
name = os.path.expanduser(name)
ret = {
'name': name,
'changes': {},
}
if not name:
return _error(ret, 'Must provide name to file.touch')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name)
)
if __opts__['test']:
ret['result'], ret['comment'] = _check_touch(name, atime, mtime)
return ret
if makedirs:
__salt__['file.makedirs'](name)
if not os.path.isdir(os.path.dirname(name)):
return _error(
ret, 'Directory not present to touch file {0}'.format(name)
)
extant = os.path.exists(name)
ret['result'] = __salt__['file.touch'](name, atime, mtime)
if not extant and ret['result']:
ret['comment'] = 'Created empty file {0}'.format(name)
ret['changes']['new'] = name
elif extant and ret['result']:
ret['comment'] = 'Updated times on {0} {1}'.format(
'directory' if os.path.isdir(name) else 'file', name
)
ret['changes']['touched'] = name
return ret
def copy_(name,
source,
force=False,
makedirs=False,
preserve=False,
user=None,
group=None,
mode=None,
subdir=False,
**kwargs):
'''
If the file defined by the ``source`` option exists on the minion, copy it
to the named path. The file will not be overwritten if it already exists,
unless the ``force`` option is set to ``True``.
.. note::
This state only copies files from one location on a minion to another
location on the same minion. For copying files from the master, use a
:py:func:`file.managed <salt.states.file.managed>` state.
name
The location of the file to copy to
source
The location of the file to copy to the location specified with name
force
If the target location is present then the file will not be copied;
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
preserve
.. versionadded:: 2015.5.0
Set ``preserve: True`` to preserve user/group ownership and mode
after copying. Default is ``False``. If ``preserve`` is set to ``True``,
then user/group/mode attributes will be ignored.
user
.. versionadded:: 2015.5.0
The user to own the copied file, this defaults to the user salt is
running as on the minion. If ``preserve`` is set to ``True``, then
this will be ignored
group
.. versionadded:: 2015.5.0
The group to own the copied file, this defaults to the group salt is
running as on the minion. If ``preserve`` is set to ``True`` or on
Windows this will be ignored
mode
.. versionadded:: 2015.5.0
The permissions to set on the copied file, e.g. ``644``, ``'0775'``, or
``'4664'``. If ``preserve`` is set to ``True``, then this will be ignored.
Not supported on Windows.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it is not enforced.
subdir
.. versionadded:: 2015.5.0
If the name is a directory then place the file inside the named
directory
.. note::
The copy function accepts paths that are local to the Salt minion.
This function does not support salt://, http://, or the other
additional file paths that are supported by :mod:`states.file.managed
<salt.states.file.managed>` and :mod:`states.file.recurse
<salt.states.file.recurse>`.
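A minimal usage sketch (the paths shown are illustrative only):
.. code-block:: yaml
    /etc/example.conf:
      file.copy:
        - source: /tmp/example.conf
        - force: True
        - makedirs: True
        - preserve: True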
'''
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': 'Copied "{0}" to "{1}"'.format(source, name),
'result': True}
if not name:
return _error(ret, 'Must provide name to file.copy')
changed = True
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.exists(source):
return _error(ret, 'Source file "{0}" is not present'.format(source))
if preserve:
user = __salt__['file.get_user'](source)
group = __salt__['file.get_group'](source)
mode = __salt__['file.get_mode'](source)
else:
user = _test_owner(kwargs, user=user)
if user is None:
user = __opts__['user']
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
'The group argument for {0} has been ignored as this is '
'a Windows system.'.format(name)
)
group = user
if group is None:
group = __salt__['file.gid_to_group'](
__salt__['user.info'](user).get('gid', 0)
)
u_check = _check_user(user, group)
if u_check:
# The specified user or group do not exist
return _error(ret, u_check)
if mode is None:
mode = __salt__['file.get_mode'](source)
if os.path.isdir(name) and subdir:
# If the target is a dir and subdir is True, copy the source into that dir
name = os.path.join(name, os.path.basename(source))
if os.path.lexists(source) and os.path.lexists(name):
# if this is a file which did not change, do not update
if force and os.path.isfile(name):
hash1 = salt.utils.hashutils.get_hash(name)
hash2 = salt.utils.hashutils.get_hash(source)
if hash1 == hash2:
changed = True
ret['comment'] = ' '.join([ret['comment'], '- files are identical but force flag is set'])
if not force:
changed = False
elif not __opts__['test'] and changed:
# Remove the destination to prevent problems later
try:
__salt__['file.remove'](name)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for '
'forced move'.format(name)
)
if __opts__['test']:
if changed:
ret['comment'] = 'File "{0}" is set to be copied to "{1}"'.format(
source,
name
)
ret['result'] = None
else:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
if not changed:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = True
return ret
# Run makedirs
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
__salt__['file.makedirs'](name)
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
# All tests pass, move the file into place
try:
if os.path.isdir(source):
shutil.copytree(source, name, symlinks=True)
for root, dirs, files in salt.utils.path.os_walk(name):
for dir_ in dirs:
__salt__['file.lchown'](os.path.join(root, dir_), user, group)
for file_ in files:
__salt__['file.lchown'](os.path.join(root, file_), user, group)
else:
shutil.copy(source, name)
ret['changes'] = {name: source}
# Preserve really means just keep the behavior of the cp command. If
# the filesystem we're copying to is squashed or doesn't support chown
# then we shouldn't be checking anything.
if not preserve:
__salt__['file.check_perms'](name, ret, user, group, mode)
except (IOError, OSError):
return _error(
ret, 'Failed to copy "{0}" to "{1}"'.format(source, name))
return ret
def rename(name, source, force=False, makedirs=False):
'''
If the source file exists on the system, rename it to the named file. The
named file will not be overwritten if it already exists unless the force
option is set to True.
name
The location of the file to rename to
source
The location of the file to move to the location specified with name
force
If the target location is present then the file will not be moved,
specify "force: True" to overwrite the target file
makedirs
If the target subdirectories don't exist create them
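A minimal usage sketch (the paths shown are illustrative only):
.. code-block:: yaml
    /etc/newname.conf:
      file.rename:
        - source: /etc/oldname.conf
        - force: True
        - makedirs: True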
'''
name = os.path.expanduser(name)
source = os.path.expanduser(source)
ret = {
'name': name,
'changes': {},
'comment': '',
'result': True}
if not name:
return _error(ret, 'Must provide name to file.rename')
if not os.path.isabs(name):
return _error(
ret, 'Specified file {0} is not an absolute path'.format(name))
if not os.path.lexists(source):
ret['comment'] = ('Source file "{0}" has already been moved out of '
'place').format(source)
return ret
if os.path.lexists(source) and os.path.lexists(name):
if not force:
ret['comment'] = ('The target file "{0}" exists and will not be '
'overwritten'.format(name))
ret['result'] = False
return ret
elif not __opts__['test']:
# Remove the destination to prevent problems later
try:
__salt__['file.remove'](name)
except (IOError, OSError):
return _error(
ret,
'Failed to delete "{0}" in preparation for '
'forced move'.format(name)
)
if __opts__['test']:
ret['comment'] = 'File "{0}" is set to be moved to "{1}"'.format(
source,
name
)
ret['result'] = None
return ret
# Run makedirs
dname = os.path.dirname(name)
if not os.path.isdir(dname):
if makedirs:
__salt__['file.makedirs'](name)
else:
return _error(
ret,
'The target directory {0} is not present'.format(dname))
# All tests pass, move the file into place
try:
if os.path.islink(source):
linkto = os.readlink(source)
os.symlink(linkto, name)
os.unlink(source)
else:
shutil.move(source, name)
except (IOError, OSError):
return _error(
ret, 'Failed to move "{0}" to "{1}"'.format(source, name))
ret['comment'] = 'Moved "{0}" to "{1}"'.format(source, name)
ret['changes'] = {name: source}
return ret
def accumulated(name, filename, text, **kwargs):
'''
Prepare accumulator which can be used in template in file.managed state.
Accumulator dictionary becomes available in template. It can also be used
in file.blockreplace.
name
Accumulator name
filename
Filename which would receive this accumulator (see file.managed state
documentation about ``name``)
text
String or list for adding in accumulator
require_in / watch_in
At least one of these is required to ensure the accumulator is filled
before the file is managed. This usually references the same file as
``filename``.
Example:
Given the following:
.. code-block:: yaml
animals_doing_things:
file.accumulated:
- filename: /tmp/animal_file.txt
- text: ' jumps over the lazy dog.'
- require_in:
- file: animal_file
animal_file:
file.managed:
- name: /tmp/animal_file.txt
- source: salt://animal_file.txt
- template: jinja
One might write a template for ``animal_file.txt`` like the following:
.. code-block:: jinja
The quick brown fox{% for animal in accumulator['animals_doing_things'] %}{{ animal }}{% endfor %}
Collectively, the above states and template file will produce:
.. code-block:: text
The quick brown fox jumps over the lazy dog.
Multiple accumulators can be "chained" together.
.. note::
The 'accumulator' data structure is a Python dictionary.
Do not expect any loop over the keys in a deterministic order!
'''
ret = {
'name': name,
'changes': {},
'result': True,
'comment': ''
}
if not name:
return _error(ret, 'Must provide name to file.accumulated')
if text is None:
ret['result'] = False
ret['comment'] = 'No text supplied for accumulator'
return ret
require_in = __low__.get('require_in', [])
watch_in = __low__.get('watch_in', [])
deps = require_in + watch_in
if not [x for x in deps if 'file' in x]:
ret['result'] = False
ret['comment'] = 'Orphaned accumulator {0} in {1}:{2}'.format(
name,
__low__['__sls__'],
__low__['__id__']
)
return ret
if isinstance(text, six.string_types):
text = (text,)
elif isinstance(text, dict):
text = (text,)
accum_data, accum_deps = _load_accumulators()
if filename not in accum_data:
accum_data[filename] = {}
if filename not in accum_deps:
accum_deps[filename] = {}
if name not in accum_deps[filename]:
accum_deps[filename][name] = []
for accumulator in deps:
accum_deps[filename][name].extend(six.itervalues(accumulator))
if name not in accum_data[filename]:
accum_data[filename][name] = []
for chunk in text:
if chunk not in accum_data[filename][name]:
accum_data[filename][name].append(chunk)
ret['comment'] = ('Accumulator {0} for file {1} '
'was charged by text'.format(name, filename))
_persist_accummulators(accum_data, accum_deps)
return ret
def serialize(name,
dataset=None,
dataset_pillar=None,
user=None,
group=None,
mode=None,
backup='',
makedirs=False,
show_changes=True,
create=True,
merge_if_exists=False,
encoding=None,
encoding_errors='strict',
serializer_opts=None,
**kwargs):
'''
Serializes a dataset and stores it in a managed file. Useful for sharing
simple configuration files.
name
The location of the file to create
dataset
The dataset that will be serialized
dataset_pillar
Operates like ``dataset``, but draws from a value stored in pillar,
using the pillar path syntax used in :mod:`pillar.get
<salt.modules.pillar.get>`. This is useful when the pillar value
contains newlines, as referencing a pillar variable using a jinja/mako
template can result in YAML formatting issues due to the newlines
causing indentation mismatches.
.. versionadded:: 2015.8.0
formatter
Write the data as this format. See the list of :py:mod:`serializer
modules <salt.serializers>` for supported output formats.
encoding
If specified, then the specified encoding will be used. Otherwise, the
file will be encoded using the system locale (usually UTF-8). See
https://docs.python.org/3/library/codecs.html#standard-encodings for
the list of available encodings.
.. versionadded:: 2017.7.0
encoding_errors : 'strict'
Error handling scheme for encoding. Default is ``'strict'``.
See https://docs.python.org/2/library/codecs.html#codec-base-classes
for the list of available schemes.
.. versionadded:: 2017.7.0
user
The user to own the directory, this defaults to the user salt is
running as on the minion
group
The group ownership set for the directory, this defaults to the group
salt is running as on the minion
mode
The permissions to set on this file, e.g. ``644``, ``0775``, or
``4664``.
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it is not enforced.
.. note::
This option is **not** supported on Windows.
backup
Overrides the default backup mode for this specific file.
makedirs
Create parent directories for destination file.
.. versionadded:: 2014.1.3
show_changes
Output a unified diff of the old file and the new file. If ``False``
return a boolean if any changes were made.
create
Default is True. If create is set to False, then the file will only be
managed if the file already exists on the system.
merge_if_exists
Default is False. If merge_if_exists is True, then the existing file will
be parsed and the dataset passed in will be merged with the existing
content.
.. versionadded:: 2014.7.0
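For example (an illustrative sketch; the path and key are hypothetical),
merging a single key into an existing YAML file:
.. code-block:: yaml
    /etc/dummy/settings.yaml:
      file.serialize:
        - dataset:
            feature_flag: true
        - formatter: yaml
        - merge_if_exists: True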
serializer_opts
Pass through options to serializer. For example:
.. code-block:: yaml
/etc/dummy/package.yaml
file.serialize:
- formatter: yaml
- serializer_opts:
- explicit_start: True
- default_flow_style: True
- indent: 4
The valid opts are the additional opts (i.e. not the data being
serialized) for the function used to serialize the data. Documentation
for these functions can be found in the list below:
- For **yaml**: `yaml.dump()`_
- For **json**: `json.dumps()`_
- For **python**: `pprint.pformat()`_
.. _`yaml.dump()`: https://pyyaml.org/wiki/PyYAMLDocumentation
.. _`json.dumps()`: https://docs.python.org/2/library/json.html#json.dumps
.. _`pprint.pformat()`: https://docs.python.org/2/library/pprint.html#pprint.pformat
.. versionadded:: Fluorine
For example, this state:
.. code-block:: yaml
/etc/dummy/package.json:
file.serialize:
- dataset:
name: naive
description: A package using naive versioning
author: A confused individual <[email protected]>
dependencies:
express: '>= 1.2.0'
optimist: '>= 0.1.0'
engine: node 0.4.1
- formatter: json
will manage the file ``/etc/dummy/package.json``:
.. code-block:: json
{
"author": "A confused individual <[email protected]>",
"dependencies": {
"express": ">= 1.2.0",
"optimist": ">= 0.1.0"
},
"description": "A package using naive versioning",
"engine": "node 0.4.1",
"name": "naive"
}
'''
if 'env' in kwargs:
# "env" is not supported; Use "saltenv".
kwargs.pop('env')
name = os.path.expanduser(name)
# Set some defaults
options = {
'yaml.serialize': {
'default_flow_style': False,
},
'json.serialize': {
'indent': 2,
'separators': (',', ': '),
'sort_keys': True,
}
}
if encoding:
options['yaml.serialize'].update({'allow_unicode': True})
options['json.serialize'].update({'ensure_ascii': False})
ret = {'changes': {},
'comment': '',
'name': name,
'result': True}
if not name:
return _error(ret, 'Must provide name to file.serialize')
if not create:
if not os.path.isfile(name):
# Don't create a file that is not already present
ret['comment'] = ('File {0} is not present and is not set for '
'creation').format(name)
return ret
formatter = kwargs.pop('formatter', 'yaml').lower()
if len([x for x in (dataset, dataset_pillar) if x]) > 1:
return _error(
ret, 'Only one of \'dataset\' and \'dataset_pillar\' is permitted')
if dataset_pillar:
dataset = __salt__['pillar.get'](dataset_pillar)
if dataset is None:
return _error(
ret, 'Neither \'dataset\' nor \'dataset_pillar\' was defined')
if salt.utils.platform.is_windows():
if group is not None:
log.warning(
'The group argument for %s has been ignored as this '
'is a Windows system.', name
)
group = user
serializer_name = '{0}.serialize'.format(formatter)
deserializer_name = '{0}.deserialize'.format(formatter)
if serializer_name not in __serializers__:
return {'changes': {},
'comment': '{0} format is not supported'.format(
formatter.capitalize()),
'name': name,
'result': False
}
if serializer_opts:
if not options.get(serializer_name, {}):
options[serializer_name] = {}
options.get(serializer_name, {}).update(
salt.utils.data.repack_dictlist(serializer_opts)
)
if merge_if_exists:
if os.path.isfile(name):
if '{0}.deserialize'.format(formatter) not in __serializers__:
return {'changes': {},
'comment': ('{0} format is not supported for merging'
.format(formatter.capitalize())),
'name': name,
'result': False}
with salt.utils.files.fopen(name, 'r') as fhr:
try:
existing_data = __serializers__[deserializer_name](fhr, **options.get(serializer_name, {}))
except (TypeError, salt.serializers.DeserializationError):
log.debug('DeserializationError exception caught, trying to merge without serializer_opts: %s', options.get(serializer_name, {}))
fhr.seek(0)
existing_data = __serializers__[deserializer_name](fhr)
if existing_data is not None:
merged_data = salt.utils.dictupdate.merge_recurse(existing_data, dataset)
if existing_data == merged_data:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
dataset = merged_data
contents = __serializers__[serializer_name](dataset, **options.get(serializer_name, {}))
contents += '\n'
# Make sure that any leading zeros stripped by YAML loader are added back
mode = salt.utils.files.normalize_mode(mode)
if __opts__['test']:
ret['changes'] = __salt__['file.check_managed_changes'](
name=name,
source=None,
source_hash={},
source_hash_name=None,
user=user,
group=group,
mode=mode,
attrs=None,
template=None,
context=None,
defaults=None,
saltenv=__env__,
contents=contents,
skip_verify=False,
**kwargs
)
if ret['changes']:
ret['result'] = None
ret['comment'] = 'Dataset will be serialized and stored into {0}'.format(
name)
if not show_changes:
ret['changes']['diff'] = '<show_changes=False>'
else:
ret['result'] = True
ret['comment'] = 'The file {0} is in the correct state'.format(name)
return ret
return __salt__['file.manage_file'](name=name,
sfn='',
ret=ret,
source=None,
source_sum={},
user=user,
group=group,
mode=mode,
attrs=None,
saltenv=__env__,
backup=backup,
makedirs=makedirs,
template=None,
show_changes=show_changes,
encoding=encoding,
encoding_errors=encoding_errors,
contents=contents)
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode='0600'):
'''
Create a special file similar to the 'nix mknod command. The supported
device types are ``p`` (fifo pipe), ``c`` (character device), and ``b``
(block device). Provide the major and minor numbers when specifying a
character device or block device. A fifo pipe does not require this
information. The command will create the necessary dirs if needed. If a
file of the same name exists but is not of the same type/major/minor, it
will not be overwritten or unlinked (deleted). This is in place as a
safety measure because you can really shoot yourself in the foot here, and
it matches the behavior of 'nix ``mknod``. It is also important to note
that not just anyone can create special devices; usually this is only done
as root. If the state is executed as a user other than root on a minion,
you may receive a permission error.
name
name of the file
ntype
node type 'p' (fifo pipe), 'c' (character device), or 'b'
(block device)
major
major number of the device
does not apply to a fifo pipe
minor
minor number of the device
does not apply to a fifo pipe
user
owning user of the device/pipe
group
owning group of the device/pipe
mode
permissions on the device/pipe
Usage:
.. code-block:: yaml
/dev/chr:
file.mknod:
- ntype: c
- major: 180
- minor: 31
- user: root
- group: root
- mode: 660
/dev/blk:
file.mknod:
- ntype: b
- major: 8
- minor: 999
- user: root
- group: root
- mode: 660
/dev/fifo:
file.mknod:
- ntype: p
- user: root
- group: root
- mode: 660
.. versionadded:: 0.17.0
'''
name = os.path.expanduser(name)
ret = {'name': name,
'changes': {},
'comment': '',
'result': False}
if not name:
return _error(ret, 'Must provide name to file.mknod')
if ntype == 'c':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File {0} exists and is not a character device. Refusing '
'to continue'.format(name)
)
# Check if it is a character device
elif not __salt__['file.is_chrdev'](name):
if __opts__['test']:
ret['comment'] = \
'Character device {0} is set to be created'.format(name)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the major/minor
else:
devmaj, devmin = __salt__['file.get_devmm'](name)
if (major, minor) != (devmaj, devmin):
ret['comment'] = (
'Character device {0} exists and has a different '
'major/minor {1}/{2}. Refusing to continue'
.format(name, devmaj, devmin)
)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Character device {0} is in the correct state'.format(
name
)
)
elif ntype == 'b':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File {0} exists and is not a block device. Refusing to '
'continue'.format(name)
)
# Check if it is a block device
elif not __salt__['file.is_blkdev'](name):
if __opts__['test']:
ret['comment'] = 'Block device {0} is set to be created'.format(name)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the major/minor
else:
devmaj, devmin = __salt__['file.get_devmm'](name)
if (major, minor) != (devmaj, devmin):
ret['comment'] = (
'Block device {0} exists and has a different major/minor '
'{1}/{2}. Refusing to continue'.format(
name, devmaj, devmin
)
)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Block device {0} is in the correct state'.format(name)
)
elif ntype == 'p':
# Check for file existence
if __salt__['file.file_exists'](name):
ret['comment'] = (
'File {0} exists and is not a fifo pipe. Refusing to '
'continue'.format(name)
)
# Check if it is a fifo
elif not __salt__['file.is_fifo'](name):
if __opts__['test']:
ret['comment'] = 'Fifo pipe {0} is set to be created'.format(
name
)
ret['result'] = None
else:
ret = __salt__['file.mknod'](name,
ntype,
major,
minor,
user,
group,
mode)
# Check the perms
else:
ret = __salt__['file.check_perms'](name,
None,
user,
group,
mode)[0]
if not ret['changes']:
ret['comment'] = (
'Fifo pipe {0} is in the correct state'.format(name)
)
else:
ret['comment'] = (
'Node type unavailable: \'{0}\'. Available node types are '
'character (\'c\'), block (\'b\'), and pipe (\'p\')'.format(ntype)
)
return ret
def mod_run_check_cmd(cmd, filename, **check_cmd_opts):
'''
Execute the check_cmd logic.
Return a result dict if ``check_cmd`` fails (retcode != 0),
otherwise return True
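For context, ``check_cmd`` is typically supplied on a file state; a minimal
illustrative example (the paths are hypothetical):
.. code-block:: yaml
    /etc/sudoers.d/deploy:
      file.managed:
        - source: salt://sudoers/deploy
        - check_cmd: /usr/sbin/visudo -c -f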
'''
log.debug('running our check_cmd')
_cmd = '{0} {1}'.format(cmd, filename)
cret = __salt__['cmd.run_all'](_cmd, **check_cmd_opts)
if cret['retcode'] != 0:
ret = {'comment': 'check_cmd execution failed',
'skip_watch': True,
'result': False}
if cret.get('stdout'):
ret['comment'] += '\n' + cret['stdout']
if cret.get('stderr'):
ret['comment'] += '\n' + cret['stderr']
return ret
# No reason to stop, return True
return True
def decode(name,
encoded_data=None,
contents_pillar=None,
encoding_type='base64',
checksum='md5'):
'''
Decode an encoded file and write it to disk
.. versionadded:: 2016.3.0
name
Path of the file to be written.
encoded_data
The encoded file. Either this option or ``contents_pillar`` must be
specified.
contents_pillar
A Pillar path to the encoded file. Uses the same path syntax as
:py:func:`pillar.get <salt.modules.pillar.get>`. The
:py:func:`hashutil.base64_encodefile
<salt.modules.hashutil.base64_encodefile>` function can load encoded
content into Pillar. Either this option or ``encoded_data`` must be
specified.
encoding_type : ``base64``
The type of encoding.
checksum : ``md5``
The hashing algorithm to use to generate checksums. Wraps the
:py:func:`hashutil.digest <salt.modules.hashutil.digest>` execution
function.
Usage:
.. code-block:: yaml
write_base64_encoded_string_to_a_file:
file.decode:
- name: /tmp/new_file
- encoding_type: base64
- contents_pillar: mypillar:thefile
# or
write_base64_encoded_string_to_a_file:
file.decode:
- name: /tmp/new_file
- encoding_type: base64
- encoded_data: |
Z2V0IHNhbHRlZAo=
With multi-line strings, be careful that the YAML indentation is correct,
e.g.:
.. code-block:: jinja
write_base64_encoded_string_to_a_file:
file.decode:
- name: /tmp/new_file
- encoding_type: base64
- encoded_data: |
{{ salt.pillar.get('path:to:data') | indent(8) }}
'''
ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
if not (encoded_data or contents_pillar):
raise CommandExecutionError("Specify either the 'encoded_data' or "
"'contents_pillar' argument.")
elif encoded_data and contents_pillar:
raise CommandExecutionError("Specify only one 'encoded_data' or "
"'contents_pillar' argument.")
elif encoded_data:
content = encoded_data
elif contents_pillar:
content = __salt__['pillar.get'](contents_pillar, False)
if content is False:
raise CommandExecutionError('Pillar data not found.')
else:
raise CommandExecutionError('No contents given.')
dest_exists = __salt__['file.file_exists'](name)
if dest_exists:
instr = __salt__['hashutil.base64_decodestring'](content)
insum = __salt__['hashutil.digest'](instr, checksum)
del instr # no need to keep in-memory after we have the hash
outsum = __salt__['hashutil.digest_file'](name, checksum)
if insum != outsum:
ret['changes'] = {
'old': outsum,
'new': insum,
}
if not ret['changes']:
ret['comment'] = 'File is in the correct state.'
ret['result'] = True
return ret
if __opts__['test'] is True:
ret['comment'] = 'File is set to be updated.'
ret['result'] = None
return ret
ret['result'] = __salt__['hashutil.base64_decodefile'](content, name)
ret['comment'] = 'File was updated.'
if not ret['changes']:
ret['changes'] = {
'old': None,
'new': __salt__['hashutil.digest_file'](name, checksum),
}
return ret
def shortcut(
name,
target,
arguments=None,
working_dir=None,
description=None,
icon_location=None,
force=False,
backupname=None,
makedirs=False,
user=None,
**kwargs):
'''
Create a Windows shortcut
If the file already exists and is a shortcut pointing to any location other
than the specified target, the shortcut will be replaced. If it is
a regular file or directory then the state will return False. If the
regular file or directory should be replaced with a shortcut, pass
``force: True``; if it should be renamed instead, pass a ``backupname``.
name
The location of the shortcut to create. Must end with either
".lnk" or ".url"
target
The location that the shortcut points to
arguments
Any arguments to pass in the shortcut
working_dir
Working directory in which to execute target
description
Description to set on shortcut
icon_location
Location of shortcut's icon
force
If the name of the shortcut exists and is not a file and
force is set to False, the state will fail. If force is set to
True, the link or directory in the way of the shortcut file
will be deleted to make room for the shortcut, unless
backupname is set, in which case it will be renamed
backupname
If the name of the shortcut exists and is not a file, it will be
renamed to the backupname. If the backupname already
exists and force is False, the state will fail. Otherwise, the
backupname will be removed first.
makedirs
If the location of the shortcut does not already have a parent
directory then the state will fail, setting makedirs to True will
allow Salt to create the parent directory. Setting this to True will
also create the parent for backupname if necessary.
user
The user to own the file, this defaults to the user salt is running as
on the minion
The default mode for new files and directories corresponds to the umask
of the salt process. For existing files and directories it is not enforced.
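A minimal usage sketch (the target, arguments, and paths are illustrative only):
.. code-block:: yaml
    C:\Users\Public\Desktop\Editor.lnk:
      file.shortcut:
        - target: C:\Program Files\Editor\editor.exe
        - arguments: --maximized
        - working_dir: C:\Program Files\Editor
        - force: True
        - makedirs: True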
'''
user = _test_owner(kwargs, user=user)
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
if not salt.utils.platform.is_windows():
return _error(ret, 'Shortcuts are only supported on Windows')
if not name:
return _error(ret, 'Must provide name to file.shortcut')
if not name.endswith('.lnk') and not name.endswith('.url'):
return _error(ret, 'Name must end with either ".lnk" or ".url"')
# Normalize paths; do this after error checks to avoid invalid input
# getting expanded, e.g. '' turning into '.'
name = os.path.realpath(os.path.expanduser(name))
if name.endswith('.lnk'):
target = os.path.realpath(os.path.expanduser(target))
if working_dir:
working_dir = os.path.realpath(os.path.expanduser(working_dir))
if icon_location:
icon_location = os.path.realpath(os.path.expanduser(icon_location))
if user is None:
user = __opts__['user']
# Make sure the user exists in Windows
# Salt default is 'root'
if not __salt__['user.info'](user):
# User not found, use the account salt is running under
# If username not found, use System
user = __salt__['user.current']()
if not user:
user = 'SYSTEM'
preflight_errors = []
uid = __salt__['file.user_to_uid'](user)
if uid == '':
preflight_errors.append('User {0} does not exist'.format(user))
if not os.path.isabs(name):
preflight_errors.append(
'Specified file {0} is not an absolute path'.format(name)
)
if preflight_errors:
msg = '. '.join(preflight_errors)
if len(preflight_errors) > 1:
msg += '.'
return _error(ret, msg)
presult, pcomment, ret['pchanges'] = _shortcut_check(name,
target,
arguments,
working_dir,
description,
icon_location,
force,
user)
if __opts__['test']:
ret['result'] = presult
ret['comment'] = pcomment
return ret
if not os.path.isdir(os.path.dirname(name)):
if makedirs:
__salt__['file.makedirs'](
name,
user=user)
else:
return _error(
ret,
'Directory "{0}" for shortcut is not present'.format(
os.path.dirname(name)
)
)
if os.path.isdir(name) or os.path.islink(name):
# It is not a shortcut, but a dir or symlink
if backupname is not None:
# Make a backup first
if os.path.lexists(backupname):
if not force:
return _error(ret, ((
'File exists where the backup target {0} should go'
).format(backupname)))
else:
__salt__['file.remove'](backupname)
time.sleep(1) # wait for asynchronous deletion
if not os.path.isdir(os.path.dirname(backupname)):
if makedirs:
os.makedirs(backupname)
else:
return _error(ret, (
'Directory does not exist for'
' backup at "{0}"'
).format(os.path.dirname(backupname)))
os.rename(name, backupname)
time.sleep(1) # wait for asynchronous rename
elif force:
# Remove whatever is in the way
__salt__['file.remove'](name)
ret['changes']['forced'] = 'Shortcut was forcibly replaced'
time.sleep(1) # wait for asynchronous deletion
else:
# Otherwise throw an error
return _error(ret, ((
'Directory or symlink exists where the'
' shortcut "{0}" should be'
).format(name)))
# This will just load the shortcut if it already exists
# It won't create the file until calling scut.Save()
shell = win32com.client.Dispatch("WScript.Shell")
scut = shell.CreateShortcut(name)
# The shortcut target will automatically be created with its
# canonical capitalization; no way to override it, so ignore case
state_checks = [scut.TargetPath.lower() == target.lower()]
if arguments is not None:
state_checks.append(scut.Arguments == arguments)
if working_dir is not None:
state_checks.append(
scut.WorkingDirectory.lower() == working_dir.lower()
)
if description is not None:
state_checks.append(scut.Description == description)
if icon_location is not None:
state_checks.append(scut.IconLocation.lower() == icon_location.lower())
if __salt__['file.file_exists'](name):
# The shortcut exists, verify that it matches the desired state
if not all(state_checks):
# The target is wrong, delete it
os.remove(name)
else:
if _check_shortcut_ownership(name, user):
# The shortcut looks good!
ret['comment'] = ('Shortcut {0} is present and owned by '
'{1}'.format(name, user))
else:
if _set_shortcut_ownership(name, user):
ret['comment'] = ('Set ownership of shortcut {0} to '
'{1}'.format(name, user))
ret['changes']['ownership'] = '{0}'.format(user)
else:
ret['result'] = False
ret['comment'] += (
'Failed to set ownership of shortcut {0} to '
'{1}'.format(name, user)
)
return ret
if not os.path.exists(name):
# The shortcut is not present, make it
try:
scut.TargetPath = target
if arguments is not None:
scut.Arguments = arguments
if working_dir is not None:
scut.WorkingDirectory = working_dir
if description is not None:
scut.Description = description
if icon_location is not None:
scut.IconLocation = icon_location
scut.Save()
except (AttributeError, pywintypes.com_error) as exc:
ret['result'] = False
ret['comment'] = ('Unable to create new shortcut {0} -> '
'{1}: {2}'.format(name, target, exc))
return ret
else:
ret['comment'] = ('Created new shortcut {0} -> '
'{1}'.format(name, target))
ret['changes']['new'] = name
if not _check_shortcut_ownership(name, user):
if not _set_shortcut_ownership(name, user):
ret['result'] = False
ret['comment'] += (', but was unable to set ownership to '
'{0}'.format(user))
return ret
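# Illustrative sketch (not part of the original module; the paths below are
# hypothetical): like ``cached`` below, this state can be invoked from another
# state module via the ``__states__`` dunder:
#
#     ret = __states__['file.shortcut'](
#         r'C:\Users\Public\Desktop\Notepad.lnk',
#         target=r'C:\Windows\notepad.exe',
#         makedirs=True)
#
# The returned dictionary carries the usual ``name``/``result``/``comment``/
# ``changes`` keys populated by the function above.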
def cached(name,
source_hash='',
source_hash_name=None,
skip_verify=False,
saltenv='base'):
'''
.. versionadded:: 2017.7.3
Ensures that a file is saved to the minion's cache. This state is primarily
invoked by other states to ensure that we do not re-download a source file
if we do not need to.
name
The URL of the file to be cached. To cache a file from an environment
other than ``base``, either use the ``saltenv`` argument or include the
saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).
.. note::
            A list of URLs is not supported; this must be a single URL. If a
local file is passed here, then the state will obviously not try to
download anything, but it will compare a hash if one is specified.
source_hash
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
.. note::
For remote files not originating from the ``salt://`` fileserver,
such as http(s) or ftp servers, this state will not re-download the
file if the locally-cached copy matches this hash. This is done to
prevent unnecessary downloading on repeated runs of this state. To
update the cached copy of a file, it is necessary to update this
hash.
source_hash_name
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
skip_verify
See the documentation for this same argument in the
:py:func:`file.managed <salt.states.file.managed>` state.
.. note::
Setting this to ``True`` will result in a copy of the file being
downloaded from a remote (http(s), ftp, etc.) source each time the
state is run.
saltenv
Used to specify the environment from which to download a file from the
Salt fileserver (i.e. those with ``salt://`` URL).
This state will in most cases not be useful in SLS files, but it is useful
when writing a state or remote-execution module that needs to make sure
that a file at a given URL has been downloaded to the cachedir. One example
    of this is in the :py:func:`archive.extracted <salt.states.archive.extracted>`
state:
.. code-block:: python
result = __states__['file.cached'](source_match,
source_hash=source_hash,
source_hash_name=source_hash_name,
skip_verify=skip_verify,
saltenv=__env__)
This will return a dictionary containing the state's return data, including
a ``result`` key which will state whether or not the state was successful.
Note that this will not catch exceptions, so it is best used within a
try/except.
Once this state has been run from within another state or remote-execution
module, the actual location of the cached file can be obtained using
:py:func:`cp.is_cached <salt.modules.cp.is_cached>`:
.. code-block:: python
cached = __salt__['cp.is_cached'](source_match, saltenv=__env__)
This function will return the cached path of the file, or an empty string
if the file is not present in the minion cache.
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': False}
try:
parsed = _urlparse(name)
except Exception:
ret['comment'] = 'Only URLs or local file paths are valid input'
return ret
# This if statement will keep the state from proceeding if a remote source
# is specified and no source_hash is presented (unless we're skipping hash
# verification).
if not skip_verify \
and not source_hash \
and parsed.scheme in salt.utils.files.REMOTE_PROTOS:
ret['comment'] = (
'Unable to verify upstream hash of source file {0}, please set '
'source_hash or set skip_verify to True'.format(
salt.utils.url.redact_http_basic_auth(name))
)
return ret
if source_hash:
# Get the hash and hash type from the input. This takes care of parsing
# the hash out of a file containing checksums, if that is how the
# source_hash was specified.
try:
source_sum = __salt__['file.get_source_sum'](
source=name,
source_hash=source_hash,
source_hash_name=source_hash_name,
saltenv=saltenv)
except CommandExecutionError as exc:
ret['comment'] = exc.strerror
return ret
else:
if not source_sum:
# We shouldn't get here, problems in retrieving the hash in
# file.get_source_sum should result in a CommandExecutionError
# being raised, which we catch above. Nevertheless, we should
# provide useful information in the event that
# file.get_source_sum regresses.
ret['comment'] = (
'Failed to get source hash from {0}. This may be a bug. '
'If this error persists, please report it and set '
'skip_verify to True to work around it.'.format(source_hash)
)
return ret
else:
source_sum = {}
if parsed.scheme in salt.utils.files.LOCAL_PROTOS:
# Source is a local file path
full_path = os.path.realpath(os.path.expanduser(parsed.path))
if os.path.exists(full_path):
if not skip_verify and source_sum:
# Enforce the hash
local_hash = __salt__['file.get_hash'](
full_path,
source_sum.get('hash_type', __opts__['hash_type']))
if local_hash == source_sum['hsum']:
ret['result'] = True
ret['comment'] = (
'File {0} is present on the minion and has hash '
'{1}'.format(full_path, local_hash)
)
else:
ret['comment'] = (
'File {0} is present on the minion, but the hash ({1}) '
'does not match the specified hash ({2})'.format(
full_path, local_hash, source_sum['hsum']
)
)
return ret
else:
ret['result'] = True
ret['comment'] = 'File {0} is present on the minion'.format(
full_path
)
return ret
else:
ret['comment'] = 'File {0} is not present on the minion'.format(
full_path
)
return ret
local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv)
if local_copy:
# File is already cached
pre_hash = __salt__['file.get_hash'](
local_copy,
source_sum.get('hash_type', __opts__['hash_type']))
if not skip_verify and source_sum:
# Get the local copy's hash to compare with the hash that was
# specified via source_hash. If it matches, we can exit early from
# the state without going any further, because the file is cached
# with the correct hash.
if pre_hash == source_sum['hsum']:
ret['result'] = True
ret['comment'] = (
'File is already cached to {0} with hash {1}'.format(
local_copy, pre_hash
)
)
else:
pre_hash = None
def _try_cache(path, checksum):
'''
This helper is not needed anymore in develop as the fileclient in the
develop branch now has means of skipping a download if the existing
hash matches one passed to cp.cache_file. Remove this helper and the
code that invokes it, once we have merged forward into develop.
'''
if not path or not checksum:
return True
form = salt.utils.files.HASHES_REVMAP.get(len(checksum))
if form is None:
# Shouldn't happen, an invalid checksum length should be caught
# before we get here. But in the event this gets through, don't let
# it cause any trouble, and just return True.
return True
try:
return salt.utils.hashutils.get_hash(path, form=form) != checksum
except (IOError, OSError, ValueError):
# Again, shouldn't happen, but don't let invalid input/permissions
# in the call to get_hash blow this up.
return True
# Cache the file. Note that this will not actually download the file if
# either of the following is true:
# 1. source is a salt:// URL and the fileserver determines that the hash
# of the minion's copy matches that of the fileserver.
# 2. File is remote (http(s), ftp, etc.) and the specified source_hash
# matches the cached copy.
# Remote, non salt:// sources _will_ download if a copy of the file was
# not already present in the minion cache.
if _try_cache(local_copy, source_sum.get('hsum')):
# The _try_cache helper is obsolete in the develop branch. Once merged
# forward, remove the helper as well as this if statement, and dedent
# the below block.
try:
local_copy = __salt__['cp.cache_file'](
name,
saltenv=saltenv,
source_hash=source_sum.get('hsum'))
except Exception as exc:
ret['comment'] = salt.utils.url.redact_http_basic_auth(exc.__str__())
return ret
if not local_copy:
ret['comment'] = (
'Failed to cache {0}, check minion log for more '
'information'.format(
salt.utils.url.redact_http_basic_auth(name))
)
return ret
post_hash = __salt__['file.get_hash'](
local_copy,
source_sum.get('hash_type', __opts__['hash_type']))
if pre_hash != post_hash:
ret['changes']['hash'] = {'old': pre_hash, 'new': post_hash}
# Check the hash, if we're enforcing one. Note that this will be the first
# hash check if the file was not previously cached, and the 2nd hash check
    # if it was cached and the cached copy was replaced by a fresh download.
if not skip_verify and source_sum:
if post_hash == source_sum['hsum']:
ret['result'] = True
ret['comment'] = (
'File is already cached to {0} with hash {1}'.format(
local_copy, post_hash
)
)
else:
ret['comment'] = (
'File is cached to {0}, but the hash ({1}) does not match '
'the specified hash ({2})'.format(
local_copy, post_hash, source_sum['hsum']
)
)
return ret
# We're not enforcing a hash, and we already know that the file was
# successfully cached, so we know the state was successful.
ret['result'] = True
ret['comment'] = 'File is cached to {0}'.format(local_copy)
return ret
def not_cached(name, saltenv='base'):
'''
.. versionadded:: 2017.7.3
    Ensures that a file is not present in the minion's cache, deleting it if
    found. This state is primarily invoked by other states to ensure that a
    fresh copy of a source file is downloaded the next time it is needed.
name
The URL of the file to be cached. To cache a file from an environment
other than ``base``, either use the ``saltenv`` argument or include the
saltenv in the URL (e.g. ``salt://path/to/file.conf?saltenv=dev``).
.. note::
            A list of URLs is not supported; this must be a single URL. If a
local file is passed here, the state will take no action.
saltenv
Used to specify the environment from which to download a file from the
Salt fileserver (i.e. those with ``salt://`` URL).
'''
ret = {'changes': {},
'comment': '',
'name': name,
'result': False}
try:
parsed = _urlparse(name)
except Exception:
ret['comment'] = 'Only URLs or local file paths are valid input'
return ret
else:
if parsed.scheme in salt.utils.files.LOCAL_PROTOS:
full_path = os.path.realpath(os.path.expanduser(parsed.path))
ret['result'] = True
ret['comment'] = (
'File {0} is a local path, no action taken'.format(
full_path
)
)
return ret
local_copy = __salt__['cp.is_cached'](name, saltenv=saltenv)
if local_copy:
try:
os.remove(local_copy)
except Exception as exc:
ret['comment'] = 'Failed to delete {0}: {1}'.format(
local_copy, exc.__str__()
)
else:
ret['result'] = True
ret['changes']['deleted'] = True
ret['comment'] = '{0} was deleted'.format(local_copy)
else:
ret['result'] = True
ret['comment'] = '{0} is not cached'.format(name)
return ret
|
the-stack_0_13886 | # ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from skbio.util import classproperty, overrides
from ._nucleotide_sequence import NucleotideSequence
from ._iupac_sequence import IUPACSequence
class DNA(NucleotideSequence):
"""Store DNA sequence data and optional associated metadata.
Only characters in the IUPAC DNA character set [1]_ are supported.
Parameters
----------
sequence : str, Sequence, or 1D np.ndarray (np.uint8 or '\|S1')
Characters representing the DNA sequence itself.
metadata : dict, optional
Arbitrary metadata which applies to the entire sequence.
positional_metadata : Pandas DataFrame consumable, optional
Arbitrary per-character metadata. For example, quality data from
sequencing reads. Must be able to be passed directly to the Pandas
DataFrame constructor.
validate : bool, optional
If ``True``, validation will be performed to ensure that all sequence
characters are in the IUPAC DNA character set. If ``False``, validation
will not be performed. Turning off validation will improve runtime
performance. If invalid characters are present, however, there is
**no guarantee that operations performed on the resulting object will
work or behave as expected.** Only turn off validation if you are
certain that the sequence characters are valid. To store sequence data
that is not IUPAC-compliant, use ``Sequence``.
    lowercase : bool, optional
If ``True``, lowercase sequence characters will be converted to
uppercase characters in order to be valid IUPAC DNA characters.
Attributes
----------
values
metadata
positional_metadata
alphabet
gap_chars
nondegenerate_chars
degenerate_chars
degenerate_map
complement_map
See Also
--------
RNA
References
----------
.. [1] Nomenclature for incompletely specified bases in nucleic acid
sequences: recommendations 1984.
Nucleic Acids Res. May 10, 1985; 13(9): 3021-3030.
A Cornish-Bowden
Examples
--------
>>> from skbio import DNA
>>> s = DNA('ACCGAAT')
>>> s
DNA('ACCGAAT', length=7, has_metadata=False, has_positional_metadata=False)
Convert lowercase characters to uppercase:
>>> s = DNA('AcCGaaT', lowercase=True)
>>> s
DNA('ACCGAAT', length=7, has_metadata=False, has_positional_metadata=False)
"""
@classproperty
@overrides(NucleotideSequence)
def complement_map(cls):
comp_map = {
'A': 'T', 'T': 'A', 'G': 'C', 'C': 'G', 'Y': 'R', 'R': 'Y',
'S': 'S', 'W': 'W', 'K': 'M', 'M': 'K', 'B': 'V', 'D': 'H',
'H': 'D', 'V': 'B', 'N': 'N'
}
comp_map.update({c: c for c in cls.gap_chars})
return comp_map
@classproperty
@overrides(IUPACSequence)
def nondegenerate_chars(cls):
return set("ACGT")
@classproperty
@overrides(IUPACSequence)
def degenerate_map(cls):
return {
"R": set("AG"), "Y": set("CT"), "M": set("AC"), "K": set("TG"),
"W": set("AT"), "S": set("GC"), "B": set("CGT"), "D": set("AGT"),
"H": set("ACT"), "V": set("ACG"), "N": set("ACGT")
}
|
the-stack_0_13893 | from fastjsonschema import JsonSchemaException
from src.utils.errors import SDerror
def validate_body(data, validator):
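    """Validate ``data`` against a compiled fastjsonschema validator.
    ``validator`` is expected to be a callable produced by
    ``fastjsonschema.compile(schema)``; any ``JsonSchemaException`` it raises
    is converted into a 400 ``SDerror``.
    Illustrative usage (the schema here is hypothetical)::
        import fastjsonschema
        validator = fastjsonschema.compile(
            {'type': 'object', 'required': ['name']})
        validate_body({'name': 'widget'}, validator)  # passes silently
        validate_body({}, validator)                  # raises SDerror (400)
    """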
try:
validator(data)
except JsonSchemaException as e:
raise SDerror(
message="Invalid request body",
status_code=400,
error_type="JsonSchemaException",
details={
"detail": e.message
}
) |
the-stack_0_13895 | import logging
from xml.etree.ElementTree import Element
from jmeter_api.basics.config.elements import BasicConfig
from jmeter_api.basics.utils import Renderable, FileEncoding, tree_to_str
class Counter(BasicConfig, Renderable):
root_element_name = 'CounterConfig'
def __init__(self, *,
start: int = 0,
end: int = None,
incr: int = 1,
variable_name: str,
format_: str = '',
reset_on_tg_iteration: bool = False,
per_user: bool = False,
name: str = 'Counter',
comments: str = '',
is_enabled: bool = True):
self.start = start
self.end = end
self.incr = incr
        if end is not None and start >= end:
            raise ValueError('start must be less than end')
self.per_user = per_user
self.reset_on_tg_iteration = reset_on_tg_iteration
self.variable_name = variable_name
self.format_ = format_
super().__init__(name=name, comments=comments, is_enabled=is_enabled)
@property
def variable_name(self) -> str:
return self._variable_name
@variable_name.setter
def variable_name(self, value):
if not isinstance(value, str):
raise TypeError(
f'variable_name must be str. {type(value).__name__} was given')
self._variable_name = value
@property
def per_user(self) -> bool:
return self._per_user
@per_user.setter
def per_user(self, value):
if not isinstance(value, bool):
raise TypeError(
f'per_user must be bool. {type(value).__name__} was given')
self._per_user = value
@property
def reset_on_tg_iteration(self) -> bool:
return self._reset_on_tg_iteration
@reset_on_tg_iteration.setter
def reset_on_tg_iteration(self, value):
if not isinstance(value, bool):
raise TypeError(
                f'reset_on_tg_iteration must be bool. {type(value).__name__} was given')
if value and not self.per_user:
            raise ValueError('reset_on_tg_iteration cannot be True while per_user is False')
self._reset_on_tg_iteration = value
@property
def format_(self) -> str:
return self._format_
@format_.setter
def format_(self, value):
if not isinstance(value, str):
raise TypeError(
f'format_ must be str. {type(value).__name__} was given')
self._format_ = value
@property
def start(self) -> str:
return self._start
@start.setter
def start(self, value):
if not isinstance(value, int) or value < 0:
raise TypeError(
                f'start must be a non-negative int. {type(value).__name__} was given')
self._start = str(value)
@property
def end(self) -> str:
return self._end
@end.setter
def end(self, value):
if value is None:
self._end = ''
elif not isinstance(value, int) or value < 0:
raise TypeError(
                f'end must be a non-negative int. {type(value).__name__} was given')
self._end = str(value)
@property
def incr(self) -> str:
return self._incr
@incr.setter
def incr(self, value):
if not isinstance(value, int) or value <= 0:
raise TypeError(
f'incr must be positive int. {type(value).__name__} was given')
self._incr = str(value)
def to_xml(self) -> str:
element_root, xml_tree = super()._add_basics()
for element in list(element_root):
try:
if element.attrib['name'] == 'CounterConfig.name':
element.text = self.variable_name
elif element.attrib['name'] == 'CounterConfig.format':
element.text = self.format_
elif element.attrib['name'] == 'CounterConfig.start':
element.text = self.start
elif element.attrib['name'] == 'CounterConfig.end':
element.text = self.end
elif element.attrib['name'] == 'CounterConfig.incr':
element.text = self.incr
elif element.attrib['name'] == 'CounterConfig.per_user':
element.text = str(self.per_user).lower()
except KeyError:
logging.error(
f'Unable to properly convert {self.__class__} to xml.')
if self.per_user and self.reset_on_tg_iteration:
el = Element("boolProp", attrib={"name": 'CounterConfig.reset_on_tg_iteration'})
el.text = str(self.reset_on_tg_iteration).lower()
element_root.append(el)
return tree_to_str(xml_tree)
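# Illustrative sketch (not part of the original module): rendering a Counter to
# its JMX representation. The argument values below are hypothetical, and the
# output depends on the package's wrapper template machinery.
if __name__ == '__main__':
    demo_counter = Counter(start=1,
                           end=100,
                           incr=1,
                           variable_name='user_id',
                           per_user=True,
                           reset_on_tg_iteration=True)
    # Prints a <CounterConfig> XML block carrying the values set above
    print(demo_counter.to_xml())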
|
the-stack_0_13897 | # Copyright 2013-2014 Massachusetts Open Cloud Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS
# IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
"""This module provides the HaaS service's public API.
TODO: Spec out and document what sanitization is required.
"""
import importlib
import json
import logging
from haas import model
from haas.config import cfg
from moc.rest import APIError, rest_call
class NotFoundError(APIError):
"""An exception indicating that a given resource does not exist."""
status_code = 404 # Not Found
class DuplicateError(APIError):
"""An exception indicating that a given resource already exists."""
status_code = 409 # Conflict
class AllocationError(APIError):
"""An exception indicating resource exhaustion."""
class BadArgumentError(APIError):
"""An exception indicating an invalid request on the part of the user."""
class ProjectMismatchError(APIError):
"""An exception indicating that the resources given don't belong to the
same project.
"""
status_code = 409 # Conflict
class BlockedError(APIError):
"""An exception indicating that the requested action cannot happen until
some other change. For example, deletion is blocked until the components
are deleted, and possibly until the dirty flag is cleared as well.
"""
status_code = 409 # Conflict
class IllegalStateError(APIError):
"""The request is invalid due to the state of the system.
The request might otherwise be perfectly valid.
"""
status_code = 409 # Conflict
class ServerError(APIError):
"""An error occurred when trying to process the request.
This likely not the client's fault; as such the HTTP status is 500.
The semantics are much the same as the corresponding HTTP error.
"""
status_code = 500
@rest_call('PUT', '/user/<user>')
def user_create(user, password):
"""Create user with given password.
If the user already exists, a DuplicateError will be raised.
"""
db = model.Session()
_assert_absent(db, model.User, user)
user = model.User(user, password)
db.add(user)
db.commit()
@rest_call('DELETE', '/user/<user>')
def user_delete(user):
"""Delete user.
If the user does not exist, a NotFoundError will be raised.
"""
db = model.Session()
user = _must_find(db, model.User, user)
db.delete(user)
db.commit()
# Project Code #
################
@rest_call('PUT', '/project/<project>')
def project_create(project):
"""Create a project.
If the project already exists, a DuplicateError will be raised.
"""
db = model.Session()
_assert_absent(db, model.Project, project)
project = model.Project(project)
db.add(project)
db.commit()
@rest_call('DELETE', '/project/<project>')
def project_delete(project):
"""Delete project.
If the project does not exist, a NotFoundError will be raised.
"""
db = model.Session()
project = _must_find(db, model.Project, project)
if project.nodes:
raise BlockedError("Project has nodes still")
if project.networks_created:
raise BlockedError("Project still has networks")
if project.networks_access:
### FIXME: This is not the user's fault, and they cannot fix it. The
### only reason we need to error here is that, with how network access
### is done, the following bad thing happens. If there's a network
### that only the project can access, its "access" field will be the
### project. When you then delete that project, "access" will be set
### to None instead. Counter-intuitively, this then makes that
### network accessible to ALL PROJECTS! Once we use real ACLs, this
### will not be an issue---instead, the network will be accessible by
### NO projects.
raise BlockedError("Project can still access networks")
if project.headnode:
raise BlockedError("Project still has a headnode")
db.delete(project)
db.commit()
@rest_call('POST', '/project/<project>/connect_node')
def project_connect_node(project, node):
"""Add a node to a project.
If the node or project does not exist, a NotFoundError will be raised.
"""
db = model.Session()
project = _must_find(db, model.Project, project)
node = _must_find(db, model.Node, node)
project.nodes.append(node)
db.commit()
@rest_call('POST', '/project/<project>/detach_node')
def project_detach_node(project, node):
"""Remove a node from a project.
If the node or project does not exist, a NotFoundError will be raised.
"""
db = model.Session()
project = _must_find(db, model.Project, project)
node = _must_find(db, model.Node, node)
if node not in project.nodes:
raise NotFoundError("Node not in project")
for nic in node.nics:
if nic.network is not None:
raise BlockedError("Node attached to a network")
if nic.current_action is not None:
raise BlockedError("Node has a networking operation active on it.")
node.stop_console()
node.delete_console()
project.nodes.remove(node)
db.commit()
@rest_call('POST', '/project/<project>/add_user')
def project_add_user(project, user):
"""Add a user to a project.
If the project or user does not exist, a NotFoundError will be raised.
"""
db = model.Session()
user = _must_find(db, model.User, user)
project = _must_find(db, model.Project, project)
if project in user.projects:
raise DuplicateError('User %s is already in project %s'%
(user.label, project.label))
user.projects.append(project)
db.commit()
@rest_call('POST', '/project/<project>/remove_user')
def project_remove_user(project, user):
"""Remove a user from a project.
If the project or user does not exist, a NotFoundError will be raised.
"""
db = model.Session()
user = _must_find(db, model.User, user)
project = _must_find(db, model.Project, project)
if project not in user.projects:
raise NotFoundError("User %s is not in project %s"%
(user.label, project.label))
user.projects.remove(project)
db.commit()
# Node Code #
#############
@rest_call('PUT', '/node/<node>')
def node_register(node, ipmi_host, ipmi_user, ipmi_pass):
"""Create node.
If the node already exists, a DuplicateError will be raised.
"""
db = model.Session()
_assert_absent(db, model.Node, node)
node = model.Node(node, ipmi_host, ipmi_user, ipmi_pass)
db.add(node)
db.commit()
@rest_call('POST', '/node/<node>/power_cycle')
def node_power_cycle(node):
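    """Power cycle the given node.
    If the node does not exist, a NotFoundError will be raised.
    If power cycling fails, a ServerError will be raised.
    """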
db = model.Session()
node = _must_find(db, model.Node, node)
if not node.power_cycle():
raise ServerError('Could not power cycle node %s' % node.label)
@rest_call('DELETE', '/node/<node>')
def node_delete(node):
"""Delete node.
If the node does not exist, a NotFoundError will be raised.
"""
db = model.Session()
node = _must_find(db, model.Node, node)
node.stop_console()
node.delete_console()
db.delete(node)
db.commit()
@rest_call('PUT', '/node/<node>/nic/<nic>')
def node_register_nic(node, nic, macaddr):
"""Register exitence of nic attached to given node.
If the node does not exist, a NotFoundError will be raised.
    If there is already a nic with that name, a DuplicateError will be raised.
"""
db = model.Session()
node = _must_find(db, model.Node, node)
_assert_absent_n(db, node, model.Nic, nic)
nic = model.Nic(node, nic, macaddr)
db.add(nic)
db.commit()
@rest_call('DELETE', '/node/<node>/nic/<nic>')
def node_delete_nic(node, nic):
"""Delete nic with given name.
If the nic does not exist, a NotFoundError will be raised.
"""
db = model.Session()
nic = _must_find_n(db, _must_find(db, model.Node, node), model.Nic, nic)
db.delete(nic)
db.commit()
@rest_call('POST', '/node/<node>/nic/<nic>/connect_network')
def node_connect_network(node, nic, network):
"""Connect a physical NIC to a network.
Raises ProjectMismatchError if the node is not in a project, or if the
project does not have access rights to the given network.
"""
db = model.Session()
node = _must_find(db, model.Node, node)
nic = _must_find_n(db, node, model.Nic, nic)
network = _must_find(db, model.Network, network)
if not node.project:
raise ProjectMismatchError("Node not in project")
project = node.project
if nic.current_action:
raise BlockedError("A networking operation is already active on the nic.")
if (network.access is not None) and (network.access is not project):
raise ProjectMismatchError("Project does not have access to given network.")
db.add(model.NetworkingAction(nic, network))
db.commit()
@rest_call('POST', '/node/<node>/nic/<nic>/detach_network')
def node_detach_network(node, nic):
"""Detach a physical nic from any network it's on.
Raises ProjectMismatchError if the node is not in a project.
"""
db = model.Session()
node = _must_find(db, model.Node, node)
nic = _must_find_n(db, node, model.Nic, nic)
if not node.project:
raise ProjectMismatchError("Node not in project")
project = nic.owner.project
if nic.current_action:
raise BlockedError("A networking operation is already active on the nic.")
db.add(model.NetworkingAction(nic, None))
db.commit()
# Head Node Code #
##################
@rest_call('PUT', '/headnode/<headnode>')
def headnode_create(headnode, project, base_img):
"""Create headnode.
If a node with the same name already exists, a DuplicateError will be
raised.
If the project already has a headnode, a DuplicateError will be raised.
If the project does not exist, a NotFoundError will be raised.
"""
valid_imgs = cfg.get('headnode', 'base_imgs')
valid_imgs = [img.strip() for img in valid_imgs.split(',')]
if base_img not in valid_imgs:
raise BadArgumentError('Provided image is not a valid image.')
db = model.Session()
_assert_absent(db, model.Headnode, headnode)
project = _must_find(db, model.Project, project)
headnode = model.Headnode(project, headnode, base_img)
db.add(headnode)
db.commit()
@rest_call('DELETE', '/headnode/<headnode>')
def headnode_delete(headnode):
"""Delete headnode.
If the node does not exist, a NotFoundError will be raised.
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, headnode)
if not headnode.dirty:
headnode.delete()
for hnic in headnode.hnics:
db.delete(hnic)
db.delete(headnode)
db.commit()
@rest_call('POST', '/headnode/<headnode>/start')
def headnode_start(headnode):
"""Start the headnode.
This actually boots up the headnode virtual machine. The VM is created
within libvirt if needed. Once the VM has been started once, it is
"frozen," and all other headnode-related api calls will fail (by raising
an IllegalStateException), with the exception of headnode_stop.
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, headnode)
if headnode.dirty:
headnode.create()
headnode.start()
db.commit()
@rest_call('POST', '/headnode/<headnode>/stop')
def headnode_stop(headnode):
"""Stop the headnode.
This powers off the headnode. This is a hard poweroff; the VM is not given
the opportunity to shut down cleanly. This does *not* unfreeze the VM;
headnode_start will be the only valid API call after the VM is powered off.
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, headnode)
headnode.stop()
@rest_call('PUT', '/headnode/<headnode>/hnic/<hnic>')
def headnode_create_hnic(headnode, hnic):
"""Create hnic attached to given headnode.
If the node does not exist, a NotFoundError will be raised.
If there is already an hnic with that name, a DuplicateError will
be raised.
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, headnode)
_assert_absent_n(db, headnode, model.Hnic, hnic)
if not headnode.dirty:
raise IllegalStateError
hnic = model.Hnic(headnode, hnic)
db.add(hnic)
db.commit()
@rest_call('DELETE', '/headnode/<headnode>/hnic/<hnic>')
def headnode_delete_hnic(headnode, hnic):
"""Delete hnic on a given headnode.
If the hnic does not exist, a NotFoundError will be raised.
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, headnode)
hnic = _must_find_n(db, headnode, model.Hnic, hnic)
if not headnode.dirty:
raise IllegalStateError
if not hnic:
raise NotFoundError("Hnic: " + hnic.label)
db.delete(hnic)
db.commit()
@rest_call('POST', '/headnode/<headnode>/hnic/<hnic>/connect_network')
def headnode_connect_network(headnode, hnic, network):
"""Connect a headnode's hnic to a network.
Raises IllegalStateError if the headnode has already been started.
Raises ProjectMismatchError if the project does not have access rights to
the given network.
Raises BadArgumentError if the network is a non-allocated network. This
is currently unsupported due to an implementation limitation, but will be
supported in a future release. See issue #333.
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, headnode)
hnic = _must_find_n(db, headnode, model.Hnic, hnic)
network = _must_find(db, model.Network, network)
if not network.allocated:
raise BadArgumentError("Headnodes may only be connected to networks "
"allocated by the project.")
if not headnode.dirty:
raise IllegalStateError
project = headnode.project
if (network.access is not None) and (network.access is not project):
raise ProjectMismatchError("Project does not have access to given network.")
hnic.network = network
db.commit()
@rest_call('POST', '/headnode/<headnode>/hnic/<hnic>/detach_network')
def headnode_detach_network(headnode, hnic):
"""Detach a heanode's nic from any network it's on.
Raises IllegalStateError if the headnode has already been started.
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, headnode)
hnic = _must_find_n(db, headnode, model.Hnic, hnic)
if not headnode.dirty:
raise IllegalStateError
hnic.network = None
db.commit()
# Network Code #
################
@rest_call('PUT', '/network/<network>')
def network_create(network, creator, access, net_id):
"""Create a network.
If the network with that name already exists, a DuplicateError will be
raised.
If the combination of creator, access, and net_id is illegal, a
BadArgumentError will be raised.
If network ID allocation was requested, and the network cannot be
allocated (due to resource exhaustion), an AllocationError will be raised.
Pass 'admin' as creator for an administrator-owned network. Pass '' as
access for a publicly accessible network. Pass '' as net_id if you wish
to use the HaaS's network-id allocation pool.
Details of the various combinations of network attributes are in
docs/networks.md
"""
db = model.Session()
_assert_absent(db, model.Network, network)
# Check legality of arguments, and find correct 'access' and 'creator'
if creator != "admin":
# Project-owned network
if access != creator:
raise BadArgumentError("Project-created networks must be accessed only by that project.")
if net_id != "":
raise BadArgumentError("Project-created networks must use network ID allocation")
creator = _must_find(db, model.Project, creator)
access = _must_find(db, model.Project, access)
else:
# Administrator-owned network
creator = None
if access == "":
access = None
else:
access = _must_find(db, model.Project, access)
# Allocate net_id, if requested
if net_id == "":
driver_name = cfg.get('general', 'driver')
driver = importlib.import_module('haas.drivers.' + driver_name)
net_id = driver.get_new_network_id(db)
if net_id is None:
raise AllocationError('No more networks')
allocated = True
else:
allocated = False
network = model.Network(creator, access, allocated, net_id, network)
db.add(network)
db.commit()
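# Illustrative sketch (project and network names are hypothetical), following
# the combinations documented above:
#
#     network_create('net1', creator='proj1', access='proj1', net_id='')
#     network_create('pub-net', creator='admin', access='', net_id='104')
#
# The first call allocates a network ID from the driver's pool for a
# project-owned network; the second registers an administrator-owned, publicly
# accessible network with an explicitly chosen ID.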
@rest_call('DELETE', '/network/<network>')
def network_delete(network):
"""Delete network.
If the network does not exist, a NotFoundError will be raised.
"""
db = model.Session()
network = _must_find(db, model.Network, network)
if network.nics:
raise BlockedError("Network still connected to nodes")
if network.hnics:
raise BlockedError("Network still connected to headnodes")
if network.scheduled_nics:
raise BlockedError("Network scheduled to become connected to nodes.")
if network.allocated:
driver_name = cfg.get('general', 'driver')
driver = importlib.import_module('haas.drivers.' + driver_name)
driver.free_network_id(db, network.network_id)
db.delete(network)
db.commit()
# Port code #
#############
@rest_call('PUT', '/port/<path:port>')
def port_register(port):
"""Register a port on a switch.
If the port already exists, a DuplicateError will be raised.
"""
db = model.Session()
_assert_absent(db, model.Port, port)
port = model.Port(port)
db.add(port)
db.commit()
@rest_call('DELETE', '/port/<path:port>')
def port_delete(port):
"""Delete a port on a switch.
If the port does not exist, a NotFoundError will be raised.
"""
db = model.Session()
port = _must_find(db, model.Port, port)
db.delete(port)
db.commit()
@rest_call('POST', '/port/<path:port>/connect_nic')
def port_connect_nic(port, node, nic):
"""Connect a port on a switch to a nic on a node.
If any of the three arguments does not exist, a NotFoundError will be
raised.
If the port or the nic is already connected to something, a DuplicateError will be
raised.
"""
db = model.Session()
port = _must_find(db, model.Port, port)
nic = _must_find_n(db, _must_find(db, model.Node, node), model.Nic, nic)
if nic.port is not None:
raise DuplicateError(nic.label)
if port.nic is not None:
raise DuplicateError(port.label)
nic.port = port
db.commit()
@rest_call('POST', '/port/<path:port>/detach_nic')
def port_detach_nic(port):
"""Detach a port from the nic it's attached to
If the port does not exist, a NotFoundError will be raised.
If the port is not connected to anything, a NotFoundError will be raised.
"""
db = model.Session()
port = _must_find(db, model.Port, port)
if port.nic is None:
raise NotFoundError(port.label + " not attached")
port.nic = None
db.commit()
@rest_call('GET', '/free_nodes')
def list_free_nodes():
"""List all nodes not in any project.
Returns a JSON array of strings representing a list of nodes.
Example: '["node1", "node2", "node3"]'
"""
db = model.Session()
nodes = db.query(model.Node).filter_by(project_id=None).all()
nodes = [n.label for n in nodes]
return json.dumps(nodes)
@rest_call('GET', '/project/<project>/nodes')
def list_project_nodes(project):
"""List all nodes belonging the given project.
Returns a JSON array of strings representing a list of nodes.
Example: '["node1", "node2", "node3"]'
"""
db = model.Session()
project = _must_find(db, model.Project, project)
nodes = project.nodes
nodes = [n.label for n in nodes]
return json.dumps(nodes)
@rest_call('GET', '/project/<project>/networks')
def list_project_networks(project):
"""List all private networks the project can access.
Returns a JSON array of strings representing a list of networks.
Example: '["net1", "net2", "net3"]'
"""
db = model.Session()
project = _must_find(db, model.Project, project)
networks = project.networks_access
networks = [n.label for n in networks]
return json.dumps(networks)
@rest_call('GET', '/node/<nodename>')
def show_node(nodename):
"""Show details of a node.
Returns a JSON object representing a node.
The object will have at least the following fields:
* "name", the name/label of the node (string).
* "free", indicates whether the node is free or has been allocated
to a project.
* "nics", a list of nics, each represted by a JSON object having
at least the following fields:
- "label", the nic's label.
- "macaddr", the nic's mac address.
Example: '{"name": "node1",
"free": True,
"nics": [{"label": "nic1", "macaddr": "01:23:45:67:89"},
{"label": "nic2", "macaddr": "12:34:56:78:90"}]
}'
"""
db = model.Session()
node = _must_find(db, model.Node, nodename)
return json.dumps({
'name': node.label,
'free': node.project_id is None,
'nics': [{'label': n.label,
'macaddr': n.mac_addr,
} for n in node.nics],
})
@rest_call('GET', '/headnode/<nodename>')
def show_headnode(nodename):
"""Show details of a headnode.
Returns a JSON object representing a headnode.
The obect will have at least the following fields:
* "name", the name/label of the headnode (string).
* "project", the project to which the headnode belongs.
* "hnics", a JSON array of hnic labels that are attached to this
headnode.
* "vncport", the vnc port that the headnode VM is listening on; this
value can be None if the VM is powered off or has not been
created yet.
Example: '{"name": "headnode1",
"project": "project1",
"hnics": ["hnic1", "hnic2"],
"vncport": 5900
}'
"""
db = model.Session()
headnode = _must_find(db, model.Headnode, nodename)
return json.dumps({
'name': headnode.label,
'project': headnode.project.label,
'hnics': [n.label for n in headnode.hnics],
'vncport': headnode.get_vncport(),
})
@rest_call('GET', '/headnode_images/')
def list_headnode_images():
"""Show headnode images listed in config file.
Returns a JSON array of strings representing a list of headnode images.
Example: '["headnode1.img", "headnode2.img", "headnode3.img"]'
"""
valid_imgs = cfg.get('headnode', 'base_imgs')
valid_imgs = [img.strip() for img in valid_imgs.split(',')]
return json.dumps(valid_imgs)
# Console code #
################
@rest_call('GET', '/node/<nodename>/console')
def show_console(nodename):
"""Show the contents of the console log."""
db = model.Session()
node = _must_find(db, model.Node, nodename)
log = node.get_console()
if log is None:
raise NotFoundError('The console log for %s '
'does not exist.' % nodename)
return log
@rest_call('PUT', '/node/<nodename>/console')
def start_console(nodename):
"""Start logging output from the console."""
db = model.Session()
node = _must_find(db, model.Node, nodename)
node.start_console()
@rest_call('DELETE', '/node/<nodename>/console')
def stop_console(nodename):
"""Stop logging output from the console and delete the log."""
db = model.Session()
node = _must_find(db, model.Node, nodename)
node.stop_console()
node.delete_console()
# Helper functions #
####################
def _assert_absent(session, cls, name):
"""Raises a DuplicateError if the given object is already in the database.
This is useful for most of the *_create functions.
Arguments:
session - a sqlaclhemy session to use.
cls - the class of the object to query.
name - the name of the object in question.
"""
obj = session.query(cls).filter_by(label=name).first()
if obj:
raise DuplicateError("%s %s already exists." % (cls.__name__, name))
def _must_find(session, cls, name):
"""Raises a NotFoundError if the given object doesn't exist in the datbase.
Otherwise returns the object
This is useful for most of the *_delete functions.
Arguments:
session - a sqlaclhemy session to use.
cls - the class of the object to query.
name - the name of the object in question.
"""
obj = session.query(cls).filter_by(label=name).first()
if not obj:
raise NotFoundError("%s %s does not exist." % (cls.__name__, name))
return obj
def _namespaced_query(session, obj_outer, cls_inner, name_inner):
"""Helper function to search for subobjects of an object."""
return session.query(cls_inner) \
.filter_by(owner = obj_outer) \
.filter_by(label = name_inner).first()
def _assert_absent_n(session, obj_outer, cls_inner, name_inner):
"""Raises DuplicateError if a "namespaced" object, such as a node's nic, exists.
Otherwise returns successfully.
Arguments:
session - a SQLAlchemy session to use.
obj_outer - the "owner" object
cls_inner - the "owned" class
name_inner - the name of the "owned" object
"""
obj_inner = _namespaced_query(session, obj_outer, cls_inner, name_inner)
if obj_inner is not None:
raise DuplicateError("%s %s on %s %s already exists" %
(cls_inner.__name__, name_inner,
obj_outer.__class__.__name__, obj_outer.label))
def _must_find_n(session, obj_outer, cls_inner, name_inner):
"""Searches the database for a "namespaced" object, such as a nic on a node.
Raises NotFoundError if there is none. Otherwise returns the object.
Arguments:
session - a SQLAlchemy session to use.
obj_outer - the "owner" object
cls_inner - the "owned" class
name_inner - the name of the "owned" object
"""
obj_inner = _namespaced_query(session, obj_outer, cls_inner, name_inner)
if obj_inner is None:
raise NotFoundError("%s %s on %s %s does not exist." %
(cls_inner.__name__, name_inner,
obj_outer.__class__.__name__, obj_outer.label))
return obj_inner
|
the-stack_0_13898 | # emacs: -*- mode: python; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil; coding: utf-8 -*-
# ex: set sts=4 ts=4 sw=4 et:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Miscellaneous utilities to assist with testing"""
import base64
import glob
import gzip
import inspect
import lzma
import shutil
import stat
from json import dumps
import os
import re
import tempfile
import platform
import multiprocessing
import multiprocessing.queues
import logging
import random
import socket
import textwrap
import warnings
from fnmatch import fnmatch
import time
from difflib import unified_diff
from contextlib import contextmanager
from unittest.mock import patch
import ssl
from http.server import (
HTTPServer,
SimpleHTTPRequestHandler,
)
from functools import wraps
from os.path import (
curdir,
exists,
join as opj,
relpath,
split as pathsplit,
)
from nose.plugins.attrib import attr
from nose.tools import (
assert_equal,
assert_false,
assert_greater,
assert_greater_equal,
assert_in as in_,
assert_in,
assert_is,
assert_is_none,
assert_is_not,
assert_is_not_none,
assert_not_equal,
assert_not_in,
assert_not_is_instance,
assert_raises,
assert_true,
eq_,
make_decorator,
ok_,
raises,
)
from nose.tools import assert_set_equal
from nose.tools import assert_is_instance
from nose import SkipTest
from datalad import cfg as dl_cfg
import datalad.utils as ut
# TODO this must go
from ..utils import *
from datalad.utils import (
Path,
ensure_unicode,
)
from .. import utils
from ..support.exceptions import (
CommandError,
CommandNotAvailableError,
)
from ..support.external_versions import external_versions
from ..support.vcr_ import *
from ..support.keyring_ import MemoryKeyring
from ..support.network import RI
from ..dochelpers import borrowkwargs
from ..cmdline.helpers import get_repo_instance
from ..consts import (
ARCHIVES_TEMP_DIR,
)
from . import _TEMP_PATHS_GENERATED
from datalad.cmd import (
GitWitlessRunner,
KillOutput,
StdOutErrCapture,
WitlessRunner,
)
from datalad.core.local.repo import repo_from_path
# temp paths used by clones
_TEMP_PATHS_CLONES = set()
# Additional indicators
on_travis = bool(os.environ.get('TRAVIS', False))
if external_versions["cmd:git"] >= "2.28":
# The specific value here doesn't matter, but it should not be the default
# from any Git version to test that we work with custom values.
DEFAULT_BRANCH = "dl-test-branch" # Set by setup_package().
else:
DEFAULT_BRANCH = "master"
if external_versions["cmd:git"] >= "2.30.0":
# The specific value here doesn't matter, but it should not be the default
# from any Git version to test that we work with custom values.
DEFAULT_REMOTE = "dl-test-remote" # Set by setup_package().
else:
DEFAULT_REMOTE = "origin"
# additional shortcuts
neq_ = assert_not_equal
nok_ = assert_false
lgr = logging.getLogger("datalad.tests.utils")
def skip_if_no_module(module):
try:
imp = __import__(module)
except Exception as exc:
raise SkipTest("Module %s fails to load" % module) from exc
def skip_if_scrapy_without_selector():
"""A little helper to skip some tests which require recent scrapy"""
try:
import scrapy
from scrapy.selector import Selector
except ImportError:
from nose import SkipTest
raise SkipTest(
"scrapy misses Selector (too old? version: %s)"
% getattr(scrapy, '__version__'))
def skip_if_url_is_not_available(url, regex=None):
# verify that dataset is available
from datalad.downloaders.providers import Providers
from datalad.downloaders.base import DownloadError
providers = Providers.from_config_files()
try:
content = providers.fetch(url)
if regex and re.search(regex, content):
raise SkipTest("%s matched %r -- skipping the test" % (url, regex))
except DownloadError:
raise SkipTest("%s failed to download" % url)
def check_not_generatorfunction(func):
"""Internal helper to verify that we are not decorating generator tests"""
if inspect.isgeneratorfunction(func):
raise RuntimeError("{}: must not be decorated, is a generator test"
.format(func.__name__))
def skip_if_no_network(func=None):
"""Skip test completely in NONETWORK settings
    If not used as a decorator but called as a plain function, it can be used
    at the module level.
"""
check_not_generatorfunction(func)
def check_and_raise():
if dl_cfg.get('datalad.tests.nonetwork'):
raise SkipTest("Skipping since no network settings")
if func:
@wraps(func)
@attr('network')
@attr('skip_if_no_network')
def _wrap_skip_if_no_network(*args, **kwargs):
check_and_raise()
return func(*args, **kwargs)
return _wrap_skip_if_no_network
else:
check_and_raise()
def skip_if_on_windows(func=None):
"""Skip test completely under Windows
"""
check_not_generatorfunction(func)
def check_and_raise():
if on_windows:
raise SkipTest("Skipping on Windows")
if func:
@wraps(func)
@attr('skip_if_on_windows')
def _wrap_skip_if_on_windows(*args, **kwargs):
check_and_raise()
return func(*args, **kwargs)
return _wrap_skip_if_on_windows
else:
check_and_raise()
def skip_if_root(func=None):
"""Skip test if uid == 0.
Note that on Windows (or anywhere else `os.geteuid` is not available) the
test is _not_ skipped.
"""
check_not_generatorfunction(func)
def check_and_raise():
if hasattr(os, "geteuid") and os.geteuid() == 0:
raise SkipTest("Skipping: test assumptions fail under root")
if func:
@wraps(func)
@attr('skip_if_root')
def _wrap_skip_if_root(*args, **kwargs):
check_and_raise()
return func(*args, **kwargs)
return _wrap_skip_if_root
else:
check_and_raise()
@optional_args
def skip_if(func, cond=True, msg=None, method='raise'):
"""Skip test for specific condition
Parameters
----------
cond: bool
condition on which to skip
msg: str
message to print if skipping
method: str
either 'raise' or 'pass'. Whether to skip by raising `SkipTest` or by
just proceeding and simply not calling the decorated function.
        This is particularly meant to be used when decorating single assertions
        in a test with method='pass', in order to skip just that assertion
        rather than the entire test.
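    Examples
    --------
    Illustrative sketch (the condition and test body are hypothetical)::
        @skip_if(cond=on_windows, msg="not supported on Windows")
        def test_something():
            ...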
"""
check_not_generatorfunction(func)
@wraps(func)
def _wrap_skip_if(*args, **kwargs):
if cond:
if method == 'raise':
raise SkipTest(msg if msg else "condition was True")
elif method == 'pass':
print(msg if msg else "condition was True")
return
return func(*args, **kwargs)
return _wrap_skip_if
def skip_ssh(func):
"""Skips SSH tests if on windows or if environment variable
DATALAD_TESTS_SSH was not set
"""
check_not_generatorfunction(func)
@wraps(func)
@attr('skip_ssh')
def _wrap_skip_ssh(*args, **kwargs):
test_ssh = dl_cfg.get("datalad.tests.ssh", '')
if not test_ssh or test_ssh in ('0', 'false', 'no'):
raise SkipTest("Run this test by setting DATALAD_TESTS_SSH")
return func(*args, **kwargs)
return _wrap_skip_ssh
def skip_nomultiplex_ssh(func):
"""Skips SSH tests if default connection/manager does not support multiplexing
    e.g. currently on Windows, or if disabled via the datalad.ssh.multiplex-connections config variable
"""
check_not_generatorfunction(func)
from ..support.sshconnector import MultiplexSSHManager, SSHManager
@wraps(func)
@attr('skip_nomultiplex_ssh')
@skip_ssh
def _wrap_skip_nomultiplex_ssh(*args, **kwargs):
if SSHManager is not MultiplexSSHManager:
raise SkipTest("SSH without multiplexing is used")
return func(*args, **kwargs)
return _wrap_skip_nomultiplex_ssh
#
# Addition "checkers"
#
import os
from datalad.support.gitrepo import GitRepo
from datalad.support.annexrepo import AnnexRepo, FileNotInAnnexError
from datalad.distribution.dataset import Dataset
from ..utils import chpwd, getpwd
def ok_clean_git(path, annex=None, index_modified=[], untracked=[]):
"""Obsolete test helper. Use assert_repo_status() instead.
Still maps a few common cases to the new helper, to ease transition
in extensions.
"""
kwargs = {}
if index_modified:
kwargs['modified'] = index_modified
if untracked:
kwargs['untracked'] = untracked
assert_repo_status(
path,
annex=annex,
**kwargs,
)
def ok_file_under_git(path, filename=None, annexed=False):
"""Test if file is present and under git/annex control
If relative path provided, then test from current directory
"""
annex, file_repo_path, filename, path, repo = _prep_file_under_git(path, filename)
assert_in(file_repo_path, repo.get_indexed_files()) # file is known to Git
if annex:
in_annex = 'key' in repo.get_file_annexinfo(file_repo_path)
else:
in_annex = False
assert(annexed == in_annex)
def put_file_under_git(path, filename=None, content=None, annexed=False):
"""Place file under git/annex and return used Repo
"""
annex, file_repo_path, filename, path, repo = _prep_file_under_git(path, filename)
if content is None:
content = ""
with open(opj(repo.path, file_repo_path), 'w') as f_:
f_.write(content)
if annexed:
if not isinstance(repo, AnnexRepo):
repo = AnnexRepo(repo.path)
repo.add(file_repo_path)
else:
repo.add(file_repo_path, git=True)
repo.commit(_datalad_msg=True)
ok_file_under_git(repo.path, file_repo_path, annexed)
return repo
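# Illustrative sketch (path, file name and content are hypothetical):
#
#     repo = put_file_under_git('/tmp/ds', 'data.dat', content='123\n',
#                               annexed=True)
#     ok_file_under_git(repo.path, 'data.dat', annexed=True)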
def _prep_file_under_git(path, filename):
"""Get instance of the repository for the given filename
    Helper to be used by a few functions
"""
path = Path(path)
if filename is None:
# path provides the path and the name
filename = Path(path.name)
path = path.parent
else:
filename = Path(filename)
ds = Dataset(utils.get_dataset_root(path))
return isinstance(ds.repo, AnnexRepo), \
str(path.absolute().relative_to(ds.path) / filename) \
if not filename.is_absolute() \
else str(filename.relative_to(ds.pathobj)), \
filename, \
str(path), \
ds.repo
#
# Helpers to test symlinks
#
def ok_symlink(path):
"""Checks whether path is either a working or broken symlink"""
link_path = os.path.islink(path)
if not link_path:
raise AssertionError("Path {} seems not to be a symlink".format(path))
def ok_good_symlink(path):
ok_symlink(path)
rpath = Path(path).resolve()
ok_(rpath.exists(),
msg="Path {} seems to be missing. Symlink {} is broken".format(
rpath, path))
def ok_broken_symlink(path):
ok_symlink(path)
rpath = Path(path).resolve()
assert_false(rpath.exists(),
msg="Path {} seems to be present. Symlink {} is not broken".format(
rpath, path))
def ok_startswith(s, prefix):
ok_(s.startswith(prefix),
msg="String %r doesn't start with %r" % (s, prefix))
def ok_endswith(s, suffix):
ok_(s.endswith(suffix),
msg="String %r doesn't end with %r" % (s, suffix))
def nok_startswith(s, prefix):
assert_false(s.startswith(prefix),
msg="String %r starts with %r" % (s, prefix))
def ok_git_config_not_empty(ar):
"""Helper to verify that nothing rewritten the config file"""
# TODO: we don't support bare -- do we?
assert_true(os.stat(opj(ar.path, '.git', 'config')).st_size)
def ok_annex_get(ar, files, network=True):
"""Helper to run .get decorated checking for correct operation
get passes through stderr from the ar to the user, which pollutes
screen while running tests
Note: Currently not true anymore, since usage of --json disables
progressbars
"""
ok_git_config_not_empty(ar) # we should be working in already inited repo etc
with swallow_outputs() as cmo:
ar.get(files)
# verify that load was fetched
ok_git_config_not_empty(ar) # whatever we do shouldn't destroy the config file
has_content = ar.file_has_content(files)
if isinstance(has_content, bool):
ok_(has_content)
else:
ok_(all(has_content))
def ok_generator(gen):
assert_true(inspect.isgenerator(gen), msg="%s is not a generator" % gen)
assert_is_generator = ok_generator # just an alias
def ok_archives_caches(repopath, n=1, persistent=None):
"""Given a path to repository verify number of archives
Parameters
----------
repopath : str
Path to the repository
n : int, optional
Number of archives directories to expect
persistent: bool or None, optional
If None -- both persistent and not count.
"""
# looking into subdirectories
glob_ptn = opj(repopath,
ARCHIVES_TEMP_DIR + {None: '*', True: '', False: '-*'}[persistent],
'*')
dirs = glob.glob(glob_ptn)
n2 = n * 2 # per each directory we should have a .stamp file
assert_equal(len(dirs), n2,
msg="Found following dirs when needed %d of them: %s" % (n2, dirs))
def ok_exists(path):
assert Path(path).exists(), 'path %s does not exist (or dangling symlink)' % path
def ok_file_has_content(path, content, strip=False, re_=False,
decompress=False, **kwargs):
"""Verify that file exists and has expected content"""
path = Path(path)
ok_exists(path)
if decompress:
if path.suffix == '.gz':
open_func = gzip.open
elif path.suffix in ('.xz', '.lzma'):
open_func = lzma.open
else:
raise NotImplementedError("Don't know how to decompress %s" % path)
else:
open_func = open
with open_func(str(path), 'rb') as f:
file_content = f.read()
if isinstance(content, str):
file_content = ensure_unicode(file_content)
if os.linesep != '\n':
# for consistent comparisons etc. Apparently when reading in `b` mode
# on Windows we would also get \r
# https://github.com/datalad/datalad/pull/3049#issuecomment-444128715
file_content = file_content.replace(os.linesep, '\n')
if strip:
file_content = file_content.strip()
if re_:
assert_re_in(content, file_content, **kwargs)
else:
assert_equal(content, file_content, **kwargs)
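# Illustrative sketch (path and content are hypothetical):
#
#     ok_file_has_content('output.log.gz', 'done', decompress=True, strip=True)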
#
# Decorators
#
@optional_args
def with_tree(t, tree=None, archives_leading_dir=True, delete=True, **tkwargs):
@wraps(t)
def _wrap_with_tree(*arg, **kw):
if 'dir' not in tkwargs.keys():
# if not specified otherwise, respect datalad.tests.temp.dir config
# as this is a test helper
tkwargs['dir'] = dl_cfg.get("datalad.tests.temp.dir")
tkwargs_ = get_tempfile_kwargs(tkwargs, prefix="tree", wrapped=t)
d = tempfile.mkdtemp(**tkwargs_)
create_tree(d, tree, archives_leading_dir=archives_leading_dir)
try:
return t(*(arg + (d,)), **kw)
finally:
if delete:
rmtemp(d)
return _wrap_with_tree
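# Illustrative sketch (tree content and test body are hypothetical): the
# decorated test receives the path of a temporary directory populated from
# `tree` and removed afterwards (unless delete=False):
#
#     @with_tree(tree={'f1.txt': 'content'})
#     def test_something(path):
#         ok_file_has_content(opj(path, 'f1.txt'), 'content')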
lgr = logging.getLogger('datalad.tests')
class SilentHTTPHandler(SimpleHTTPRequestHandler):
"""A little adapter to silence the handler
"""
def __init__(self, *args, **kwargs):
self._silent = lgr.getEffectiveLevel() > logging.DEBUG
SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)
def log_message(self, format, *args):
if self._silent:
return
lgr.debug("HTTP: " + format, *args)
def _multiproc_serve_path_via_http(
hostname, path_to_serve_from, queue, use_ssl=False, auth=None): # pragma: no cover
handler = SilentHTTPHandler
if auth:
# to-be-expected key for basic auth
auth_test = (b'Basic ' + base64.b64encode(
bytes('%s:%s' % auth, 'utf-8'))).decode('utf-8')
# ad-hoc basic-auth handler
class BasicAuthHandler(SilentHTTPHandler):
def do_HEAD(self, authenticated):
if authenticated:
self.send_response(200)
else:
self.send_response(401)
self.send_header(
'WWW-Authenticate', 'Basic realm=\"Protected\"')
self.send_header('content-type', 'text/html')
self.end_headers()
def do_GET(self):
if self.headers.get('Authorization') == auth_test:
super().do_GET()
else:
self.do_HEAD(False)
self.wfile.write(bytes('Auth failed', 'utf-8'))
handler = BasicAuthHandler
chpwd(path_to_serve_from)
httpd = HTTPServer((hostname, 0), handler)
if use_ssl:
ca_dir = Path(__file__).parent / 'ca'
ssl_key = ca_dir / 'certificate-key.pem'
ssl_cert = ca_dir / 'certificate-pub.pem'
if any(not p.exists for p in (ssl_key, ssl_cert)):
raise RuntimeError(
'SSL requested, but no key/cert file combination can be '
f'located under {ca_dir}')
# turn on SSL
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
context.load_cert_chain(str(ssl_cert), str(ssl_key))
httpd.socket = context.wrap_socket (
httpd.socket,
server_side=True)
queue.put(httpd.server_port)
httpd.serve_forever()
class HTTPPath(object):
"""Serve the content of a path via an HTTP URL.
This class can be used as a context manager, in which case it returns the
URL.
Alternatively, the `start` and `stop` methods can be called directly.
Parameters
----------
path : str
Directory with content to serve.
use_ssl : bool
auth : tuple
Username, password
"""
def __init__(self, path, use_ssl=False, auth=None):
self.path = path
self.url = None
self._env_patch = None
self._mproc = None
self.use_ssl = use_ssl
self.auth = auth
def __enter__(self):
self.start()
return self.url
def __exit__(self, *args):
self.stop()
def start(self):
"""Start serving `path` via HTTP.
"""
# There is a problem with Haskell on wheezy trying to
# fetch via IPv6 whenever there is a ::1 localhost entry in
# /etc/hosts. Apparently fixing that docker image reliably
# is not that straightforward, although see
# http://jasonincode.com/customizing-hosts-file-in-docker/
# so we just force to use 127.0.0.1 while on wheezy
#hostname = '127.0.0.1' if on_debian_wheezy else 'localhost'
if self.use_ssl:
# we cannot use IPs with SSL certificates
hostname = 'localhost'
else:
hostname = '127.0.0.1'
queue = multiprocessing.Queue()
self._mproc = multiprocessing.Process(
target=_multiproc_serve_path_via_http,
args=(hostname, self.path, queue),
kwargs=dict(use_ssl=self.use_ssl, auth=self.auth))
self._mproc.start()
try:
port = queue.get(timeout=300)
except multiprocessing.queues.Empty as e:
if self.use_ssl:
raise SkipTest('No working SSL support') from e
else:
raise
self.url = 'http{}://{}:{}/'.format(
's' if self.use_ssl else '',
hostname,
port)
lgr.debug("HTTP: serving %s under %s", self.path, self.url)
# Such tests don't require real network so if http_proxy settings were
# provided, we remove them from the env for the duration of this run
env = os.environ.copy()
if self.use_ssl:
env.pop('https_proxy', None)
env['REQUESTS_CA_BUNDLE'] = str(
Path(__file__).parent / 'ca' / 'ca_bundle.pem')
else:
env.pop('http_proxy', None)
self._env_patch = patch.dict('os.environ', env, clear=True)
self._env_patch.start()
if self.use_ssl:
# verify that the SSL/cert setup is functional, if not skip the
# test
# python-requests does its own thing re root CA trust
# if this fails, check datalad/tests/ca/prov.sh for ca_bundle
try:
import requests
from requests.auth import HTTPBasicAuth
r = requests.get(
self.url,
verify=True,
auth=HTTPBasicAuth(*self.auth) if self.auth else None)
r.raise_for_status()
# be robust and skip if anything goes wrong, rather than just a
# particular SSL issue
#except requests.exceptions.SSLError as e:
except Exception as e:
self.stop()
raise SkipTest('No working HTTPS setup') from e
# now verify that the stdlib tooling also works
# if this fails, check datalad/tests/ca/prov.sh
# for info on deploying a datalad-root.crt
from urllib.request import (
urlopen,
Request,
)
try:
req = Request(self.url)
if self.auth:
req.add_header(
"Authorization",
b"Basic " + base64.standard_b64encode(
'{0}:{1}'.format(*self.auth).encode('utf-8')))
urlopen(req)
# be robust and skip if anything goes wrong, rather than just a
# particular SSL issue
#except URLError as e:
except Exception as e:
self.stop()
raise SkipTest('No working HTTPS setup') from e
def stop(self):
"""Stop serving `path`.
"""
lgr.debug("HTTP: stopping server under %s", self.path)
self._env_patch.stop()
self._mproc.terminate()
@optional_args
def serve_path_via_http(tfunc, *targs, use_ssl=False, auth=None):
"""Decorator which serves content of a directory via http url
Parameters
----------
path : str
Directory with content to serve.
use_ssl : bool
Flag whether to set up SSL encryption and return a HTTPS
URL. This require a valid certificate setup (which is tested
for proper function) or it will cause a SkipTest to be raised.
auth : tuple or None
If a (username, password) tuple is given, the server access will
be protected via HTTP basic auth.
"""
@wraps(tfunc)
@attr('serve_path_via_http')
def _wrap_serve_path_via_http(*args, **kwargs):
if targs:
# if a path is passed into serve_path_via_http, then it's in targs
assert len(targs) == 1
path = targs[0]
elif len(args) > 1:
args, path = args[:-1], args[-1]
else:
args, path = (), args[0]
with HTTPPath(path, use_ssl=use_ssl, auth=auth) as url:
return tfunc(*(args + (path, url)), **kwargs)
return _wrap_serve_path_via_http
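# A hypothetical usage sketch (not part of the original module): stacking the
# tempfile and HTTP-serving decorators so the test receives both the served
# directory and its URL::
#
#     @with_tempfile(mkdir=True)
#     @serve_path_via_http
#     def test_fetch_over_http(path, url):
#         pass  # e.g. download url + 'file.dat' and compare to opj(path, 'file.dat')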
@optional_args
def with_memory_keyring(t):
"""Decorator to use non-persistant MemoryKeyring instance
"""
@wraps(t)
@attr('with_memory_keyring')
def _wrap_with_memory_keyring(*args, **kwargs):
keyring = MemoryKeyring()
with patch("datalad.downloaders.credentials.keyring_", keyring):
return t(*(args + (keyring,)), **kwargs)
return _wrap_with_memory_keyring
@optional_args
def without_http_proxy(tfunc):
"""Decorator to remove http*_proxy env variables for the duration of the test
"""
@wraps(tfunc)
@attr('without_http_proxy')
def _wrap_without_http_proxy(*args, **kwargs):
if on_windows:
raise SkipTest('Unclear why this is not working on windows')
# Such tests don't require real network so if http_proxy settings were
# provided, we remove them from the env for the duration of this run
env = os.environ.copy()
env.pop('http_proxy', None)
env.pop('https_proxy', None)
with patch.dict('os.environ', env, clear=True):
return tfunc(*args, **kwargs)
return _wrap_without_http_proxy
@borrowkwargs(methodname=make_tempfile)
@optional_args
def with_tempfile(t, **tkwargs):
"""Decorator function to provide a temporary file name and remove it at the end
Parameters
----------
    To change the directory used without providing the keyword argument 'dir', set
DATALAD_TESTS_TEMP_DIR.
Examples
--------
::
@with_tempfile
def test_write(tfile):
open(tfile, 'w').write('silly test')
"""
@wraps(t)
def _wrap_with_tempfile(*arg, **kw):
if 'dir' not in tkwargs.keys():
# if not specified otherwise, respect datalad.tests.temp.dir config
# as this is a test helper
tkwargs['dir'] = dl_cfg.get("datalad.tests.temp.dir")
with make_tempfile(wrapped=t, **tkwargs) as filename:
return t(*(arg + (filename,)), **kw)
return _wrap_with_tempfile
# ### ###
# START known failure decorators
# ### ###
def probe_known_failure(func):
"""Test decorator allowing the test to pass when it fails and vice versa
    Setting config datalad.tests.knownfailures.probe to True tests whether or
not the test is still failing. If it's not, an AssertionError is raised in
order to indicate that the reason for failure seems to be gone.
"""
@wraps(func)
@attr('probe_known_failure')
def _wrap_probe_known_failure(*args, **kwargs):
if dl_cfg.obtain("datalad.tests.knownfailures.probe"):
assert_raises(Exception, func, *args, **kwargs) # marked as known failure
# Note: Since assert_raises lacks a `msg` argument, a comment
# in the same line is helpful to determine what's going on whenever
# this assertion fails and we see a trace back. Otherwise that line
# wouldn't be very telling.
else:
return func(*args, **kwargs)
return _wrap_probe_known_failure
@optional_args
def skip_known_failure(func, method='raise'):
"""Test decorator allowing to skip a test that is known to fail
Setting config datalad.tests.knownfailures.skip to a bool enables/disables
skipping.
"""
@skip_if(cond=dl_cfg.obtain("datalad.tests.knownfailures.skip"),
msg="Skip test known to fail",
method=method)
@wraps(func)
@attr('skip_known_failure')
def _wrap_skip_known_failure(*args, **kwargs):
return func(*args, **kwargs)
return _wrap_skip_known_failure
def known_failure(func):
"""Test decorator marking a test as known to fail
This combines `probe_known_failure` and `skip_known_failure` giving the
skipping precedence over the probing.
"""
@skip_known_failure
@probe_known_failure
@wraps(func)
@attr('known_failure')
def _wrap_known_failure(*args, **kwargs):
return func(*args, **kwargs)
return _wrap_known_failure
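# A hypothetical usage sketch (not part of the original module): marking a test
# that is known to fail, so it is skipped or probed depending on the
# datalad.tests.knownfailures.* configuration::
#
#     @known_failure
#     def test_not_yet_working():
#         assert_equal(1, 2)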
def known_failure_direct_mode(func):
"""DEPRECATED. Stop using. Does nothing
    Test decorator marking a test as known to fail in a direct-mode test run.
    If datalad.repo.direct is set to True, it behaves like `known_failure`.
Otherwise the original (undecorated) function is returned.
"""
# TODO: consider adopting nibabel/deprecated.py nibabel/deprecator.py
# mechanism to consistently deprecate functionality and ensure they are
# displayed.
    # Since Python 2.7, DeprecationWarnings aren't displayed by default,
    # so it is rather pointless to issue a warning here; we will just log
msg = "Direct mode support is deprecated, so no point in using " \
"@known_failure_direct_mode for %r since glorious future " \
"DataLad 0.12" % func.__name__
lgr.warning(msg)
return func
def known_failure_windows(func):
"""Test decorator marking a test as known to fail on windows
On Windows behaves like `known_failure`.
Otherwise the original (undecorated) function is returned.
"""
if on_windows:
@known_failure
@wraps(func)
@attr('known_failure_windows')
@attr('windows')
def dm_func(*args, **kwargs):
return func(*args, **kwargs)
return dm_func
return func
def known_failure_githubci_win(func):
"""Test decorator for a known test failure on Github's Windows CI
"""
if 'GITHUB_WORKFLOW' in os.environ and on_windows:
@known_failure
@wraps(func)
@attr('known_failure_githubci_win')
@attr('githubci_win')
def dm_func(*args, **kwargs):
return func(*args, **kwargs)
return dm_func
return func
def known_failure_githubci_osx(func):
"""Test decorator for a known test failure on Github's macOS CI
"""
if 'GITHUB_WORKFLOW' in os.environ and on_osx:
@known_failure
@wraps(func)
@attr('known_failure_githubci_osx')
@attr('githubci_osx')
def dm_func(*args, **kwargs):
return func(*args, **kwargs)
return dm_func
return func
def known_failure_osx(func):
"""Test decorator for a known test failure on macOS
"""
if on_osx:
@known_failure
@wraps(func)
@attr('known_failure_osx')
@attr('osx')
def dm_func(*args, **kwargs):
return func(*args, **kwargs)
return dm_func
return func
# ### ###
# END known failure decorators
# ### ###
def _get_resolved_flavors(flavors):
#flavors_ = (['local', 'clone'] + (['local-url'] if not on_windows else [])) \
# if flavors == 'auto' else flavors
flavors_ = (['local', 'clone', 'local-url', 'network'] if not on_windows
else ['network', 'network-clone']) \
if flavors == 'auto' else flavors
if not isinstance(flavors_, list):
flavors_ = [flavors_]
if dl_cfg.get('datalad.tests.nonetwork'):
flavors_ = [x for x in flavors_ if not x.startswith('network')]
return flavors_
def clone_url(url):
runner = GitWitlessRunner()
tdir = tempfile.mkdtemp(**get_tempfile_kwargs(
{'dir': dl_cfg.get("datalad.tests.temp.dir")}, prefix='clone_url'))
runner.run(["git", "clone", url, tdir], protocol=KillOutput)
if GitRepo(tdir).is_with_annex():
AnnexRepo(tdir, init=True)
_TEMP_PATHS_CLONES.add(tdir)
return tdir
local_testrepo_flavors = ['local'] # 'local-url'
_TESTREPOS = None
def _get_testrepos_uris(regex, flavors):
global _TESTREPOS
    # we should instantiate those whenever test repos are actually asked for
# TODO: just absorb all this lazy construction within some class
if not _TESTREPOS:
from .utils_testrepos import BasicAnnexTestRepo, BasicGitTestRepo, \
SubmoduleDataset, NestedDataset, InnerSubmodule
_basic_annex_test_repo = BasicAnnexTestRepo()
_basic_git_test_repo = BasicGitTestRepo()
_submodule_annex_test_repo = SubmoduleDataset()
_nested_submodule_annex_test_repo = NestedDataset()
_inner_submodule_annex_test_repo = InnerSubmodule()
_TESTREPOS = {'basic_annex':
{'network': 'https://github.com/datalad/testrepo--basic--r1',
'local': _basic_annex_test_repo.path,
'local-url': _basic_annex_test_repo.url},
'basic_git':
{'local': _basic_git_test_repo.path,
'local-url': _basic_git_test_repo.url},
'submodule_annex':
{'local': _submodule_annex_test_repo.path,
'local-url': _submodule_annex_test_repo.url},
'nested_submodule_annex':
{'local': _nested_submodule_annex_test_repo.path,
'local-url': _nested_submodule_annex_test_repo.url},
# TODO: append 'annex' to the name:
# Currently doesn't work with some annex tests, despite
# working manually. So, figure out how the tests' setup
# messes things up with this one.
'inner_submodule':
{'local': _inner_submodule_annex_test_repo.path,
'local-url': _inner_submodule_annex_test_repo.url}
}
# assure that now we do have those test repos created -- delayed
# their creation until actually used
_basic_annex_test_repo.create()
_basic_git_test_repo.create()
_submodule_annex_test_repo.create()
_nested_submodule_annex_test_repo.create()
_inner_submodule_annex_test_repo.create()
uris = []
for name, spec in _TESTREPOS.items():
if not re.match(regex, name):
continue
uris += [spec[x] for x in set(spec.keys()).intersection(flavors)]
        # additional flavors which might not have been provided
if 'clone' in flavors and 'clone' not in spec:
uris.append(clone_url(spec['local']))
if 'network-clone' in flavors \
and 'network' in spec \
and 'network-clone' not in spec:
uris.append(clone_url(spec['network']))
return uris
@optional_args
def with_testrepos(t, regex='.*', flavors='auto', skip=False, count=None):
"""Decorator to provide a local/remote test repository
    All test repositories under datalad/tests/testrepos are stored in a two-level
    hierarchy, where the top-level name describes the nature/identifier of the
    test repository, and there could be multiple instances (e.g. generated
    differently) of the same "content"
Parameters
----------
regex : string, optional
Regex to select which test repos to use
flavors : {'auto', 'local', 'local-url', 'clone', 'network', 'network-clone'} or list of thereof, optional
What URIs to provide. E.g. 'local' would just provide path to the
repository, while 'network' would provide url of the remote location
available on Internet containing the test repository. 'clone' would
clone repository first to a temporary location. 'network-clone' would
first clone from the network location. 'auto' would include the list of
appropriate ones (e.g., no 'network*' flavors if network tests are
"forbidden").
count: int, optional
If specified, only up to that number of repositories to test with
Examples
--------
>>> from datalad.tests.utils import with_testrepos
>>> @with_testrepos('basic_annex')
... def test_write(repo):
... assert(os.path.exists(os.path.join(repo, '.git', 'annex')))
"""
@wraps(t)
@attr('with_testrepos')
def _wrap_with_testrepos(*arg, **kw):
# addurls with our generated file:// URLs doesn't work on appveyor
# https://ci.appveyor.com/project/mih/datalad/builds/29841505/job/330rwn2a3cvtrakj
#if 'APPVEYOR' in os.environ:
# raise SkipTest("Testrepo setup is broken on AppVeyor")
# TODO: would need to either avoid this "decorator" approach for
# parametric tests or again aggregate failures like sweepargs does
flavors_ = _get_resolved_flavors(flavors)
testrepos_uris = _get_testrepos_uris(regex, flavors_)
# we should always have at least one repo to test on, unless explicitly only
        # network was requested but we are running without networked tests
if not (dl_cfg.get('datalad.tests.nonetwork') and flavors == ['network']):
assert(testrepos_uris)
else:
if not testrepos_uris:
raise SkipTest("No non-networked repos to test on")
fake_dates = dl_cfg.get("datalad.fake-dates")
ntested = 0
for uri in testrepos_uris:
if count and ntested >= count:
break
ntested += 1
if __debug__:
lgr.debug('Running %s on %s', t.__name__, uri)
try:
t(*(arg + (uri,)), **kw)
finally:
# The is_explicit_path check is needed because it may be a URL,
# but check_dates needs a local path or GitRepo object.
if fake_dates and is_explicit_path(uri):
from ..support.repodates import check_dates
assert_false(
check_dates(uri, annex="tree")["objects"])
if uri in _TEMP_PATHS_CLONES:
_TEMP_PATHS_CLONES.discard(uri)
rmtemp(uri)
pass # might need to provide additional handling so, handle
return _wrap_with_testrepos
with_testrepos.__test__ = False
@optional_args
def with_sameas_remote(func, autoenabled=False):
"""Provide a repository with a git-annex sameas remote configured.
The repository will have two special remotes: r_dir (type=directory) and
r_rsync (type=rsync). The rsync remote will be configured with
--sameas=r_dir, and autoenabled if `autoenabled` is true.
"""
from datalad.support.annexrepo import AnnexRepo
from datalad.support.exceptions import CommandError
@wraps(func)
@attr('with_sameas_remotes')
@skip_if_on_windows
@skip_ssh
@with_tempfile(mkdir=True)
@with_tempfile(mkdir=True)
def _wrap_with_sameas_remote(*args, **kwargs):
# With git-annex's 8.20200522-77-g1f2e2d15e, transferring from an rsync
# special remote hangs on Xenial. This is likely due to an interaction
# with an older rsync or openssh version. Use openssh as a rough
# indicator. See
# https://git-annex.branchable.com/bugs/Recent_hang_with_rsync_remote_with_older_systems___40__Xenial__44___Jessie__41__/
if external_versions['cmd:system-ssh'] < '7.4' and \
'8.20200522' < external_versions['cmd:annex'] < '8.20200720':
raise SkipTest("Test known to hang")
sr_path, repo_path = args[-2:]
fn_args = args[:-2]
repo = AnnexRepo(repo_path)
repo.init_remote("r_dir",
options=["type=directory",
"encryption=none",
"directory=" + sr_path])
options = ["type=rsync",
"rsyncurl=datalad-test:" + sr_path]
if autoenabled:
options.append("autoenable=true")
options.append("--sameas=r_dir")
repo.init_remote("r_rsync", options=options)
return func(*(fn_args + (repo,)), **kwargs)
return _wrap_with_sameas_remote
@optional_args
def with_fake_cookies_db(func, cookies={}):
"""mock original cookies db with a fake one for the duration of the test
"""
from ..support.cookies import cookies_db
@wraps(func)
@attr('with_fake_cookies_db')
def _wrap_with_fake_cookies_db(*args, **kwargs):
try:
orig_cookies_db = cookies_db._cookies_db
cookies_db._cookies_db = cookies.copy()
return func(*args, **kwargs)
finally:
cookies_db._cookies_db = orig_cookies_db
return _wrap_with_fake_cookies_db
@optional_args
def assert_cwd_unchanged(func, ok_to_chdir=False):
"""Decorator to test whether the current working directory remains unchanged
Parameters
----------
ok_to_chdir: bool, optional
If True, allow to chdir, so this decorator would not then raise exception
if chdir'ed but only return to original directory
"""
@wraps(func)
def _wrap_assert_cwd_unchanged(*args, **kwargs):
cwd_before = os.getcwd()
pwd_before = getpwd()
exc_info = None
# record previous state of PWD handling
utils_pwd_mode = utils._pwd_mode
try:
ret = func(*args, **kwargs)
except:
exc_info = sys.exc_info()
finally:
utils._pwd_mode = utils_pwd_mode
try:
cwd_after = os.getcwd()
except OSError as e:
lgr.warning("Failed to getcwd: %s" % e)
cwd_after = None
if cwd_after != cwd_before:
chpwd(pwd_before)
# Above chpwd could also trigger the change of _pwd_mode, so we
# would need to reset it again since we know that it is all kosher
utils._pwd_mode = utils_pwd_mode
if not ok_to_chdir:
lgr.warning(
"%s changed cwd to %s. Mitigating and changing back to %s"
% (func, cwd_after, pwd_before))
# If there was already exception raised, we better reraise
# that one since it must be more important, so not masking it
# here with our assertion
if exc_info is None:
assert_equal(cwd_before, cwd_after,
"CWD changed from %s to %s" % (cwd_before, cwd_after))
if exc_info is not None:
raise exc_info[1]
return ret
return _wrap_assert_cwd_unchanged
@optional_args
def run_under_dir(func, newdir='.'):
"""Decorator to run tests under another directory
    It is somewhat ugly since we can't really chdir
    back to a directory which had a symlink in its path.
    So using this decorator has the potential to move the entire
    testing run under the dereferenced directory name -- a side effect.
    The only way around it would be to instruct the testing framework (i.e.
    nose in our case ATM) to run a test by creating a new process with
    a new cwd
"""
@wraps(func)
def _wrap_run_under_dir(*args, **kwargs):
pwd_before = getpwd()
try:
chpwd(newdir)
func(*args, **kwargs)
finally:
chpwd(pwd_before)
return _wrap_run_under_dir
def assert_re_in(regex, c, flags=0, match=True, msg=None):
"""Assert that container (list, str, etc) contains entry matching the regex
"""
if not isinstance(c, (list, tuple)):
c = [c]
for e in c:
if (re.match if match else re.search)(regex, e, flags=flags):
return
raise AssertionError(
msg or "Not a single entry matched %r in %r" % (regex, c)
)
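# A hypothetical usage sketch (not part of the original module): the assertion
# passes if any entry of the container matches the regex, anchored at the
# start by default (match=True) or searched anywhere with match=False::
#
#     assert_re_in(r'^file\d+', ['dir', 'file1'])          # passes
#     assert_re_in(r'\d+', ['dir', 'file1'], match=False)  # search anywhere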
def assert_dict_equal(d1, d2):
msgs = []
if set(d1).difference(d2):
msgs.append(" keys in the first dict but not in the second: %s"
% list(set(d1).difference(d2)))
if set(d2).difference(d1):
msgs.append(" keys in the second dict but not in the first: %s"
% list(set(d2).difference(d1)))
for k in set(d1).intersection(d2):
same = True
try:
if isinstance(d1[k], str):
# do not compare types for string types to avoid all the hassle
# with the distinction of str and unicode in PY3, and simple
# test for equality
same = bool(d1[k] == d2[k])
else:
same = type(d1[k]) == type(d2[k]) and bool(d1[k] == d2[k])
except: # if comparison or conversion to bool (e.g. with numpy arrays) fails
same = False
if not same:
msgs.append(" [%r] differs: %r != %r" % (k, d1[k], d2[k]))
if len(msgs) > 10:
msgs.append("and more")
break
if msgs:
raise AssertionError("dicts differ:\n%s" % "\n".join(msgs))
# do generic comparison just in case we screwed up to detect difference correctly above
eq_(d1, d2)
def assert_str_equal(s1, s2):
"""Helper to compare two lines"""
diff = list(unified_diff(s1.splitlines(), s2.splitlines()))
assert not diff, '\n'.join(diff)
assert_equal(s1, s2)
def assert_status(label, results):
"""Verify that each status dict in the results has a given status label
`label` can be a sequence, in which case status must be one of the items
in this sequence.
"""
label = ensure_list(label)
results = ensure_list(results)
for i, r in enumerate(results):
try:
assert_in('status', r)
assert_in(r['status'], label)
except AssertionError:
raise AssertionError('Test {}/{}: expected status {} not found in:\n{}'.format(
i + 1,
len(results),
label,
dumps(results, indent=1, default=lambda x: str(x))))
def assert_message(message, results):
"""Verify that each status dict in the results has a message
This only tests the message template string, and not a formatted message
with args expanded.
"""
for r in ensure_list(results):
assert_in('message', r)
m = r['message'][0] if isinstance(r['message'], tuple) else r['message']
assert_equal(m, message)
def _format_res(x):
return textwrap.indent(
dumps(x, indent=1, default=str, sort_keys=True),
prefix=" ")
def assert_result_count(results, n, **kwargs):
"""Verify specific number of results (matching criteria, if any)"""
count = 0
results = ensure_list(results)
for r in results:
if not len(kwargs):
count += 1
elif all(k in r and r[k] == v for k, v in kwargs.items()):
count += 1
if not n == count:
raise AssertionError(
'Got {} instead of {} expected results matching\n{}\nInspected {} record(s):\n{}'.format(
count,
n,
_format_res(kwargs),
len(results),
_format_res(results)))
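# A hypothetical usage sketch (not part of the original module): counting
# result records that match the given key/value criteria::
#
#     results = [{'status': 'ok', 'action': 'get'},
#                {'status': 'notneeded', 'action': 'get'}]
#     assert_result_count(results, 2, action='get')
#     assert_result_count(results, 1, status='ok')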
def _check_results_in(should_contain, results, **kwargs):
found = False
for r in ensure_list(results):
if all(k in r and r[k] == v for k, v in kwargs.items()):
found = True
break
if found ^ should_contain:
if should_contain:
msg = "Desired result\n{}\nnot found among\n{}"
else:
msg = "Result\n{}\nunexpectedly found among\n{}"
raise AssertionError(msg.format(_format_res(kwargs),
_format_res(results)))
def assert_in_results(results, **kwargs):
"""Verify that the particular combination of keys and values is found in
one of the results"""
_check_results_in(True, results, **kwargs)
def assert_not_in_results(results, **kwargs):
"""Verify that the particular combination of keys and values is not in any
of the results"""
_check_results_in(False, results, **kwargs)
def assert_result_values_equal(results, prop, values):
"""Verify that the values of all results for a given key in the status dicts
match the given sequence"""
assert_equal(
[r[prop] for r in results],
values)
def assert_result_values_cond(results, prop, cond):
"""Verify that the values of all results for a given key in the status dicts
fulfill condition `cond`.
Parameters
----------
results:
prop: str
cond: callable
"""
for r in ensure_list(results):
ok_(cond(r[prop]),
msg="r[{prop}]: {value}".format(prop=prop, value=r[prop]))
def ignore_nose_capturing_stdout(func):
"""DEPRECATED and will be removed soon. Does nothing!
    Originally intended as a decorator workaround for nose's behaviour
    of redirecting sys.stdout, but we now monkey patch nose so no test
    should be skipped any longer.
See issue reported here:
https://code.google.com/p/python-nose/issues/detail?id=243&can=1&sort=-id&colspec=ID%20Type%20Status%20Priority%20Stars%20Milestone%20Owner%20Summary
"""
lgr.warning(
"@ignore_nose_capturing_stdout no longer does anything - nose should "
"just be monkey patched in setup_package. {} still has it"
.format(func.__name__)
)
return func
def skip_httpretty_on_problematic_pythons(func):
"""As discovered some httpretty bug causes a side-effect
on other tests on some Pythons. So we skip the test if such
problematic combination detected
References
https://travis-ci.org/datalad/datalad/jobs/94464988
http://stackoverflow.com/a/29603206/1265472
"""
@make_decorator(func)
def _wrap_skip_httpretty_on_problematic_pythons(*args, **kwargs):
if sys.version_info[:3] == (3, 4, 2):
raise SkipTest("Known to cause trouble due to httpretty bug on this Python")
return func(*args, **kwargs)
return _wrap_skip_httpretty_on_problematic_pythons
@optional_args
def with_parametric_batch(t):
"""Helper to run parametric test with possible combinations of batch and direct
"""
@wraps(t)
def _wrap_with_parametric_batch():
for batch in (False, True):
yield t, batch
return _wrap_with_parametric_batch
# List of the most obscure filenames which might or might not be supported by
# different filesystems across different OSs. Start with the most obscure
OBSCURE_PREFIX = os.getenv('DATALAD_TESTS_OBSCURE_PREFIX', '')
# Those will be tried to be added to the base name if filesystem allows
OBSCURE_FILENAME_PARTS = [' ', '/', '|', ';', '&', '%b5', '{}', "'", '"']
UNICODE_FILENAME = u"ΔЙקم๗あ"
# OSX is exciting -- some filesystems might, I guess, encode differently from how
# they decode, so Й might get recoded
# (ref: https://github.com/datalad/datalad/pull/1921#issuecomment-385809366)
if sys.getfilesystemencoding().lower() == 'utf-8':
if on_osx:
# TODO: figure it really out
UNICODE_FILENAME = UNICODE_FILENAME.replace(u"Й", u"")
if on_windows:
# TODO: really figure out unicode handling on windows
UNICODE_FILENAME = ''
if UNICODE_FILENAME:
OBSCURE_FILENAME_PARTS.append(UNICODE_FILENAME)
# space before extension, simple extension and trailing space to finish it up
OBSCURE_FILENAME_PARTS += [' ', '.datc', ' ']
@with_tempfile(mkdir=True)
def get_most_obscure_supported_name(tdir, return_candidates=False):
"""Return the most obscure filename that the filesystem would support under TEMPDIR
Parameters
----------
return_candidates: bool, optional
if True, return a tuple of (good, candidates) where candidates are "partially"
sorted from trickiest considered
TODO: we might want to use it as a function where we would provide tdir
"""
# we need separate good_base so we do not breed leading/trailing spaces
initial = good = OBSCURE_PREFIX
system = platform.system()
OBSCURE_FILENAMES = []
def good_filename(filename):
OBSCURE_FILENAMES.append(candidate)
try:
            # Windows seems to not tolerate trailing spaces and
            # ATM we do not distinguish obscure filename and dirname.
            # So here we will test for both - being able to create a dir
            # with an obscure name and an obscure filename under it
os.mkdir(opj(tdir, filename))
with open(opj(tdir, filename, filename), 'w') as f:
f.write("TEST LOAD")
return True
except:
lgr.debug("Filename %r is not supported on %s under %s",
filename, system, tdir)
return False
# incrementally build up the most obscure filename from parts
for part in OBSCURE_FILENAME_PARTS:
candidate = good + part
if good_filename(candidate):
good = candidate
if good == initial:
raise RuntimeError("Could not create any of the files under %s among %s"
% (tdir, OBSCURE_FILENAMES))
lgr.debug("Tested %d obscure filename candidates. The winner: %r", len(OBSCURE_FILENAMES), good)
if return_candidates:
return good, OBSCURE_FILENAMES[::-1]
else:
return good
OBSCURE_FILENAME, OBSCURE_FILENAMES = get_most_obscure_supported_name(return_candidates=True)
@optional_args
def with_testsui(t, responses=None, interactive=True):
"""Switch main UI to be 'tests' UI and possibly provide answers to be used"""
@wraps(t)
def _wrap_with_testsui(*args, **kwargs):
from datalad.ui import ui
old_backend = ui.backend
try:
ui.set_backend('tests' if interactive else 'tests-noninteractive')
if responses:
ui.add_responses(responses)
ret = t(*args, **kwargs)
if responses:
responses_left = ui.get_responses()
assert not len(responses_left), "Some responses were left not used: %s" % str(responses_left)
return ret
finally:
ui.set_backend(old_backend)
if not interactive and responses is not None:
raise ValueError("Non-interactive UI cannot provide responses")
return _wrap_with_testsui
with_testsui.__test__ = False
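# A hypothetical usage sketch (not part of the original module): feeding canned
# responses to the 'tests' UI backend for the duration of a test::
#
#     @with_testsui(responses=['yes'])
#     def test_asks_user():
#         pass  # code under test would consume the queued 'yes' response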
def assert_no_errors_logged(func, skip_re=None):
"""Decorator around function to assert that no errors logged during its execution"""
@wraps(func)
def _wrap_assert_no_errors_logged(*args, **kwargs):
with swallow_logs(new_level=logging.ERROR) as cml:
out = func(*args, **kwargs)
if cml.out:
if not (skip_re and re.search(skip_re, cml.out)):
raise AssertionError(
"Expected no errors to be logged, but log output is %s"
% cml.out
)
return out
return _wrap_assert_no_errors_logged
def get_mtimes_and_digests(target_path):
"""Return digests (md5) and mtimes for all the files under target_path"""
from datalad.utils import find_files
from datalad.support.digests import Digester
digester = Digester(['md5'])
# bother only with existing ones for this test, i.e. skip annexed files without content
target_files = [
f for f in find_files('.*', topdir=target_path, exclude_vcs=False, exclude_datalad=False)
if exists(f)
]
# let's leave only relative paths for easier analysis
target_files_ = [relpath(f, target_path) for f in target_files]
digests = {frel: digester(f) for f, frel in zip(target_files, target_files_)}
mtimes = {frel: os.stat(f).st_mtime for f, frel in zip(target_files, target_files_)}
return digests, mtimes
def get_datasets_topdir():
"""Delayed parsing so it could be monkey patched etc"""
from datalad.consts import DATASETS_TOPURL
return RI(DATASETS_TOPURL).hostname
def assert_repo_status(path, annex=None, untracked_mode='normal', **kwargs):
"""Compare a repo status against (optional) exceptions.
    Any file/directory that is not explicitly indicated must have
state 'clean', i.e. no modifications and recorded in Git.
Parameters
----------
path: str or Repo
in case of a str: path to the repository's base dir;
      Note that passing a Repo instance prevents detecting an annex. This might
      be useful in case of a non-initialized annex that a GitRepo is pointing to.
annex: bool or None
      explicitly set to True or False to indicate that an annex is (not)
      expected; set to None to autodetect whether there is an annex.
Default: None.
untracked_mode: {'no', 'normal', 'all'}
If and how untracked content is reported. The specification of untracked
files that are OK to be found must match this mode. See `Repo.status()`
**kwargs
Files/directories that are OK to not be in 'clean' state. Each argument
must be one of 'added', 'untracked', 'deleted', 'modified' and each
value must be a list of filenames (relative to the root of the
repository, in POSIX convention).
"""
r = None
if isinstance(path, AnnexRepo):
if annex is None:
annex = True
# if `annex` was set to False, but we find an annex => fail
assert_is(annex, True)
r = path
elif isinstance(path, GitRepo):
if annex is None:
annex = False
# explicitly given GitRepo instance doesn't make sense with
# 'annex' True
assert_is(annex, False)
r = path
else:
# 'path' is an actual path
try:
r = AnnexRepo(path, init=False, create=False)
if annex is None:
annex = True
# if `annex` was set to False, but we find an annex => fail
assert_is(annex, True)
except Exception:
# Instantiation failed => no annex
try:
r = GitRepo(path, init=False, create=False)
except Exception:
raise AssertionError("Couldn't find an annex or a git "
"repository at {}.".format(path))
if annex is None:
annex = False
# explicitly given GitRepo instance doesn't make sense with
# 'annex' True
assert_is(annex, False)
status = r.status(untracked=untracked_mode)
    # for any file state that indicates some kind of change (all but 'clean')
for state in ('added', 'untracked', 'deleted', 'modified'):
oktobefound = sorted(r.pathobj.joinpath(ut.PurePosixPath(p))
for p in kwargs.get(state, []))
state_files = sorted(k for k, v in status.items()
if v.get('state', None) == state)
eq_(state_files, oktobefound,
'unexpected content of state "%s": %r != %r'
% (state, state_files, oktobefound))
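# A hypothetical usage sketch (not part of the original module): asserting that
# a repository is clean except for one untracked and one modified file (the
# path and file names are made up for illustration)::
#
#     assert_repo_status('/tmp/some_ds',
#                        untracked=['scratch.txt'],
#                        modified=['README.md'])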
def get_convoluted_situation(path, repocls=AnnexRepo):
from datalad.api import create
#if 'APPVEYOR' in os.environ:
# # issue only happens on appveyor, Python itself implodes
# # cannot be reproduced on a real windows box
# raise SkipTest(
# 'get_convoluted_situation() causes appveyor to crash, '
# 'reason unknown')
repo = repocls(path, create=True)
# use create(force) to get an ID and config into the empty repo
ds = Dataset(path).create(force=True)
# base content
create_tree(
ds.path,
{
'.gitignore': '*.ignored',
'subdir': {
'file_clean': 'file_clean',
'file_deleted': 'file_deleted',
'file_modified': 'file_clean',
},
'subdir-only-ignored': {
'1.ignored': '',
},
'file_clean': 'file_clean',
'file_deleted': 'file_deleted',
'file_staged_deleted': 'file_staged_deleted',
'file_modified': 'file_clean',
}
)
if isinstance(ds.repo, AnnexRepo):
create_tree(
ds.path,
{
'subdir': {
'file_dropped_clean': 'file_dropped_clean',
},
'file_dropped_clean': 'file_dropped_clean',
}
)
ds.save()
if isinstance(ds.repo, AnnexRepo):
# some files straight in git
create_tree(
ds.path,
{
'subdir': {
'file_ingit_clean': 'file_ingit_clean',
'file_ingit_modified': 'file_ingit_clean',
},
'file_ingit_clean': 'file_ingit_clean',
'file_ingit_modified': 'file_ingit_clean',
}
)
ds.save(to_git=True)
ds.drop([
'file_dropped_clean',
opj('subdir', 'file_dropped_clean')],
check=False)
# clean and proper subdatasets
ds.create('subds_clean')
ds.create(opj('subdir', 'subds_clean'))
ds.create('subds_unavailable_clean')
ds.create(opj('subdir', 'subds_unavailable_clean'))
# uninstall some subdatasets (still clean)
ds.drop([
'subds_unavailable_clean',
opj('subdir', 'subds_unavailable_clean')],
what='all', reckless='kill', recursive=True)
assert_repo_status(ds.path)
# make a dirty subdataset
ds.create('subds_modified')
ds.create(opj('subds_modified', 'someds'))
ds.create(opj('subds_modified', 'someds', 'dirtyds'))
# make a subdataset with additional commits
ds.create(opj('subdir', 'subds_modified'))
pdspath = opj(ds.path, 'subdir', 'subds_modified', 'progressedds')
ds.create(pdspath)
create_tree(
pdspath,
{'file_clean': 'file_ingit_clean'}
)
Dataset(pdspath).save()
assert_repo_status(pdspath)
# staged subds, and files
create(opj(ds.path, 'subds_added'))
# use internal helper to get subdataset into an 'added' state
# that would not happen in standard datalad workflows
list(ds.repo._save_add_submodules([ds.pathobj / 'subds_added']))
create(opj(ds.path, 'subdir', 'subds_added'))
list(ds.repo._save_add_submodules([ds.pathobj / 'subdir' / 'subds_added']))
# some more untracked files
create_tree(
ds.path,
{
'subdir': {
'file_untracked': 'file_untracked',
'file_added': 'file_added',
},
'file_untracked': 'file_untracked',
'file_added': 'file_added',
'dir_untracked': {
'file_untracked': 'file_untracked',
},
'subds_modified': {
'someds': {
"dirtyds": {
'file_untracked': 'file_untracked',
},
},
},
}
)
ds.repo.add(['file_added', opj('subdir', 'file_added')])
# untracked subdatasets
create(opj(ds.path, 'subds_untracked'))
create(opj(ds.path, 'subdir', 'subds_untracked'))
# deleted files
os.remove(opj(ds.path, 'file_deleted'))
os.remove(opj(ds.path, 'subdir', 'file_deleted'))
# staged deletion
ds.repo.remove('file_staged_deleted')
# modified files
if isinstance(ds.repo, AnnexRepo):
ds.repo.unlock(['file_modified', opj('subdir', 'file_modified')])
create_tree(
ds.path,
{
'subdir': {
'file_ingit_modified': 'file_ingit_modified',
},
'file_ingit_modified': 'file_ingit_modified',
}
)
create_tree(
ds.path,
{
'subdir': {
'file_modified': 'file_modified',
},
'file_modified': 'file_modified',
}
)
return ds
def get_deeply_nested_structure(path):
""" Here is what this does (assuming UNIX, locked):
| .
| ├── directory_untracked
| │ └── link2dir -> ../subdir
| ├── OBSCURE_FILENAME_file_modified
| ├── link2dir -> subdir
| ├── link2subdsdir -> subds_modified/subdir
| ├── link2subdsroot -> subds_modified
| ├── subdir
| │ ├── annexed_file.txt -> ../.git/annex/objects/...
| │ ├── file_modified
| │ ├── git_file.txt
| │ └── link2annex_files.txt -> annexed_file.txt
| └── subds_modified
| ├── link2superdsdir -> ../subdir
| ├── subdir
| │ └── annexed_file.txt -> ../.git/annex/objects/...
| └── subds_lvl1_modified
| └── OBSCURE_FILENAME_directory_untracked
| └── untracked_file
When a system has no symlink support, the link2... components are not
included.
"""
ds = Dataset(path).create()
(ds.pathobj / 'subdir').mkdir()
(ds.pathobj / 'subdir' / 'annexed_file.txt').write_text(u'dummy')
ds.save()
(ds.pathobj / 'subdir' / 'git_file.txt').write_text(u'dummy')
ds.save(to_git=True)
# a subtree of datasets
subds = ds.create('subds_modified')
# another dataset, plus an additional dir in it
ds.create(opj('subds_modified', 'subds_lvl1_modified'))
create_tree(
ds.path,
{
'subdir': {
'file_modified': 'file_modified',
},
OBSCURE_FILENAME + u'file_modified_': 'file_modified',
}
)
create_tree(
str(ds.pathobj / 'subds_modified' / 'subds_lvl1_modified'),
{OBSCURE_FILENAME + u'_directory_untracked': {"untracked_file": ""}}
)
(ut.Path(subds.path) / 'subdir').mkdir()
(ut.Path(subds.path) / 'subdir' / 'annexed_file.txt').write_text(u'dummy')
subds.save()
(ds.pathobj / 'directory_untracked').mkdir()
if not has_symlink_capability():
return ds
# symlink farm #1
# symlink to annexed file
(ds.pathobj / 'subdir' / 'link2annex_files.txt').symlink_to(
'annexed_file.txt')
# symlink to directory within the dataset
(ds.pathobj / 'link2dir').symlink_to('subdir')
# upwards pointing symlink to directory within the same dataset
(ds.pathobj / 'directory_untracked' / 'link2dir').symlink_to(
opj('..', 'subdir'))
# symlink pointing to a subdataset mount in the same dataset
(ds.pathobj / 'link2subdsroot').symlink_to('subds_modified')
# symlink to a dir in a subdataset (across dataset boundaries)
(ds.pathobj / 'link2subdsdir').symlink_to(
opj('subds_modified', 'subdir'))
# symlink to a dir in a superdataset (across dataset boundaries)
(ut.Path(subds.path) / 'link2superdsdir').symlink_to(
opj('..', 'subdir'))
return ds
def maybe_adjust_repo(repo):
"""Put repo into an adjusted branch if it is not already.
"""
if not repo.is_managed_branch():
repo.call_annex(["upgrade"])
repo.config.reload(force=True)
repo.adjust()
@with_tempfile
@with_tempfile
def has_symlink_capability(p1, p2):
path = ut.Path(p1)
target = ut.Path(p2)
return utils.check_symlink_capability(path, target)
def skip_wo_symlink_capability(func):
"""Skip test when environment does not support symlinks
Perform a behavioral test instead of top-down logic, as on
windows this could be on or off on a case-by-case basis.
"""
@wraps(func)
@attr('skip_wo_symlink_capability')
def _wrap_skip_wo_symlink_capability(*args, **kwargs):
if not has_symlink_capability():
raise SkipTest("no symlink capabilities")
return func(*args, **kwargs)
return _wrap_skip_wo_symlink_capability
_TESTS_ADJUSTED_TMPDIR = None
def skip_if_adjusted_branch(func):
"""Skip test if adjusted branch is used by default on TMPDIR file system.
"""
@wraps(func)
@attr('skip_if_adjusted_branch')
def _wrap_skip_if_adjusted_branch(*args, **kwargs):
global _TESTS_ADJUSTED_TMPDIR
if _TESTS_ADJUSTED_TMPDIR is None:
@with_tempfile
def _check(path):
ds = Dataset(path).create(force=True)
return ds.repo.is_managed_branch()
_TESTS_ADJUSTED_TMPDIR = _check()
if _TESTS_ADJUSTED_TMPDIR:
raise SkipTest("Test incompatible with adjusted branch default")
return func(*args, **kwargs)
return _wrap_skip_if_adjusted_branch
def get_ssh_port(host):
"""Get port of `host` in ssh_config.
Our tests depend on the host being defined in ssh_config, including its
port. This method can be used by tests that want to check handling of an
    explicitly specified port.
Note that if `host` does not match a host in ssh_config, the default value
of 22 is returned.
Parameters
----------
host : str
Returns
-------
port (int)
Raises
------
SkipTest if port cannot be found.
"""
out = ''
runner = WitlessRunner()
try:
res = runner.run(["ssh", "-G", host], protocol=StdOutErrCapture)
out = res["stdout"]
err = res["stderr"]
except Exception as exc:
err = str(exc)
port = None
for line in out.splitlines():
if line.startswith("port "):
try:
port = int(line.split()[1])
except Exception as exc:
err = str(exc)
break
if port is None:
raise SkipTest("port for {} could not be determined: {}"
.format(host, err))
return port
#
# Context Managers
#
def patch_config(vars):
"""Patch our config with custom settings. Returns mock.patch cm
Only the merged configuration from all sources (global, local, dataset)
will be patched. Source-constrained patches (e.g. only committed dataset
configuration) are not supported.
"""
return patch.dict(dl_cfg._merged_store, vars)
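# A hypothetical usage sketch (not part of the original module): temporarily
# overriding a configuration variable within a test::
#
#     with patch_config({'datalad.tests.nonetwork': '1'}):
#         pass  # code here sees the patched value in dl_cfg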
@contextmanager
def set_date(timestamp):
"""Temporarily override environment variables for git/git-annex dates.
Parameters
----------
timestamp : int
Unix timestamp.
"""
git_ts = "@{} +0000".format(timestamp)
with patch.dict("os.environ",
{"GIT_COMMITTER_DATE": git_ts,
"GIT_AUTHOR_DATE": git_ts,
"GIT_ANNEX_VECTOR_CLOCK": str(timestamp),
"DATALAD_FAKE__DATES": "0"}):
yield
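# A hypothetical usage sketch (not part of the original module): pinning
# git/git-annex dates to a fixed Unix timestamp while creating commits::
#
#     with set_date(1419552000):
#         pass  # commits made here carry the overridden committer/author dates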
@contextmanager
def set_annex_version(version):
"""Override the git-annex version.
This temporarily masks the git-annex version present in external_versions
    and makes AnnexRepo forget its cached version information.
"""
from datalad.support.annexrepo import AnnexRepo
ar_vers = AnnexRepo.git_annex_version
with patch.dict(
"datalad.support.annexrepo.external_versions._versions",
{"cmd:annex": version}):
try:
AnnexRepo.git_annex_version = None
yield
finally:
AnnexRepo.git_annex_version = ar_vers
#
# Test tags
#
# To be explicit, and not "loose" some tests due to typos, decided to make
# explicit decorators for common types
from nose.plugins.attrib import attr
def integration(f):
"""Mark test as an "integration" test which generally is not needed to be run
Generally tend to be slower.
Should be used in combination with @slow and @turtle if that is the case.
"""
return attr('integration')(f)
def slow(f):
"""Mark test as a slow, although not necessarily integration or usecase test
Rule of thumb cut-off to mark as slow is 10 sec
"""
return attr('slow')(f)
def turtle(f):
"""Mark test as very slow, meaning to not run it on Travis due to its
time limit
Rule of thumb cut-off to mark as turtle is 2 minutes
"""
return attr('turtle')(f)
def usecase(f):
"""Mark test as a usecase user ran into and which (typically) caused bug report
to be filed/troubleshooted
Should be used in combination with @slow and @turtle if slow.
"""
return attr('usecase')(f)
|
the-stack_0_13899 | import time
from handler.base_plugin import BasePlugin
class AntifloodPlugin(BasePlugin):
__slots__ = ("users", "delay", "absolute", "absolute_time")
def __init__(self, delay=1, absolute=False):
""" Forbids users to send messages to bot more often than delay `delay`. If `absolute` is True, bot
wont answer on more than 1 message in delay time.
"""
super().__init__()
self.users = {}
self.delay = delay
self.absolute = absolute
self.absolute_time = 0
async def global_before_message(self, msg, plugin):
if len(self.users) > 2000:
self.users.clear()
ct = time.time()
if self.absolute:
if ct - self.absolute_time <= self.delay:
return False
self.absolute_time = ct
return True
else:
if ct - self.users.get(msg.user_id, 0) <= self.delay:
return False
self.users[msg.user_id] = ct
return True
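# A hypothetical usage sketch (not part of the original plugin): registering
# the plugin with a 2-second per-user delay; the exact plugin-registration API
# of the surrounding bot framework is an assumption here.
#
#     plugins = [AntifloodPlugin(delay=2)]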
|
the-stack_0_13902 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This code expects that you have AWS credentials setup per:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html
from logging import basicConfig, getLogger, INFO
from boto3 import client
from pyqldbsamples.create_ledger import wait_for_active
from pyqldbsamples.delete_ledger import delete_ledger, set_deletion_protection
logger = getLogger(__name__)
basicConfig(level=INFO)
qldb_client = client('qldb')
LEDGER_NAME = 'deletion-protection-demo'
def create_with_deletion_protection(ledger_name):
"""
Create a new ledger with the specified name and with deletion protection enabled.
:type ledger_name: str
:param ledger_name: Name for the ledger to be created.
:rtype: dict
:return: Result from the request.
"""
logger.info("Let's create the ledger with name: {}...".format(ledger_name))
result = qldb_client.create_ledger(Name=ledger_name, PermissionsMode='ALLOW_ALL')
logger.info('Success. Ledger state: {}'.format(result.get('State')))
return result
def main(ledger_name=LEDGER_NAME):
"""
Demonstrate the protection of QLDB ledgers against deletion.
"""
try:
create_with_deletion_protection(ledger_name)
wait_for_active(ledger_name)
try:
delete_ledger(ledger_name)
except qldb_client.exceptions.ResourcePreconditionNotMetException:
logger.info('Ledger protected against deletions! Turning off deletion protection now.')
set_deletion_protection(ledger_name, False)
delete_ledger(ledger_name)
except Exception as e:
logger.exception('Error while updating or deleting the ledger!')
raise e
if __name__ == '__main__':
main()
|
the-stack_0_13903 | """Config flow for ReCollect Waste integration."""
from __future__ import annotations
from aiorecollect.client import Client
from aiorecollect.errors import RecollectError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_FRIENDLY_NAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_PLACE_ID): str, vol.Required(CONF_SERVICE_ID): str}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ReCollect Waste."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Define the config flow to handle options."""
return RecollectWasteOptionsFlowHandler(config_entry)
async def async_step_import(self, import_config: dict = None) -> dict:
"""Handle configuration via YAML import."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input: dict = None) -> dict:
"""Handle configuration via the UI."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors={}
)
unique_id = f"{user_input[CONF_PLACE_ID]}, {user_input[CONF_SERVICE_ID]}"
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
session = aiohttp_client.async_get_clientsession(self.hass)
client = Client(
user_input[CONF_PLACE_ID], user_input[CONF_SERVICE_ID], session=session
)
try:
await client.async_get_next_pickup_event()
except RecollectError as err:
LOGGER.error("Error during setup of integration: %s", err)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": "invalid_place_or_service_id"},
)
return self.async_create_entry(
title=unique_id,
data={
CONF_PLACE_ID: user_input[CONF_PLACE_ID],
CONF_SERVICE_ID: user_input[CONF_SERVICE_ID],
},
)
class RecollectWasteOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a Recollect Waste options flow."""
def __init__(self, entry: config_entries.ConfigEntry):
"""Initialize."""
self._entry = entry
async def async_step_init(self, user_input: dict | None = None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_FRIENDLY_NAME,
default=self._entry.options.get(CONF_FRIENDLY_NAME),
): bool
}
),
)
|
the-stack_0_13905 | #!/usr/bin/env python
"""
Extract outline edges of a given mesh and save them into
'<original path>/edge_<original mesh file name>.vtk'
or into a user defined output file.
The outline edge is an edge for which norm(nvec1 - nvec2) < eps,
where nvec1 and nvec2 are the normal vectors of the incident facets.
"""
from __future__ import absolute_import
import numpy as nm
from scipy.sparse import coo_matrix
import sys
sys.path.append('.')
from argparse import ArgumentParser
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import edit_filename
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import VTKMeshIO
def merge_lines(mesh, eps=1e-18):
coors, ngroups, conns, mat_ids, ctype = mesh
conns = conns[0]
# vertices to edges map
n_v = coors.shape[0]
n_e = conns.shape[0]
row = nm.repeat(nm.arange(n_e), 2)
aux = coo_matrix((nm.ones((n_e * 2,), dtype=nm.bool),
(row, conns.flatten())), shape=(n_e, n_v))
v2e = aux.tocsc()
n_epv = nm.diff(v2e.indptr)
# directional vectors of edges
de = coors[conns[:, 1], :] - coors[conns[:, 0], :]
de = de / nm.linalg.norm(de, axis=1)[:, nm.newaxis]
eflag = nm.ones((n_e, ), dtype=bool)
valid_e = nm.where(eflag)[0]
e_remove = []
while len(valid_e) > 0:
ie = valid_e[0]
d = de[ie]
buff = [(ie, conns[ie, 0]), (ie, conns[ie, 1])]
eflag[ie] = False # invalidate edge
while len(buff) > 0:
e, v = buff.pop(-1)
if n_epv[v] == 2:
idx = v2e.indptr[v]
aux = v2e.indices[idx]
next_e = v2e.indices[idx + 1] if aux == e else aux
if not eflag[next_e]: # valid edge?
continue
if nm.linalg.norm(de[next_e] - d) < eps\
or nm.linalg.norm(de[next_e] + d) < eps:
next_ec = conns[next_e, :]
new_v = next_ec[0] if next_ec[1] == v else next_ec[1]
idx = 0 if conns[e, 0] == v else 1
conns[e, idx] = new_v # reconnect edge
idx = v2e.indptr[new_v]
aux = v2e.indices[idx]
idx += 0 if aux == next_e else 1
v2e.indices[idx] = e # update v2e map
buff.append((e, new_v)) # continue in searching
eflag[next_e] = False # invalidate edge
e_remove.append(next_e)
valid_e = nm.where(eflag)[0]
if len(e_remove) > 0:
# remove unused edges and vertices
eflag.fill(True)
eflag[nm.asarray(e_remove)] = False
remap = -nm.ones((n_v, ), dtype=nm.int64)
remap[conns[eflag, :]] = 1
vidx = nm.where(remap > 0)[0]
remap[vidx] = nm.arange(len(vidx))
conns_new = remap[conns[eflag, :]]
return coors[vidx, :], ngroups[vidx],\
[conns_new], [mat_ids[0][eflag]], ctype
else:
return mesh
def extract_edges(mesh, eps=1e-16):
"""
Extract outline edges of a given mesh.
The outline edge is an edge for which norm(nvec_1 - nvec_2) < eps,
where nvec_1 and nvec_2 are the normal vectors of the incident facets.
Parameters
----------
mesh : Mesh
The 3D or 2D mesh.
eps : float
The tolerance parameter of the outline edge searching algorithm.
Returns
-------
mesh_out : tuple
The data of the outline mesh, Mesh.from_data() format, i.e.
(coors, ngroups, ed_conns, mat_ids, descs).
"""
domain = FEDomain('domain', mesh)
cmesh = domain.cmesh
output('Mesh - dimension: %d, vertices: %d, elements: %d'
% (mesh.dim, mesh.n_nod, mesh.n_el))
if mesh.dim == 2:
oedges = cmesh.get_surface_facets()
mesh_coors = nm.hstack([cmesh.coors,
nm.zeros((cmesh.coors.shape[0], 1))])
elif mesh.dim == 3:
cmesh.setup_connectivity(1, 2)
cmesh.setup_connectivity(3, 2)
sfaces = cmesh.get_surface_facets()
_, idxs = nm.unique(cmesh.get_conn(3, 2).indices, return_index=True)
normals = cmesh.get_facet_normals()[idxs, :]
se_map, se_off = cmesh.get_incident(1, sfaces, 2, ret_offsets=True)
sedges = nm.unique(se_map)
n_se = sedges.shape[0]
# remap surface edges to continuous range
se_remap = -nm.ones(sedges.max() + 1)
se_remap[sedges] = nm.arange(n_se)
se_map0 = se_remap[se_map]
# surface face/edge connectivity matrix (n_surf x n_edge)
n_ef = nm.diff(se_off)[0] # = 2
n_sf = se_map.shape[0] // n_ef
row = nm.repeat(nm.arange(n_sf), n_ef)
sf2e = coo_matrix((nm.ones((n_sf * n_ef,), dtype=nm.bool),
(row, se_map0)), shape=(n_sf, n_se))
# edge to face map (n_edge x 2)
se2f = sf2e.tocsc().indices.reshape((sedges.shape[0], 2))
snormals = normals[sfaces]
err = nm.linalg.norm(snormals[se2f[:, 0]] - snormals[se2f[:, 1]],
axis=1)
oedges = sedges[nm.where(err > eps)[0]]
mesh_coors = cmesh.coors
else:
raise NotImplementedError
# save outline mesh
if oedges.shape[0] > 0:
ec_idxs = nm.unique(cmesh.get_incident(0, oedges, 1))
ed_coors = mesh_coors[ec_idxs, :]
ngroups = nm.zeros((ed_coors.shape[0],), dtype=nm.int16)
aux = cmesh.get_conn(1, 0).indices
ed_conns = aux.reshape((aux.shape[0] // 2, 2))[oedges, :]
ec_remap = -nm.ones((ec_idxs.max() + 1, ), dtype=nm.int64)
ec_remap[ec_idxs] = nm.arange(ec_idxs.shape[0])
ed_conns = ec_remap[ed_conns]
mat_ids = nm.ones((ed_conns.shape[0],), dtype=nm.int16)
mesh_out = ed_coors, ngroups, [ed_conns], [mat_ids], ['3_2']
return mesh_out
else:
raise ValueError('no outline edges found (eps=%e)!' % eps)
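# A hypothetical usage sketch (not part of the original script): extracting and
# merging the outline edges of a mesh file; 'cylinder.vtk' is an assumed input.
#
#     mesh = Mesh.from_file('cylinder.vtk')
#     coors, ngroups, conns, mat_ids, descs = merge_lines(extract_edges(mesh))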
helps = {
'eps': 'tolerance parameter of the edge search algorithm (default: 1e-12)',
'filename-out': 'name of output file',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--eps', action='store', dest='eps',
default=1e-12, help=helps['eps'])
parser.add_argument('-o', '--filename-out',
action='store', dest='filename_out',
default=None, help=helps['filename-out'])
parser.add_argument('filename')
options = parser.parse_args()
filename = options.filename
mesh = Mesh.from_file(filename)
mesh_out = extract_edges(mesh, eps=float(options.eps))
mesh_out = merge_lines(mesh_out)
filename_out = options.filename_out
if filename_out is None:
filename_out = edit_filename(filename, prefix='edge_', new_ext='.vtk')
output('Outline mesh - vertices: %d, edges: %d, output filename: %s'
% (mesh_out[0].shape[0], mesh_out[2][0].shape[0], filename_out))
# hack to write '3_2' elements - edges
io = VTKMeshIO(None)
aux_mesh = Struct()
aux_mesh._get_io_data = lambda: mesh_out
aux_mesh.n_el = mesh_out[2][0].shape[0]
io.write(filename_out, aux_mesh)
if __name__ == '__main__':
main()
|
the-stack_0_13907 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from jacket.storage.i18n import _LW
from jacket.storage.scheduler.evaluator import evaluator
from jacket.storage.scheduler import filters
LOG = logging.getLogger(__name__)
class DriverFilter(filters.BaseHostFilter):
"""DriverFilter filters hosts based on a 'filter function' and metrics.
DriverFilter filters based on volume host's provided 'filter function'
and metrics.
"""
def host_passes(self, host_state, filter_properties):
"""Determines whether a host has a passing filter_function or not."""
stats = self._generate_stats(host_state, filter_properties)
LOG.debug("Checking host '%s'", stats['host_stats']['host'])
result = self._check_filter_function(stats)
LOG.debug("Result: %s", result)
LOG.debug("Done checking host '%s'", stats['host_stats']['host'])
return result
def _check_filter_function(self, stats):
"""Checks if a volume passes a host's filter function.
        Returns True if the volume passes the host's filter function, or if no
        filter function is set; False otherwise.
"""
if stats['filter_function'] is None:
LOG.debug("Filter function not set :: passing host")
return True
try:
filter_result = self._run_evaluator(stats['filter_function'],
stats)
except Exception as ex:
# Warn the admin for now that there is an error in the
# filter function.
LOG.warning(_LW("Error in filtering function "
"'%(function)s' : '%(error)s' :: failing host"),
{'function': stats['filter_function'],
'error': ex, })
return False
return filter_result
def _run_evaluator(self, func, stats):
"""Evaluates a given function using the provided available stats."""
host_stats = stats['host_stats']
host_caps = stats['host_caps']
extra_specs = stats['extra_specs']
qos_specs = stats['qos_specs']
volume_stats = stats['volume_stats']
result = evaluator.evaluate(
func,
extra=extra_specs,
stats=host_stats,
capabilities=host_caps,
volume=volume_stats,
qos=qos_specs)
return result
def _generate_stats(self, host_state, filter_properties):
"""Generates statistics from host and volume data."""
host_stats = {
'host': host_state.host,
'volume_backend_name': host_state.volume_backend_name,
'vendor_name': host_state.vendor_name,
'driver_version': host_state.driver_version,
'storage_protocol': host_state.storage_protocol,
'QoS_support': host_state.QoS_support,
'total_capacity_gb': host_state.total_capacity_gb,
'allocated_capacity_gb': host_state.allocated_capacity_gb,
'free_capacity_gb': host_state.free_capacity_gb,
'reserved_percentage': host_state.reserved_percentage,
'updated': host_state.updated,
}
host_caps = host_state.capabilities
filter_function = None
if ('filter_function' in host_caps and
host_caps['filter_function'] is not None):
filter_function = six.text_type(host_caps['filter_function'])
qos_specs = filter_properties.get('qos_specs', {})
volume_type = filter_properties.get('volume_type', {})
extra_specs = volume_type.get('extra_specs', {})
request_spec = filter_properties.get('request_spec', {})
volume_stats = request_spec.get('volume_properties', {})
stats = {
'host_stats': host_stats,
'host_caps': host_caps,
'extra_specs': extra_specs,
'qos_specs': qos_specs,
'volume_stats': volume_stats,
'volume_type': volume_type,
'filter_function': filter_function,
}
return stats
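# Illustrative filter_function strings (a sketch; the concrete capability and
# volume property names are assumptions and depend on the backend). The names
# usable inside the expression are exactly the keyword arguments passed to
# evaluator.evaluate() in _run_evaluator() above:
#
#   stats.*        -> host_stats        e.g. "stats.free_capacity_gb > 100"
#   capabilities.* -> host_caps         e.g. "capabilities.thin_provisioning_support"
#   volume.*       -> volume_stats      e.g. "volume.size < 10"
#   extra.*        -> extra_specs
#   qos.*          -> qos_specs
#
# A backend advertises such a string through its 'filter_function' capability,
# which _generate_stats() picks up from host_state.capabilities.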
|
the-stack_0_13909 | import praw
import os
import sys
from datetime import date
import shutil
import requests
import mimetypes
import logging
import pprint
from redvid import Downloader
logger = logging.getLogger(__name__)
class SubredditScraper():
def __init__(self, subreddit, output, batch_size=10):
mimetypes.init()
self.subreddit = subreddit
self.batch_size = batch_size
self.output = output
def scrape(self):
for submission in self.subreddit.new(limit=self.batch_size):
self.process_submission(submission, self.output)
def download_media(self, media_path, media_metadata):
for media_id, item in media_metadata.items():
if item.get("e") == "Image":
image_url = item.get("s").get("u")
filename = os.path.join(media_path, media_id + mimetypes.guess_extension(item.get("m"), strict=False))
if os.path.exists(filename):
continue
r = requests.get(image_url, stream = True)
if r.status_code == 200:
r.raw.decode_content = True
with open(filename,'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
logger.warn(f"unhandled media type in media_metadata: {item.get('e')}")
def download_image(self, media_path, url):
filename = os.path.join(media_path, url.split("/")[-1])
if os.path.exists(filename):
return
r = requests.get(url, stream = True)
if r.status_code == 200:
r.raw.decode_content = True
with open(filename,'wb') as f:
shutil.copyfileobj(r.raw, f)
def download_gifv(self, media_path, url):
src_file_name = url.split("/")[-1]
file_id = os.path.splitext(src_file_name)[0]
download_url = f"https://imgur.com/download/{file_id}"
filename = os.path.join(media_path, file_id+".mp4")
if os.path.exists(filename):
return
r = requests.get(download_url, stream = True)
if r.status_code == 200:
r.raw.decode_content = True
with open(filename,'wb') as f:
shutil.copyfileobj(r.raw, f)
def download_video(self, media_path, media, url):
for media_type, item in media.items():
if media_type == "reddit_video":
if item.get("transcoding_status") != "completed":
continue
Downloader(url=url, path = os.path.abspath(media_path), max_q=True).download()
else:
logger.warn(f"unhandled media type in media_metadata: {media_type}")
def process_submission(self, submission, scraping_path):
submission_path = os.path.join(scraping_path, submission.subreddit.display_name)
if not os.path.exists(submission_path):
os.mkdir(submission_path)
submission_path = os.path.join(submission_path, submission.id)
if not os.path.exists(submission_path):
logger.debug(f"new submission {submission.id} found: {submission.title}")
os.mkdir(submission_path)
# if submission.media:
submission_media_path = os.path.join(submission_path, "media")
if hasattr(submission,"media_metadata"):
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_media(submission_media_path, submission.media_metadata)
elif submission.is_video:
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_video(submission_media_path, submission.media,submission.url)
elif submission.url.endswith(".gifv"):
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_gifv(submission_media_path, submission.url)
elif submission.url.endswith(".jpg") or submission.url.endswith(".png") or submission.url.endswith(".jpeg"):
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_image(submission_media_path, submission.url)
else:
logger.warn(f"could not process {submission.permlink}")
|
the-stack_0_13915 | #----------------------------------------WEATHER APPLICATION----------------------------------------
import tkinter as tk
import requests
from tkinter import font
#--------------------------------------FUNCTION FOR DISPLAYING THE WEATHER CONDITIONS------------------------
def get_result(weather):
try:
a = weather['name']
b = weather['weather'][0]['description']
c = weather['main']['temp']
feels_like = weather['main']['feels_like']
temp_min = weather['main']['temp_min']
temp_max = weather['main']['temp_max']
pressure = weather['main']['pressure']
humidity = weather['main']['humidity']
        final_str = (f'City = {a}\nConditions = {b}\nTemperature = {c}°C'
                     f'\nFeels like = {feels_like}°C'
                     f'\nTemperature min = {temp_min}°C'
                     f'\nTemperature max = {temp_max}°C'
                     f'\nPressure = {pressure}\nHumidity = {humidity}%')
except:
final_str = 'There was a problem in retrieving the information'
return final_str
#------------------------------------FUNCTION FOR GETTING THE REQUIRED INFORMATION---------------------------------
def get_weather(enter):
weather_key = '2140004b122066c33d4c3361cd2ef42bFEW4252423'
url = 'https://api.openweathermap.org/data/2.5/weather'
pam = {'appid': weather_key, 'q': enter, 'units': 'Metric'}
response = requests.get(url, params=pam)
weather = response.json()
print(weather)
label['text'] = get_result(weather)
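# For reference, get_result() above reads only the following subset of the
# OpenWeatherMap response (the values shown are made-up examples):
#
#   {"name": "London",
#    "weather": [{"description": "light rain", ...}],
#    "main": {"temp": 12.3, "feels_like": 11.0, "temp_min": 10.0,
#             "temp_max": 14.1, "pressure": 1012, "humidity": 81}}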
#----------------------------------------------CODE FOR THE USER INTERFACE------------------------------------
rex = tk.Tk()
rex.title('Weather')
canvas = tk.Canvas(rex, height=500, width=700)
canvas.pack()
background_image = tk.PhotoImage(file='po.png')
background_place = tk.Label(rex, image=background_image)
background_place.place(relwidth=1, relheight=1)
frame = tk.Frame(rex, bg='#C0C3CC', bd=4)
frame.place(relx=0.12, rely=0.12, relheight=0.1, relwidth=0.75)
entry = tk.Entry(frame, font=('Bahnschrift', 12))
entry.place(relx=0, rely=0, relheight=1, relwidth=0.65)
button = tk.Button(frame, text='Get Weather', font=('Bahnschrift', 12), command=lambda: get_weather(entry.get()))
button.place(relx=0.68, rely=0, relheight=1, relwidth=0.3)
down_frame = tk.Frame(rex, bg='#6BC1FF', bd=5, relief='groove')
down_frame.place(relx=0.12, rely=0.3, relheight=0.6, relwidth=0.75)
label = tk.Label(down_frame, font=('Courier', 12))
label.place(relx=0, rely=0, relheight=1, relwidth=1)
rex.mainloop() |
the-stack_0_13916 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from ansible import constants as C
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator']
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_dep_chain = None
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
def _run_state_to_string(n):
states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
try:
return states[n]
except IndexError:
return "UNKNOWN STATE"
def _failed_state_to_string(n):
states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
if n == 0:
return "FAILED_NONE"
else:
ret = []
for i in (1, 2, 4, 8):
if n & i:
ret.append(states[i])
return "|".join(ret)
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
"rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
_run_state_to_string(self.run_state),
_failed_state_to_string(self.fail_state),
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.cur_dep_chain is not None:
new_state.cur_dep_chain = self.cur_dep_chain[:]
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4
# the failure states for the play iteration, which are powers
# of 2 as they may be or'ed together in certain circumstances
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
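    # For example, a host that fails during its tasks portion and then also
    # fails inside the rescue portion ends up with
    #     fail_state == FAILED_TASKS | FAILED_RESCUE   # == 6
    # and individual flags are tested with a bitwise AND, e.g.
    #     state.fail_state & self.FAILED_RESCUE != 0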
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
# Default options to gather
gather_subset = self._play.gather_subset
gather_timeout = self._play.gather_timeout
fact_path = self._play.fact_path
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {
'gather_subset': gather_subset,
}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
if gather_timeout:
setup_task.args['gather_timeout'] = gather_timeout
if fact_path:
setup_task.args['fact_path'] = fact_path
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(all_vars)
self._blocks.append(setup_block)
for block in self._play.compile():
new_block = block.filter_tagged_tasks(all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
self.batch_size = len(batch)
for host in batch:
self._host_states[host.name] = HostState(blocks=self._blocks)
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == self.ITERATING_COMPLETE:
break
if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
else:
self.get_next_task_for_host(host)
# finally, reset the host's state to ITERATING_SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = self.ITERATING_SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self._host_states[host.name] = HostState(blocks=[])
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
# now a noop, we've changed the way we do caching and finding of
# original task entries, but just in case any 3rd party strategies
# are using this we're leaving it here for now
return
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
if not peek:
self._host_states[host.name] = s
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host, peek, in_child=False):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = self.ITERATING_COMPLETE
return (state, None)
if state.run_state == self.ITERATING_SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through ITERATING_SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through ITERATING_SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to ITERATING_TASKS
state.pending_setup = False
state.run_state = self.ITERATING_TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
elif state.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = self.ITERATING_RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = self.ITERATING_ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block):
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = self.ITERATING_TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == self.ITERATING_RESCUE:
# The process here is identical to ITERATING_TASKS, except instead
# we move into the always portion of the block.
if host.name in self._play._removed_hosts:
self._play._removed_hosts.remove(host.name)
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
state.run_state = self.ITERATING_ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = self.FAILED_NONE
state.run_state = self.ITERATING_ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block):
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == self.ITERATING_ALWAYS:
# And again, the process here is identical to ITERATING_TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to ITERATING_COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != self.FAILED_NONE:
state.run_state = self.ITERATING_COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = self.ITERATING_TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
# we're advancing blocks, so if this was an end-of-role block we
# mark the current role complete
if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
block._role._completed[host.name] = True
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_always_task += 1
elif state.run_state == self.ITERATING_COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == self.ITERATING_SETUP:
state.fail_state |= self.FAILED_SETUP
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= self.FAILED_TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = self.ITERATING_RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= self.FAILED_RESCUE
if state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= self.FAILED_ALWAYS
state.run_state = self.ITERATING_COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self._host_states[host.name] = s
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != self.FAILED_NONE:
if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
return False
elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
return False
else:
return not (state.did_rescue and state.fail_state & self.FAILED_ALWAYS == 0)
elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = state._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def get_active_state(self, state):
'''
Finds the active state, recursively if necessary when there are child states.
'''
if state.run_state == self.ITERATING_TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == self.ITERATING_RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == self.ITERATING_ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def is_any_block_rescuing(self, state):
'''
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
'''
if state.run_state == self.ITERATING_RESCUE:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
return False
def get_original_task(self, host, task):
# now a noop because we've changed the way we do caching
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
return state
if state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
return state
def add_tasks(self, host, task_list):
self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
|
the-stack_0_13917 | # -*- coding: utf-8 -*-
import copy
import logging
import warnings
from ruamel.yaml import YAML
from great_expectations.data_context.util import (
instantiate_class_from_config,
load_class,
verify_dynamic_loading_support,
)
from great_expectations.exceptions import ClassInstantiationError
from great_expectations.types import ClassConfig
logger = logging.getLogger(__name__)
yaml = YAML()
yaml.default_flow_style = False
class Datasource(object):
"""
A Datasource connects to a compute environment and one or more storage environments and produces batches of data
that Great Expectations can validate in that compute environment.
Each Datasource provides Batches connected to a specific compute environment, such as a
SQL database, a Spark cluster, or a local in-memory Pandas DataFrame.
Datasources use Batch Kwargs to specify instructions for how to access data from
relevant sources such as an existing object from a DAG runner, a SQL database, S3 bucket, or local filesystem.
To bridge the gap between those worlds, Datasources interact closely with *generators* which
are aware of a source of data and can produce produce identifying information, called
"batch_kwargs" that datasources can use to get individual batches of data. They add flexibility
in how to obtain data such as with time-based partitioning, downsampling, or other techniques
appropriate for the datasource.
For example, a batch kwargs generator could produce a SQL query that logically represents "rows in the Events
table with a timestamp on February 7, 2012," which a SqlAlchemyDatasource could use to materialize
a SqlAlchemyDataset corresponding to that batch of data and ready for validation.
Since opinionated DAG managers such as airflow, dbt, prefect.io, dagster can also act as datasources
and/or batch kwargs generators for a more generic datasource.
When adding custom expectations by subclassing an existing DataAsset type, use the data_asset_type parameter
to configure the datasource to load and return DataAssets of the custom type.
--ge-feature-maturity-info--
id: datasource_s3
title: Datasource - S3
icon:
short_description: S3
description: Support for connecting to Amazon Web Services S3 as an external datasource.
how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/configuring_datasources/how_to_configure_a_pandas_s3_datasource.html
maturity: Production
maturity_details:
api_stability: medium
implementation_completeness: Complete
unit_test_coverage:: Complete
integration_infrastructure_test_coverage: None
documentation_completeness: Minimal/Spotty
bug_risk: Low
id: datasource_filesystem
title: Datasource - Filesystem
icon:
short_description: File-based datsource
description: Support for using a mounted filesystem as an external datasource.
how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/configuring_datasources/how_to_configure_a_pandas_filesystem_datasource.html
maturity: Production
maturity_details:
api_stability: Medium
implementation_completeness: Complete
unit_test_coverage: Complete
integration_infrastructure_test_coverage: Partial
documentation_completeness: Partial
bug_risk: Low (Moderate for Windows users because of path issues)
id: datasource_gcs
title: Datasource - GCS
icon:
short_description: GCS
description: Support for Google Cloud Storage as an external datasource
how_to_guide_url:
maturity: Experimental
maturity_details:
api_stability: Medium (supported via native ‘gs://' syntax in Pandas and Pyspark; medium because we expect configuration to evolve)
implementation_completeness: Medium (works via passthrough, not via CLI)
unit_test_coverage: Minimal
integration_infrastructure_test_coverage: Minimal
documentation_completeness: Minimal
bug_risk: Moderate
id: datasource_azure_blob_storage
title: Datasource - Azure Blob Storage
icon:
short_description: Azure Blob Storage
description: Support for Microsoft Azure Blob Storage as an external datasource
how_to_guide_url:
maturity: In Roadmap (Sub-Experimental - "Not Impossible")
maturity_details:
api_stability: N/A (Supported on Databricks Spark via ‘wasb://' / ‘wasps://' url; requires local download first for Pandas)
implementation_completeness: Minimal
unit_test_coverage: N/A
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal
bug_risk: Unknown
--ge-feature-maturity-info--
"""
recognized_batch_parameters = {"limit"}
@classmethod
def from_configuration(cls, **kwargs):
"""
Build a new datasource from a configuration dictionary.
Args:
**kwargs: configuration key-value pairs
Returns:
datasource (Datasource): the newly-created datasource
"""
return cls(**kwargs)
@classmethod
def build_configuration(
cls,
class_name,
module_name="great_expectations.datasource",
data_asset_type=None,
batch_kwargs_generators=None,
**kwargs
):
"""
Build a full configuration object for a datasource, potentially including batch kwargs generators with defaults.
Args:
class_name: The name of the class for which to build the config
module_name: The name of the module in which the datasource class is located
data_asset_type: A ClassConfig dictionary
batch_kwargs_generators: BatchKwargGenerators configuration dictionary
**kwargs: Additional kwargs to be part of the datasource constructor's initialization
Returns:
A complete datasource configuration.
"""
verify_dynamic_loading_support(module_name=module_name)
class_ = load_class(class_name=class_name, module_name=module_name)
configuration = class_.build_configuration(
data_asset_type=data_asset_type,
batch_kwargs_generators=batch_kwargs_generators,
**kwargs
)
return configuration
def __init__(
self,
name,
data_context=None,
data_asset_type=None,
batch_kwargs_generators=None,
**kwargs
):
"""
Build a new datasource.
Args:
name: the name for the datasource
data_context: data context to which to connect
data_asset_type (ClassConfig): the type of DataAsset to produce
batch_kwargs_generators: BatchKwargGenerators to add to the datasource
"""
self._data_context = data_context
self._name = name
if isinstance(data_asset_type, str):
warnings.warn(
"String-only configuration for data_asset_type is deprecated. Use module_name and class_name instead.",
DeprecationWarning,
)
self._data_asset_type = data_asset_type
self._datasource_config = kwargs
self._batch_kwargs_generators = {}
self._datasource_config["data_asset_type"] = data_asset_type
if batch_kwargs_generators is not None:
self._datasource_config["batch_kwargs_generators"] = batch_kwargs_generators
@property
def name(self):
"""
Property for datasource name
"""
return self._name
@property
def config(self):
return copy.deepcopy(self._datasource_config)
@property
def data_context(self):
"""
Property for attached DataContext
"""
return self._data_context
def _build_generators(self):
"""
Build batch kwargs generator objects from the datasource configuration.
Returns:
None
"""
try:
for generator in self._datasource_config["batch_kwargs_generators"].keys():
self.get_batch_kwargs_generator(generator)
except KeyError:
pass
def add_batch_kwargs_generator(self, name, class_name, **kwargs):
"""Add a BatchKwargGenerator to the datasource.
Args:
name (str): the name of the new BatchKwargGenerator to add
class_name: class of the BatchKwargGenerator to add
kwargs: additional keyword arguments will be passed directly to the new BatchKwargGenerator's constructor
Returns:
BatchKwargGenerator (BatchKwargGenerator)
"""
kwargs["class_name"] = class_name
generator = self._build_batch_kwargs_generator(**kwargs)
if "batch_kwargs_generators" not in self._datasource_config:
self._datasource_config["batch_kwargs_generators"] = dict()
self._datasource_config["batch_kwargs_generators"][name] = kwargs
return generator
def _build_batch_kwargs_generator(self, **kwargs):
"""Build a BatchKwargGenerator using the provided configuration and return the newly-built generator."""
generator = instantiate_class_from_config(
config=kwargs,
runtime_environment={"datasource": self},
config_defaults={
"module_name": "great_expectations.datasource.batch_kwargs_generator"
},
)
if not generator:
raise ClassInstantiationError(
module_name="great_expectations.datasource.batch_kwargs_generator",
package_name=None,
class_name=kwargs["class_name"],
)
return generator
def get_batch_kwargs_generator(self, name):
"""Get the (named) BatchKwargGenerator from a datasource)
Args:
name (str): name of BatchKwargGenerator (default value is 'default')
Returns:
BatchKwargGenerator (BatchKwargGenerator)
"""
if name in self._batch_kwargs_generators:
return self._batch_kwargs_generators[name]
elif (
"batch_kwargs_generators" in self._datasource_config
and name in self._datasource_config["batch_kwargs_generators"]
):
generator_config = copy.deepcopy(
self._datasource_config["batch_kwargs_generators"][name]
)
else:
raise ValueError(
"Unable to load batch kwargs generator %s -- no configuration found or invalid configuration."
% name
)
generator = self._build_batch_kwargs_generator(**generator_config)
self._batch_kwargs_generators[name] = generator
return generator
def list_batch_kwargs_generators(self):
"""List currently-configured BatchKwargGenerator for this datasource.
Returns:
List(dict): each dictionary includes "name" and "type" keys
"""
generators = []
if "batch_kwargs_generators" in self._datasource_config:
for key, value in self._datasource_config[
"batch_kwargs_generators"
].items():
generators.append({"name": key, "class_name": value["class_name"]})
return generators
def process_batch_parameters(self, limit=None, dataset_options=None):
"""Use datasource-specific configuration to translate any batch parameters into batch kwargs at the datasource
level.
Args:
limit (int): a parameter all datasources must accept to allow limiting a batch to a smaller number of rows.
dataset_options (dict): a set of kwargs that will be passed to the constructor of a dataset built using
these batch_kwargs
Returns:
batch_kwargs: Result will include both parameters passed via argument and configured parameters.
"""
batch_kwargs = self._datasource_config.get("batch_kwargs", {})
if limit is not None:
batch_kwargs["limit"] = limit
if dataset_options is not None:
# Then update with any locally-specified reader options
if not batch_kwargs.get("dataset_options"):
batch_kwargs["dataset_options"] = dict()
batch_kwargs["dataset_options"].update(dataset_options)
return batch_kwargs
def get_batch(self, batch_kwargs, batch_parameters=None):
"""Get a batch of data from the datasource.
Args:
batch_kwargs: the BatchKwargs to use to construct the batch
batch_parameters: optional parameters to store as the reference description of the batch. They should
reflect parameters that would provide the passed BatchKwargs.
Returns:
Batch
"""
raise NotImplementedError
def get_available_data_asset_names(self, batch_kwargs_generator_names=None):
"""
Returns a dictionary of data_asset_names that the specified batch kwarg
generator can provide. Note that some batch kwargs generators may not be
capable of describing specific named data assets, and some (such as
filesystem glob batch kwargs generators) require the user to configure
data asset names.
Args:
batch_kwargs_generator_names: the BatchKwargGenerator for which to get available data asset names.
Returns:
dictionary consisting of sets of generator assets available for the specified generators:
::
{
generator_name: {
names: [ (data_asset_1, data_asset_1_type), (data_asset_2, data_asset_2_type) ... ]
}
...
}
"""
available_data_asset_names = {}
if batch_kwargs_generator_names is None:
batch_kwargs_generator_names = [
generator["name"] for generator in self.list_batch_kwargs_generators()
]
elif isinstance(batch_kwargs_generator_names, str):
batch_kwargs_generator_names = [batch_kwargs_generator_names]
for generator_name in batch_kwargs_generator_names:
generator = self.get_batch_kwargs_generator(generator_name)
available_data_asset_names[
generator_name
] = generator.get_available_data_asset_names()
return available_data_asset_names
def build_batch_kwargs(
self, batch_kwargs_generator, data_asset_name=None, partition_id=None, **kwargs
):
if kwargs.get("name"):
if data_asset_name:
raise ValueError(
"Cannot provide both 'name' and 'data_asset_name'. Please use 'data_asset_name' only."
)
warnings.warn(
"name is being deprecated as a batch_parameter. Please use data_asset_name instead.",
DeprecationWarning,
)
data_asset_name = kwargs.pop("name")
generator_obj = self.get_batch_kwargs_generator(batch_kwargs_generator)
if partition_id is not None:
kwargs["partition_id"] = partition_id
return generator_obj.build_batch_kwargs(
data_asset_name=data_asset_name, **kwargs
)
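# Illustrative configuration (a sketch, not part of the original module; the
# concrete class names below are assumptions about a pandas-backed setup):
#
#   config = Datasource.build_configuration(
#       class_name="PandasDatasource",
#       module_name="great_expectations.datasource",
#       batch_kwargs_generators={
#           "subdir_reader": {
#               "class_name": "SubdirReaderBatchKwargsGenerator",
#               "base_directory": "../data",
#           },
#       },
#   )
#
# The resulting dictionary is what a DataContext would store for the
# datasource and later feed back through from_configuration().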
|
the-stack_0_13918 | # Advent of Code 2020
#
# From https://adventofcode.com/2020/day/10
#
from collections import Counter
from math import prod
import networkx as nx
import numpy as np
adapters = np.sort(np.array(list(map(int, [row.strip() for row in open('../inputs/Advent2020_10.txt', 'r')]))))
adapters = np.insert(adapters, 0, 0., axis=0)
adapters = np.append(adapters, adapters[-1] + 3)
differences = adapters[1:] - adapters[:-1]
counts = Counter(differences)
print(f"AoC 2020 Day 10, Part 1 answer is {counts[1] * counts[3]}")
graphs = []
new = True
for ix, adapter in enumerate(adapters[:-1]):
if new:
graphs.append(nx.DiGraph())
for x in range(1, 4):
if ix + x > len(adapters) - 1 or adapters[ix + x] - adapter > 3:
break
graphs[-1].add_edge(adapter, adapters[ix + x])
new = x == 1 and adapters[ix + x] - adapter == 3
paths = []
for graph in graphs:
paths.append(len(list(nx.all_simple_paths(graph, min(graph.nodes), max(graph.nodes)))))
print(f"AoC 2020 Day 10, Part 1 answer is {prod(paths)}")
|
the-stack_0_13920 | # Copyright (c) 2014 Vlad Temian <[email protected]>
# Copyright (c) 2015-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2017 guillaume2 <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 Clément Pit-Claudel <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""JSON reporter"""
import json
import sys
from pylint.interfaces import IReporter
from pylint.reporters.base_reporter import BaseReporter
class JSONReporter(BaseReporter):
"""Report messages and layouts in JSON."""
__implements__ = IReporter
name = "json"
extension = "json"
def __init__(self, output=None):
BaseReporter.__init__(self, output or sys.stdout)
self.messages = []
def handle_message(self, msg):
"""Manage message of different type and in the context of path."""
self.messages.append(
{
"type": msg.category,
"module": msg.module,
"obj": msg.obj,
"line": msg.line,
"column": msg.column,
"path": msg.path,
"symbol": msg.symbol,
"message": msg.msg or "",
"message-id": msg.msg_id,
}
)
def display_messages(self, layout):
"""Launch layouts display"""
print(json.dumps(self.messages, indent=4), file=self.out)
def display_reports(self, layout):
"""Don't do anything in this reporter."""
def _display(self, layout):
"""Do nothing."""
def register(linter):
"""Register the reporter classes with the linter."""
linter.register_reporter(JSONReporter)
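# Each record appended by handle_message() and emitted by display_messages()
# has this shape (values here are illustrative):
#
#   {"type": "convention", "module": "mymod", "obj": "", "line": 1,
#    "column": 0, "path": "mymod.py", "symbol": "missing-module-docstring",
#    "message": "Missing module docstring", "message-id": "C0114"}
#
# The reporter is selected on the command line with:
#   pylint --output-format=json mymod.py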
|
the-stack_0_13921 | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2013-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run.py
# firstflamingo/treinenaapje
#
# Created by Berend Schotanus on 11-Jan-13.
#
import sys, unittest
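# SDK_PATH, CODE_PATH and TEST_PATH are not defined in this file and must be
# provided before main() runs. Illustrative placeholders (assumptions, adjust
# to the local checkout):
#
#   SDK_PATH = '/usr/local/google_appengine'   # the App Engine SDK
#   CODE_PATH = 'application'                  # the application sources
#   TEST_PATH = 'tests'                        # directory containing Test*.py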
def main():
if len(sys.argv) == 2:
moduleName = sys.argv[1]
else:
moduleName = '*'
pattern = 'Test' + moduleName + '.py'
sys.path.insert(0, SDK_PATH)
sys.path.insert(0, CODE_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(TEST_PATH, pattern=pattern)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
main()
|
the-stack_0_13922 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry responsible for built-in keras classes."""
import tensorflow as tf
# TODO(b/139939526): move to public API.
from tensorflow.python.keras.engine.base_layer import TensorFlowOpLayer
from tensorflow_model_optimization.python.core.sparsity.keras import prunable_layer
layers = tf.keras.layers
class PruneRegistry(object):
"""Registry responsible for built-in keras layers."""
# The keys represent built-in keras layers and the values represent the
# the variables within the layers which hold the kernel weights. This
# allows the wrapper to access and modify the weights.
_LAYERS_WEIGHTS_MAP = {
layers.ELU: [],
layers.LeakyReLU: [],
layers.ReLU: [],
layers.Softmax: [],
layers.ThresholdedReLU: [],
layers.Conv1D: ['kernel'],
layers.Conv2D: ['kernel'],
layers.Conv2DTranspose: ['kernel'],
layers.Conv3D: ['kernel'],
layers.Conv3DTranspose: ['kernel'],
layers.Cropping1D: [],
layers.Cropping2D: [],
layers.Cropping3D: [],
layers.DepthwiseConv2D: [],
layers.SeparableConv1D: ['pointwise_kernel'],
layers.SeparableConv2D: ['pointwise_kernel'],
layers.UpSampling1D: [],
layers.UpSampling2D: [],
layers.UpSampling3D: [],
layers.ZeroPadding1D: [],
layers.ZeroPadding2D: [],
layers.ZeroPadding3D: [],
layers.Activation: [],
layers.ActivityRegularization: [],
layers.Dense: ['kernel'],
layers.Dropout: [],
layers.Flatten: [],
layers.Lambda: [],
layers.Masking: [],
layers.Permute: [],
layers.RepeatVector: [],
layers.Reshape: [],
layers.SpatialDropout1D: [],
layers.SpatialDropout2D: [],
layers.SpatialDropout3D: [],
layers.Embedding: ['embeddings'],
layers.LocallyConnected1D: ['kernel'],
layers.LocallyConnected2D: ['kernel'],
layers.Add: [],
layers.Average: [],
layers.Concatenate: [],
layers.Dot: [],
layers.Maximum: [],
layers.Minimum: [],
layers.Multiply: [],
layers.Subtract: [],
layers.AlphaDropout: [],
layers.GaussianDropout: [],
layers.GaussianNoise: [],
layers.BatchNormalization: [],
layers.LayerNormalization: [],
layers.AveragePooling1D: [],
layers.AveragePooling2D: [],
layers.AveragePooling3D: [],
layers.GlobalAveragePooling1D: [],
layers.GlobalAveragePooling2D: [],
layers.GlobalAveragePooling3D: [],
layers.GlobalMaxPooling1D: [],
layers.GlobalMaxPooling2D: [],
layers.GlobalMaxPooling3D: [],
layers.MaxPooling1D: [],
layers.MaxPooling2D: [],
layers.MaxPooling3D: [],
TensorFlowOpLayer: [],
}
_RNN_CELLS_WEIGHTS_MAP = {
# Allowlist via compat.v1 and compat.v2 to support legacy TensorFlow 2.X
# behavior where the v2 RNN uses the v1 RNNCell instead of the v2 RNNCell.
# See b/145939875 for details.
tf.compat.v1.keras.layers.GRUCell: ['kernel', 'recurrent_kernel'],
tf.compat.v2.keras.layers.GRUCell: ['kernel', 'recurrent_kernel'],
tf.compat.v1.keras.layers.LSTMCell: ['kernel', 'recurrent_kernel'],
tf.compat.v2.keras.layers.LSTMCell: ['kernel', 'recurrent_kernel'],
tf.compat.v1.keras.experimental.PeepholeLSTMCell: [
'kernel', 'recurrent_kernel'
],
tf.compat.v2.keras.experimental.PeepholeLSTMCell: [
'kernel', 'recurrent_kernel'
],
tf.compat.v1.keras.layers.SimpleRNNCell: ['kernel', 'recurrent_kernel'],
tf.compat.v2.keras.layers.SimpleRNNCell: ['kernel', 'recurrent_kernel'],
}
_RNN_LAYERS = frozenset({
layers.GRU,
layers.LSTM,
layers.RNN,
layers.SimpleRNN,
})
  _RNN_CELLS_STR = ', '.join(str(c) for c in _RNN_CELLS_WEIGHTS_MAP.keys())
_RNN_CELL_ERROR_MSG = (
      'RNN Layer {} contains cell type {} which is either not supported or does '
'not inherit PrunableLayer. The cell must be one of {}, or implement '
'PrunableLayer.')
@classmethod
def supports(cls, layer):
"""Returns whether the registry supports this layer type.
Args:
layer: The layer to check for support.
Returns:
True/False whether the layer type is supported.
"""
if layer.__class__ in cls._LAYERS_WEIGHTS_MAP:
return True
if layer.__class__ in cls._RNN_LAYERS:
for cell in cls._get_rnn_cells(layer):
if cell.__class__ not in cls._RNN_CELLS_WEIGHTS_MAP \
and not isinstance(cell, prunable_layer.PrunableLayer):
return False
return True
return False
@staticmethod
def _get_rnn_cells(rnn_layer):
if isinstance(rnn_layer.cell, layers.StackedRNNCells):
return rnn_layer.cell.cells
else:
return [rnn_layer.cell]
@classmethod
def _is_rnn_layer(cls, layer):
return layer.__class__ in cls._RNN_LAYERS
@classmethod
def _weight_names(cls, layer):
return cls._LAYERS_WEIGHTS_MAP[layer.__class__]
@classmethod
def make_prunable(cls, layer):
"""Modifies a built-in layer object to support pruning.
Args:
layer: layer to modify for support.
Returns:
The modified layer object.
"""
if not cls.supports(layer):
raise ValueError('Layer ' + str(layer.__class__) + ' is not supported.')
def get_prunable_weights():
return [getattr(layer, weight) for weight in cls._weight_names(layer)]
def get_prunable_weights_rnn(): # pylint: disable=missing-docstring
def get_prunable_weights_rnn_cell(cell):
if cell.__class__ in cls._RNN_CELLS_WEIGHTS_MAP:
return [getattr(cell, weight)
for weight in cls._RNN_CELLS_WEIGHTS_MAP[cell.__class__]]
if isinstance(cell, prunable_layer.PrunableLayer):
return cell.get_prunable_weights()
raise ValueError(cls._RNN_CELL_ERROR_MSG.format(
layer.__class__, cell.__class__, cls._RNN_CELLS_WEIGHTS_MAP.keys()))
prunable_weights = []
for rnn_cell in cls._get_rnn_cells(layer):
prunable_weights.extend(get_prunable_weights_rnn_cell(rnn_cell))
return prunable_weights
if cls._is_rnn_layer(layer):
layer.get_prunable_weights = get_prunable_weights_rnn
else:
layer.get_prunable_weights = get_prunable_weights
return layer
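if __name__ == '__main__':
  # Minimal sketch (not part of the original module) of how the registry is
  # used by the pruning wrapper: check support, then patch the layer with a
  # get_prunable_weights() method.
  dense = layers.Dense(4)
  dense.build(input_shape=(None, 8))  # build the layer so `kernel` exists
  assert PruneRegistry.supports(dense)
  PruneRegistry.make_prunable(dense)
  print([w.name for w in dense.get_prunable_weights()])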
|
the-stack_0_13923 | def create_lr_scheduler(optimizer, config, max_epochs, num_training_instances):
if 'lr-scheduler' not in config:
return MyNoneScheduler(optimizer)
elif config['lr-scheduler']['type'] == 'linear-decay':
return MyLinearDecayScheduler(optimizer, config['lr-scheduler'], max_epochs, num_training_instances)
else:
raise BaseException("no such scheduler:", config['lr-scheduler']['type'])
class MyLinearDecayScheduler:
def __init__(self, optimizer, config, num_epoch, steps_per_epoch=1):
self.optimizer = optimizer
self.lrate0 = config['lrate0']
self.gamma = config['gamma']
self.t0 = config['t0'] * steps_per_epoch
self.t1 = config['t1'] * steps_per_epoch
self.t = 1
self.lrate = 0
def step(self):
self.t += 1
if self.t <= self.t0:
self.lrate = self.lrate0
elif self.t <= self.t1:
fraction = (self.t - self.t0) / (self.t1 - self.t0)
self.lrate = self.lrate0 * (self.gamma * fraction + 1.0 * (1 - fraction))
for group in self.optimizer.param_groups:
group['lr'] = self.lrate
return self.lrate
class MyNoneScheduler:
def __init__(self, optimizer):
self.optimizer = optimizer
def step(self):
for group in self.optimizer.param_groups:
return group['lr']
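if __name__ == "__main__":
    # Minimal sketch (not part of the original module). Any optimizer exposing
    # `param_groups` works; torch is only an assumption used for illustration.
    import torch

    model = torch.nn.Linear(10, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    config = {'lr-scheduler': {'type': 'linear-decay',
                               'lrate0': 0.1, 'gamma': 0.01,
                               't0': 5, 't1': 20}}
    scheduler = create_lr_scheduler(optimizer, config,
                                    max_epochs=25, num_training_instances=100)
    for _ in range(3):
        print(scheduler.step())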
|
the-stack_0_13924 | from .base_dataset import Dataset
import numpy as np
import pandas as pd
import os.path
class CSVDataset(Dataset):
"""
CSVDataset class.
Provide access to the Boston Housing Prices dataset.
"""
def __init__(self, target_column, transform=None, mode="train", input_data=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# The name of the .csv dataset file should be the same as the name
# of the archive, but with a different extension.
if input_data is not None:
self.df = input_data
else:
name_prefix = self.dataset_zip_name[:self.dataset_zip_name.find('.')]
dataset_csv_name = name_prefix + '.csv'
data_path = os.path.join(self.root_path, dataset_csv_name)
self.df = pd.read_csv(data_path)
self.target_column = target_column
# split the dataset into train - val - test with the ratio 60 - 20 - 20
assert mode in ["train", "val", "test"], "wrong mode for dataset given"
train, val, test = np.split(self.df.sample(frac=1, random_state=0), [
int(.6 * len(self.df)), int(.8 * len(self.df))])
if mode == "train":
self.df = train
elif mode == "val":
self.df = val
elif mode == "test":
self.df = test
self.data = self.df.loc[:, self.df.columns != self.target_column]
self.targets = self.df[self.target_column]
self.transforms = transform if transform is not None else lambda x: x
        self.data.iloc[0, self.data.columns.get_loc('OverallQual')] = np.nan  # inject a missing value into the first row
def __len__(self):
return len(self.data)
def __getitem__(self, index):
"""
Create a dict of the data at the given index in your dataset.
The dict should have the following format:
{ "features" : <i-th row of the dataframe (except TARGET_COLUMN)>,
"label" : <value of TARGET_COLUMN for i-th row> }
"""
data_dict = {}
data_dict['features'] = self.data.iloc[index]
data_dict['target'] = self.targets.iloc[index]
return self.transforms(data_dict)
class FeatureSelectorAndNormalizationTransform:
"""
Select some numerical features and normalize them between 0 and 1.
"""
def __init__(self, column_stats, target_column):
"""
:param column_stats: a dictionary mapping the column name to the
relevant statistics for normalization (min and max on that column).
It should also include the statistics for the target column.
"""
self.column_stats = column_stats
self.target_column = target_column
def __call__(self, data_dict):
def normalize_column(old_value, column_name):
mn = self.column_stats[column_name]['min']
mx = self.column_stats[column_name]['max']
return (old_value - mn) / (mx - mn)
# For every feature column, normalize it if it's one of the columns
# we want to keep.
feature_columns = []
for column_idx in data_dict['features'].index:
if column_idx in self.column_stats and column_idx != self.target_column:
feature_columns.append(column_idx)
if np.isnan(data_dict['features'][column_idx]):
mean_col_val = self.column_stats[column_idx]['mean']
data_dict['features'][column_idx] = mean_col_val
old_value = data_dict['features'][column_idx]
normalized = normalize_column(old_value, column_idx)
data_dict['features'][column_idx] = normalized
# Drop the rest of the columns.
data_dict['features'] = data_dict['features'][feature_columns]
data_dict['features'] = data_dict['features'].values.astype(np.float32)
# Also normalize the target.
old_value = data_dict['target']
normalized = normalize_column(old_value, self.target_column)
data_dict['target'] = np.array([normalized])
return data_dict
class FeatureSelectorTransform:
"""
Select some numerical features and not normalize them, just return their old values.
This class is used for the binarized data to convert it to the correct format of CSVDataset object
so that it could be loaded by our dataloader
"""
def __init__(self, column_stats, target_column):
"""
:param column_stats: a dictionary mapping the column name to the
relevant statistics for normalization (min and max on that column).
It should also include the statistics for the target column.
"""
self.column_stats = column_stats
self.target_column = target_column
def __call__(self, data_dict):
# For every feature column, just keep it old values
feature_columns = []
for column_idx in data_dict['features'].index:
if column_idx in self.column_stats and column_idx != self.target_column:
feature_columns.append(column_idx)
if np.isnan(data_dict['features'][column_idx]):
mean_col_val = self.column_stats[column_idx]['mean']
data_dict['features'][column_idx] = mean_col_val
data_dict['features'] = data_dict['features'][feature_columns]
data_dict['features'] = data_dict['features'].values.astype(np.float32)
data_dict['target'] = np.array([data_dict['target']])
return data_dict
def get_exercise5_transform():
# dataloading and preprocessing steps as in ex04 2_logistic_regression.ipynb
target_column = 'SalePrice'
i2dl_exercises_path = os.path.dirname(os.path.abspath(os.getcwd()))
root_path = os.path.join(i2dl_exercises_path, "datasets", 'housing')
housing_file_path = os.path.join(root_path, "housing_train.csv")
download_url = 'https://cdn3.vision.in.tum.de/~dl4cv/housing_train.zip'
# Always make sure this line was run at least once before trying to
# access the data manually, as the data is downloaded in the
# constructor of CSVDataset.
train_dataset = CSVDataset(target_column=target_column, root=root_path, download_url=download_url, mode="train")
# For the data transformations, compute min, max and mean for each feature column. We perform the same transformation
# on the training, validation, and test data.
df = train_dataset.df
# Select only 2 features to keep plus the target column.
selected_columns = ['OverallQual', 'GrLivArea', target_column]
# selected_columns = ['GrLivArea', target_column]
mn, mx, mean = df.min(), df.max(), df.mean()
column_stats = {}
for column in selected_columns:
crt_col_stats = {'min': mn[column],
'max': mx[column],
'mean': mean[column]}
column_stats[column] = crt_col_stats
transform = FeatureSelectorAndNormalizationTransform(column_stats, target_column)
return transform |
the-stack_0_13927 | # coding: utf-8
from __future__ import unicode_literals
import re
import random
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class PornoVoisinesIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?pornovoisines\.com/showvideo/(?P<id>\d+)/(?P<display_id>[^/]+)'
_VIDEO_URL_TEMPLATE = 'http://stream%d.pornovoisines.com' \
'/static/media/video/transcoded/%s-640x360-1000-trscded.mp4'
_SERVER_NUMBERS = (1, 2)
_TEST = {
'url': 'http://www.pornovoisines.com/showvideo/1285/recherche-appartement/',
'md5': '5ac670803bc12e9e7f9f662ce64cf1d1',
'info_dict': {
'id': '1285',
'display_id': 'recherche-appartement',
'ext': 'mp4',
'title': 'Recherche appartement',
'description': 'md5:819ea0b785e2a04667a1a01cdc89594e',
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140925',
'duration': 120,
'view_count': int,
'average_rating': float,
'categories': ['Débutantes', 'Scénario', 'Sodomie'],
'age_limit': 18,
}
}
@classmethod
def build_video_url(cls, num):
return cls._VIDEO_URL_TEMPLATE % (random.choice(cls._SERVER_NUMBERS), num)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, video_id)
video_url = self.build_video_url(video_id)
title = self._html_search_regex(
r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
description = self._html_search_regex(
r'<article id="descriptif">(.+?)</article>',
webpage, 'description', fatal=False, flags=re.DOTALL)
thumbnail = self._search_regex(
r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,
webpage, 'thumbnail', fatal=False)
if thumbnail:
thumbnail = 'http://www.pornovoisines.com/%s' % thumbnail
upload_date = unified_strdate(self._search_regex(
r'Publié le ([\d-]+)', webpage, 'upload date', fatal=False))
duration = int_or_none(self._search_regex(
'Durée (\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'(\d+) vues', webpage, 'view count', fatal=False))
average_rating = self._search_regex(
r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
if average_rating:
average_rating = float_or_none(average_rating.replace(',', '.'))
categories = self._html_search_meta(
'keywords', webpage, 'categories', fatal=False)
if categories:
categories = [category.strip() for category in categories.split(',')]
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'average_rating': average_rating,
'categories': categories,
'age_limit': 18,
}
|
the-stack_0_13930 | # coding: utf-8
import sys
sys.path.append('..')
from Natural_Language_Processing.common.util import preprocess, create_co_matrix, most_similar
text = 'You say goodbye and I say hello.'
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)
most_similar('you', word_to_id, id_to_word, C, top=5)
|
the-stack_0_13931 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.camera_image = None
self.lights = []
self.waypoints_2d = None
self.waypoint_tree = None
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
        self.config = yaml.safe_load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
            x (float), y (float): coordinates of the position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
# closest_idx = None
# if self.waypoint_tree:
# closest_idx = self.waypoint_tree.query([x,y], 1)[1]
# return closest_idx
return self.waypoint_tree.query([x, y], 1)[1]
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# if(not self.has_image):
# self.prev_light_loc = None
# return False
# cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
#Get classification
# return self.light_classifier.get_classification(cv_image)
return light.state
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if self.pose and self.waypoints:
#car_position = self.get_closest_waypoint(self.pose.pose)
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
diff = len(self.waypoints.waypoints) # we have 8 intersections
for i, light in enumerate(self.lights):
# Get stop line waypoint index
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# Find closest stop line waypoint index
d = temp_wp_idx - car_wp_idx
if d >= 0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
|
the-stack_0_13935 | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.run_job_snapshot_target
import cohesity_management_sdk.models.run_now_parameters
class RunProtectionJobParam(object):
"""Implementation of the 'RunProtectionJobParam' model.
Specify the parameters to run a protection job.
Attributes:
copy_run_targets (list of RunJobSnapshotTarget): Optional parameter to
be set if you want specific replication or archival associated
with the policy to run.
run_now_parameters (list of RunNowParameters): Optional parameters of
a Run Now operation.
run_type (RunTypeRunProtectionJobParamEnum): Specifies the type of
backup. If not specified, 'kRegular' is assumed. 'kRegular'
indicates a incremental (CBT) backup. Incremental backups
utilizing CBT (if supported) are captured of the target protection
objects. The first run of a kRegular schedule captures all the
blocks. 'kFull' indicates a full (no CBT) backup. A complete
backup (all blocks) of the target protection objects are always
captured and Change Block Tracking (CBT) is not utilized. 'kLog'
indicates a Database Log backup. Capture the database transaction
logs to allow rolling back to a specific point in time. 'kSystem'
indicates a system backup. System backups are used to do bare
metal recovery of the system to a specific point in time.
source_ids (list of long|int): Optional parameter if you want to back
up only a subset of sources that are protected by the job in this
run. If a Run Now operation is to be performed then the source ids
should only be provided in the runNowParameters along with the
database Ids.
"""
# Create a mapping from Model property names to API property names
_names = {
"copy_run_targets":'copyRunTargets',
"run_now_parameters":'runNowParameters',
"run_type":'runType',
"source_ids":'sourceIds'
}
def __init__(self,
copy_run_targets=None,
run_now_parameters=None,
run_type=None,
source_ids=None):
"""Constructor for the RunProtectionJobParam class"""
# Initialize members of the class
self.copy_run_targets = copy_run_targets
self.run_now_parameters = run_now_parameters
self.run_type = run_type
self.source_ids = source_ids
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
copy_run_targets = None
if dictionary.get('copyRunTargets') != None:
copy_run_targets = list()
for structure in dictionary.get('copyRunTargets'):
copy_run_targets.append(cohesity_management_sdk.models.run_job_snapshot_target.RunJobSnapshotTarget.from_dictionary(structure))
run_now_parameters = None
if dictionary.get('runNowParameters') != None:
run_now_parameters = list()
for structure in dictionary.get('runNowParameters'):
run_now_parameters.append(cohesity_management_sdk.models.run_now_parameters.RunNowParameters.from_dictionary(structure))
run_type = dictionary.get('runType')
source_ids = dictionary.get('sourceIds')
# Return an object of this model
return cls(copy_run_targets,
run_now_parameters,
run_type,
source_ids)
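# --- Usage sketch (added for illustration): building the model from a plain
# dict as it would come back from the REST API. Field values are placeholders.
def _run_protection_job_param_demo():
    payload = {'runType': 'kRegular', 'sourceIds': [101, 102]}
    param = RunProtectionJobParam.from_dictionary(payload)
    return param.run_type, param.source_ids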
|
the-stack_0_13936 | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from locust import HttpUser, task, TaskSet, between
username = "username"
password = "password"
products = [
'0PUK6V6EV0',
'1YMWWN1N4O',
'2ZYFJ3GM2N',
'66VCHSJNUP',
'6E92ZMYYFZ',
'9SIQT8TOJO',
'L9ECAV7KIM',
'LS4PSXUNUM',
'OLJCESPC7Z']
#class UserBehavior(TaskSet):
#
# def on_start(self):
# index(self)
#
# tasks = {index: 1,
# setCurrency: 2,
# browseProduct: 10,
# addToCart: 2,
# viewCart: 3,
# checkout: 1}
class WebsiteUser(HttpUser):
wait_time = between(0.1,1)
#task_set = UserBehavior
#min_wait = 1000
#max_wait = 10000
@task(1)
def index(l):
l.client.get("/")
@task(2)
def setCurrency(l):
currencies = ['EUR', 'USD', 'JPY', 'CAD']
l.client.post("/setCurrency",
{'currency_code': random.choice(currencies)})
@task(10)
def browseProduct(l):
l.client.get("/product/" + random.choice(products))
@task(3)
def viewCart(l):
l.client.get("/cart")
@task(2)
def addToCart(l):
product = random.choice(products)
l.client.get("/product/" + product)
l.client.post("/cart", {
'product_id': product,
'quantity': random.choice([1,2,3,4,5,10])})
@task(1)
def checkout(l):
product = random.choice(products)
l.client.get("/product/" + product)
l.client.post("/cart", {
'product_id': product,
'quantity': random.choice([1,2,3,4,5,10])})
l.client.post("/cart/checkout", {
'email': '[email protected]',
'street_address': '1600 Amphitheatre Parkway',
'zip_code': '94043',
'city': 'Mountain View',
'state': 'CA',
'country': 'United States',
'credit_card_number': '4432-8015-6152-0454',
'credit_card_expiration_month': '1',
'credit_card_expiration_year': '2039',
'credit_card_cvv': '672',
})
|
the-stack_0_13937 | import pandas as pd
import numpy as np
# Define functions for model
def confirmed_to_onset(confirmed, p_delay, col_name='num_cases', min_onset_date=None):
min_onset_date = pd.to_datetime(min_onset_date)
# Reverse cases so that we convolve into the past
convolved = np.convolve(np.squeeze(confirmed.iloc[::-1].values), p_delay)
# Calculate the new date range
dr = pd.date_range(end=confirmed.index[-1],
periods=len(convolved))
# Flip the values and assign the date range
onset = pd.Series(np.flip(convolved), index=dr, name=col_name)
if min_onset_date:
onset = np.round(onset.loc[min_onset_date:])
else:
onset = np.round(onset.iloc[onset.values>=1])
onset.index.name = 'date'
return pd.DataFrame(onset)
# Smooths cases using a rolling window and gaussian sampling
def prepare_cases(daily_cases, col='num_cases', out_col=None, cutoff=0):
if not out_col:
out_col = 'smoothed_'+str(col)
daily_cases[out_col] = daily_cases[col].rolling(7,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2).round()
idx_start = np.searchsorted(daily_cases[out_col], cutoff)
daily_cases[out_col] = daily_cases[out_col].iloc[idx_start:]
return daily_cases
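# --- Usage sketch (added for illustration): synthetic daily counts and a
# made-up onset-to-confirmation delay distribution, purely to show the calling
# convention of the helpers above.
def _smoothing_demo():
    idx = pd.date_range('2020-03-01', periods=14, freq='D')
    daily = pd.DataFrame({'num_cases': np.arange(1, 15) * 3}, index=idx)
    daily.index.name = 'date'
    smoothed = prepare_cases(daily.copy(), col='num_cases')
    p_delay = np.array([0.1, 0.3, 0.3, 0.2, 0.1])  # placeholder delay pmf
    onset = confirmed_to_onset(daily[['num_cases']], p_delay)
    return smoothed, onset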
# Smooths cases using a rolling window and gaussian sampling
def smooth_1d(signal, col='num_cases', out_col=None, cutoff=0):
if not out_col:
out_col = 'smoothed_'+str(col)
signal[out_col] = signal[col].rolling(7,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2)
idx_start = np.searchsorted(signal[out_col], cutoff)
signal[out_col] = signal[out_col].iloc[idx_start:]
return signal |
the-stack_0_13939 | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.contexts.dummy
from __future__ import absolute_import
import unittest
from yardstick.benchmark.contexts import dummy
class DummyContextTestCase(unittest.TestCase):
def setUp(self):
self.test_context = dummy.DummyContext()
def test__get_server(self):
self.test_context.init(None)
self.test_context.deploy()
result = self.test_context._get_server(None)
self.assertEqual(result, None)
self.test_context.undeploy()
|
the-stack_0_13941 | """empty message
Revision ID: 5e78cc772642
Revises: ec21bd75ea92
Create Date: 2020-07-22 22:44:45.754328
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5e78cc772642'
down_revision = 'ec21bd75ea92'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('recipe',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('recipe_name', sa.Text(), nullable=True),
sa.Column('recipe_link', sa.Text(), nullable=True),
sa.Column('image_link', sa.Text(), nullable=True),
sa.Column('instructions', sa.Text(), nullable=True),
sa.Column('servings', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.drop_index('ix_users_email', table_name='users')
op.drop_index('ix_users_username', table_name='users')
op.drop_table('users')
op.drop_table('recipes')
op.drop_constraint(None, 'ingredients', type_='foreignkey')
op.create_foreign_key(None, 'ingredients', 'recipe', ['recipe_id'], ['id'])
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.create_foreign_key(None, 'saved_recipes', 'user', ['user_id'], ['id'])
op.create_foreign_key(None, 'saved_recipes', 'recipe', ['recipe_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.create_foreign_key(None, 'saved_recipes', 'users', ['user_id'], ['id'])
op.create_foreign_key(None, 'saved_recipes', 'recipes', ['recipe_id'], ['id'])
op.drop_constraint(None, 'ingredients', type_='foreignkey')
op.create_foreign_key(None, 'ingredients', 'recipes', ['recipe_id'], ['id'])
op.create_table('recipes',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('recipe_name', sa.TEXT(), nullable=True),
sa.Column('recipe_link', sa.TEXT(), nullable=True),
sa.Column('image_link', sa.TEXT(), nullable=True),
sa.Column('instructions', sa.TEXT(), nullable=True),
sa.Column('servings', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('username', sa.VARCHAR(length=64), nullable=True),
sa.Column('email', sa.VARCHAR(length=120), nullable=True),
sa.Column('password_hash', sa.VARCHAR(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_users_username', 'users', ['username'], unique=1)
op.create_index('ix_users_email', 'users', ['email'], unique=1)
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_table('recipe')
# ### end Alembic commands ###
|
the-stack_0_13942 | import numpy as np
def validate_points(points: np.array) -> np.array:
# If the user is tracking only a single point, reformat it slightly.
if points.shape == (2,):
points = points[np.newaxis, ...]
elif len(points.shape) == 1:
raise_detection_error(points)
else:
if points.shape[1] != 2 or len(points.shape) > 2:
raise_detection_error(points)
return points
def raise_detection_error(points):
message = (
f"Each `Detection` object should have a property `points` of shape (num_of_points_to_track, 2), not {points.shape}. "
"Check your `Detection` list creation code. "
"You can read the documentation for the `Detection` class here: "
"https://github.com/tryolabs/norfair/tree/master/docs#detection\n"
)
raise ValueError(message)
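# --- Usage sketch (added for illustration): a single (x, y) detection is
# promoted to the expected (num_points, 2) shape; well-formed arrays pass
# through unchanged.
def _validate_points_demo():
    single = validate_points(np.array([10.0, 20.0]))               # -> (1, 2)
    several = validate_points(np.array([[1.0, 2.0], [3.0, 4.0]]))  # -> (2, 2)
    return single.shape, several.shape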
|
the-stack_0_13943 | """Visualize learned representation."""
import os
import argparse
import importlib
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'large',
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'large',
'ytick.labelsize':'large'}
pylab.rcParams.update(params)
parser = argparse.ArgumentParser()
parser.add_argument('--log_base_dir', type=str,
default=os.path.join(os.getcwd(), 'log'))
parser.add_argument('--output_sub_dir', type=str,
default='learning_curves')
FLAGS = parser.parse_args()
def main():
# setup log directories
output_dir = os.path.join(FLAGS.log_base_dir, FLAGS.output_sub_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
envs = ['OneRoom', 'TwoRoom', 'HardMaze']
r_modes = ['sparse', 'mix', 'l2', 'rawmix']
colors = ['royalblue', 'darkorange', 'seagreen', 'tomato']
linestyles = ['--', '-', '-.', ':']
linewidth = 3
for env_id in envs:
loaded_results = {}
for r_mode in r_modes:
log_dir = os.path.join(
FLAGS.log_base_dir, 'dqn_repr', env_id, r_mode)
results_file = os.path.join(log_dir, 'results.csv')
results = np.loadtxt(results_file, delimiter=',')
loaded_results[r_mode] = results
# plot
handles = []
for r_mode, c, ls in zip(r_modes, colors, linestyles):
x = loaded_results[r_mode][:, 0]
y = loaded_results[r_mode][:, 1]
h, = plt.plot(x, y, color=c, linestyle=ls, linewidth=linewidth,
label=r_mode)
handles.append(h)
plt.title(env_id)
plt.legend(handles=handles)
plt.xlabel('train steps')
plt.ylabel('episodic returns')
figfile = os.path.join(output_dir, '{}.png'.format(env_id))
plt.savefig(figfile, bbox_inches='tight')
plt.clf()
print('Plot saved at {}.'.format(figfile))
if __name__ == '__main__':
main()
|
the-stack_0_13946 | import bblfsh_sonar_checks.utils as utils
import bblfsh
def check(uast):
findings = []
methods = utils.get_methods(uast)
for m in methods:
# Should look at the roles to filter by Boolean but there is a bug in the
        # Java driver https://github.com/bblfsh/java-driver/issues/83 so we check the token
if m.return_ and m.return_.type_name == 'boolean':
if any(list(bblfsh.filter(m.body, "//*[@roleReturn]//*[@roleNull]"))):
findings.append({"msg": "Don't return Null on Boolean-return methods"})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
|
the-stack_0_13948 | """
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
"""
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import OutOfBoundsDatetime, to_offset
from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
import pandas as pd
from pandas import DatetimeIndex, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndexOps:
def test_dti_time(self):
rng = date_range("1/1/2000", freq="12min", periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range("1/1/2000", freq="12H", periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
@pytest.mark.parametrize(
"field",
[
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
],
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_isocalendar_fields(self):
idx = tm.makeDateIndex(100)
expected = tuple(idx.isocalendar().iloc[-1].to_list())
result = idx[-1].isocalendar()
assert result == expected
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
# ----------------------------------------------------------------
# DatetimeIndex.round
def test_round_daily(self):
dti = date_range("20130101 09:10:11", periods=5)
result = dti.round("D")
expected = date_range("20130101", periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
result = dti.round("D")
expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
tm.assert_index_equal(result, expected)
result = dti.round("s")
tm.assert_index_equal(result, dti)
@pytest.mark.parametrize(
"freq, error_msg",
[
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_round_invalid(self, freq, error_msg):
dti = date_range("20130101 09:10:11", periods=5)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
def test_round(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 01:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq="H"), expected_rng)
assert elt.round(freq="H") == expected_elt
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq="M")
with pytest.raises(ValueError, match=msg):
elt.round(freq="M")
# GH#14440 & GH#15578
index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
tm.assert_index_equal(index, index.round(freq))
index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz)
tm.assert_index_equal(result, expected)
index = DatetimeIndex(["2016-10-17 12:00:00.001501031"])
result = index.round("10ns")
expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(False):
ts = "2016-10-17 12:00:00.001501031"
DatetimeIndex([ts]).round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:02:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:04:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:06:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:08:00", tz=tz, freq="2T"),
]
)
tm.assert_index_equal(rng.round(freq="2T"), expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
(["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
(["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
(
["2117-01-01 00:00:45.000000012"],
"floor",
"10ns",
["2117-01-01 00:00:45.000000010"],
),
(
["1823-01-01 00:00:01.000000012"],
"ceil",
"10ns",
["1823-01-01 00:00:01.000000020"],
),
(["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
(["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
(["2018-01-01 00:15:00"], "ceil", "15T", ["2018-01-01 00:15:00"]),
(["2018-01-01 00:15:00"], "floor", "15T", ["2018-01-01 00:15:00"]),
(["1823-01-01 03:00:00"], "ceil", "3H", ["1823-01-01 03:00:00"]),
(["1823-01-01 03:00:00"], "floor", "3H", ["1823-01-01 03:00:00"]),
(
("NaT", "1823-01-01 00:00:01"),
"floor",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
(
("NaT", "1823-01-01 00:00:01"),
"ceil",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize(
"start, index_freq, periods",
[("2018-01-01", "12H", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
)
@pytest.mark.parametrize(
"round_freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"12H",
"1D",
],
)
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = date_range(start=start, freq=index_freq, periods=periods)
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"floor not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"ceil not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), f"round not a {round_freq} multiple"
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
result.asi8[diff == unit // 2] % 2 == 0
).all(), "round half to even error"
# ----------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D")
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype(
"datetime64[ns]"
)
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype(
"datetime64[ns]"
)
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_nat(self):
dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")])
result = dti.normalize()
expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")])
tm.assert_index_equal(result, expected)
class TestDateTimeIndexToJulianDate:
def test_1700(self):
dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="H")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="T")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="S")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
|
the-stack_0_13949 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numbers
import numpy as np
import scipy.linalg as la
from .cy.interpolate import interp, arr_interp, zinterp, arr_zinterp
__all__ = ['Cubic_Spline']
class Cubic_Spline:
"""
Calculates coefficients for a cubic spline
interpolation of a given data set.
This function assumes that the data is sampled
uniformly over a given interval.
Parameters
----------
a : float
Lower bound of the interval.
b : float
Upper bound of the interval.
y : ndarray
Function values at interval points.
alpha : float
Second-order derivative at a. Default is 0.
beta : float
Second-order derivative at b. Default is 0.
Attributes
----------
a : float
Lower bound of the interval.
b : float
Upper bound of the interval.
coeffs : ndarray
        Array of coefficients defining the cubic spline.
Notes
-----
This object can be called like a normal function with a
single or array of input points at which to evaluate
    the interpolating function.
Habermann & Kindermann, "Multidimensional Spline Interpolation:
Theory and Applications", Comput Econ 30, 153 (2007).
"""
def __init__(self, a, b, y, alpha=0, beta=0):
y = np.asarray(y)
n = y.shape[0] - 1
h = (b - a)/n
coeff = np.zeros(n + 3, dtype=y.dtype)
        # Solutions to boundary coefficients of spline
coeff[1] = 1/6 * (y[0] - (alpha*h*h)/6) # C2 in paper
coeff[n + 1] = 1/6 * (y[n] - (beta*h*h)/6) # cn+2 in paper
# Compressed tridiagonal matrix
ab = np.ones((3, n - 1), dtype=float)
ab[0, 0] = 0 # Because top row is upper diag with one less elem
ab[1, :] = 4
ab[-1, -1] = 0 # Because bottom row is lower diag with one less elem
B = y[1:-1].copy() # grabs elements y[1] - > y[n-2] for reduced array
B[0] -= coeff[1]
B[-1] -= coeff[n + 1]
coeff[2:-2] = la.solve_banded((1, 1), ab, B,
overwrite_ab=True,
overwrite_b=True,
check_finite=False)
coeff[0] = alpha*h*h/6. + 2*coeff[1] - coeff[2]
coeff[-1] = beta*h*h/6. + 2*coeff[-2] - coeff[-3]
        self.a = a  # Lower bound of domain
        self.b = b  # Upper bound of domain
self.coeffs = coeff # Spline coefficients
self.is_complex = y.dtype == complex # Tells which dtype solver to use
self.array = y
self.bounds = (alpha, beta)
def __call__(self, pnts, *args):
# If requesting a single return value
if isinstance(pnts, numbers.Number):
if self.is_complex:
return zinterp(pnts, self.a, self.b, self.coeffs)
else:
return interp(pnts, self.a, self.b, self.coeffs)
# If requesting multiple return values from array_like
elif isinstance(pnts, (np.ndarray, list)):
pnts = np.asarray(pnts)
if self.is_complex:
return arr_zinterp(pnts, self.a, self.b, self.coeffs)
return arr_interp(pnts, self.a, self.b, self.coeffs)
raise TypeError
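# --- Usage sketch (added for illustration): interpolating sin(x) sampled
# uniformly on [0, 2*pi] with natural boundary conditions (alpha = beta = 0).
# Not executed on import; call it from a QuTiP session to try it out.
def _cubic_spline_demo():
    x = np.linspace(0, 2 * np.pi, 33)
    spline = Cubic_Spline(x[0], x[-1], np.sin(x))
    single = spline(np.pi / 3)                  # scalar evaluation
    sampled = spline(np.linspace(0.5, 5.5, 5))  # vectorized evaluation
    return single, sampled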
|
the-stack_0_13950 | import requests
import json
import os
from github import Github
BASE = """---
id: default_repositories
title: Default repositories
description: "Default repositories in HACS"
---
<!-- The content of this file is autogenerated during build with script/generate_default_repositories.py -->
"""
github = Github(os.environ['TOKEN'])
integration_org = github.get_organization("custom-components")
plugin_org = github.get_organization("custom-cards")
theme_org = github.get_organization("home-assistant-community-themes")
blacklist = requests.get('https://raw.githubusercontent.com/hacs/default/master/blacklist')
blacklist = json.loads(blacklist.text.lower())
for category in ["integration", "plugin", "appdaemon", "python_script", "theme"]:
response = requests.get(f'https://raw.githubusercontent.com/hacs/default/master/{category}')
repos = json.loads(response.text.lower())
if category == "integration":
for repo in list(integration_org.get_repos()):
repos.append(repo.full_name.lower())
elif category == "plugin":
for repo in list(plugin_org.get_repos()):
repos.append(repo.full_name.lower())
elif category == "theme":
for repo in list(theme_org.get_repos()):
repos.append(repo.full_name.lower())
for repo in repos:
if repo in blacklist:
repos.remove(repo)
title = category.replace("_", " ").title() + 's' if category != 'appdaemon' else 'AppDaemon Apps'
BASE += f"\n## {title}\n\n"
BASE += f"_{len(repos)} Repositories in total._\n\n"
for repo in sorted(repos):
BASE += f"<p className='defaultrepo'><a href='https://github.com/{repo}' target='_blank'>{repo}</a></p>\n"
with open("documentation/default_repositories.md", "w") as mdfile:
mdfile.write(BASE)
|
the-stack_0_13952 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
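# --- Local stand-in (added for illustration): LeetCode injects TreeNode at run
# time; define a minimal equivalent only when it is missing so this module can
# also be imported and tested outside the judge.
try:
    TreeNode
except NameError:
    class TreeNode:
        def __init__(self, val=0, left=None, right=None):
            self.val = val
            self.left = left
            self.right = right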
class Solution:
sum = 0
def sumRootToLeaf(self, root: TreeNode) -> int:
def dfs(root, path_sum):
if not root:
return
path_sum = (path_sum << 1) + root.val
if not root.left and not root.right:
self.sum += path_sum
return
dfs(root.left, path_sum)
dfs(root.right, path_sum)
dfs(root, 0)
return self.sum
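# --- Usage sketch (added for illustration): a small example tree whose
# root-to-leaf binary paths 100, 101, 110, 111 sum to 22.
def _sum_root_to_leaf_demo():
    root = TreeNode(1,
                    TreeNode(0, TreeNode(0), TreeNode(1)),
                    TreeNode(1, TreeNode(0), TreeNode(1)))
    return Solution().sumRootToLeaf(root)  # expected: 22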
|
the-stack_0_13956 | exp = str(input('Digite uma expressão: '))
pilha = []
for simb in exp:
if simb == '(':
pilha.append('(')
elif simb == ')':
if len(pilha) > 0:
pilha.pop()
else:
pilha.append(')')
break
if len(pilha) == 0:
    print('The expression is valid!')
else:
    print('The expression is invalid!')
|
the-stack_0_13957 | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
import tensorflow.compat.v1.keras.backend as K
from tensorflow.compat.v1.keras.backend import get_session
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
class YOLO(object):
_defaults = {
"model_path": 'model_data/test.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
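# --- Usage sketch (added for illustration): the file names below are
# placeholders and must point at real weights / video files before running.
if __name__ == '__main__':
    yolo = YOLO(score=0.3)  # uses the paths configured in YOLO._defaults
    detect_video(yolo, video_path='input.mp4', output_path='output.mp4')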
|
the-stack_0_13959 | # to run this script you need a DL1 debug files of hipeRTA and a DL1 file from lstchain from the same run
import tables
import numpy as np
import matplotlib.pyplot as plt
from ctapipe.visualization import CameraDisplay
from ctapipe.instrument import CameraGeometry
from ctapipe.image import tailcuts_clean
from lstchain.io.io import dl1_images_lstcam_key, dl1_params_lstcam_key
from astropy.table import Table, join, Column, hstack
from ctapipe.io.containers import HillasParametersContainer
from matplotlib.backends.backend_pdf import PdfPages
from datetime import date
import astropy.units as u
from astropy.coordinates import Angle
import argparse
# `geom` must exist at module scope because tailcuts_clean_teltype is applied
# row-wise from inside main(); build the LSTCam geometry once here.
geom = CameraGeometry.from_name('LSTCam')
def tailcuts_clean_teltype(image, camera_name='LSTCam', **kwargs):
    return tailcuts_clean(geom, image, **kwargs)
def get_hillas_container(row):
h = HillasParametersContainer()
h.x = row['x'] * 28 * u.m
h.y = row['y'] * 28 * u.m
h.r = row['r'] * 28 * u.m
h.phi = Angle(row['phi'] * u.rad)
h.width = row['width'] * u.m
h.length = row['length'] * u.m
h.psi = Angle(row['psi'] * u.rad)
h.skewness = row['skewness']
h.kurtosis = row['kurtosis']
return h
dl1_hipecta_filename = '/fefs/aswg/workspace/thomas.vuillaume/mchdf5/run1/dl1_6_3_2_gamma_20deg_180deg_run1___cta-prod3-demo-2147m-LaPalma-baseline-mono_off0.4.h5'
dl1_lstchain_filename = '/fefs/aswg/workspace/thomas.vuillaume/mchdf5/run1/lstchain/GlobalPeakIntegrator/dl1_gamma_20deg_180deg_run1___cta-prod3-demo-2147m-LaPalma-baseline-mono_off0.4.simtel.h5'
def main(dl1_hipecta_filename, dl1_lstchain_filename):
geom = CameraGeometry.from_name('LSTCam')
dl1_hipecta = tables.open_file(dl1_hipecta_filename)
dl1_lstchain = tables.open_file(dl1_lstchain_filename)
with tables.open_file(dl1_hipecta_filename) as dl1_hipecta:
hipecta_images = Table(dl1_hipecta.root.dl1.Tel_1.calib_pic.read())
hipecta_parameters = Table(dl1_hipecta.root.dl1.Tel_1.parameters.read())
with tables.open_file(dl1_lstchain_filename) as dl1_lstchain:
simu_table = Table(dl1_lstchain.root.dl1.event.simulation.LST_LSTCam.read())
lstchain_images = Table(dl1_lstchain.root[dl1_images_lstcam_key].read())
hipecta = join(hipecta_images, hipecta_parameters, keys='event_id')
lstchain_table = hstack([lstchain_images, simu_table], join_type='exact')
lstchain_table.rename_column('tel_id_1', 'tel_id')
lstchain_table.remove_column('tel_id_2')
mega_table = join(lstchain_table[lstchain_table['tel_id']==1],
hipecta,
uniq_col_name='{table_name}_{col_name}',
table_names = ['lstchain', 'hipecta'],
keys='event_id'
)
selected_table = mega_table[:30]
params_cleaning = dict(picture_thresh=6,
boundary_thresh=3,
keep_isolated_pixels=False,
min_number_picture_neighbors=2)
    lstchain_cleaning = np.apply_along_axis(tailcuts_clean_teltype, 1, selected_table['image'], **params_cleaning)
selected_table.add_column(Column(lstchain_cleaning, dtype=int), name='lstchain_clean_mask')
with PdfPages(f'compare_lstchain_hipecta_images_{date.today()}.pdf') as pp:
for ii, row in enumerate(selected_table[:10]):
print(f"{ii}. event id : {row['event_id']}")
# print(row)
h = get_hillas_container(row)
image_lstchain = row['image']
image_hipecta = row['signal']
clean_mask_ctapipe_on_lstchain = row['lstchain_clean_mask']
clean_mask_ctapipe_on_hipecta = tailcuts_clean(geom, image_hipecta, **params_cleaning)
clean_mask_hipecta = row['clean_mask'].astype(bool)
fig, axes = plt.subplots(2,3, figsize=(12,6))
# axes[0,2].remove()
display = CameraDisplay(geom, image_lstchain, ax=axes[0,0])
display.add_colorbar(ax=axes[0,0])
axes[0,0].set_title('lstchain image')
display = CameraDisplay(geom, clean_mask_ctapipe_on_lstchain, ax=axes[0,1])
# display.add_colorbar(ax=axes[0,1])
display.highlight_pixels(clean_mask_ctapipe_on_lstchain.astype(bool), color='red')
axes[0,1].set_title('lstchain clean mask')
display = CameraDisplay(geom, image_hipecta, ax=axes[1,0])
display.add_colorbar(ax=axes[1,0])
axes[1,0].set_title('hipecta image')
display = CameraDisplay(geom, clean_mask_hipecta, ax=axes[1,1])
# display.add_colorbar(ax=axes[1,1])
display.highlight_pixels(clean_mask_ctapipe_on_hipecta, color='red')
axes[1,1].set_title('hipecta clean mask')
axes[1,1].text(0.88,0.88,s='cleaning mask\nfrom ctapipe',color='red')
axes[1,1].text(-1.5, 0.88, s=f'n_islands={row["n_islands"]}', color='black')
display.overlay_moments(h)
display = CameraDisplay(geom, row['photo_electron_image'], ax=axes[0,2])
display.add_colorbar(ax=axes[0,2])
axes[0,2].set_title('true pe image')
display.highlight_pixels(clean_mask_ctapipe_on_lstchain.astype(bool), color='red')
axes[0,2].text(0.88, 0.88, s='cleaning mask\nfrom ctapipe', color='red')
display = CameraDisplay(geom, row['photo_electron_image'], ax=axes[1,2])
display.add_colorbar(ax=axes[1,2])
axes[1,2].set_title('true pe image')
display.highlight_pixels(clean_mask_hipecta, color='red')
axes[1,2].text(0.88,0.88,s='cleaning mask\nfrom hipecta',color='red')
plt.tight_layout()
pp.savefig(dpi=100)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Reconstruct events")
parser.add_argument('--dl1_lstchain', '-lst',
type=str,
dest='dl1_lstchain_filename',
help='path to the lstchain DL1 file',
default=dl1_lstchain_filename)
parser.add_argument('--dl1_hipecta', '-hipe',
type=str,
dest='dl1_hipecta_filename',
help='path to the hiperta debug DL1 file',
default=dl1_hipecta_filename)
args = parser.parse_args()
main(args.dl1_hipecta_filename, args.dl1_lstchain_filename)
|