| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths 0–7.82k) |
---|---|---|---|---|---|---|---|
d6a8e42cb3bd963632500541b5e4e71c700c246e
|
Fix migration
|
nodeconductor/cost_tracking/migrations/0006_add_pricelist_backend_ids.py
|
nodeconductor/cost_tracking/migrations/0006_add_pricelist_backend_ids.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0001_initial'),
('cost_tracking', '0005_expand_item_type_size'),
]
operations = [
migrations.RenameField(
model_name='defaultpricelistitem',
old_name='service_content_type',
new_name='resource_content_type',
),
migrations.AddField(
model_name='defaultpricelistitem',
name='backend_choice_id',
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='defaultpricelistitem',
name='backend_option_id',
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='defaultpricelistitem',
name='backend_product_id',
field=models.CharField(max_length=255, blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='pricelistitem',
name='resource_content_type',
field=models.ForeignKey(related_name='+', default=0, to='contenttypes.ContentType'),
preserve_default=False,
),
]
|
Python
| 0 |
@@ -1314,17 +1314,17 @@
default=
-0
+1
, to='co
|
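Decoded from the URL-encoded character diff above, the only change is the `default` value of the new `resource_content_type` foreign key (0 to 1). A rough sketch of the patched operation, assuming the rest of the migration shown above is unchanged:

```python
# Fragment of the operations list in the migration above, after the fix
migrations.AddField(
    model_name='pricelistitem',
    name='resource_content_type',
    field=models.ForeignKey(related_name='+', default=1, to='contenttypes.ContentType'),
    preserve_default=False,
)
```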
3d8f627a25cb83a202878897607e5095270c332d
|
Convert ruuvi_rx beacon timestamp to UTC time. (#54)
|
ruuvitag_sensor/ruuvi_rx.py
|
ruuvitag_sensor/ruuvi_rx.py
|
from datetime import datetime
from multiprocessing import Manager
from threading import Thread
import time
from concurrent.futures import ProcessPoolExecutor
from rx.subjects import Subject
from ruuvitag_sensor.ruuvi import RuuviTagSensor, RunFlag
def _run_get_data_background(macs, queue, shared_data, bt_device):
"""
Background process function for RuuviTag Sensors
"""
run_flag = RunFlag()
def add_data(data):
if not shared_data['run_flag']:
run_flag.running = False
data[1]['time'] = str(datetime.now())
queue.put(data)
RuuviTagSensor.get_datas(add_data, macs, run_flag, bt_device)
class RuuviTagReactive(object):
"""
Reactive wrapper and background process for RuuviTagSensor get_datas
"""
@staticmethod
def _data_update(subjects, queue, run_flag):
"""
Get data from backgound process and notify all subscribed observers with the new data
"""
while run_flag.running:
while not queue.empty():
data = queue.get()
for subject in [s for s in subjects if not s.is_disposed]:
subject.on_next(data)
time.sleep(0.1)
def __init__(self, macs=[], bt_device=''):
"""
Start background process for get_datas and async task for notifying all subscribed observers
Args:
macs (list): MAC addresses
bt_device (string): Bluetooth device id
"""
self._run_flag = RunFlag()
self._subjects = []
m = Manager()
q = m.Queue()
# Use Manager dict to share data between processes
self._shared_data = m.dict()
self._shared_data['run_flag'] = True
# Start data updater
notify_thread = Thread(target=RuuviTagReactive._data_update, args=(self._subjects, q, self._run_flag))
notify_thread.start()
# Start background process
executor = ProcessPoolExecutor(1)
executor.submit(_run_get_data_background, macs, q, self._shared_data, bt_device)
def get_subject(self):
"""
Returns:
subject : Reactive Extension Subject
"""
if not self._run_flag.running:
raise Exception('RuuviTagReactive stopped')
subject = Subject()
self._subjects.append(subject)
return subject
def stop(self):
"""
Stop get_datas
"""
self._run_flag.running = False
self._shared_data['run_flag'] = False
for s in self._subjects:
s.dispose()
|
Python
| 0.999997 |
@@ -539,12 +539,8 @@
%5D =
-str(
date
@@ -548,13 +548,27 @@
ime.
+utc
now()
+.isoformat(
)%0A
|
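Decoded, the diff rewrites the timestamp assignment inside `add_data` so the beacon time is recorded as UTC in ISO 8601 form rather than a stringified local time. A minimal sketch of the changed line (`data` and `queue` come from the enclosing `_run_get_data_background` shown above):

```python
from datetime import datetime

# before the patch: data[1]['time'] = str(datetime.now())
data[1]['time'] = datetime.utcnow().isoformat()  # UTC, ISO 8601
queue.put(data)
```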
0b14f93121f3feaa4433eaf8275f5ad40c646b48
|
Update NumberPathShuffled.py
|
_includes/NumberPathShuffled.py
|
_includes/NumberPathShuffled.py
|
from random import shuffle
N = 100
shufflePeriod = 10000000
print(N)
connected = [[]]
for i in range(N):
connected.append([])
for m in range(1,N+1):
# for n in range(1,N+1):
for n in range(N,0,-1):
if ((not m == n) and (m%n == 0 or n%m == 0)):
connected[m].append(n)
def explore(path):
global longestLength, longestPath, connected, shuffleCounter, shufflePeriod
shuffleCounter += 1
if shuffleCounter == shufflePeriod:
shuffleCounter = 0
for L in connected:
shuffle(L)
print "Shuffled"
isExtendable = 0
n = path[-1]
# shuffledconnected = list(connected[n])
# shuffle(shuffledconnected)
for m in connected[n]:
#for m in shuffledconnected:
if not m in path:
isExtendable = 1
newPath = list(path)
newPath.append(m)
explore(newPath)
if not isExtendable:
if len(path) > longestLength:
longestLength = len(path)
longestPath = path
print longestLength,longestPath
longestPath = []
longestLength = 0
#for n in range(1,N+1):
# print(n)
# explore([n])
shuffleCounter = 0
explore([81])
print("Longest path length is",longestLength)
print(longestPath)
|
Python
| 0.000001 |
@@ -499,17 +499,49 @@
Shuffled
-%22
+ still%22,longestLength,longestPath
%0A%0A%09isExt
|
936382b1744c2a9b5f3082abe9a3e0f2fbba58d0
|
Return None when an error while reading config occurs
|
src/config.py
|
src/config.py
|
import yaml
SECTION_APP = "app"
SECTION_DEVICE = "device"
KEY_DEFAULT = "default"
def read_value(section, key):
with open(".adbons.yml", 'r') as ymlfile:
config = yaml.safe_load(ymlfile)
try:
return config[section][key]
except:
return ""
def write_value(section, key, value):
try:
with open(".adbons.yml", 'r+') as ymlfile:
config = yaml.safe_load(ymlfile)
if section not in config:
config[section] = {}
config[section][key] = value
except:
config = {}
config[section] = {}
config[section][key] = value
with open(".adbons.yml", 'w') as ymlfile:
yaml.dump(config, ymlfile, default_flow_style=False)
|
Python
| 0.000002 |
@@ -108,16 +108,29 @@
, key):%0A
+ try:%0A
with
@@ -167,32 +167,36 @@
mlfile:%0A
+
config = yaml.sa
@@ -212,25 +212,16 @@
mlfile)%0A
- try:%0A
@@ -272,17 +272,12 @@
-return %22%22
+pass
%0A%0A%0Ad
|
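Putting the hunks together, `read_value` now wraps the whole file access in a `try` block and silently falls through on any error, so the function returns `None` instead of an empty string. A plausible reconstruction of the patched function (indentation is inferred, since the flattened dump loses it):

```python
import yaml  # already imported at the top of the file above

def read_value(section, key):
    try:
        with open(".adbons.yml", 'r') as ymlfile:
            config = yaml.safe_load(ymlfile)
            return config[section][key]
    except:
        # Any failure (missing file, bad YAML, missing key) falls through,
        # so the function returns None.
        pass
```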
4b5cc8e2c75ae191bc134a7b3c62aa9c67ebe837
|
Use six.iteritems instead of iteritems() in psutil_compat
|
salt/utils/psutil_compat.py
|
salt/utils/psutil_compat.py
|
# -*- coding: utf-8 -*-
'''
Version agnostic psutil hack to fully support both old (<2.0) and new (>=2.0)
psutil versions.
The old <1.0 psutil API is dropped in psutil 3.0
Should be removed once support for psutil <2.0 is dropped. (eg RHEL 6)
Built off of http://grodola.blogspot.com/2014/01/psutil-20-porting.html
'''
from __future__ import absolute_import
# No exception handling, as we want ImportError if psutil doesn't exist
import psutil
if psutil.version_info >= (2, 0):
from psutil import * # pylint: disable=wildcard-import,unused-wildcard-import
else:
# Import hack to work around bugs in old psutil's
# Psuedo "from psutil import *"
_globals = globals()
for attr in psutil.__all__:
_temp = __import__('psutil', globals(), locals(), [attr], -1)
try:
_globals[attr] = getattr(_temp, attr)
except AttributeError:
pass
# Import functions not in __all__
from psutil import disk_partitions # pylint: disable=unused-import
from psutil import disk_usage # pylint: disable=unused-import
# Alias new module functions
def boot_time():
return psutil.BOOT_TIME
def cpu_count():
return psutil.NUM_CPUS
# Alias renamed module functions
pids = psutil.get_pid_list
try:
users = psutil.get_users
except AttributeError:
users = lambda: (_ for _ in ()).throw(NotImplementedError('Your '
'psutil version is too old'))
# Deprecated in 1.0.1, but not mentioned in blog post
if psutil.version_info < (1, 0, 1):
net_io_counters = psutil.network_io_counters()
class Process(psutil.Process): # pylint: disable=no-init
# Reimplement overloaded getters/setters
def cpu_affinity(self, *args, **kwargs):
if args or kwargs:
return self.set_cpu_affinity(*args, **kwargs)
else:
return self.get_cpu_affinity()
def ionice(self, *args, **kwargs):
if args or kwargs:
return self.set_ionice(*args, **kwargs)
else:
return self.get_ionice()
def nice(self, *args, **kwargs):
if args or kwargs:
return self.set_nice(*args, **kwargs)
else:
return self.get_nice()
def rlimit(self, *args, **kwargs):
'''
set_rlimit and get_limit were not introduced until psutil v1.1.0
'''
if psutil.version_info >= (1, 1, 0):
if args or kwargs:
return self.set_rlimit(*args, **kwargs)
else:
return self.get_rlimit()
else:
pass
# Alias renamed Process functions
_PROCESS_FUNCTION_MAP = {
"children": "get_children",
"connections": "get_connections",
"cpu_percent": "get_cpu_percent",
"cpu_times": "get_cpu_times",
"io_counters": "get_io_counters",
"memory_info": "get_memory_info",
"memory_info_ex": "get_ext_memory_info",
"memory_maps": "get_memory_maps",
"memory_percent": "get_memory_percent",
"num_ctx_switches": "get_num_ctx_switches",
"num_fds": "get_num_fds",
"num_threads": "get_num_threads",
"open_files": "get_open_files",
"threads": "get_threads",
"cwd": "getcwd",
}
for new, old in _PROCESS_FUNCTION_MAP.iteritems():
try:
setattr(Process, new, psutil.Process.__dict__[old])
except KeyError:
pass
|
Python
| 0 |
@@ -316,16 +316,37 @@
ml%0A'''%0A%0A
+# Import Python libs%0A
from __f
@@ -377,16 +377,63 @@
import%0A%0A
+# Import Salt libs%0Aimport salt.ext.six as six%0A%0A
# No exc
@@ -3523,16 +3523,30 @@
old in
+six.iteritems(
_PROCESS
@@ -3558,27 +3558,16 @@
TION_MAP
-.iteritems(
):%0A
|
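The diff adds an import of Salt's bundled `six` and switches the dict iteration to `six.iteritems`, which works on both Python 2 and 3. A sketch of the affected lines after the patch (`Process`, `psutil`, and `_PROCESS_FUNCTION_MAP` are defined earlier in the file shown above):

```python
# Import Salt libs
import salt.ext.six as six

# Alias renamed Process functions in a Py2/Py3-agnostic way
for new, old in six.iteritems(_PROCESS_FUNCTION_MAP):
    try:
        setattr(Process, new, psutil.Process.__dict__[old])
    except KeyError:
        pass
```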
89a1a37e91ace4af2983e63ef68ff1d22811aa32
|
Fix syntax error
|
hackeriet/cardreaderd/__init__.py
|
hackeriet/cardreaderd/__init__.py
|
#!/usr/bin/env python
from hackeriet import mifare
from hackeriet.mqtt import MQTT
from hackeriet.door import users
import os, logging
logging.basicConfig(level=logging.INFO, format='%(asctime)-15s %(message)s')
door_name = os.getenv("DOOR_NAME", 'hackeriet')
door_topic = "hackeriet/door/%s/open" % door_name
door_timeout = int(os.getenv("DOOR_TIMEOUT", 2))
mqtt = MQTT()
def main():
logging.debug('Starting main loop')
while True:
users.load()
# Read data from card reader
logging.debug('mifare: waiting for data...')
data = mifare.try_read()
if data:
logging.debug('mifare: data read')
user = users.auth(data[0:16])
if user:
ascii_user = user.encode('ascii', 'replace').decode('ascii')
logging.info('auth: card read for user %s' % ascii_user)
mqtt(door_topic, user)
else:
logging.debug('auth: card data does not belong to a user: %s' % data[0:16])
# Avoid spewing messages every single ms while a card is in front of the reader
time.sleep(door_timeout)
else
logging.debug('mifare: no data read in last attempt')
if __name__ == "__main__":
main()
|
Python
| 0.000585 |
@@ -1053,16 +1053,17 @@
else
+:
%0A l
|
0d0ca65120927c0a2585800fb1602f282719c40d
|
Remove a few unused things in test
|
flavio/physics/bdecays/formfactors/b_v/test_btov.py
|
flavio/physics/bdecays/formfactors/b_v/test_btov.py
|
import unittest
from math import sqrt,radians,asin
from flavio.physics.bdecays.formfactors.b_v import btov, bsz_parameters, lattice_parameters
import numpy as np
from flavio.classes import Constraints, Implementation
from flavio.parameters import default_parameters
import copy
par = {
'm_B0': 5.27961,
'm_Bs': 5.36679,
'm_K*0': 0.89166,
'm_rho0': 0.077526,
'm_omega': 0.78265,
'm_phi': 1.019461,
}
class TestBtoV(unittest.TestCase):
def test_bsz3(self):
c = copy.copy(default_parameters)
bsz_parameters.bsz_load_v1_lcsr(c)
# compare to numbers in table 4 of arXiv:1503.05534v1
# B->K* all FFs
ffbsz3 = Implementation.get_instance('B->K* BSZ3').get_central(constraints_obj=c, wc_obj=None, q2=0)
self.assertAlmostEqual(ffbsz3['A0'], 0.391, places=2)
self.assertAlmostEqual(ffbsz3['A1'], 0.289, places=3)
self.assertAlmostEqual(ffbsz3['A12'], 0.281, places=3)
self.assertAlmostEqual(ffbsz3['V'], 0.366, places=3)
self.assertAlmostEqual(ffbsz3['T1'], 0.308, places=3)
self.assertAlmostEqual(ffbsz3['T23'], 0.793, places=3)
self.assertAlmostEqual(ffbsz3['T1'], ffbsz3['T2'], places=16)
# A1 for the remaining transitions
ffbsz3 = Implementation.get_instance('B->rho BSZ3').get_central(constraints_obj=c, wc_obj=None, q2=0)
self.assertAlmostEqual(ffbsz3['A1'], 0.267, places=3)
ffbsz3 = Implementation.get_instance('B->omega BSZ3').get_central(constraints_obj=c, wc_obj=None, q2=0)
self.assertAlmostEqual(ffbsz3['A1'], 0.237, places=3)
ffbsz3 = Implementation.get_instance('Bs->phi BSZ3').get_central(constraints_obj=c, wc_obj=None, q2=0)
self.assertAlmostEqual(ffbsz3['A1'], 0.315, places=3)
ffbsz3 = Implementation.get_instance('Bs->K* BSZ3').get_central(constraints_obj=c, wc_obj=None, q2=0)
self.assertAlmostEqual(ffbsz3['A1'], 0.246, places=3)
#
def test_lattice(self):
c = copy.copy(default_parameters)
lattice_parameters.lattice_load(c)
fflatt = Implementation.get_instance('B->K* lattice').get_central(constraints_obj=c, wc_obj=None, q2=12.)
self.assertAlmostEqual(fflatt['V'], 0.84, places=2)
self.assertAlmostEqual(fflatt['A0'], 0.861, places=3)
self.assertAlmostEqual(fflatt['A1'], 0.440, places=3)
self.assertAlmostEqual(fflatt['A12'], 0.339, places=3)
self.assertAlmostEqual(fflatt['T1'], 0.711, places=3)
self.assertAlmostEqual(fflatt['T2'], 0.433, places=3)
self.assertAlmostEqual(fflatt['T23'], 0.809, places=3)
# FIXME this still doesn't work well due to the resonance mass problem
fflatt = Implementation.get_instance('Bs->phi lattice').get_central(constraints_obj=c, wc_obj=None, q2=12.)
self.assertAlmostEqual(fflatt['V'], 0.767, places=2)
self.assertAlmostEqual(fflatt['A0'], 0.907, places=2)
self.assertAlmostEqual(fflatt['A1'], 0.439, places=2)
self.assertAlmostEqual(fflatt['A12'], 0.321, places=2)
self.assertAlmostEqual(fflatt['T1'], 0.680, places=2)
self.assertAlmostEqual(fflatt['T2'], 0.439, places=2)
self.assertAlmostEqual(fflatt['T23'], 0.810, places=2)
fflatt = Implementation.get_instance('Bs->K* lattice').get_central(constraints_obj=c, wc_obj=None, q2=12.)
self.assertAlmostEqual(fflatt['V'], 0.584, places=3)
self.assertAlmostEqual(fflatt['A0'], 0.884, places=3)
self.assertAlmostEqual(fflatt['A1'], 0.370, places=3)
self.assertAlmostEqual(fflatt['A12'], 0.321, places=3)
self.assertAlmostEqual(fflatt['T1'], 0.605, places=3)
self.assertAlmostEqual(fflatt['T2'], 0.383, places=3)
self.assertAlmostEqual(fflatt['T23'], 0.743, places=3)
|
Python
| 0 |
@@ -185,21 +185,8 @@
port
- Constraints,
Imp
@@ -263,153 +263,8 @@
py%0A%0A
-par = %7B%0A 'm_B0': 5.27961,%0A 'm_Bs': 5.36679,%0A 'm_K*0': 0.89166,%0A 'm_rho0': 0.077526,%0A 'm_omega': 0.78265,%0A 'm_phi': 1.019461,%0A%7D%0A
%0Acla
|
857a251c7491b626bf948b58806b917ab20e3d1b
|
Make concat_example always choose the device on to_gpu
|
chainer/dataset/convert.py
|
chainer/dataset/convert.py
|
import numpy
import six
from chainer import cuda
def concat_examples(batch, device=None, padding=None):
"""Concatenates a list of examples into array(s).
Dataset iterator yields a list of examples. If each example is an array,
this function concatenates them along the newly-inserted first axis (called
`batch dimension`) into one array. The basic behavior is same for examples
consisting of multiple arrays, i.e., corresponding arrays of all examples
are concatenated.
For instance, consider each example consists of two arrays ``(x, y)``.
Then, this function concatenates ``x`` 's into one array, and ``y`` 's
into another array, and returns a tuple of these two arrays. Another
example: consider each example is a dictionary of two arrays. Two arrays
have keys ``'x'`` and ``'y'``. Then, this function concatenates ``x`` 's
into one array, and ``y`` 's into another array, and returns a dictionary
with two arrays ``x`` and ``y``.
When the arrays to concatenate have different shapes, the behavior depends
on the ``padding`` value. If ``padding`` is None (default), it raises an
error. Otherwise, it builds an array of the minimum shape that the contents
of all arrays can be substituted to. The padding value is then used to the
extra elements of the resulting arrays.
TODO(beam2d): Add an example.
Args:
batch (list): A list of examples. This is typically given by a dataset
iterator.
device (int): Device ID to which each array is sent. Negative value
indicates the host memory (CPU). If it is omitted, all arrays are
left in the original device.
padding: Padding value for extra elements. If this is None (default),
an error is raised on shape mismatch. Otherwise, an array of
minimum dimensionalities that can accomodate all arrays is created,
and elements outside of the examples are padded by this value.
Returns:
Array, a tuple of arrays, or a dictionary of arrays. The type depends
on the type of each example in the batch.
"""
if len(batch) == 0:
raise ValueError('batch is empty')
if device is None:
def to_device(x):
return x
elif device < 0:
to_device = cuda.to_cpu
else:
to_device = cuda.to_gpu
first_elem = batch[0]
if isinstance(first_elem, tuple):
result = []
if not isinstance(padding, tuple):
padding = [padding] * len(first_elem)
for i in six.moves.range(len(first_elem)):
result.append(to_device(_concat_arrays(
[example[i] for example in batch], padding[i])))
return tuple(result)
elif isinstance(first_elem, dict):
result = {}
if not isinstance(padding, dict):
padding = {key: padding for key in first_elem}
for key in first_elem:
result[key] = to_device(_concat_arrays(
[example[key] for example in batch], padding[key]))
return result
else:
return to_device(_concat_arrays(batch, padding))
def _concat_arrays(arrays, padding):
if padding is not None:
return _concate_arrays_with_padding(arrays, padding)
xp = cuda.get_array_module(arrays[0])
with cuda.get_device(arrays[0]):
return xp.concatenate([array[None] for array in arrays])
def _concate_arrays_with_padding(arrays, padding):
shape = numpy.array(arrays[0].shape, dtype=int)
for array in arrays[1:]:
if numpy.any(shape != array.shape):
if padding is None:
raise ValueError('shape mismatch within a batch')
else:
numpy.maximum(shape, array.shape, shape)
shape = tuple(numpy.insert(shape, 0, len(arrays)))
xp = cuda.get_array_module(arrays[0])
with cuda.get_device(arrays[0]):
result = xp.full(shape, padding, dtype=arrays[0].dtype)
for i in six.moves.range(len(arrays)):
src = arrays[i]
slices = tuple(slice(dim) for dim in src.shape)
result[(i,) + slices] = src
return result
|
Python
| 0.000016 |
@@ -2355,32 +2355,42 @@
to_device =
+ lambda x:
cuda.to_gpu%0A%0A
@@ -2385,16 +2385,27 @@
a.to_gpu
+(x, device)
%0A%0A fi
|
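Decoded, the diff replaces the bare `cuda.to_gpu` alias with a lambda that always passes the requested `device`, so arrays are copied to that GPU rather than to whichever device happens to be current. The patched branch of `concat_examples` looks like this:

```python
if device is None:
    def to_device(x):
        return x
elif device < 0:
    to_device = cuda.to_cpu
else:
    # always send the array to the explicitly requested GPU
    to_device = lambda x: cuda.to_gpu(x, device)
```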
f67e0a8d5f06e8d6834050cf007bf2e4674aad12
|
Improve the Updater docstring
|
chainer/trainer/updater.py
|
chainer/trainer/updater.py
|
import six
from chainer.dataset import iterator as iterator_module
from chainer.dataset import convert
from chainer import optimizer as optimizer_module
from chainer import variable
class Updater(object):
"""Interface of updater objects for trainers.
TODO(beam2d): document it.
"""
@property
def epoch(self):
"""Current number of completed sweeps over the trianing dataset."""
raise NotImplementedError
@property
def iteration(self):
"""Current number of completed updates."""
raise NotImplementedError
def finalize(self):
"""Finalizes the updater object.
This method is called at the end of training loops. It should finalize
each dataset iterator used in this updater.
"""
raise NotImplementedError
def get_optimizer(self, name):
"""Gets the optimizer of given name.
Updater holds one or more optimizers with names. They can be retrieved
by this method.
Args:
name (str): Name of the optimizer.
Returns:
~chainer.Optimizer: Optimizer of the name.
"""
raise NotImplementedError
def get_all_optimizers(self):
"""Gets a dictionary of all optimizers for this updater.
Returns:
dict: Dictionary that maps names to optimizers.
"""
raise NotImplementedError
def update(self):
"""Updates the parameters of the target model.
This method implements an update formula for the training task,
including data loading, forward/backward computations, and actual
updates of parameters.
This method is called once at each iteration of the training loop.
"""
raise NotImplementedError
def serialize(self, serializer):
"""Serializes the current state of the updater object."""
raise NotImplementedError
class StandardUpdater(Updater):
"""Standard implementation of Updater.
This is the standard implementation of :class:`Updater`. It accepts one or
more training datasets and one or more optimizers. The default update
routine assumes that there is only one training dataset and one optimizer,
while users can specify their own update routines. Each batch is converted
to input arrays by :func:`~chainer.datasets.concat_examples` by default,
which can also be manually set.
There are two ways to modify the update behavior. One is by setting a
custom update function via the ``update_func`` argument. The other one is
by inheriting this class and overriding the :meth:`update` method. In
latter case, do not forget to update the iteration counter at each call of
this method, because this value is watched by the trainer for deciding when
to invoke extensions and when to exit the training loop.
Args:
iterator: Dataset iterator for the training dataset. It can also be a
dictionary of iterators. If this is just an iterator, then the
iterator is registered by the name ``'main'``.
optimizer: Optimizer to update parameters. It can also be a dictionary
of optimizers. If this is just an optimizer, then the optimizer is
registered by the name ``'main'``.
update_func: Update routine. This is a function that takes the updater
object as the argument. The default routine uses ``converter`` and
``loss_func`` if specified.
converter: Converter function to build input arrays. If it is omitted,
:func:`~chainer.dataset.concat_examples` is used. If
``update_func`` is specified, this argument is ignored and not
used.
device: Device to which the training data is sent. Negative value
indicates the host memory (CPU). If ``update_func`` or
``converter`` is specified, this argument is ignored and not used.
loss_func: Loss function. The target link of the main optimizer is used
by default. If ``update_func`` is specified, this argument is
ignored and not used.
Attributes:
iteration: Current number of completed updates.
"""
def __init__(self, iterator, optimizer, update_func=None, converter=None,
device=None, loss_func=None):
if isinstance(iterator, iterator_module.Iterator):
iterator = {'main': iterator}
self._iterators = iterator
if isinstance(optimizer, optimizer_module.Optimizer):
optimizer = {'main': optimizer}
self._optimizers = optimizer
self._update_func = update_func or _default_update(
self, converter, device, loss_func)
self.iteration = 0
@property
def epoch(self):
return self._iterators['main'].epoch
def finalize(self):
for iterator in six.itervalues(self._iterstors):
iterator.finalize()
def get_optimizer(self, name):
return self._optimizers[name]
def get_all_optimizers(self):
return dict(self._optimizers)
def get_iterator(self, name):
"""Gets the dataset iterator of given name.
Args:
name (str): Name of the dataset iterator.
Returns:
~chainer.dataset.Iterator: Corresponding dataset iterator.
"""
return self._iterators[name]
def update(self):
self._update_func(self)
self.iteration += 1
def serialize(self, serializer):
for name, iterator in six.iteritems(self._iterators):
iterator.serialize(serializer['iterator:' + name])
for name, optimizer in six.iteritems(self._optimizers):
optmizer.serialize(serializer['optimizer:' + name])
self.iteration = serializer('iteration', self.iteration)
def _default_update(updater, converter, device, loss_func):
if not converter:
def convert(batch):
return convert.concat_examples(batch, device=device)
converter = convert
iterator = updater.get_iterator('main')
optimizer = updater.get_optimizer('main')
loss_func = loss_func or optimizer.target
def update(_):
batch = iterator.next()
in_arrays = converter(batch)
if isinstance(in_arrays, tuple):
in_vars = tuple(variable.Variable(x) for x in in_arrays)
optimizer.update(_loss_func, *in_vars)
elif isinstance(in_arrays, dict):
in_vars = {key: variable.Variable(x)
for key, x in six.iteritems(in_arrays)}
optimizer.update(_loss_func, **in_vars)
else:
in_var = variable.Variable(in_arrays)
optimizer.update(_loss_func, in_var)
return update
|
Python
| 0.00006 |
@@ -2473,16 +2473,59 @@
behavior
+ besides setting a custom%0A loss function
. One is
@@ -2537,20 +2537,16 @@
etting a
-%0A
custom
@@ -2568,16 +2568,20 @@
via the
+%0A
%60%60updat
@@ -2615,20 +2615,16 @@
r one is
-%0A
by inhe
@@ -2644,16 +2644,20 @@
lass and
+%0A
overrid
@@ -2689,20 +2689,16 @@
thod. In
-%0A
latter
@@ -2719,16 +2719,20 @@
orget to
+%0A
update
@@ -2768,20 +2768,16 @@
call of
-%0A
this me
@@ -2794,16 +2794,20 @@
use this
+%0A
value i
@@ -2848,20 +2848,16 @@
ing when
-%0A
to invo
@@ -2873,16 +2873,20 @@
ions and
+%0A
when to
|
b7f790d03511c30bfab87f1db0afb30317a7ff2e
|
Add retry logic for 50x responses
|
acapi/resources/acquiadata.py
|
acapi/resources/acquiadata.py
|
""" Acquia Cloud API data resource. """
import json
import logging
import requests
import requests_cache
from platform import python_version
from pprint import pformat
from ..version import __version__
LOGGER = logging.getLogger('acapi.resources.acquiadata')
class AcquiaData(object):
"""Acquia Cloud API abstract network resource."""
#: User Agent string
USER_AGENT = 'Acquia Cloud API Client/{mver} (Python {pver})'.format(mver=__version__,
pver=python_version())
def __init__(self, uri, auth, data=None):
""" Constructor.
Parameters
----------
uri : str
The base URI for the resource.
auth : tuple
The authentication credentials to use for the request.
data : dict
Raw data from ACAPI.
"""
self.uri = uri
self.auth = auth
self.data = data
self.last_response = None
def create_task(self, uri, data):
""" Create a new task object from a responses response object.
Parameters
----------
uri: str
The URI for the action that triggered the task.
data: dict
The task data returned by the triggering request.
Returns
-------
Task
The Task object.
"""
# We have to do this here to avoid circular dependencies
from .task import Task
task = Task(uri, self.auth, data=data)
return task
def get_last_response(self):
""" Fetch the last response object. """
return self.last_response
def request(self, uri=None, method='GET', data=None, params=None, decode_json=True):
"""Perform a HTTP requests.
Parameters
----------
uri : str
The URI to use for the request.
method : str
The HTTP method to use for the request.
auth : tuple
The authentication credentials to use for the request.
data : dict
Any data to send as part of a post request body.
params : dict
Query string parameters.
Returns
-------
dict
Decoded JSON response data as a dict object.
"""
self.last_response = None
if None == uri:
uri = self.uri
headers = {'User-Agent': self.USER_AGENT}
uri = '{}.json'.format(uri)
if 'GET' == method:
resp = requests.get(uri, auth=self.auth, headers=headers, params=params)
if 'POST' == method:
jdata = json.dumps(data)
resp = requests.post(uri, auth=self.auth, headers=headers, params=params, data=jdata)
# This is a sledgehammer but fine grained invalidation is messy.
requests_cache.clear()
if 'DELETE' == method:
resp = requests.delete(uri, auth=self.auth, headers=headers, params=params)
# Quickest and easiest way to do this.
requests_cache.clear()
if hasattr(resp, 'from_cache') and resp.from_cache:
LOGGER.info("%s %s returned from cache", method, uri)
self.last_response = resp
if resp.status_code != requests.codes.ok:
try:
raise resp.raise_for_status()
except requests.exceptions.HTTPError as exp:
LOGGER.info("Failed request response headers: \n%s",
pformat(exp.response.headers, indent=2))
raise
if decode_json:
return resp.json()
return resp.content
|
Python
| 0.000001 |
@@ -98,16 +98,28 @@
ts_cache
+%0Aimport time
%0A%0Afrom p
@@ -2506,32 +2506,92 @@
GET' == method:%0A
+ attempt = 0%0A while attempt %3C= 5:%0A
resp
@@ -2660,16 +2660,451 @@
arams)%0A%0A
+ if resp.status_code not in range(500, 505):%0A # No need to retry for if not a server error type.%0A break%0A%0A attempt += 1%0A params%5B'acapi_retry'%5D = attempt%0A time.sleep((attempt ** 2.0) / 10)%0A%0A%0A # We need to unset the property or it sticks around.%0A if 'acapi_retry' in params:%0A del params%5B'acapi_retry'%5D%0A%0A
|
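Decoded from the URL-encoded hunks, the patch wraps the GET request in a bounded retry loop with quadratic back-off for 50x responses, tagging each retry with a temporary `acapi_retry` query parameter that is removed afterwards. Roughly, inside `AcquiaData.request()` the added logic reads:

```python
import time  # added near the top of the module by the patch

# inside AcquiaData.request(), after headers are built:
if 'GET' == method:
    attempt = 0
    while attempt <= 5:
        resp = requests.get(uri, auth=self.auth, headers=headers, params=params)

        if resp.status_code not in range(500, 505):
            # No need to retry if this is not a server-error response.
            break

        attempt += 1
        params['acapi_retry'] = attempt
        time.sleep((attempt ** 2.0) / 10)

# The retry marker must be unset or it sticks around for later requests.
if 'acapi_retry' in params:
    del params['acapi_retry']
```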
15403668edf9b81b9dbb2c3b0075416e422ce55c
|
bump version to dev55
|
symposion/__init__.py
|
symposion/__init__.py
|
__version__ = "1.0b1.dev54"
|
Python
| 0 |
@@ -22,7 +22,7 @@
dev5
-4
+5
%22%0A
|
c25cf82668817996b45d824cff59eed3b37b9686
|
Allow QVR Pro port to be optional on config (#33901)
|
homeassistant/components/qvr_pro/__init__.py
|
homeassistant/components/qvr_pro/__init__.py
|
"""Support for QVR Pro NVR software by QNAP."""
import logging
from pyqvrpro import Client
from pyqvrpro.client import AuthenticationError, InsufficientPermissionsError
from requests.exceptions import ConnectionError as RequestsConnectionError
import voluptuous as vol
from homeassistant.components.camera import DOMAIN as CAMERA_DOMAIN
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from .const import (
CONF_EXCLUDE_CHANNELS,
DOMAIN,
SERVICE_START_RECORD,
SERVICE_STOP_RECORD,
)
SERVICE_CHANNEL_GUID = "guid"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_EXCLUDE_CHANNELS, default=[]): vol.All(
cv.ensure_list_csv, [cv.positive_int]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_CHANNEL_RECORD_SCHEMA = vol.Schema(
{vol.Required(SERVICE_CHANNEL_GUID): cv.string}
)
def setup(hass, config):
"""Set up the QVR Pro component."""
conf = config[DOMAIN]
user = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
host = conf[CONF_HOST]
port = conf.get(CONF_PORT)
excluded_channels = conf[CONF_EXCLUDE_CHANNELS]
try:
qvrpro = Client(user, password, host, port=port)
channel_resp = qvrpro.get_channel_list()
except InsufficientPermissionsError:
_LOGGER.error("User must have Surveillance Management permission")
return False
except AuthenticationError:
_LOGGER.error("Authentication failed")
return False
except RequestsConnectionError:
_LOGGER.error("Error connecting to QVR server")
return False
channels = []
for channel in channel_resp["channels"]:
if channel["channel_index"] + 1 in excluded_channels:
continue
channels.append(channel)
hass.data[DOMAIN] = {"channels": channels, "client": qvrpro}
load_platform(hass, CAMERA_DOMAIN, DOMAIN, {}, config)
# Register services
def handle_start_record(call):
guid = call.data[SERVICE_CHANNEL_GUID]
qvrpro.start_recording(guid)
def handle_stop_record(call):
guid = call.data[SERVICE_CHANNEL_GUID]
qvrpro.stop_recording(guid)
hass.services.register(
DOMAIN,
SERVICE_START_RECORD,
handle_start_record,
schema=SERVICE_CHANNEL_RECORD_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_STOP_RECORD,
handle_stop_record,
schema=SERVICE_CHANNEL_RECORD_SCHEMA,
)
return True
|
Python
| 0 |
@@ -638,24 +638,45 @@
_RECORD,%0A)%0A%0A
+DEFAULT_PORT = 8080%0A%0A
SERVICE_CHAN
@@ -1011,16 +1011,38 @@
ONF_PORT
+, default=DEFAULT_PORT
): cv.po
@@ -1560,13 +1560,9 @@
conf
-.get(
+%5B
CONF
@@ -1566,17 +1566,17 @@
ONF_PORT
-)
+%5D
%0A exc
|
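Decoded, the change introduces a `DEFAULT_PORT`, makes the optional `CONF_PORT` schema entry default to it, and switches `setup()` to direct indexing since the key is now always present. The relevant pieces after the patch (the rest of `CONFIG_SCHEMA` and `setup()` are as shown above):

```python
DEFAULT_PORT = 8080

# In CONFIG_SCHEMA:
#     vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,

# In setup(), the voluptuous default guarantees the key exists:
#     port = conf[CONF_PORT]   # was: conf.get(CONF_PORT)
```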
c06d92900a6f0bacd09e06ae5475a7731fb88f93
|
Align away state tag with device_trackers (#9884)
|
homeassistant/components/sensor/mqtt_room.py
|
homeassistant/components/sensor/mqtt_room.py
|
"""
Support for MQTT room presence detection.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.mqtt_room/
"""
import asyncio
import logging
import json
from datetime import timedelta
import voluptuous as vol
from homeassistant.core import callback
import homeassistant.components.mqtt as mqtt
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_TIMEOUT)
from homeassistant.components.mqtt import CONF_STATE_TOPIC
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import dt, slugify
_LOGGER = logging.getLogger(__name__)
DEPENDENCIES = ['mqtt']
ATTR_DEVICE_ID = 'device_id'
ATTR_DISTANCE = 'distance'
ATTR_ID = 'id'
ATTR_ROOM = 'room'
CONF_DEVICE_ID = 'device_id'
CONF_ROOM = 'room'
CONF_AWAY_TIMEOUT = 'away_timeout'
DEFAULT_NAME = 'Room Sensor'
DEFAULT_TIMEOUT = 5
DEFAULT_AWAY_TIMEOUT = 0
DEFAULT_TOPIC = 'room_presence'
STATE_AWAY = 'away'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Required(CONF_STATE_TOPIC, default=DEFAULT_TOPIC): cv.string,
vol.Required(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_AWAY_TIMEOUT,
default=DEFAULT_AWAY_TIMEOUT): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string
})
MQTT_PAYLOAD = vol.Schema(vol.All(json.loads, vol.Schema({
vol.Required(ATTR_ID): cv.string,
vol.Required(ATTR_DISTANCE): vol.Coerce(float),
}, extra=vol.ALLOW_EXTRA)))
@asyncio.coroutine
def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up MQTT room Sensor."""
async_add_devices([MQTTRoomSensor(
config.get(CONF_NAME),
config.get(CONF_STATE_TOPIC),
config.get(CONF_DEVICE_ID),
config.get(CONF_TIMEOUT),
config.get(CONF_AWAY_TIMEOUT)
)])
class MQTTRoomSensor(Entity):
"""Representation of a room sensor that is updated via MQTT."""
def __init__(self, name, state_topic, device_id, timeout, consider_home):
"""Initialize the sensor."""
self._state = STATE_AWAY
self._name = name
self._state_topic = '{}{}'.format(state_topic, '/+')
self._device_id = slugify(device_id).upper()
self._timeout = timeout
self._consider_home = \
timedelta(seconds=consider_home) if consider_home \
else None
self._distance = None
self._updated = None
def async_added_to_hass(self):
"""Subscribe to MQTT events.
This method must be run in the event loop and returns a coroutine.
"""
@callback
def update_state(device_id, room, distance):
"""Update the sensor state."""
self._state = room
self._distance = distance
self._updated = dt.utcnow()
self.async_schedule_update_ha_state()
@callback
def message_received(topic, payload, qos):
"""Handle new MQTT messages."""
try:
data = MQTT_PAYLOAD(payload)
except vol.MultipleInvalid as error:
_LOGGER.debug(
"Skipping update because of malformatted data: %s", error)
return
device = _parse_update_data(topic, data)
if device.get(CONF_DEVICE_ID) == self._device_id:
if self._distance is None or self._updated is None:
update_state(**device)
else:
# update if:
# device is in the same room OR
# device is closer to another room OR
# last update from other room was too long ago
timediff = dt.utcnow() - self._updated
if device.get(ATTR_ROOM) == self._state \
or device.get(ATTR_DISTANCE) < self._distance \
or timediff.seconds >= self._timeout:
update_state(**device)
return mqtt.async_subscribe(
self.hass, self._state_topic, message_received, 1)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_DISTANCE: self._distance
}
@property
def state(self):
"""Return the current room of the entity."""
return self._state
def update(self):
"""Update the state for absent devices."""
if self._updated \
and self._consider_home \
and dt.utcnow() - self._updated > self._consider_home:
self._state = STATE_AWAY
def _parse_update_data(topic, data):
"""Parse the room presence update."""
parts = topic.split('/')
room = parts[-1]
device_id = slugify(data.get(ATTR_ID)).upper()
distance = data.get('distance')
parsed_data = {
ATTR_DEVICE_ID: device_id,
ATTR_ROOM: room,
ATTR_DISTANCE: distance
}
return parsed_data
|
Python
| 0 |
@@ -480,16 +480,32 @@
_TIMEOUT
+, STATE_NOT_HOME
)%0Afrom h
@@ -1052,29 +1052,8 @@
e'%0A%0A
-STATE_AWAY = 'away'%0A%0A
PLAT
@@ -2227,20 +2227,24 @@
= STATE_
-AWAY
+NOT_HOME
%0A
@@ -4882,20 +4882,24 @@
= STATE_
-AWAY
+NOT_HOME
%0A%0A%0Adef _
|
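Decoded, the patch drops the local `STATE_AWAY = 'away'` constant and imports Home Assistant's standard `STATE_NOT_HOME` instead, so the room sensor reports the same away state as device trackers. The changed lines, roughly:

```python
from homeassistant.const import CONF_NAME, CONF_TIMEOUT, STATE_NOT_HOME

# In MQTTRoomSensor.__init__():
#     self._state = STATE_NOT_HOME

# In MQTTRoomSensor.update(), when the device has been absent too long:
#     self._state = STATE_NOT_HOME
```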
508dca3ee509b1a7b8a5c79a0b00ade6dc959bb8
|
Disable user related views for now
|
hubology/__init__.py
|
hubology/__init__.py
|
from flask import Flask
from flask import request, jsonify
from flask import render_template, current_app
from functools import wraps
import logging
import json
import urllib
import urllib2
import uuid
from flask.ext.login import LoginManager, current_user
from hubology.models import HubUser
def geocode_location(location_name):
try:
location = None
if location_name not in ('', None):
response = urllib2.urlopen("https://maps.googleapis.com/maps/api/geocode/json?%s" %
urllib.urlencode({'address': location_name, 'sensor':'false'}))
data = response.read()
geo_info = json.loads(data)
results = geo_info.get('results')
if results is not None and len(results) > 0:
geometry = results[0].get('geometry')
if geometry is not None:
location = geometry.get('location')
return location
except:
logging.exception("problem geocoding location")
return None
app = Flask(__name__)
app.secret_key = str(uuid.uuid4())
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.user_loader
def load_user(userid):
return HubUser.find(userid)
login_manager.login_view = "/sign-in"
login_manager.login_message = u"Please sign in to access hub-ology."
#Setup 404 handler
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
#Setup 500 handler
@app.errorhandler(500)
def internal_server_error(e):
if current_user:
from hubology.views.sign_out import sign_out
sign_out()
return render_template('500.html'), 500
def templated(template=None):
def decorator(f):
@wraps(f)
def decorated_function(*args, **kwargs):
template_name = template
if template_name is None:
template_name = request.endpoint \
.replace('.', '/') + '.html'
ctx = f(*args, **kwargs)
if ctx is None:
ctx = {}
elif not isinstance(ctx, dict):
return ctx
return render_template(template_name, **ctx)
return decorated_function
return decorator
@app.route('/')
@templated('index.html')
def index():
#render the main site page
return dict()
#Import other views
import hubology.views.about
import hubology.views.aboutlogo
import hubology.views.educators
import hubology.views.mentors
import hubology.views.developers
import hubology.views.designers
import hubology.views.inspire
import hubology.views.educate
import hubology.views.do
# import hubology.views.sign_in
# import hubology.views.sign_out
import hubology.views.hub
import hubology.views.map
import hubology.views.people
import hubology.views.profile
import hubology.views.delete_profile
|
Python
| 0 |
@@ -251,16 +251,18 @@
t_user%0A%0A
+#
from hub
@@ -1167,16 +1167,18 @@
p(app)%0A%0A
+#
@login_m
@@ -1196,16 +1196,18 @@
_loader%0A
+#
def load
@@ -1221,16 +1221,18 @@
serid):%0A
+#
retu
@@ -2778,16 +2778,18 @@
ews.map%0A
+#
import h
@@ -2839,16 +2839,18 @@
profile%0A
+#
import h
|
ec14293f02de84a12ce602d6a0dfbb3c21203bc4
|
fix data types from ENV
|
channelstream/cli/utils.py
|
channelstream/cli/utils.py
|
import argparse
import copy
import logging
import json
import pkg_resources
import jinja2
import os
from channelstream.cli import CONFIGURABLE_PARAMS, SHARED_DEFAULTS
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def main():
config = copy.deepcopy(SHARED_DEFAULTS)
parser = argparse.ArgumentParser(add_help=True)
parser.add_argument(
"operation", help="Operation", default=None, choices=["make_config"]
)
parser.add_argument("-j", "--json", dest="json", help="Config JSON", default=None)
parser.add_argument(
"-o", "--output", dest="output", help="Output file", required=True
)
args = parser.parse_args()
if args.json:
data_json = json.loads(args.json)
for key in CONFIGURABLE_PARAMS:
conf_value = data_json.get(key)
if conf_value:
config[key] = conf_value
else:
for key in CONFIGURABLE_PARAMS:
conf_value = os.environ.get(f"channelstream_{key}".upper())
if conf_value is not None:
config[key] = conf_value
if args.operation == "make_config":
template_path = os.path.join("templates", "ini", "channelstream.ini.jinja2")
template_str = pkg_resources.resource_string("channelstream", template_path)
template = jinja2.Template(template_str.decode("utf8"))
template_vars = config
compiled = template.render(**template_vars)
with open(args.output, "w") as f:
f.write(compiled)
log.info("Config written")
|
Python
| 0.000002 |
@@ -160,16 +160,65 @@
DEFAULTS
+%0Afrom channelstream.utils import set_config_types
%0A%0Alog =
@@ -1128,16 +1128,58 @@
nf_value
+%0A config = set_config_types(config)
%0A%0A if
|
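Decoded, the patch imports `set_config_types` from `channelstream.utils` and runs the assembled config through it after values are pulled from environment variables, which the commit subject suggests is meant to coerce the string-typed env values back to their proper types. Sketch of the added lines:

```python
from channelstream.utils import set_config_types

# after the CONFIGURABLE_PARAMS / os.environ loop in main():
config = set_config_types(config)
```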
63b6c523bf1d19747cf8dfbb0693dc306b880aa4
|
Enable 1-time correlation test again
|
skxray/core/tests/test_correlation.py
|
skxray/core/tests/test_correlation.py
|
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import numpy as np
from numpy.testing import (assert_array_almost_equal,
assert_almost_equal)
from skimage import data
import skxray.core.correlation as corr
import skxray.core.roi as roi
from skxray.testing.decorators import skip_if
logger = logging.getLogger(__name__)
# It is unclear why this test is so slow. Can we speed this up at all?
@skip_if(True)
def test_correlation():
num_levels = 4
num_bufs = 8 # must be even
num_qs = 2 # number of interested roi's (rings)
img_dim = (50, 50) # detector size
roi_data = np.array(([10, 20, 12, 14], [40, 10, 9, 10]),
dtype=np.int64)
indices = roi.rectangles(roi_data, img_dim)
img_stack = np.random.randint(1, 5, size=(500, ) + img_dim)
g2, lag_steps = corr.multi_tau_auto_corr(num_levels, num_bufs, indices,
img_stack)
assert_array_almost_equal(lag_steps, np.array([0, 1, 2, 3, 4, 5, 6, 7, 8,
10, 12, 14, 16, 20, 24, 28,
32, 40, 48, 56]))
assert_array_almost_equal(g2[1:, 0], 1.00, decimal=2)
assert_array_almost_equal(g2[1:, 1], 1.00, decimal=2)
coins = data.camera()
coins_stack = []
for i in range(500):
coins_stack.append(coins)
coins_mesh = np.zeros_like(coins)
coins_mesh[coins < 30] = 1
coins_mesh[coins > 50] = 2
g2, lag_steps = corr.multi_tau_auto_corr(num_levels, num_bufs, coins_mesh,
coins_stack)
assert_almost_equal(True, np.all(g2[:, 0], axis=0))
assert_almost_equal(True, np.all(g2[:, 1], axis=0))
|
Python
| 0 |
@@ -2977,23 +2977,8 @@
ll?%0A
-@skip_if(True)%0A
def
@@ -3837,16 +3837,105 @@
mal=2)%0A%0A
+%0Adef test_image_stack_correlation():%0A num_levels = 1%0A num_bufs = 2 # must be even%0A
coin
@@ -3993,19 +3993,17 @@
n range(
-500
+2
):%0A
|
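Decoded, the patch removes the `@skip_if(True)` decorator so the one-time correlation test runs again, and splits the `coins` portion of `test_correlation` into a new, much cheaper `test_image_stack_correlation` (1 level, 2 buffers, a 2-image stack). Roughly, the new test begins:

```python
def test_image_stack_correlation():
    num_levels = 1
    num_bufs = 2  # must be even
    coins = data.camera()
    coins_stack = []
    for i in range(2):
        coins_stack.append(coins)
    # ...the rest of the original coins-based assertions follow unchanged
```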
ac1f44247a2c3b943641e076154bacab3299ceec
|
Remove unused user.(show|hide)PastEvents (jsonrpc)
|
indico/MaKaC/services/implementation/user.py
|
indico/MaKaC/services/implementation/user.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from flask import session
from indico.modules.users import User
from indico.util.i18n import _
from indico.util.redis import avatar_links
from MaKaC.services.interface.rpc.common import ServiceError
from MaKaC.services.implementation.base import LoggedOnlyService, AdminService, ParameterManager
from MaKaC.user import AvatarHolder
class UserBaseService(LoggedOnlyService):
def _checkParams(self):
self._pm = ParameterManager(self._params)
userId = self._pm.extract("userId", None)
if userId is not None:
ah = AvatarHolder()
self._target = ah.getById(userId)
else:
raise ServiceError("ERR-U5", _("User id not specified"))
class UserModifyBase(UserBaseService):
def _checkProtection(self):
LoggedOnlyService._checkProtection(self)
if self._aw.getUser():
if not self._target.canModify(self._aw):
raise ServiceError("ERR-U6", _("You are not allowed to perform this request"))
else:
raise ServiceError("ERR-U7", _("You are currently not authenticated. Please log in again."))
class UserGetEmail(LoggedOnlyService):
def _checkParams(self):
LoggedOnlyService._checkParams(self)
self._target = self.getAW().getUser()
def _getAnswer(self):
if self._target:
return self._target.getEmail()
else:
raise ServiceError("ERR-U4", "User is not logged in")
class UserShowPastEvents(UserModifyBase):
def _getAnswer(self):
self._target.getPersonalInfo().setShowPastEvents(True)
return True
class UserHidePastEvents(UserModifyBase):
def _getAnswer(self):
self._target.getPersonalInfo().setShowPastEvents(False)
return True
class UserRefreshRedisLinks(AdminService):
def _checkParams(self):
AdminService._checkParams(self)
self._pm = ParameterManager(self._params)
user_id = self._pm.extract("userId", pType=int, allowEmpty=True)
self._user = User.get(user_id) if user_id is not None else session.user
def _getAnswer(self):
avatar_links.delete_avatar(self._user) # clean start
avatar_links.init_links(self._user)
methodMap = {
"data.email.get": UserGetEmail,
"showPastEvents": UserShowPastEvents,
"hidePastEvents": UserHidePastEvents,
"refreshRedisLinks": UserRefreshRedisLinks
}
|
Python
| 0.000001 |
@@ -2179,315 +2179,8 @@
)%0A%0A%0A
-class UserShowPastEvents(UserModifyBase):%0A def _getAnswer(self):%0A self._target.getPersonalInfo().setShowPastEvents(True)%0A return True%0A%0A%0Aclass UserHidePastEvents(UserModifyBase):%0A def _getAnswer(self):%0A self._target.getPersonalInfo().setShowPastEvents(False)%0A return True%0A%0A%0A
clas
@@ -2678,92 +2678,8 @@
il,%0A
- %22showPastEvents%22: UserShowPastEvents,%0A %22hidePastEvents%22: UserHidePastEvents,%0A
|
ae780b08e27f8567b028dd3411de8829f4f1bfed
|
Add an option for the number of dimensions in the external FSI config.
|
SU2_PY/FSI/io/FSI_config.py
|
SU2_PY/FSI/io/FSI_config.py
|
#!/usr/bin/env python
# -*-coding:utf-8 -*
# \file FSI_config.py
# \brief Python class for handling configuration file for FSI computation.
# \author THOMAS David, University of Liege, Belgium. Department of Aerospace and Mechanical Engineering
# \version BETA
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
from ..util import switch
# ----------------------------------------------------------------------
# FSI Configuration Class
# ----------------------------------------------------------------------
class FSIConfig:
"""
Class that contains all the parameters coming from the FSI configuration file.
Read the file and store all the options into a dictionary.
"""
def __init__(self,FileName):
self.ConfigFileName = FileName
self._ConfigContent = {}
self.readConfig()
def __str__(self):
tempString = str()
for key, value in self._ConfigContent.items():
tempString += "{} = {}\n".format(key,value)
return tempString
def __getitem__(self,key):
return self._ConfigContent[key]
def __setitem__(self, key, value):
self._ConfigContent[key] = value
def readConfig(self):
input_file = open(self.ConfigFileName)
while 1:
line = input_file.readline()
if not line:
break
# remove line returns
line = line.strip('\r\n')
# make sure it has useful data
if (not "=" in line) or (line[0] == '%'):
continue
# split across equal sign
line = line.split("=",1)
this_param = line[0].strip()
this_value = line[1].strip()
for case in switch(this_param):
#integer values
#if case("NDIM") : pass
#if case("MESH_DEF_LIN_ITER") : pass
#if case("MESH_DEF_NONLIN_ITER") : pass
if case("RESTART_ITER") : pass
if case("NB_EXT_ITER") : pass
if case("NB_FSI_ITER") :
self._ConfigContent[this_param] = int(this_value)
break
#float values
if case("AITKEN_PARAM") : pass
if case("START_TIME") : pass
if case("UNST_TIMESTEP") : pass
if case("UNST_TIME") : pass
if case("FSI_TOLERANCE") :
self._ConfigContent[this_param] = float(this_value)
break
#string values
if case("CFD_CONFIG_FILE_NAME") : pass
if case("CSD_SOLVER") : pass
if case("CSD_CONFIG_FILE_NAME") : pass
if case("RESTART_SOL") : pass
if case("MATCHING_MESH") : pass
if case("DISP_PRED") : pass
if case("AITKEN_RELAX") : pass
if case("UNSTEADY_SIMULATION") : pass
if case("INTERNAL_FLOW") :
#if case("MESH_DEF_METHOD") : pass
self._ConfigContent[this_param] = this_value
break
if case():
print(this_param + " is an invalid option !")
break
#end for
#def dump()
|
Python
| 0 |
@@ -1899,17 +1899,16 @@
lues%0D%0A%09%09
-#
if case(
|
23fd2953a41d8b087fa5252df2de0baf36244e43
|
remove stupid debug string
|
doc/readthedoc/conf.py
|
doc/readthedoc/conf.py
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
from recommonmark.parser import CommonMarkParser
sys.path.insert(0, os.path.abspath('_build_temp/python'))
print("sage sage sage")
# -- Project information -----------------------------------------------------
project = 'FATE'
copyright = '2020, FederatedAI'
author = 'FederatedAI'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autosummary',
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'autodocsumm',
'recommonmark'
]
autosummary_generate = True
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
add_module_names = False
master_doc = 'index'
# hack to replace rst file link to html link
def ultimateReplace(app, docname, source):
result = source[0]
result = result.replace(".rst", ".html")
source[0] = result
def setup(app):
if not os.path.exists("_build_temp"):
import shutil
import tempfile
from pathlib import Path
with tempfile.TemporaryDirectory() as d:
shutil.copytree("../..", Path(d).joinpath("_build_temp"))
shutil.copytree(Path(d).joinpath("_build_temp"), "_build_temp")
app.add_config_value('ultimate_replacements', {}, True)
app.connect('source-read', ultimateReplace)
|
Python
| 0.001727 |
@@ -681,33 +681,8 @@
))%0A%0A
-print(%22sage sage sage%22)%0A%0A
# --
|
51e5aadd1db42d2f1aa4a69df89a57e7e7954a7c
|
simplify formdata type infering. #111
|
scrapy/http/request/form.py
|
scrapy/http/request/form.py
|
"""
This module implements the FormRequest class which is a more covenient class
(than Request) to generate Requests based on form data.
See documentation in docs/topics/request-response.rst
"""
import urllib
import lxml.html
from scrapy.http.request import Request
from scrapy.utils.python import unicode_to_str
class FormRequest(Request):
def __init__(self, *args, **kwargs):
formdata = kwargs.pop('formdata', None)
if formdata and kwargs.get('method') is None:
kwargs['method'] = 'POST'
super(FormRequest, self).__init__(*args, **kwargs)
if formdata:
items = formdata.iteritems() if isinstance(formdata, dict) else formdata
querystr = _urlencode(items, self.encoding)
if self.method == 'POST':
self.headers.setdefault('Content-Type', 'application/x-www-form-urlencoded')
self._set_body(querystr)
else:
self._set_url(self.url + ('&' if '?' in self.url else '?') + querystr)
@classmethod
def from_response(cls, response, formname=None, formnumber=0, formdata=None,
clickdata=None, dont_click=False, **kwargs):
from scrapy.selector.lxmldocument import LxmlDocument
if not hasattr(formdata, "items"):
try:
formdata = dict(formdata) if formdata else {}
except (ValueError, TypeError):
raise ValueError('formdata should be a dict or iterable of tuples')
kwargs.setdefault('encoding', response.encoding)
root = LxmlDocument(response, lxml.html.HTMLParser)
form = _get_form(root, formname, formnumber, response)
formdata = _get_inputs(form, formdata, dont_click, clickdata, response)
url = form.action or form.base_url
return cls(url, method=form.method, formdata=formdata, **kwargs)
def _urlencode(seq, enc):
values = [(unicode_to_str(k, enc), unicode_to_str(v, enc))
for k, vs in seq
for v in (vs if hasattr(vs, '__iter__') else [vs])]
return urllib.urlencode(values, doseq=1)
def _get_form(root, formname, formnumber, response):
"""
Uses all the passed arguments to get the required form
element
"""
if not root.forms:
raise ValueError("No <form> element found in %s" % response)
if formname is not None:
f = root.xpath('//form[@name="%s"]' % formname)
if f:
return f[0]
# If we get here, it means that either formname was None
# or invalid
if formnumber is not None:
try:
form = root.forms[formnumber]
except IndexError:
raise IndexError("Form number %d not found in %s" %
(formnumber, response))
else:
return form
def _get_inputs(form, formdata, dont_click, clickdata, response):
inputs = [(n, v) for n, v in form.form_values() if n not in formdata]
if not dont_click:
clickables = [el for el in form.inputs if el.type == 'submit']
if clickables:
clickable = _get_clickable(clickdata, clickables, form)
inputs.append(clickable)
inputs.extend(formdata.iteritems())
return inputs
def _get_clickable(clickdata, clickables, form):
"""
Returns the clickable element specified in clickdata,
if the latter is given. If not, it returns the first
clickable element found
"""
# If we don't have clickdata, we just use the first clickable element
if clickdata is None:
el = clickables.pop(0)
return (el.name, el.value)
# If clickdata is given, we compare it to the clickable elements to find a
# match. We first look to see if the number is specified in clickdata,
# because that uniquely identifies the element
nr = clickdata.get('nr', None)
if nr is not None:
try:
el = list(form.inputs)[nr]
except IndexError:
pass
else:
return (el.name, el.value)
# We didn't find it, so now we build an XPath expression out of the other
# arguments, because they can be used as such
xpath = u'.//*' + \
u''.join(u'[@%s="%s"]' % tuple(c) for c in clickdata.iteritems())
el = form.xpath(xpath)
if len(el) == 1:
return (el[0].name, el[0].value)
elif len(el) > 1:
raise ValueError("Multiple elements found (%r) matching the criteria "
"in clickdata: %r" % (el, clickdata))
else:
raise ValueError('No clickable element matching clickdata: %r' % (clickdata,))
|
Python
| 0.000026 |
@@ -1252,259 +1252,8 @@
ent%0A
- if not hasattr(formdata, %22items%22):%0A try:%0A formdata = dict(formdata) if formdata else %7B%7D%0A except (ValueError, TypeError):%0A raise ValueError('formdata should be a dict or iterable of tuples')%0A%0A
@@ -2627,24 +2627,186 @@
response):%0A
+ try:%0A formdata = dict(formdata or ())%0A except (ValueError, TypeError):%0A raise ValueError('formdata should be a dict or iterable of tuples')%0A%0A
inputs =
|
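The diff above replaces the hasattr-based check with a single normalization step. A minimal sketch of that pattern, assuming only plain Python (the helper name is illustrative, not from Scrapy):
def normalize_formdata(formdata):
    # Accept None, a dict, or an iterable of (name, value) tuples.
    try:
        return dict(formdata or ())
    except (ValueError, TypeError):
        raise ValueError('formdata should be a dict or iterable of tuples')
# Usage: all three calls return a plain dict.
assert normalize_formdata(None) == {}
assert normalize_formdata({'a': '1'}) == {'a': '1'}
assert normalize_formdata([('a', '1')]) == {'a': '1'}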
f2181d50fb17be9e1db6129300d720139ca00636
|
use absolute imports for compatibility with python 2.5
|
scrapy/selector/__init__.py
|
scrapy/selector/__init__.py
|
"""
XPath selectors
Two backends are currently available: libxml2 and lxml
To select the backend explicitly use the SELECTORS_BACKEND variable in your
project. Otherwise, libxml2 will be tried first. If libxml2 is not available,
lxml will be used.
"""
from scrapy.conf import settings
if settings['SELECTORS_BACKEND'] == 'lxml':
from .lxmlsel import *
elif settings['SELECTORS_BACKEND'] == 'libxml2':
from .libxml2sel import *
elif settings['SELECTORS_BACKEND'] == 'dummy':
from .dummysel import *
else:
try:
import libxml2
except ImportError:
try:
import lxml
except ImportError:
from .dummysel import *
else:
from .lxmlsel import *
else:
from .libxml2sel import *
|
Python
| 0 |
@@ -327,32 +327,47 @@
lxml':%0A from
+scrapy.selector
.lxmlsel import
@@ -418,32 +418,47 @@
xml2':%0A from
+scrapy.selector
.libxml2sel impo
@@ -510,32 +510,47 @@
ummy':%0A from
+scrapy.selector
.dummysel import
@@ -696,16 +696,31 @@
from
+scrapy.selector
.dummyse
@@ -761,16 +761,31 @@
from
+scrapy.selector
.lxmlsel
@@ -817,16 +817,31 @@
from
+scrapy.selector
.libxml2
|
e5e60ed15b4da27087883c811d0a6ac8af74fccd
|
Don't repeat sentences in summary
|
hnSummarized/sentenceSelection.py
|
hnSummarized/sentenceSelection.py
|
"""
Author: Nicholas Rutherford
License: MIT
"""
import nltk
import networkx as nx
from nltk.corpus import stopwords
import re
def word_tokenize(s, stop_words):
"""Convert a sentence into a list of words, excluding stop words
Args:
s (str) - A sentence to split into words
stop_words ([str]) - A list of words to ignore
Returns:
[str] - The words of the sentence, not including stop_words
"""
quality_words = []
base = nltk.word_tokenize(s)
for word in base:
if word not in stop_words:
quality_words.append(word)
return quality_words
def tokeniseSentences(rawText):
"""Convert a block of text into a list of sentences
Args:
rawText (str) - A block of text
Returns:
[str] - List of sentences
"""
# Remove newlines; they can confuse the sentence detector
rawText = rawText.replace("\n", " ")
# Load pre-learned sentence detector, and split into sentences
sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
return sent_detector.tokenize(rawText)
def add_nodes(g, sentList):
"""Add sentence nodes to the graph
Args:
g (nx.graph)- The graph
sentList ([str]) - List of sentences
"""
for sentence in sentList:
g.add_node(sentence)
def add_edges(g, sentList,stop_words):
"""Add weighted edges to the graph
Args:
g (nx.graph) - The graph
sentList ([str]) - List of sentences
stop_words ([str]) - List of words to ignore
"""
# Word tokenize each sentence
token_list = []
for sent in sentList:
token_list.append((sent, word_tokenize(sent, stop_words)))
# Compute the edge weight for two sentences
for i, pair1 in enumerate(token_list):
for j, pair2 in enumerate(token_list):
if i < j:
words1 = pair1[1]
words2 = pair2[1]
wordCount = 0
for word in words1:
if word in words2:
wordCount += 1
w = wordCount / float((len(words1) + len(words2)))
g.add_edge(pair1[0], pair2[0], weight=w)
def construct_graph(g, sentList, stop_words):
"""Add nodes and edges to the graph according to the textRank algorithm
Args:
g (nx.graph) - The graph
sentList ([str]) - List of sentences
stop_words ([str]) - List of words to ignore
"""
add_nodes(g, sentList)
add_edges(g, sentList, stop_words)
def text_rank(sentList, stop_words):
"""Performs the textRank algorithm to obtain 'importance' scores for
each sentence.
Args:
sentList ([str]) - List of sentences
stop_words ([str]) - List of words to ignore
Returns:
[("str", Float)] - List of sentence, score pairs sorted descending by
score value
"""
g = nx.Graph()
construct_graph(g, sentList, stop_words)
scores = nx.pagerank(g).items()
scores = sorted(scores, key=lambda x: x[1], reverse=True)
return scores
def summary_para(scores, sentList, K):
"""Constructs the summary text selecting the K best sentences and
formatting them in chronological order
Args:
scores [("str", Float)] - List of sentence, score pairs sorted
descending by score value
sentList ([str]) - List of sentences
K (int) - The number of sentences that the summary should be
Returns:
str - The K-sentence summary
"""
good_sent = [x[0] for x in scores[:K]]
# Return sentences above cutoff in the order they appeared in the text
toReturn = ""
skip = False # Used to insert '[...]' when sentences are skipped
for sentence in sentList:
if sentence in good_sent:
if skip:
toReturn += " [...] "
else:
toReturn += " "
toReturn += sentence
skip = False
else:
skip = True
# Remove all excessive whitespace
return re.sub(r'\s+', ' ', toReturn).strip()
def selectSentences(rawText, K):
"""Summarise text into K sentences using textRank
Args:
rawText (str) - Block of text to be summarized
K (int) - Number of sentences that the summary should be
Returns:
str - The K-sentence summary
"""
stop_words = stopwords.words('english')
sentList = tokeniseSentences(rawText)
scores = text_rank(sentList, stop_words)
return summary_para(scores, sentList, K)
|
Python
| 0.999843 |
@@ -3615,16 +3615,79 @@
res%5B:K%5D%5D
+%0A count_check = dict(zip(good_sent, %5B0 for x in good_sent%5D))
%0A%0A #
@@ -3908,16 +3908,83 @@
d_sent:%0A
+ if count_check%5Bsentence%5D %3E 0:%0A continue%0A
|
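The change above keeps a per-sentence counter so a selected sentence is emitted only once. A hedged sketch of the same idea using a set while preserving original order (names are illustrative):
def unique_in_order(sentences, selected):
    # Emit each selected sentence at most once, in the order it appears.
    seen = set()
    result = []
    for sentence in sentences:
        if sentence in selected and sentence not in seen:
            seen.add(sentence)
            result.append(sentence)
    return result
print(unique_in_order(["A.", "B.", "A.", "C."], {"A.", "C."}))  # ['A.', 'C.']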
f3da704e0c603574d7ff56b8b4d66ac2c34d015a
|
Output image fix
|
Server/src/server/reporters/tiled_brick_position_reporter.py
|
Server/src/server/reporters/tiled_brick_position_reporter.py
|
import cv2
from reporter import Reporter
class TiledBrickPositionReporter(Reporter):
def __init__(self, valid_locations, board_recognizer, board_descriptor, tile_brick_detector, camera):
"""
:param valid_locations Locations to search for brick in
:param board_recognizer Board recognizer
:param board_descriptor Board descriptor
:param tile_brick_detector Tile brick detector
:param camera Camera
"""
self.valid_locations = valid_locations
self.board_recognizer = board_recognizer
self.board_descriptor = board_descriptor
self.tile_brick_detector = tile_brick_detector
self.camera = camera
def run(self):
"""
Waits for brick to be positioned at any of the valid positions.
Callback function: (tile) -> ()
"""
while not self.stopped:
image = self.camera.read()
if image is None:
continue
self.board_descriptor.snapshot = self.board_recognizer.find_board(image, self.board_descriptor)
if self.board_descriptor.is_recognized():
cv2.imwrite("output_board_recognized.png", self.board_descriptor.snapshot.board_image)
tile = self.tile_brick_detector.find_brick_among_tiles(self.board_descriptor, self.valid_locations)
if tile is not None:
cv2.imwrite("output_brick_recognized.png", image)
self.callback_function(tile)
self.stop()
else:
cv2.imwrite("output_board_not_recognized.png", self.board_descriptor.snapshot.board_image)
|
Python
| 0.999992 |
@@ -1623,45 +1623,8 @@
g%22,
-self.board_descriptor.snapshot.board_
imag
|
d2cadcb9be08730f5ccefec5f3e0316265ebf307
|
Check request ID value
|
integration-tests/features/src/json_utils.py
|
integration-tests/features/src/json_utils.py
|
"""Functions for handling JSON responses returned by various API endpoints."""
import string
from src.attribute_checks import *
def get_value_using_path(obj, path):
"""Get the attribute value using the XMLpath-like path specification.
Return any attribute stored in the nested object and list hierarchy using
the 'path' where path consists of:
keys (selectors)
indexes (in case of arrays)
separated by slash, ie. "key1/0/key_x".
Usage:
get_value_using_path({"x" : {"y" : "z"}}, "x") -> {"y" : "z"}
get_value_using_path({"x" : {"y" : "z"}}, "x/y") -> "z"
get_value_using_path(["x", "y", "z"], "0") -> "x"
get_value_using_path(["x", "y", "z"], "1") -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key1/1") -> "y"
get_value_using_path({"key1" : ["x", "y", "z"],
"key2" : ["a", "b", "c", "d"]}, "key2/1") -> "b"
"""
keys = path.split("/")
for key in keys:
if key.isdigit():
obj = obj[int(key)]
else:
obj = obj[key]
return obj
def check_timestamp_in_json_response(context, attribute):
"""Check if the timestamp stored in given attribute is correct."""
timestamp = context.response.json().get(attribute)
check_timestamp(timestamp)
def check_id_value_in_json_response(context, id_attribute_name):
"""Check the ID attribute in the JSON response.
Check if ID is stored in a format like: '477e85660c504b698beae2b5f2a28b4e'
ie. it is a string with 32 characters containing 32 hexadecimal digits
"""
response = context.response
assert response is not None
json_data = response.json()
assert json_data is not None
check_attribute_presence(json_data, id_attribute_name)
id_attribute = json_data[id_attribute_name]
assert id_attribute is not None
assert isinstance(id_attribute, str) and len(id_attribute) == 32
assert all(char in string.hexdigits for char in id_attribute)
def is_empty_json_response(context):
"""Check if the JSON response is empty (but not None)."""
return context.response.json() == {}
|
Python
| 0 |
@@ -1363,24 +1363,553 @@
imestamp)%0A%0A%0A
+def check_request_id_value_in_json_response(context, attribute_name):%0A %22%22%22Check the request ID attribute in the JSON response.%0A%0A Check if ID is stored in a format like: '71769af6-0a39-4242-94be-1f84f04c8a56'%0A %22%22%22%0A response = context.response%0A assert response is not None%0A%0A json_data = response.json()%0A assert json_data is not None%0A%0A check_attribute_presence(json_data, attribute_name)%0A id_attribute = json_data%5Battribute_name%5D%0A%0A assert id_attribute is not None%0A assert check_uuid(id_attribute)%0A%0A%0A
def check_id
|
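The added helper validates a request ID such as '71769af6-0a39-4242-94be-1f84f04c8a56' via check_uuid, which is defined elsewhere in the test suite. A small stand-alone sketch of that format check, assuming only the standard uuid module (the function name is illustrative):
import uuid
def looks_like_uuid(value):
    # True for canonical UUID strings like '71769af6-0a39-4242-94be-1f84f04c8a56'.
    try:
        return str(uuid.UUID(value)) == value.lower()
    except (ValueError, AttributeError, TypeError):
        return False
assert looks_like_uuid('71769af6-0a39-4242-94be-1f84f04c8a56')
assert not looks_like_uuid('not-a-uuid')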
de4e5a34aaa322b2ce83161dd4bce7897953ab73
|
add Unix socket support to API collector
|
intelmq/bots/collectors/api/collector_api.py
|
intelmq/bots/collectors/api/collector_api.py
|
# SPDX-FileCopyrightText: 2018 tavi.poldma
#
# SPDX-License-Identifier: AGPL-3.0-or-later
# -*- coding: utf-8 -*-
"""
API Collector bot
"""
from threading import Thread
from intelmq.lib.bot import CollectorBot
from intelmq.lib.exceptions import MissingDependencyError
try:
import tornado.web
from tornado.ioloop import IOLoop
except ImportError:
IOLoop = None
else:
class Application(tornado.web.Application):
def __init__(self, request_handler, *args, **kwargs):
self.request_handler = request_handler
super().__init__(*args, **kwargs)
class MainHandler(tornado.web.RequestHandler):
def post(self):
data = self.request.body
self.application.request_handler(data)
class APICollectorBot(CollectorBot):
"""Collect data by exposing a HTTP API interface"""
name: str = "API"
port: int = 5000
__collector_empty_process: bool = True
provider: str = "APICollector"
__is_multithreadable: bool = False
def init(self):
if IOLoop is None:
raise MissingDependencyError("tornado")
app = Application(self.request_handler, [
("/intelmq/push", MainHandler),
])
self.server = app.listen(self.port)
self.eventLoopThread = Thread(target=IOLoop.current().start)
self.eventLoopThread.daemon = True
self.eventLoopThread.start()
def request_handler(self, data):
report = self.new_report()
report.add("raw", data)
self.send_message(report)
def process(self):
pass
def shutdown(self):
if self.server:
# Closes the server and the socket, prevents address already in use
self.server.stop()
if IOLoop.current():
IOLoop.current().stop()
BOT = APICollectorBot
|
Python
| 0 |
@@ -162,16 +162,68 @@
t Thread
+%0Afrom typing import Optional%0Aimport os%0Aimport socket
%0A%0Afrom i
@@ -382,16 +382,111 @@
IOLoop%0A
+ from tornado.netutil import bind_unix_socket%0A from tornado.httpserver import HTTPServer%0A
except I
@@ -1146,16 +1146,177 @@
= False
+%0A use_socket = False%0A socket_path = '/tmp/imq_api_default_socket'%0A _server: Optional%5BHTTPServer%5D = None%0A _unix_socket: Optional%5Bsocket.socket%5D = None
%0A%0A de
@@ -1515,16 +1515,225 @@
%5D)%0A%0A
+ if self.use_socket:%0A self.server = HTTPServer(app)%0A self._unix_socket = bind_unix_socket(self.socket_path)%0A self.server.add_socket(self._unix_socket)%0A else:%0A
@@ -1764,24 +1764,25 @@
(self.port)%0A
+%0A
self
|
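The diff adds an optional Unix-socket listener next to the TCP port. A minimal sketch of that Tornado pattern, with an example socket path and handler rather than the collector's actual classes:
import tornado.web
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.netutil import bind_unix_socket
class PushHandler(tornado.web.RequestHandler):
    def post(self):
        # The collector would wrap this body into a report here.
        print(self.request.body)
def serve_on_unix_socket(path='/tmp/example_api_socket'):
    app = tornado.web.Application([('/intelmq/push', PushHandler)])
    server = HTTPServer(app)
    server.add_socket(bind_unix_socket(path))  # listen on a Unix socket instead of a TCP port
    IOLoop.current().start()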
73eba2f87efc52e97f7559f9b3eeab7ed00ae567
|
disable feed_fetch_unit_test
|
python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
|
python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import paddle.dataset.flowers as flowers
import math
import paddle.fluid as fluid
import paddle.fluid.core as core
import unittest
import numpy as np
import paddle
import os
def Lenet(data, class_dim):
conv1 = fluid.layers.conv2d(data, 32, 5, 1, act=None)
bn1 = fluid.layers.batch_norm(conv1, act='relu')
pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2)
conv2 = fluid.layers.conv2d(pool1, 50, 5, 1, act=None)
bn2 = fluid.layers.batch_norm(conv2, act='relu')
pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2)
fc1 = fluid.layers.fc(pool2, size=500, act='relu')
fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax')
return fc2
class TestFetchOp(unittest.TestCase):
def parallel_exe(self, train_inputs, seed, use_cuda):
main = fluid.Program()
startup = fluid.Program()
startup.random_seed = seed
with fluid.program_guard(main, startup):
data = fluid.layers.data(
name='image', shape=[3, 224, 224], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
out = Lenet(data, class_dim=102)
loss = fluid.layers.cross_entropy(input=out, label=label)
loss = fluid.layers.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opt.minimize(loss)
# TODO(zcd): I found that once the memory optimizer is enabled,
# parallel_exe doesn't fetch some variables, such as conv2d_0.b_0@GRAD,
# conv2d_1.b_0@GRAD. Those variables should not be pruned.
# fluid.memory_optimize(main)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(startup)
feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
pe = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name, main_program=main)
fetch_list = []
all_vars = main.global_block().vars
for k, v in all_vars.items():
if 'tmp' not in k and k[0] is not '_' or v.persistable:
fetch_list.append(k)
for data in train_inputs:
ret = pe.run(fetch_list,
feed=feeder.feed(data),
return_numpy=True)
for i in range(len(fetch_list)):
assert not math.isnan(np.sum(ret[i])) and \
not math.isinf(np.sum(ret[i]))
def test_fetch_op(self):
tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16)
tst_reader_iter = tst_reader()
iters = 3
train_inputs = []
for i in range(iters):
train_inputs.append(next(tst_reader_iter))
os.environ['CPU_NUM'] = str(4)
if core.is_compiled_with_cuda():
self.parallel_exe(train_inputs, seed=1, use_cuda=True)
self.parallel_exe(train_inputs, seed=1, use_cuda=False)
class TestFeedParallel(unittest.TestCase):
def parallel_exe(self, use_cuda, seed):
main = fluid.Program()
startup = fluid.Program()
startup.random_seed = seed
with fluid.scope_guard(fluid.core.Scope()):
with fluid.program_guard(main, startup):
data = fluid.layers.data(
name='image', shape=[3, 224, 224], dtype='float32')
label = fluid.layers.data(
name='label', shape=[1], dtype='int64')
out = Lenet(data, class_dim=102)
loss = fluid.layers.cross_entropy(input=out, label=label)
loss = fluid.layers.mean(loss)
opt = fluid.optimizer.Momentum(
learning_rate=0.1,
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
opt.minimize(loss)
place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
feeder = fluid.DataFeeder(place=place, feed_list=[data, label])
reader = feeder.decorate_reader(
paddle.batch(
flowers.train(), batch_size=16), multi_devices=True)
exe = fluid.Executor(place)
exe.run(startup)
pe = fluid.ParallelExecutor(
use_cuda=use_cuda, loss_name=loss.name, main_program=main)
for batch_id, data in enumerate(reader()):
loss_np = pe.run(feed=data, fetch_list=[loss.name])[0]
print(batch_id, loss_np)
if batch_id == 2:
break
def test_feed_op(self):
os.environ['CPU_NUM'] = str(4)
if core.is_compiled_with_cuda():
self.parallel_exe(use_cuda=True, seed=1)
self.parallel_exe(use_cuda=False, seed=1)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000003 |
@@ -3287,24 +3287,64 @@
m(ret%5Bi%5D))%0A%0A
+ @unittest.skip(reason=%22CI timeout%22)%0A
def test
@@ -5385,16 +5385,56 @@
break%0A%0A
+ @unittest.skip(reason=%22CI timeout%22)%0A
def
|
16e8c5ad4adf31def560fc98ce943a878ffadb6c
|
Fix inst_var_0 column not found issue
|
rsqueakvm/model/database.py
|
rsqueakvm/model/database.py
|
from rsqueakvm.model.pointers import W_PointersObject
from rpython.rlib import jit
from rsqueakvm.plugins.database_plugin import dbm, SQLConnection
from rsqueakvm.error import PrimitiveFailedError
class DBType(object): pass
NIL = DBType()
TEXT = DBType()
INTEGER = DBType()
REAL = DBType()
BLOB = DBType()
ALTER_SQL = "ALTER TABLE %s ADD COLUMN inst_var_%s %s;"
CREATE_SQL = "CREATE TABLE IF NOT EXISTS %s (id INTEGER);"
INSERT_SQL = "INSERT INTO %s (id) VALUES (?);"
SELECT_SQL = "SELECT inst_var_%s FROM %s WHERE id=?;"
UPDATE_SQL = "UPDATE %s SET inst_var_%s=? WHERE id=?"
@jit.elidable
def insert_sql(class_name):
return INSERT_SQL % class_name
@jit.elidable
def select_sql(class_name, n0):
return SELECT_SQL % (n0, class_name)
@jit.elidable
def alter_sql(class_name, n0, dbtype):
if dbtype is NIL:
strtype = ""
elif dbtype is TEXT:
strtype = "text"
elif dbtype is INTEGER:
strtype = "integer"
elif dbtype is REAL:
strtype = "real"
elif dbtype is BLOB:
strtype = "blob"
else:
assert False
return ALTER_SQL % (class_name, n0, strtype)
@jit.elidable
def update_sql(class_name, n0):
return UPDATE_SQL % (class_name, n0)
@jit.elidable
def create_sql(class_name):
return CREATE_SQL % class_name
class W_DBObject_State:
_immutable_fields_ = ["db_connection?", "column_types_for_table",
"db_objects", "class_names"]
def __init__(self):
self.id_counter = 0
self.column_types_for_table = {}
# Maps from DBObject id to DBObject and only includes DBObjects which
# are referenced from an attribute of a DBObject.
self.db_objects = {}
self.class_names = {}
def get_column_type(self, class_name, n0):
dbtype = self.get_column_types(class_name)[n0]
if dbtype != NIL:
return jit.promote(dbtype)
else:
return NIL
@jit.elidable
def get_column_types(self, class_name):
return self.column_types_for_table[class_name]
def set_column_type(self, class_name, position, value):
self.get_column_types(class_name)[position] = value
# This is only ever called once per classname. We always promote the
# classname to a constant, so any time the classname changes, we have to
# break out of the trace and compile a new bridge, anyway. When that
# happens, this was already run once, so we don't need to do it again.
@jit.not_in_trace
def init_column_types_if_neccessary(self, class_name, size):
if class_name not in self.column_types_for_table:
W_DBObject.state.column_types_for_table[class_name] = [NIL] * size
# Same reason as above
@jit.not_in_trace
def create_table_if_neccessary(self, class_name, connection):
if class_name not in W_DBObject.state.class_names:
connection.execute(create_sql(class_name))
W_DBObject.state.class_names[class_name] = True
class W_DBObject(W_PointersObject):
_attrs_ = ["id"]
_immutable_fields_ = ["id"]
state = W_DBObject_State()
@staticmethod
def next_id():
theId = W_DBObject.state.id_counter
W_DBObject.state.id_counter += 1
return theId
def __init__(self, space, w_class, size, weak=False):
W_PointersObject.__init__(self, space, w_class, size, weak)
self.id = W_DBObject.next_id()
class_name = self.class_name(space)
W_DBObject.state.init_column_types_if_neccessary(class_name, size)
connection = dbm.connection(space)
W_DBObject.state.create_table_if_neccessary(class_name, connection)
connection.execute(insert_sql(class_name), [self.w_id(space)])
def class_name(self, space):
return jit.promote_string(self.classname(space))
def w_id(self, space):
return space.wrap_int(self.id)
def fetch(self, space, n0):
class_name = self.class_name(space)
if not W_DBObject.state.get_column_type(class_name, n0):
# print "Can't find column. Falling back to default fetch."
return W_PointersObject.fetch(self, space, n0)
cursor = dbm.connection(space).execute(
select_sql(class_name, n0), [self.w_id(space)])
w_result = space.unwrap_array(cursor.next())
if w_result:
if W_DBObject.state.get_column_type(class_name, n0) is BLOB:
db_id = space.unwrap_int(w_result[0])
return W_DBObject.state.db_objects[db_id]
else:
return w_result[0]
else:
raise PrimitiveFailedError
def store(self, space, n0, w_value):
cls = w_value.getclass(space)
if (cls.is_same_object(space.w_String)):
aType = TEXT
elif cls.is_same_object(space.w_SmallInteger):
aType = INTEGER
elif cls.is_same_object(space.w_Float):
aType = REAL
elif cls.is_same_object(space.w_nil):
aType = NIL
else:
if isinstance(w_value, W_DBObject):
aType = BLOB
W_DBObject.state.db_objects[w_value.id] = w_value
# Save id in database.
w_value = w_value.w_id(space)
else:
# print 'Unable to unwrap %s' % w_value.getclass(space)
# print 'Falling back to standard store.'
return W_PointersObject.store(self, space, n0, w_value)
aType = jit.promote(aType)
class_name = self.class_name(space)
if (aType is not NIL and
W_DBObject.state.get_column_type(class_name, n0) is NIL):
connection = dbm.connection(space)
connection.execute(alter_sql(class_name, n0, aType))
# print "invalidate cache"
connection.statement_cache.invalidate()
W_DBObject.state.set_column_type(class_name, n0, aType)
connection = dbm.connection(space)
connection.execute(update_sql(class_name, n0),
[w_value, self.w_id(space)])
|
Python
| 0.000001 |
@@ -3976,20 +3976,16 @@
if
-not
W_DBObje
@@ -4016,32 +4016,39 @@
(class_name, n0)
+ is NIL
:%0A #
|
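The fix above compares the column type against the NIL sentinel with `is` instead of relying on truthiness. A short sketch of the sentinel-identity pattern (names are illustrative, not from the repository):
NIL = object()  # module-level marker meaning "no column type recorded yet"
column_types = [NIL, 'integer']
# Identity comparison is unambiguous regardless of how the sentinel
# evaluates as a boolean.
assert column_types[0] is NIL        # fall back to the default fetch
assert column_types[1] is not NIL    # a real column type is present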
5b9c9ab8f8aef01c53b761714bb6b7072fa01aa4
|
clean up commandArgs construction for HadoopJob in Python client
|
genie-client/src/main/python/pygenie/jobs/hadoop.py
|
genie-client/src/main/python/pygenie/jobs/hadoop.py
|
"""
genie.jobs.hadoop
This module implements creating Hadoop jobs.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
from .core import GenieJob
from .utils import (add_to_repr,
arg_string)
logger = logging.getLogger('com.netflix.genie.jobs.hadoop')
class HadoopJob(GenieJob):
"""Hadoop job."""
def __init__(self, conf=None):
super(HadoopJob, self).__init__(conf=conf)
self._properties = dict()
self._property_file = None
self._script = None
@property
def cmd_args(self):
"""
The constructed command line arguments using the job's definition. If the
command line arguments are set explicitly (by calling
:py:meth:`command_arguments`) this will be the same.
"""
if self._command_arguments is not None:
return self._command_arguments
props_str = ' '.join([
'-D{name}={value}'.format(name=k, value=v) \
for k, v in self._properties.iteritems()
])
prop_file_str = '-conf {}'.format(os.path.basename(self._property_file)) \
if self._property_file \
else ''
return '{prop_file} {props} {cmd}' \
.format(prop_file=prop_file_str,
props=props_str,
cmd=self._script) \
.strip()
def command(self, script):
"""Alias for :py:meth:`HadoopJob.script`"""
return self.script(script)
@add_to_repr('append')
def property(self, name, value):
"""
Sets a property for the job.
Using the name and value passed in, the following will be constructed in
the command-line when executing:
'-Dname=value'
Example:
>>> job = HadoopJob() \\
... .property('mapred.foo', 'fizz') \\
... .property('mapred.bar', 'buzz')
Args:
name (str): The property name.
value (str): The property value.
Returns:
:py:class:`HadoopJob`: self
"""
self._properties[name] = value
return self
@arg_string
@add_to_repr('overwrite')
def property_file(self, _property_file):
"""
Sets a configuration/property file for the job.
Using the value passed in, the following will be constructed in the
command-line when executing:
'-conf file'
Example:
>>> job = HadoopJob() \\
... .property_file('/Users/jsmith/my_properties.conf')
Args:
_property_file (str): The path to the property file.
Returns:
:py:class:`HadoopJob`: self
"""
self._add_dependency(_property_file)
return self
@arg_string
@add_to_repr('overwrite')
def script(self, _script):
"""
Sets the script to run for the job.
Example:
>>> job = HadoopJob() \\
... .script("/Users/jdoe/my_job.jar")
>>> job = HadoopJob() \\
... .script("version")
>>> job = HadoopJob() \\
... .script("fs -ls /dir/")
Args:
script (str): A path to a script file or the code to run.
Returns:
:py:class:`HadoopJob`: self
"""
|
Python
| 0 |
@@ -1386,16 +1386,22 @@
._script
+ or ''
) %5C%0A
|
3d15d6c416a7de7d8c38e0794c01e758a82ace85
|
Fix test reference
|
tests/oxford_tests/TestFace.py
|
tests/oxford_tests/TestFace.py
|
import inspect
import json
import os
import unittest
import uuid
from test import test_support
import sys, os, os.path
rootDirectory = os.path.dirname(os.path.realpath('__file__'))
if rootDirectory not in sys.path:
sys.path.append(os.path.join(rootDirectory, '..'))
from oxford.Face import Face
from oxford.Person import Person
# local file path to test images
localFilePrefix = os.path.join(rootDirectory, 'tests', 'images')
knownFaceIds = []
client = Face(os.environ['OXFORD_API_KEY'])
class TestFace(unittest.TestCase):
'''Tests the oxford face API client'''
def test_face_constructor_throws_with_no_instrumentation_key(self):
self.assertRaises(Exception, Face, None)
def test_face_constructor_sets_instrumentation_key(self):
face = Face('key')
self.assertEqual('key', face.key)
def test_face_constructor_sets_person_group_client(self):
face = Face('key')
self.assertIsInstance(face.person, Person)
def test_face_return_throws_for_bad_request(self):
self.assertRaises(Exception, client.detect, {'url': 'http://bing.com'});
def _learnFaceIds(self):
if not knownFaceIds:
face1 = client.detect({'path': os.path.join(localFilePrefix, 'face1.jpg')})
face2 = client.detect({'path': os.path.join(localFilePrefix, 'face2.jpg')})
knownFaceIds.append(face1[0]['faceId'])
knownFaceIds.append(face2[0]['faceId'])
#
# test the detect API
#
def _getDetectOptions(self):
return {
'analyzesFaceLandmarks': True,
'analyzesAge': True,
'analyzesGender': True,
'analyzesHeadPose': True
}
def _verifyDetect(self, detectResult):
faceIdResult = detectResult[0]
self.assertIsInstance(faceIdResult['faceId'], str, 'face ID is returned')
self.assertIsInstance(faceIdResult['faceRectangle'], object, 'faceRectangle is returned')
self.assertIsInstance(faceIdResult['faceLandmarks'], object, 'faceLandmarks are returned')
attributes = faceIdResult['attributes']
self.assertIsInstance(attributes, object, 'attributes are returned')
self.assertIsInstance(attributes['gender'], str, 'gender is returned')
self.assertIsInstance(attributes['age'], int, 'age is returned')
def test_face_detect_url(self):
options = self._getDetectOptions();
options['url'] = 'https://upload.wikimedia.org/wikipedia/commons/1/19/Bill_Gates_June_2015.jpg'
detectResult = client.detect(options)
self._verifyDetect(detectResult)
def test_face_detect_file(self):
options = self._getDetectOptions();
options['path'] = os.path.join(localFilePrefix, 'face1.jpg')
detectResult = client.detect(options)
self._verifyDetect(detectResult)
def test_face_detect_stream(self):
options = self._getDetectOptions();
with open(os.path.join(localFilePrefix, 'face1.jpg'), 'rb') as file:
options['stream'] = file.read()
detectResult = client.detect(options)
self._verifyDetect(detectResult)
def test_face_detect_throws_invalid_options(self):
self.assertRaises(Exception, client.detect, {})
#
# test the similar API
#
def test_face_similar(self):
self._learnFaceIds()
similarResult = client.similar(knownFaceIds[0], [knownFaceIds[1]])
self.assertIsInstance(similarResult, list, 'similar result is returned')
self.assertEqual(knownFaceIds[1], similarResult[0]['faceId'], 'expected similar face is returned')
#
# test the grouping API
#
def test_face_grouping(self):
faces = client.detect({'path': os.path.join(localFilePrefix, 'face-group.jpg')})
faceIds = []
for face in faces:
faceIds.append(face['faceId'])
groupingResult = client.grouping(faceIds)
self.assertIsInstance(groupingResult, object, 'grouping result is returned')
self.assertIsInstance(groupingResult['groups'], list, 'groups list is returned')
self.assertIsInstance(groupingResult['messyGroup'], list, 'messygroup list is returned')
#
# test the verify API
#
def test_face_verify(self):
self._learnFaceIds()
verifyResult = client.verify(knownFaceIds[0], knownFaceIds[1])
self.assertIsInstance(verifyResult, object, 'grouping result is returned')
self.assertEqual(verifyResult['isIdentical'], True, 'verify succeeded')
self.assertGreaterEqual(verifyResult['confidence'], 0.5, 'confidence is returned')
|
Python
| 0.000001 |
@@ -314,16 +314,21 @@
d.Person
+Group
import
@@ -333,16 +333,21 @@
t Person
+Group
%0A%0A# loca
@@ -969,16 +969,21 @@
rson
+Group
, Person
)%0A%0A
@@ -978,16 +978,21 @@
, Person
+Group
)%0A%0A d
|
7dc01fa4593e81448db2749d460737cbfa57b63d
|
Return normalized version
|
wger/__init__.py
|
wger/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:copyright: 2011, 2012 by OpenSlides team, see AUTHORS.
:license: GNU GPL, see LICENSE for more details.
"""
VERSION = (1, 9, 0, 'beta', 1)
RELEASE = False
def get_version(version=None, release=None):
"""Derives a PEP386-compliant version number from VERSION."""
if version is None:
version = VERSION
if release is None:
release = RELEASE
assert len(version) == 5
assert version[3] in ('alpha', 'beta', 'rc', 'final')
# Now build the two parts of the version number:
# main = X.Y[.Z]
# sub = .devN - for pre-alpha releases
# | {a|b|c}N - for alpha, beta and rc releases
main_parts = 2 if version[2] == 0 else 3
main = '.'.join(str(x) for x in version[:main_parts])
if version[3] != 'final':
mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'}
sub = mapping[version[3]] + str(version[4])
else:
sub = ''
if not release:
sub += '-dev'
return main + sub
|
Python
| 0.000005 |
@@ -994,12 +994,13 @@
+= '
--
+.
dev
+0
'%0A%0A
|
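The diff switches the development suffix from '-dev' to '.dev0', the spelling PEP 386/440 expects. A tiny sketch of the resulting version strings (the helper is illustrative):
def dev_version(main, release):
    # '1.9b1' for a release build, '1.9b1.dev0' for a development build.
    return main if release else main + '.dev0'
print(dev_version('1.9b1', release=True))   # 1.9b1
print(dev_version('1.9b1', release=False))  # 1.9b1.dev0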
a7a14619f7662ccb510b6a0031a58647cf0b34e7
|
Remove duplicated path for build script
|
whack/builder.py
|
whack/builder.py
|
import os
import subprocess
from . import downloads
from .tempdir import create_temporary_dir
from .common import WHACK_ROOT
from .files import mkdir_p, write_file
from .errors import FileNotFoundError
def build(package_request, package_dir):
with create_temporary_dir() as build_dir:
_build_in_dir(package_request, build_dir, package_dir)
def _build_in_dir(package_request, build_dir, package_dir):
params = package_request.params()
package_request.write_source_to(build_dir)
build_script = os.path.join(build_dir, "whack/build")
if not os.path.exists(build_script):
message = "whack/build script not found in package source {0}".format(
package_request.source_uri
)
raise FileNotFoundError(message)
build_env = _params_to_build_env(params)
_fetch_downloads(build_dir, build_env)
mkdir_p(package_dir)
build_command = [
"whack-run",
os.path.abspath(package_dir), # package_dir is mounted at WHACK_ROOT
build_script, # build_script is executed
WHACK_ROOT # WHACK_ROOT is passed as the first argument to build_script
]
subprocess.check_call(build_command, cwd=build_dir, env=build_env)
write_file(
os.path.join(package_dir, ".whack-package-name"),
package_request.name()
)
def _fetch_downloads(build_dir, build_env):
downloads_file_path = os.path.join(build_dir, "whack/downloads")
downloads.fetch_downloads(downloads_file_path, build_env, build_dir)
def _params_to_build_env(params):
build_env = os.environ.copy()
for name, value in (params or {}).iteritems():
build_env[name.upper()] = str(value)
return build_env
|
Python
| 0.000001 |
@@ -523,16 +523,54 @@
script =
+ %22whack/build%22%0A build_script_path =
os.path
@@ -586,29 +586,28 @@
ld_dir,
-%22whack/build%22
+build_script
)%0A if
@@ -638,16 +638,21 @@
d_script
+_path
):%0A
@@ -665,27 +665,19 @@
sage = %22
-whack/build
+%7B0%7D
script
@@ -705,17 +705,17 @@
source %7B
-0
+1
%7D%22.forma
@@ -724,24 +724,38 @@
%0A
+ build_script,
package_req
@@ -1079,16 +1079,21 @@
d_script
+_path
, # buil
|
b2b137b0566ffb601f96c95a20acfae9bd0ae97c
|
Bump due to pypi issues.
|
win/gstreamer.py
|
win/gstreamer.py
|
from __future__ import absolute_import, print_function
import sys
from os.path import join, sep
from shutil import rmtree
from os import walk, listdir
from .common import *
__version__ = '0.1.8'
gst_ver = '1.10.2'
def get_gstreamer(cache, build_path, arch, pyver, package, output, compiler='mingw'):
data = []
bitness = 'x86_64' if arch == '64' else 'x86'
runtime_name = 'gstreamer-1.0-{}-{}.msi'.format(bitness, gst_ver)
devel_name = 'gstreamer-1.0-devel-{}-{}.msi'.format(bitness, gst_ver)
gst = join(build_path, package)
makedirs(gst)
for name in (runtime_name, devel_name):
url = (
'https://gstreamer.freedesktop.org/data/pkg/windows/{}/{}'.format(gst_ver, name))
local_url = download_cache(cache, url, build_path)
exec_binary(
"Extracting {} to {}".format(local_url, gst),
['msiexec', '/a', local_url, '/qb', 'TARGETDIR={}'.format(gst)],
cwd=gst, shell=False)
gst = join(gst, 'gstreamer')
gst = join(gst, list(listdir(gst))[0], bitness)
pkg_url = 'pkg-config_0.28-1_win{}.zip'.format('64' if arch == '64' else '32')
url = 'http://win32builder.gnome.org/packages/3.6/{}'.format(pkg_url)
local_url = download_cache(cache, url, build_path)
base_dir = join(build_path, splitext(pkg_url)[0])
makedirs(base_dir)
with open(local_url, 'rb') as fd:
ZipFile(fd).extractall(base_dir)
data.append((join(base_dir, 'bin', 'pkg-config.exe'), 'pkg-config.exe',
'Scripts', True))
inc = join(gst, 'include')
for f in listdir(inc):
if f in ('glib-2.0', 'gstreamer-1.0'):
continue
f = join(inc, f)
if isdir(f):
rmtree(f)
else:
remove(f)
gstreamer = join(inc, 'gstreamer-1.0')
for f in listdir(gstreamer):
if f == 'gst':
continue
f = join(gstreamer, f)
if isdir(f):
rmtree(f)
else:
remove(f)
gstinc = join(gstreamer, 'gst')
for f in listdir(gstinc):
f = join(gstinc, f)
if isdir(f):
rmtree(f)
lib_files = [
['gio'],
['glib-2.0'],
['gstreamer-1.0'],
['glib-2.0.lib'],
['gmodule-2.0.lib'],
['gobject-2.0.lib'],
['gstreamer-1.0.lib'],
['intl.lib'],
['libglib-2.0.dll.a'],
['libglib-2.0.la'],
['libgmodule-2.0.dll.a'],
['libgmodule-2.0.la'],
['libgobject-2.0.dll.a'],
['libgobject-2.0.la'],
['libgstreamer-1.0.a'],
['libgstreamer-1.0.dll.a'],
['libgstreamer-1.0.la'],
['pkgconfig', 'glib-2.0.pc'],
['pkgconfig', 'gmodule-2.0.pc'],
['pkgconfig', 'gmodule-no-export-2.0.pc'],
['pkgconfig', 'gobject-2.0.pc'],
['pkgconfig', 'gstreamer-1.0.pc'],
]
remove_from_dir(join(gst, 'lib'), lib_files)
move_by_ext(join(gst, 'lib', 'gio'), '.dll', join(gst, 'bin'))
move_by_ext(join(gst, 'lib', 'gstreamer-1.0'), '.dll', join(gst, 'bin'))
move_by_ext(join(gst, 'lib', 'glib-2.0'), '.h', join(gst, 'include'))
rmtree(join(gst, 'lib', 'gio'))
rmtree(join(gst, 'lib', 'glib-2.0'))
rmtree(join(gst, 'lib', 'gstreamer-1.0'))
items = list(listdir(gst))
items.remove('include')
items.remove('lib')
for d in ('lib', 'include'):
src = join(gst, d)
for dirpath, dirnames, filenames in walk(src):
root = dirpath
dirpath = dirpath.replace(src, '').strip(sep)
inc_dirpath = dirpath
if d == 'include':
# for these, copy the contents but not the high level directory
if inc_dirpath.startswith('glib-2.0'):
inc_dirpath = inc_dirpath[8:].strip(sep)
if inc_dirpath.startswith('gstreamer-1.0'):
inc_dirpath = inc_dirpath[13:].strip(sep)
for filename in filenames:
data.append((
join(root, filename), join(d, dirpath, filename),
join('libs' if d == 'lib' else d, inc_dirpath), True))
for d in items:
src = join(gst, d)
for dirpath, dirnames, filenames in walk(src):
root = dirpath
dirpath = dirpath.replace(src, '')
if dirpath and dirpath[0] == sep:
dirpath = dirpath[1:]
for filename in filenames:
data.append((
join(root, filename), join('gstreamer', d, dirpath, filename),
join('share', package, d, dirpath), False))
if filename in ('libintl-8.dll', 'libglib-2.0-0.dll'):
data.append((join(root, filename), filename, 'Scripts', True))
l_imports = 'from os import environ'
l_code = '''
if dep_bins and isdir(dep_bins[0]):
if environ.get('GST_PLUGIN_PATH'):
environ['GST_PLUGIN_PATH'] = '{};{}'.format(environ['GST_PLUGIN_PATH'], dep_bins[0])
else:
environ['GST_PLUGIN_PATH'] = dep_bins[0]
if not environ.get('GST_REGISTRY'):
environ['GST_REGISTRY'] = join(dirname(dep_bins[0]), 'registry.bin')
'''
make_package(join(build_path, 'project'), package, data, __version__, output,
'LGPL', (l_imports, l_code))
if __name__ == '__main__':
parse_args(get_gstreamer)
|
Python
| 0 |
@@ -186,17 +186,17 @@
= '0.1.
-8
+9
'%0A%0Agst_v
|
4c017462c41ad080c1f6a98f8be7ef843f379253
|
Fix test name
|
tests/search_backend_sphinx.py
|
tests/search_backend_sphinx.py
|
from wolis.test_case import WolisTestCase
from wolis import utils
class SearchBackendMysqlTest(WolisTestCase):
@utils.restrict_database('mysql*', 'postgres')
@utils.restrict_phpbb_version('>=3.1.0')
def test_set_search_backend(self):
self.login('morpheus', 'morpheus')
self.acp_login('morpheus', 'morpheus')
self.change_acp_knob(
link_text='Search settings',
check_page_text='Here you can define what search backend will be used',
name='config[search_type]',
value='phpbb_search_fulltext_sphinx',
confirm=True,
)
if __name__ == '__main__':
import unittest
unittest.main()
|
Python
| 0.001029 |
@@ -83,13 +83,14 @@
kend
-Mysql
+Sphinx
Test
|
58c6cf44fd73aa4d33d48f1defe2ec65e6f20c50
|
Add debugging info.
|
docs/src/conf.py
|
docs/src/conf.py
|
# -*- coding: utf-8 -*-
import os
import shutil
from datetime import datetime
from subprocess import call, Popen, PIPE
try:
import simplejson as json
except ImportError:
import json
def prepare(globs, locs):
git = Popen('which git 2> %s' % os.devnull, shell=True, stdout=PIPE
).stdout.read().strip()
doxygen = Popen('which doxygen 2> %s' % os.devnull, shell=True, stdout=PIPE
).stdout.read().strip()
cwd = os.getcwd()
root = os.path.abspath(os.path.join(cwd, '..', '..'))
print "Running from %s..." % (root, )
os.chdir(root)
buildenv = os.path.join(root, 'vendor', 'erebot', 'buildenv')
generic_doc = os.path.join(root, 'docs', 'src', 'generic')
origin = Popen([git, 'config', '--local', 'remote.origin.url'],
stdout=PIPE).stdout.read().strip()
project = origin.rpartition('/')[2]
if project.endswith('.git'):
project = project[:-4]
locs['project'] = project
git_tag = Popen(['git', 'describe', '--tags', '--exact', '--first-parent'],
stdout=PIPE).communicate()[0].strip()
if git_tag:
locs['version'] = locs['release'] = git_tag
else:
locs['version'] = locs['release'] = 'latest'
for repository, path in (
('git://github.com/Erebot/Erebot_Buildenv.git', buildenv),
('git://github.com/Erebot/Erebot_Module_Skeleton_Doc.git', generic_doc)
):
if not os.path.isdir(path):
os.makedirs(path)
print "Cloning %s into %s..." % (repository, path)
call([git, 'clone', repository, path])
else:
os.chdir(path)
print "Updating clone of %s in %s..." % (repository, path)
call([git, 'checkout', 'master'])
call([git, 'pull'])
os.chdir(root)
composer = json.load(open(os.path.join(root, 'composer.json'), 'r'))
# Run doxygen
call([doxygen, os.path.join(root, 'Doxyfile')], env={
'COMPONENT_NAME': locs['project'],
'COMPONENT_VERSION': locs['version'],
'COMPONENT_BRIEF': composer.get('description', ''),
})
# Copy doxygen output to Sphinx's output folder
try:
shutil.copytree(
os.path.join(root, 'docs', 'api', 'html'),
os.path.join(root, 'docs', 'enduser', 'html', 'api'),
)
except OSError:
pass
os.chdir(cwd)
real_conf = os.path.join(buildenv, 'sphinx', 'conf.py')
print "Including real configuration file (%s)..." % (real_conf, )
execfile(real_conf, globs, locs)
locs['copyright'] = u'2012-%d, XRL Team. All rights reserved' % \
datetime.now().year
prepare(globals(), locals())
|
Python
| 0 |
@@ -42,16 +42,31 @@
shutil%0A
+import logging%0A
from dat
@@ -129,16 +129,51 @@
, PIPE%0A%0A
+log = logging.getLogger(__name__)%0A%0A
try:%0A
@@ -236,16 +236,163 @@
t json%0A%0A
+def fake_ignore(cwd, contents):%0A for entry in contents:%0A log.info('Copying %25s/%25s to its final destination...', cwd, entry)%0A return %5B%5D%0A
%0Adef pre
@@ -2533,24 +2533,56 @@
l', 'api'),%0A
+ ignore=fake_ignore,%0A
)%0A
|
e721511a24f98e57e8bfeb45a953d7d42cf78f33
|
increase the max length of a link that is to be shortened to 500 characters
|
teeny_weeny/models.py
|
teeny_weeny/models.py
|
from django.db import models
from django.utils import timezone
class ShortLink(models.Model):
short = models.CharField(max_length=128, unique=True)
link = models.URLField()
hit = models.BigIntegerField(default=0)
date = models.DateTimeField(default=timezone.now)
def __unicode__(self):
return u'%s' % (self.short)
|
Python
| 0.000061 |
@@ -173,16 +173,30 @@
RLField(
+max_length=500
)%0A hi
@@ -350,8 +350,9 @@
f.short)
+%0A
|
517ffe9a3d2ca3608b8044e88d74d16fe5e65db1
|
Use new Sphinx Autodoc mock import path (#17634)
|
docs/exts/docroles.py
|
docs/exts/docroles.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
"""Document roles"""
from functools import partial
from docutils import nodes, utils
from sphinx.ext.autodoc.importer import import_module, mock
class RoleException(Exception):
"""Exception for roles extension"""
def get_template_field(env, fullname):
"""
Gets template fields for specific operator class.
:param env: env config
:param fullname: Full path to operator class.
For example: ``airflow.providers.google.cloud.operators.vision.CloudVisionCreateProductSetOperator``
:return: List of template field
:rtype: list[str]
"""
modname, classname = fullname.rsplit(".", 1)
try:
with mock(env.config.autodoc_mock_imports):
mod = import_module(modname)
except ImportError:
raise RoleException(f"Error loading {modname} module.")
clazz = getattr(mod, classname)
if not clazz:
raise RoleException(f"Error finding {classname} class in {modname} module.")
template_fields = getattr(clazz, "template_fields")
if not template_fields:
raise RoleException(f"Could not find the template fields for {classname} class in {modname} module.")
return list(template_fields)
def template_field_role(
app,
typ,
rawtext,
text,
lineno,
inliner,
options=None,
content=None,
):
"""
A role that allows you to include a list of template fields in the middle of the text. This is especially
useful when writing guides describing how to use the operator.
The result is a list of fields where each field is shown in the literal block.
Sample usage::
:template-fields:`airflow.operators.bash.BashOperator`
For further information look at:
* [http://docutils.sourceforge.net/docs/howto/rst-roles.html](Creating reStructuredText Interpreted
Text Roles)
"""
if options is None:
options = {}
if content is None:
content = []
text = utils.unescape(text)
try:
template_fields = get_template_field(app.env, text)
except RoleException as e:
msg = inliner.reporter.error(
f"invalid class name {text} \n{e}",
line=lineno,
)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
node = nodes.inline(rawtext=rawtext)
for i, field in enumerate(template_fields):
if i != 0:
node += nodes.Text(", ")
node += nodes.literal(field, "", nodes.Text(field))
return [node], []
def setup(app):
"""Sets the extension up"""
from docutils.parsers.rst import roles
roles.register_local_role("template-fields", partial(template_field_role, app))
return {"version": "builtin", "parallel_read_safe": True, "parallel_write_safe": True}
|
Python
| 0 |
@@ -922,17 +922,52 @@
t_module
-,
+%0Afrom sphinx.ext.autodoc.mock import
mock%0A%0A%0A
|
027c9d24ecf00a8435ad012fdab9e64b4201ed42
|
fix migration conflict, re #7128
|
arches/app/models/migrations/7128_resource_instance_filter.py
|
arches/app/models/migrations/7128_resource_instance_filter.py
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('models', '7262_report_template_data_fetch_bool'),
]
operations = [
migrations.RunSQL("""
UPDATE d_data_types
SET defaultconfig = defaultconfig || '{"searchString": "", "searchDsl": ""}'::jsonb
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE nodes
SET config = config || '{"searchString": "", "searchDsl": ""}'::jsonb
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE public.widgets
SET defaultconfig = defaultconfig || '{"defaultResourceInstance": []}'::jsonb
WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
""","""
UPDATE nodes
SET config = config - 'searchString' - 'searchDsl'
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE d_data_types
SET defaultconfig = defaultconfig - 'searchString' - 'searchDsl'
WHERE datatype = 'resource-instance' OR datatype = 'resource-instance-list';
UPDATE public.widgets
SET defaultconfig = defaultconfig - 'defaultResourceInstance'
WHERE name = 'resource-instance-select-widget' or name = 'resource-instance-multiselect-widget';
""")
]
|
Python
| 0 |
@@ -122,43 +122,40 @@
, '7
-262_report_template_data_fetch_bool
+442_delete_manifest_images_table
'),%0A
|
e987a010f2242735ad60008774d25c00b7f89f76
|
Tweak CI report
|
CI/CITests.py
|
CI/CITests.py
|
import os
from OMPython import OMCSessionZMQ
class CITests():
'''
Python class used to run CI tests
'''
def __init__(self, rootPath):
'''
Constructor starts omc and loads MSL
'''
self.rootPath = rootPath
self.omc = OMCSessionZMQ()
os.chdir(self.rootPath)
self.omc.sendExpression("loadModel(Modelica)")
def loadLib(self, libPath):
# Attempt to load the library
if self.omc.sendExpression('loadFile("%s")' % (self.rootPath + libPath)):
print "%s is successfully loaded." % libPath
else:
errmsg = libPath + " was not loaded! Check the library path."
raise Exception(errmsg)
def runSyntaxCheck(self, libName, libPath):
# Load library
self.loadLib(libPath)
'''
Checks all of the models in the library and returns the number of failed checks
'''
# Get the list of all classes in OpenIPSL
test_list = self.omc.sendExpression('getClassNames(%s,recursive=true)' % libName)
nFailed = 0
nPassed = 0
# Run the check for all classes that are model and print result msgs
for test in test_list:
if self.omc.sendExpression("isModel(%s)" % (test)): # Check if a class is a model
passMsg = self.omc.sendExpression("checkModel(%s)" % (test))
if "completed successfully." in passMsg:
# print passMsg
nPassed += 1
else:
failMsg = self.omc.sendExpression("getErrorString()")
print failMsg
nFailed += 1
# Print a check summary
if nFailed == 0:
str1 = "== %s --------------------" % libName
print "%s OK! (%s models checked)" % (str1[:20], nPassed)
else:
print "==== Check Summary for %s ====" % libName
print "Number of models that passed the check is: %s" % nPassed
print "Number of models that failed the check is: %s" % nFailed
# Return test result
return (nFailed == 0)
|
Python
| 0 |
@@ -387,24 +387,33 @@
oadLib(self,
+ libName,
libPath):%0A
@@ -554,21 +554,20 @@
nt %22
-%25s is
+Load
success
full
@@ -566,32 +566,23 @@
cess
-fully loaded.
+: %25s
%22 %25 lib
-Path
+Name
%0A
@@ -616,20 +616,20 @@
sg = lib
-Path
+Name
+ %22 was
@@ -663,18 +663,30 @@
ary path
-.%22
+:%5Cn%22 + libPath
%0A
@@ -807,16 +807,24 @@
loadLib(
+libName,
libPath)
@@ -1453,44 +1453,8 @@
sg:%0A
- # print passMsg%0A
@@ -1748,16 +1748,18 @@
--------
+--
%22 %25 libN
@@ -1792,13 +1792,12 @@
OK!
-(%25s m
+== M
odel
@@ -1805,17 +1805,20 @@
checked
-)
+: %25s
%22 %25 (str
@@ -1821,17 +1821,17 @@
(str1%5B:2
-0
+2
%5D, nPass
|
94405e6c4911669532b3648e91f2f5c5b58e5d26
|
Bump up dataflow python container version to beam-master-20220914 (#23238)
|
sdks/python/apache_beam/runners/dataflow/internal/names.py
|
sdks/python/apache_beam/runners/dataflow/internal/names.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Various names for properties, transforms, etc."""
# All constants are for internal use only; no backwards-compatibility
# guarantees.
# pytype: skip-file
# Referenced by Dataflow legacy worker.
from apache_beam.runners.internal.names import PICKLED_MAIN_SESSION_FILE # pylint: disable=unused-import
# String constants related to sources framework
SOURCE_FORMAT = 'custom_source'
SOURCE_TYPE = 'CustomSourcesType'
SERIALIZED_SOURCE_KEY = 'serialized_source'
# In a released SDK, container tags are selected based on the SDK version.
# Unreleased versions use container versions based on values of
# BEAM_CONTAINER_VERSION and BEAM_FNAPI_CONTAINER_VERSION (see below).
# Update this version to the next version whenever there is a change that will
# require changes to legacy Dataflow worker execution environment.
BEAM_CONTAINER_VERSION = 'beam-master-20220811'
# Update this version to the next version whenever there is a change that
# requires changes to SDK harness container or SDK harness launcher.
BEAM_FNAPI_CONTAINER_VERSION = 'beam-master-20220811'
DATAFLOW_CONTAINER_IMAGE_REPOSITORY = 'gcr.io/cloud-dataflow/v1beta3'
class TransformNames(object):
"""For internal use only; no backwards-compatibility guarantees.
Transform strings as they are expected in the CloudWorkflow protos.
"""
COLLECTION_TO_SINGLETON = 'CollectionToSingleton'
COMBINE = 'CombineValues'
CREATE_PCOLLECTION = 'CreateCollection'
DO = 'ParallelDo'
FLATTEN = 'Flatten'
GROUP = 'GroupByKey'
READ = 'ParallelRead'
WRITE = 'ParallelWrite'
class PropertyNames(object):
"""For internal use only; no backwards-compatibility guarantees.
Property strings as they are expected in the CloudWorkflow protos.
"""
# If uses_keyed_state, whether the state can be sharded.
ALLOWS_SHARDABLE_STATE = 'allows_shardable_state'
BIGQUERY_CREATE_DISPOSITION = 'create_disposition'
BIGQUERY_DATASET = 'dataset'
BIGQUERY_EXPORT_FORMAT = 'bigquery_export_format'
BIGQUERY_FLATTEN_RESULTS = 'bigquery_flatten_results'
BIGQUERY_KMS_KEY = 'bigquery_kms_key'
BIGQUERY_PROJECT = 'project'
BIGQUERY_QUERY = 'bigquery_query'
BIGQUERY_SCHEMA = 'schema'
BIGQUERY_TABLE = 'table'
BIGQUERY_USE_LEGACY_SQL = 'bigquery_use_legacy_sql'
BIGQUERY_WRITE_DISPOSITION = 'write_disposition'
DISPLAY_DATA = 'display_data'
ELEMENT = 'element'
ELEMENTS = 'elements'
ENCODING = 'encoding'
FILE_PATTERN = 'filepattern'
FILE_NAME_PREFIX = 'filename_prefix'
FILE_NAME_SUFFIX = 'filename_suffix'
FORMAT = 'format'
INPUTS = 'inputs'
IMPULSE_ELEMENT = 'impulse_element'
NON_PARALLEL_INPUTS = 'non_parallel_inputs'
NUM_SHARDS = 'num_shards'
OUT = 'out'
OUTPUT = 'output'
OUTPUT_INFO = 'output_info'
OUTPUT_NAME = 'output_name'
PARALLEL_INPUT = 'parallel_input'
PIPELINE_PROTO_TRANSFORM_ID = 'pipeline_proto_transform_id'
# If the input element is a key/value pair, then the output element(s) all
# have the same key as the input.
PRESERVES_KEYS = 'preserves_keys'
PUBSUB_ID_LABEL = 'pubsub_id_label'
PUBSUB_SERIALIZED_ATTRIBUTES_FN = 'pubsub_serialized_attributes_fn'
PUBSUB_SUBSCRIPTION = 'pubsub_subscription'
PUBSUB_TIMESTAMP_ATTRIBUTE = 'pubsub_timestamp_label'
PUBSUB_TOPIC = 'pubsub_topic'
RESOURCE_HINTS = 'resource_hints'
RESTRICTION_ENCODING = 'restriction_encoding'
SERIALIZED_FN = 'serialized_fn'
SHARD_NAME_TEMPLATE = 'shard_template'
SOURCE_STEP_INPUT = 'custom_source_step_input'
SERIALIZED_TEST_STREAM = 'serialized_test_stream'
STEP_NAME = 'step_name'
USE_INDEXED_FORMAT = 'use_indexed_format'
USER_FN = 'user_fn'
USER_NAME = 'user_name'
USES_KEYED_STATE = 'uses_keyed_state'
VALIDATE_SINK = 'validate_sink'
VALIDATE_SOURCE = 'validate_source'
VALUE = 'value'
WINDOWING_STRATEGY = 'windowing_strategy'
|
Python
| 0 |
@@ -1644,19 +1644,19 @@
er-20220
-811
+914
'%0A# Upda
@@ -1845,11 +1845,11 @@
0220
-811
+914
'%0A%0AD
|
31a607f13536fcaefa8decffe1769d1dc66e78e4
|
Use empty dict for default package description
|
whack/sources.py
|
whack/sources.py
|
import os
import json
import shutil
import tempfile
import uuid
import blah
from .hashes import Hasher
from .files import mkdir_p, copy_dir
class PackageSourceNotFound(Exception):
def __init__(self, package_name):
message = "Could not find source for package: {0}".format(package_name)
Exception.__init__(self, message)
class PackageSourceFetcher(object):
def fetch(self, package):
if blah.is_source_control_uri(package):
return self._fetch_package_from_source_control(package)
elif self._is_local_path(package):
return PackageSource(package)
else:
raise PackageSourceNotFound(package)
def _fetch_package_from_source_control(self, package):
package_source_dir = _temporary_path()
try:
blah.archive(package, package_source_dir)
return TemporaryPackageSource(package_source_dir)
except:
shutil.rmtree(package_source_dir)
raise
def _is_local_uri(self, uri):
return "://" not in uri
def _is_local_path(self, path):
return path.startswith("/") or path.startswith(".")
def _temporary_path():
return os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
class PackageSource(object):
def __init__(self, path):
self.path = path
self._description = _read_package_description(path)
def name(self):
return self._description.name()
def source_hash(self):
hasher = Hasher()
for source_path in self._source_paths():
absolute_source_path = os.path.join(self.path, source_path)
hasher.update_with_dir(absolute_source_path)
return hasher.ascii_digest()
def write_to(self, target_dir):
for source_dir in self._source_paths():
target_sub_dir = os.path.join(target_dir, source_dir)
mkdir_p(target_sub_dir)
copy_dir(os.path.join(self.path, source_dir), target_sub_dir)
def _source_paths(self):
return ["whack"]
def __enter__(self):
return self
def __exit__(self, *args):
pass
class TemporaryPackageSource(object):
def __init__(self, path):
self._path = path
def __enter__(self):
return PackageSource(self._path)
def __exit__(self, *args):
shutil.rmtree(self._path)
def _read_package_description(package_src_dir):
whack_json_path = os.path.join(package_src_dir, "whack/whack.json")
if os.path.exists(whack_json_path):
with open(whack_json_path, "r") as whack_json_file:
whack_json = json.load(whack_json_file)
return DictBackedPackageDescription(whack_json)
else:
return DefaultPackageDescription()
class DefaultPackageDescription(object):
def name(self):
return None
class DictBackedPackageDescription(object):
def __init__(self, values):
self._values = values
def name(self):
return self._values.get("name", None)
|
Python
| 0 |
@@ -2702,74 +2702,38 @@
- return DictBackedPackageDescription(whack_json)%0A else:%0A
+else:%0A whack_json = %7B%7D%0A
@@ -2740,22 +2740,25 @@
return D
-efault
+ictBacked
PackageD
@@ -2772,110 +2772,37 @@
ion(
-)%0A %0A %0Aclass DefaultPackageDescription(object):%0A def name(self):%0A
+whack_json)%0A
+%0A
-return None%0A%0A
+
%0Acla
|
4f83984c518a55feac88cbe70cc19a6945b02b59
|
fix doc-gen (#1151)
|
python/dllib/src/bigdl/dllib/feature/dataset/mnist.py
|
python/dllib/src/bigdl/dllib/feature/dataset/mnist.py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Part of the code originally from Tensorflow
import gzip
import numpy
from bigdl.dataset import base
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
TRAIN_MEAN = 0.13066047740239506 * 255
TRAIN_STD = 0.3081078 * 255
TEST_MEAN = 0.13251460696903547 * 255
TEST_STD = 0.31048024 * 255
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D uint8 numpy array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
"""
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
return data
def extract_labels(f):
print('Extracting', f.name)
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, f.name))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
return labels
def read_data_sets(train_dir, data_type="train"):
"""
Parse or download mnist data if train_dir is empty.
:param train_dir: The directory storing the mnist data
:param data_type: Reading training set or testing set. It can be either "train" or "test"
:return: (ndarray, ndarray) representing (features, labels)
features is a 4D uint8 numpy array [index, y, x, depth]
representing each pixel valued from 0 to 255. labels
is 1D uint8 numpy array representing the label valued
from 0 to 9.
"""
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
if data_type == "train":
local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
SOURCE_URL + TRAIN_IMAGES)
with open(local_file, 'rb') as f:
train_images = extract_images(f)
local_file = base.maybe_download(TRAIN_LABELS, train_dir,
SOURCE_URL + TRAIN_LABELS)
with open(local_file, 'rb') as f:
train_labels = extract_labels(f)
return train_images, train_labels
else:
local_file = base.maybe_download(TEST_IMAGES, train_dir,
SOURCE_URL + TEST_IMAGES)
with open(local_file, 'rb') as f:
test_images = extract_images(f)
local_file = base.maybe_download(TEST_LABELS, train_dir,
SOURCE_URL + TEST_LABELS)
with open(local_file, 'rb') as f:
test_labels = extract_labels(f)
return test_images, test_labels
if __name__ == "__main__":
train, _ = read_data_sets("/tmp/mnist/", "train")
test, _ = read_data_sets("/tmp/mnist", "test")
assert numpy.abs(numpy.mean(train) - TRAIN_MEAN) / TRAIN_MEAN < 1e-7
assert numpy.abs(numpy.std(train) - TRAIN_STD) / TRAIN_STD < 1e-7
assert numpy.abs(numpy.mean(test) - TEST_MEAN) / TEST_MEAN < 1e-7
assert numpy.abs(numpy.std(test) - TEST_STD) / TEST_STD < 1e-7
|
Python
| 0 |
@@ -1116,24 +1116,19 @@
%5D.%0A%0A
-%0A
-Args:%0A
+:param:
f:
@@ -1184,28 +1184,20 @@
er.%0A
-%0A%0A
-R
+:r
eturn
-s:%0A
+:
dat
@@ -1248,27 +1248,19 @@
h%5D.%0A
-%0A%0A
-R
+:r
aise
-s:%0A
+:
Val
@@ -1313,17 +1313,16 @@
2051.%0A%0A
-%0A
%22%22%22%0A
@@ -2480,16 +2480,17 @@
:param
+:
train_d
@@ -2530,16 +2530,17 @@
st data%0A
+%0A
:par
@@ -2541,16 +2541,17 @@
:param
+:
data_ty
@@ -2625,16 +2625,17 @@
%22test%22%0A
+%0A
:ret
@@ -2638,16 +2638,29 @@
:return:
+%0A%0A %60%60%60%0A
(ndarra
@@ -2703,24 +2703,16 @@
labels)%0A
-
feat
@@ -2762,28 +2762,16 @@
, depth%5D
-%0A
represe
@@ -2812,27 +2812,19 @@
255.
+%0A
labels
-%0A
is
@@ -2873,29 +2873,16 @@
l valued
-%0A
from 0
@@ -2887,16 +2887,25 @@
0 to 9.%0A
+ %60%60%60%0A%0A
%22%22%22%0A
|
7725821156795b613340bd8098583fdbb189a6d3
|
fix minor bug and update response msg
|
wildlife/rest.py
|
wildlife/rest.py
|
from wildlife import WildApp
import os
from flask import jsonify, make_response
from wildlife import kz_exceptions
import logging
import json
import exceptions
import functools
# change to current directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
conf_path = "./config/wildlife.yml"
app = WildApp("wildlife for zookeeper",
conf_path)
def cluster_znode_exception(func):
@functools.wraps(func)
def wrapper(cluster_name, znode):
try:
func(cluster_name, znode)
except (kz_exceptions.ConnectionClosedError,
kz_exceptions.ConnectionDropped,
kz_exceptions.ConnectionLoss,
kz_exceptions.ConnectionLossException):
return make_response("Connection Exception When Interacts "
"with Cluster %s.\n" % cluster_name,
408)
except kz_exceptions.NoNodeError:
return make_response("Cannot Find Znode %s in Cluster"
"%s.\n" % (znode, cluster_name),
404)
except kz_exceptions.InvalidACLException:
return make_response("Invalid ACLs on Accessing Znode %s in "
"Cluster %s.\n" % (znode, cluster_name),
401)
except exceptions:
return make_response("Unable to Handle this Request.\n",
500)
return wrapper
@app.route("/")
def hello():
return make_response("Welcome to WildLife: The REST API for ZooKeeper!\n",
200)
@app.route("/wildlife", methods=["GET"])
def clusters():
return make_response(jsonify({"clusters": app.clusters.keys()}),
200)
@app.route("/wildlife/<cluster_name>", methods=["GET"])
def detail_cluster(cluster_name):
return make_response(jsonify(app.clusters[cluster_name].__dict__),
200)
@app.route("/wildlife/<cluster_name>/<znode>", methods=["GET"])
@cluster_znode_exception
def cluster_znode(cluster_name, znode):
_zclient_manager = app.managers[cluster_name]
_zclient = _zclient_manager._client
zdata = _zclient.get(znode)
return make_response(jsonify({"data": zdata[0],
"znodeStat": convert_zstat(zdata[1])
}),
200)
@app.route("/wildlife/<cluster_name>/<znode>/data", methods=["GET"])
@cluster_znode_exception
def cluster_znode_data(cluster_name, znode):
zdata = cluster_znode(cluster_name, znode)
zdata = json.loads(zdata)
return make_response(zdata["data"],
200)
@app.route("/wildlife/<cluster_name>/<znode>/children", methods=["GET"])
@cluster_znode_exception
def cluster_znode_children(cluster_name, znode):
_zclient_manager = app.managers[cluster_name]
_zclient = _zclient_manager._client
zchildren = _zclient.get_children(znode)
return make_response(str(zchildren),
200)
def convert_zstat(znodestat):
return {"czxid": znodestat.czxid,
"mzxid": znodestat.mzxid,
"ctime": znodestat.ctime,
"mtime": znodestat.mtime,
"version": znodestat.version,
"cversion": znodestat.cversion,
"aversion": znodestat.aversion,
"ephemeralOwner": znodestat.ephemeralOwner,
"dataLength": znodestat.dataLength,
"numChildren": znodestat.numChildren,
"pzxid": znodestat.pzxid}
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s: '
'(%(threadName)-10s) %(message)s')
app.run()
|
Python
| 0 |
@@ -485,16 +485,23 @@
+return
func(clu
@@ -845,26 +845,28 @@
ith Cluster
+%5B
%25s
+%5D
.%5Cn%22 %25 clust
@@ -952,20 +952,24 @@
.NoNodeE
-rror
+xception
:%0A
@@ -1010,26 +1010,28 @@
Find Znode
+%5B
%25s
+%5D
in Cluster%22
@@ -1065,18 +1065,20 @@
%22
+%5B
%25s
+%5D
.%5Cn%22 %25 (
@@ -1253,18 +1253,20 @@
g Znode
+%5B
%25s
+%5D
in %22%0A
@@ -1305,18 +1305,20 @@
Cluster
+%5B
%25s
+%5D
.%5Cn%22 %25 (
@@ -1377,16 +1377,257 @@
401)%0A
+ except kz_exceptions.NoAuthException:%0A return make_response(%22Please Provide ACLs to Access Znode %5B%25s%5D in %22%0A %22Cluster %5B%25s%5D.%5Cn%22 %25 (znode, cluster_name),%0A 401)%0A%0A
|
08516e8cc202e19d5dd144048ede0c04d2c27f4a
|
add patch_ssl after patch_socket
|
source/jormungandr/jormungandr/api.py
|
source/jormungandr/jormungandr/api.py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import importlib
from flask_restful.representations import json
from flask import request, make_response
from jormungandr import rest_api, app
from jormungandr.index import index
from jormungandr.modules_loader import ModulesLoader
import ujson
import logging
from jormungandr.new_relic import record_custom_parameter
from jormungandr.authentication import get_user, get_token, get_app_name, get_used_coverages
# http://www.gevent.org/intro.html#monkey-patching
import gevent.monkey
gevent.monkey.patch_socket()
@rest_api.representation("text/jsonp")
@rest_api.representation("application/jsonp")
def output_jsonp(data, code, headers=None):
resp = json.output_json(data, code, headers)
callback = request.args.get('callback', False)
if callback:
resp.data = unicode(callback) + '(' + resp.data + ')'
return resp
@rest_api.representation("text/json")
@rest_api.representation("application/json")
def output_json(data, code, headers=None):
resp = make_response(ujson.dumps(data), code)
resp.headers.extend(headers or {})
return resp
@app.after_request
def access_log(response, *args, **kwargs):
logger = logging.getLogger('jormungandr.access')
query_string = request.query_string.decode(request.url_charset, 'replace')
logger.info(u'"%s %s?%s" %s', request.method, request.path, query_string, response.status_code)
return response
@app.after_request
def add_request_id(response, *args, **kwargs):
response.headers['navitia-request-id'] = request.id
return response
@app.after_request
def add_info_newrelic(response, *args, **kwargs):
try:
record_custom_parameter('navitia-request-id', request.id)
token = get_token()
user = get_user(token=token, abort_if_no_token=False)
app_name = get_app_name(token)
if user:
record_custom_parameter('user_id', str(user.id))
record_custom_parameter('token_name', app_name)
coverages = get_used_coverages()
if coverages:
record_custom_parameter('coverage', coverages[0])
except:
logger = logging.getLogger(__name__)
logger.exception('error while reporting to newrelic:')
return response
# If modules are configured, then load and run them
if 'MODULES' in rest_api.app.config:
rest_api.module_loader = ModulesLoader(rest_api)
for prefix, module_info in rest_api.app.config['MODULES'].items():
module_file = importlib.import_module(module_info['import_path'])
module = getattr(module_file, module_info['class_name'])
rest_api.module_loader.load(module(rest_api, prefix))
rest_api.module_loader.run()
else:
rest_api.app.logger.warning('MODULES isn\'t defined in config. No module will be loaded, then no route '
'will be defined.')
if rest_api.app.config.get('ACTIVATE_PROFILING'):
rest_api.app.logger.warning('=======================================================')
rest_api.app.logger.warning('activation of the profiling, all query will be slow !')
rest_api.app.logger.warning('=======================================================')
from werkzeug.contrib.profiler import ProfilerMiddleware
rest_api.app.config['PROFILE'] = True
f = open('/tmp/profiler.log', 'a')
rest_api.app.wsgi_app = ProfilerMiddleware(rest_api.app.wsgi_app, f, restrictions=[80], profile_dir='/tmp/profile')
index(rest_api)
|
Python
| 0.000027 |
@@ -1819,16 +1819,42 @@
socket()
+%0Agevent.monkey.patch_ssl()
%0A%0A@rest_
|
c11c2a65b3662ca9dacac2b57fef2590432a958f
|
Update src/compas_rhino/interop/primitives.py
|
src/compas_rhino/interop/primitives.py
|
src/compas_rhino/interop/primitives.py
|
from compas.geometry import Point
from compas.geometry import Vector
from compas.geometry import Line
from compas.geometry import Plane
from compas.geometry import Frame
from compas.geometry import Circle
from compas.geometry import Ellipse
from compas.geometry import Polyline
from compas.geometry import Polygon
from Rhino.Geometry import Point3d
from Rhino.Geometry import Vector3d
from Rhino.Geometry import Line as RhinoLine
from Rhino.Geometry import Plane as RhinoPlane
from Rhino.Geometry import Circle as RhinoCircle
from Rhino.Geometry import Ellipse as RhinoEllipse
from Rhino.Geometry import Polyline as RhinoPolyline
def rhino_point_to_compas_point(point):
"""Convert a Rhino point to a COMPAS point.
Parameters
----------
point : :class:`Rhino.Geometry.Point3d`
Returns
-------
:class:`compas.geometry.Point`
"""
return Point(point.X, point.Y, point.Z)
def compas_point_to_rhino_point(point):
"""Convert a COMPAS point to a Rhino point.
Parameters
----------
point : :class:`compas.geometry.Point`
Returns
-------
:class:`Rhino.Geometry.Point3d`
"""
return Point3d(point.x, point.y, point.z)
def rhino_vector_to_compas_vector(vector):
"""Convert a Rhino vector to a COMPAS vector.
Parameters
----------
vector : :class:`Rhino.Geometry.Vector3d`
Returns
-------
:class:`compas.geometry.Vector`
"""
return Vector(vector.X, vector.Y, vector.Z)
def compas_vector_to_rhino_vector(vector):
"""Convert a COMPAS vector to a Rhino vector.
Parameters
----------
vector : :class:`compas.geometry.Vector`
Returns
-------
:class:`Rhino.Geometry.Vector3d`
"""
return Vector3d(vector.x, vector.y, vector.z)
def rhino_line_to_compas_line(line):
"""Convert a Rhino line to a COMPAS line.
Parameters
----------
line : :class:`Rhino.Geometry.Line`
Returns
-------
:class:`compas.geometry.Line`
"""
return Line(rhino_point_to_compas_point(line.From),
rhino_point_to_compas_point(line.To))
def compas_line_to_rhino_line(line):
"""Convert a COMPAS line to a Rhino line.
Parameters
----------
line : :class:`compas.geometry.Line`
Returns
-------
:class:`Rhino.Geometry.Line`
"""
return RhinoLine(compas_point_to_rhino_point(line.start),
compas_point_to_rhino_point(line.end))
def rhino_plane_to_compas_plane(plane):
"""Convert a Rhino plane to a COMPAS plane.
Parameters
----------
plane : :class:`Rhino.Geometry.Plane`
Returns
-------
:class:`compas.geometry.Plane`
"""
return Plane(rhino_point_to_compas_point(plane.Origin),
rhino_vector_to_compas_vector(plane.Normal))
def compas_plane_to_rhino_plane(plane):
"""Convert a COMPAS plane to a Rhino plane.
Parameters
----------
plane : :class:`compas.geometry.Plane`
Returns
-------
:class:`Rhino.Geometry.Plane`
"""
return RhinoPlane(compas_point_to_rhino_point(plane.point),
compas_vector_to_rhino_vector(plane.normal))
def rhino_plane_to_compas_frame(plane):
"""Convert a Rhino plane to a COMPAS frame.
Parameters
----------
plane : :class:`Rhino.Geometry.Plane`
Returns
-------
:class:`compas.geometry.Frame`
"""
return Frame(rhino_point_to_compas_point(plane.Origin),
rhino_vector_to_compas_vector(plane.XAxis),
rhino_vector_to_compas_vector(plane.YAxis))
def compas_frame_to_rhino_plane(frame):
"""Convert a COMPAS frame to a Rhino plane.
Parameters
----------
frame : :class:`compas.geometry.Frame`
Returns
-------
:class:`Rhino.Geometry.Plane`
"""
return RhinoPlane(compas_point_to_rhino_point(frame.point),
compas_vector_to_rhino_vector(frame.xaxis),
compas_vector_to_rhino_vector(frame.yaxis))
def rhino_circle_to_compas_circle(circle):
"""Convert a Rhino circle to a COMPAS circle.
Parameters
----------
circle : :class:`Rhino.Geometry.Circle`
Returns
-------
:class:`compas.geometry.Circle`
"""
return Circle(rhino_plane_to_compas_plane(circle.Plane), circle.Radius)
def compas_circle_to_rhino_circle(circle):
"""Convert a COMPAS circle to a Rhino circle.
Parameters
----------
circle : :class:`compas.geometry.Circle`
Returns
-------
:class:`Rhino.Geometry.Circle`
"""
return RhinoCircle(compas_plane_to_rhino_plane(circle.plane), circle.radius)
def rhino_ellipse_to_compas_ellipse(ellipse):
"""Convert a Rhino ellipse to a COMPAS ellipse.
Parameters
----------
ellipse : :class:`Rhino.Geometry.Ellipse`
Returns
-------
:class:`compas.geometry.Ellipse`
"""
return Ellipse(rhino_plane_to_compas_plane(ellipse.Plane), ellipse.major, ellipse.minor)
def compas_ellipse_to_rhino_ellipse(ellipse):
"""Convert a COMPAS ellipse to a Rhino ellipse.
Parameters
----------
ellipse : :class:`compas.geometry.Ellipse`
Returns
-------
:class:`Rhino.Geometry.Ellipse`
"""
return RhinoEllipse(compas_plane_to_rhino_plane(ellipse.plane), ellipse.major, ellipse.minor)
def rhino_polyline_to_compas_polyline(polyline):
"""Convert a Rhino polyline to a COMPAS polyline.
Parameters
----------
polyline : :class:`Rhino.Geometry.Polyline`
Returns
-------
:class:`compas.geometry.Ellipse`
"""
return Polyline([rhino_point_to_compas_point(point) for point in polyline])
def compas_polyline_to_rhino_polyline(polyline):
"""Convert a COMPAS polyline to a Rhino polyline.
Parameters
----------
polyline : :class:`compas.geometry.Ellipse`
Returns
-------
:class:`Rhino.Geometry.Ellipse`
"""
return RhinoPolyline([compas_point_to_rhino_point(point) for point in polyline])
def rhino_polygon_to_compas_polygon(polygon):
"""Convert a Rhino polygon to a COMPAS polygon.
Parameters
----------
polygon : :class:`Rhino.Geometry.Polygon`
Returns
-------
:class:`compas.geometry.Ellipse`
"""
return Polygon([rhino_point_to_compas_point(point) for point in polygon])
def compas_polygon_to_rhino_polygon(polygon):
"""Convert a COMPAS polygon to a Rhino polygon.
Parameters
----------
polygon : :class:`compas.geometry.Ellipse`
Returns
-------
:class:`Rhino.Geometry.Ellipse`
"""
raise NotImplementedError
|
Python
| 0 |
@@ -5542,38 +5542,39 @@
compas.geometry.
-Ellips
+Polylin
e%60%0A %22%22%22%0A r
|
33c51e6a0612aece239bf01236f110ef9fb40c86
|
Add some uncovered code
|
wordcount_lib.py
|
wordcount_lib.py
|
def consume(filename):
chars = 0
words = 0
lines = 0
with open(filename, 'rt') as fp:
for line in fp:
lines += 1
words += len(line.strip().split())
chars += len(line)
return chars, words, lines
|
Python
| 0.000002 |
@@ -253,8 +253,161 @@
, lines%0A
+%0Adef daaaangerous(param=0):%0A print(%22I'm the most dangerous function West of the Missippi, no test %22%5C%0A %22will cover me!%22)%0A return 3 / param%0A
|
491fdd17768c8de2b547fb74071368858a9a30a6
|
add 'show_ids' flag
|
Synopsis/Formatters/Dump.py
|
Synopsis/Formatters/Dump.py
|
# $Id: Dump.py,v 1.6 2003/11/25 04:52:26 stefan Exp $
#
# Copyright (C) 2003 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
"""
Verbose attribute-oriented xml dump of AST, useful for validation,
introspection, and debugging.
"""
from Synopsis import config
from Synopsis.Processor import Processor, Parameter
from Synopsis import Type, AST
import sys, getopt, os, os.path, string, types
from xml.dom.minidom import getDOMImplementation
dom = getDOMImplementation().createDocument(None, "dump", None)
class Formatter(Processor):
show_declarations = Parameter(True, 'output declarations')
show_types = Parameter(True, 'output types')
show_files = Parameter(True, 'output files')
stylesheet = Parameter(config.datadir + '/dump.css', 'stylesheet to be referenced for rendering')
def process(self, ast, **kwds):
self.set_parameters(kwds)
self.ast = self.merge_input(ast)
self.handlers = {types.NoneType : self.visit_none,
types.TypeType : self.visit_type,
types.IntType : self.visit_string,
types.LongType : self.visit_string,
types.FloatType : self.visit_string,
types.StringType : self.visit_string,
types.TupleType : self.visit_tuple,
types.ListType : self.visit_list,
types.DictType : self.visit_dict,
types.InstanceType : self.visit_instance}
self.visited = {}
self.os = open(self.output, "w")
self.os.write("<?xml version='1.0' encoding='ISO-8859-1'?>\n")
if self.stylesheet:
self.os.write("<?xml-stylesheet href='%s' type='text/css'?>\n"%self.stylesheet)
self.os.write("<ast>\n")
if self.show_declarations:
self.write_declarations(self.ast.declarations())
if self.show_types:
self.write_types(self.ast.types())
if self.show_files:
self.write_files(self.ast.files())
self.os.write("</ast>\n")
return self.ast
def push(self, name):
element = dom.createElement(name)
self.node.appendChild(element)
self.node = element
def pop(self):
self.node = self.node.parentNode
def add_text(self, text):
node = dom.createTextNode(text)
self.node.appendChild(node)
def visit(self, obj):
i,t = id(obj), type(obj)
if self.visited.has_key(i):
self.node.setAttribute('xref', str(i))
return
if self.handlers.has_key(t):
self.handlers[t](obj)
else:
print "Unknown type %s for object: '%s'"%(t,obj)
def visit_none(self, obj): pass
def visit_string(self, obj): self.add_text(str(obj))
def visit_type(self, obj): self.write(obj) # where is that used ??
def visit_tuple(self, obj):
if len(obj) == 0: return
for i in obj:
#self.push('item')
self.visit(i)
#self.pop()
def visit_list(self, obj):
if len(obj) == 0: return
for i in obj:
#self.push('item')
self.visit(i)
#self.pop()
def visit_dict(self, dict):
items = dict.items()
if len(items) == 0: return
items.sort()
for i in items:
self.push("key")
self.visit(i[0])
self.pop()
self.push("value")
self.visit(i[1])
self.pop()
def visit_instance(self, obj):
if isinstance(obj, AST.SourceFile): # just write down the filename
self.add_text(obj.filename())
return
if isinstance(obj, AST.Include):
self.write("Include: (macro:%d, next:%d) '%s'"%(obj.is_macro(),
obj.is_next(),
obj.target().filename()))
return
self.visited[id(obj)] = None
self.push("instance")
self.node.setAttribute('class', "%s.%s"%(obj.__class__.__module__,obj.__class__.__name__))
self.node.setAttribute('id', str(id(obj)))
attrs = obj.__dict__.items()
attrs.sort()
for name, value in attrs:
# ignore None values
if (value == None
or value == []
or value == ()):
continue
# special case for some known attributes...
if name == '_Named__name':
self.node.setAttribute('name', string.join(value, '.'))
continue
if name == '_Declaration__name':
self.node.setAttribute('name', string.join(value, '.'))
continue
if name == '_Declaration__file':
if value:
self.node.setAttribute('file', value.filename())
continue
if name[0] == '_':
index = string.find(name, '__')
if index >= 0:
#name = "%s.%s"%(name[1:index],name[index+2:])
name = name[index+2:]
if (self.handlers[type(value)] == self.visit_string
and not (obj.__class__.__name__ == 'Comment'
and (name == 'summary' or name == 'text'))):
self.node.setAttribute(name, str(value))
else:
self.push(name)
self.visit(value)
self.pop()
self.pop()
def write_declarations(self, declarations):
self.node = dom.createElement("declarations")
for d in declarations: self.visit(d)
self.node.writexml(self.os, indent=" ", addindent=" ", newl="\n")
self.node.unlink()
del self.node
def write_types(self, types):
self.node = dom.createElement("types")
for t in types.values(): self.visit(t)
self.node.writexml(self.os, indent=" ", addindent=" ", newl="\n")
self.node.unlink()
del self.node
def write_files(self, files):
self.node = dom.createElement("files")
for f in files:
self.push("file")
self.visit(f)
self.pop()
self.node.writexml(self.os, indent=" ", addindent=" ", newl="\n")
self.node.unlink()
del self.node
|
Python
| 0.000154 |
@@ -12,17 +12,17 @@
.py,v 1.
-6
+7
2003/11
@@ -26,19 +26,19 @@
/11/
-25 04:52:26
+30 01:28:57
ste
@@ -621,16 +621,81 @@
ssor):%0A%0A
+ show_ids = Parameter(True, 'output object ids as attributes')%0A
show_
@@ -2614,16 +2614,46 @@
key(i):%0A
+ if self.show_ids:%0A
@@ -2692,16 +2692,16 @@
str(i))%0A
-
@@ -4203,16 +4203,43 @@
ame__))%0A
+ if self.show_ids:%0A
se
|
610446ee84b02372bdd98e4530e9be9e6898c3ec
|
Fix #3 issue.
|
textmagic/rest/models/chats.py
|
textmagic/rest/models/chats.py
|
from . import Model, CollectionModel
class ChatMessage(Model):
"""
A Chat Message object model
.. attribute:: id
.. attribute:: direction
.. attribute:: sender
.. attribute:: messageTime
.. attribute:: text
.. attribute:: receiver
.. attribute:: deleted
.. attribute:: userId
.. attribute:: status
.. attribute:: total
.. attribute:: firstName
.. attribute:: lastName
"""
class ChatMessages(CollectionModel):
instance = ChatMessage
name = "chats"
searchable = False
class Chat(Model):
"""
A Chat object model
.. attribute:: id
.. attribute:: phone
.. attribute:: contact
Dictionary like this:
::
{
"id": 4329702,
"firstName": "Jonh",
"lastName": "Doe",
"companyName": "",
"phone": "19025555555",
"email": "",
"country": {
"id": "CA",
"name": "Canada"
},
"customFields": [
{
"value": "1970-01-01",
"id": 1111,
"name": "Birthday",
"createdAt": "2015-04-10T06:51:02+0000"
}
]
}
.. attribute:: unread
.. attribute:: updatedAt
"""
class Chats(CollectionModel):
name = "chats"
instance = Chat
searchable = False
def list(self, **kwargs):
"""
Returns a list of :class:`Chat` objects and a pager dict.
:Example:
chats, pager = client.chats.list()
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
kwargs["search"] = False
return self.get_instances(kwargs)
def by_phone(self, phone=0, **kwargs):
"""
Fetch messages from chat with specified phone number.
:Example:
chat = client.chats.by_phone(phone="447624800500")
:param str phone: Phone number in E.164 format.
:param int page: Fetch specified results page. Default=1
:param int limit: How many results on page. Default=10
"""
chat_messages = ChatMessages(self.base_uri, self.auth)
return self.get_subresource_instances(uid=phone, instance=chat_messages, params=kwargs)
|
Python
| 0 |
@@ -1939,10 +1939,8 @@
hone
-=0
, **
|
39326adf06e675766fa28d6508052196cf99f0a3
|
Fix global name 'cls' is not defined
|
rx/linq/observable/timer.py
|
rx/linq/observable/timer.py
|
import logging
from datetime import datetime
from six import add_metaclass
from rx.observable import Observable
from rx.anonymousobservable import AnonymousObservable
from rx.disposables import CompositeDisposable, \
SingleAssignmentDisposable, SerialDisposable
from rx.concurrency import timeout_scheduler, Scheduler
from rx.internal import ExtensionMethod
log = logging.getLogger("Rx")
# Rx Utils
class TimeInterval(object):
def __init__(self, value, interval):
self.value = value
self.interval = interval
class Timestamp(object):
def __init__(self, value, timestamp):
self.value = value
self.timestamp = timestamp
@add_metaclass(ExtensionMethod)
class ObservableTimer(Observable):
"""Uses a meta class to extend Observable with the methods in this class"""
@classmethod
def observable_timer_timespan_and_period(cls, duetime, period, scheduler):
if duetime == period:
def subscribe(observer):
def action(count):
observer.on_next(count)
count += 1
return count
return scheduler.schedule_periodic(period, action, 0)
return AnonymousObservable(subscribe)
def deferred():
return cls.observable_timer_date_and_period(scheduler.now() + duetime, period, scheduler)
return Observable.defer(deferred)
@staticmethod
def observable_timer_date(duetime, scheduler):
def subscribe(observer):
def action(scheduler, state):
observer.on_next(0)
observer.on_completed()
return scheduler.schedule_absolute(duetime, action)
return AnonymousObservable(subscribe)
@staticmethod
def observable_timer_date_and_period(duetime, period, scheduler):
p = Scheduler.normalize(period)
def subscribe(observer):
count = [0]
d = [duetime]
def action(scheduler, state):
if p > 0:
now = scheduler.now()
d[0] = d[0] + p
if d[0] <= now:
d[0] = now + p
observer.on_next(count[0])
count[0] += 1
state(d[0])
return scheduler.schedule_recursive(d, action)
return AnonymousObservable(subscribe)
@staticmethod
def observable_timer_timespan(duetime, scheduler):
d = Scheduler.normalize(duetime)
def subscribe(observer):
def action(scheduler, state):
observer.on_next(0)
observer.on_completed()
return scheduler.schedule_relative(d, action)
return AnonymousObservable(subscribe)
@staticmethod
def observable_timer_timespan_and_period(duetime, period, scheduler):
if duetime == period:
def subscribe(observer):
def action(count):
observer.on_next(count)
return count + 1
return scheduler.schedule_periodic(period, action, state=0)
return AnonymousObservable(subscribe)
def defer():
return cls.observable_timer_date_and_period(scheduler.now() + duetime, period, scheduler)
return Observable.defer(defer)
@classmethod
def timer(cls, duetime, period=None, scheduler=None):
"""Returns an observable sequence that produces a value after duetime
has elapsed and then after each period.
1 - res = Observable.timer(datetime(...))
2 - res = Observable.timer(datetime(...), 1000)
3 - res = Observable.timer(datetime(...), Scheduler.timeout)
4 - res = Observable.timer(datetime(...), 1000, Scheduler.timeout)
5 - res = Observable.timer(5000)
6 - res = Observable.timer(5000, 1000)
7 - res = Observable.timer(5000, scheduler=Scheduler.timeout)
8 - res = Observable.timer(5000, 1000, Scheduler.timeout)
Keyword arguments:
duetime -- Absolute (specified as a Date object) or relative time
(specified as an integer denoting milliseconds) at which to produce
the first value.
period -- [Optional] Period to produce subsequent values (specified as
an integer denoting milliseconds), or the scheduler to run the
timer on. If not specified, the resulting timer is not recurring.
scheduler -- [Optional] Scheduler to run the timer on. If not
specified, the timeout scheduler is used.
Returns an observable sequence that produces a value after due time has
elapsed and then each period.
"""
log.debug("Observable.timer(duetime=%s, period=%s)", duetime, period)
scheduler = scheduler or timeout_scheduler
if isinstance(duetime, datetime) and period is None:
return cls.observable_timer_date(duetime, scheduler)
if isinstance(duetime, datetime) and period:
return cls.observable_timer_date_and_period(duetime, period, scheduler)
if period is None:
return cls.observable_timer_timespan(duetime, scheduler)
return cls.observable_timer_timespan_and_period(duetime, period, scheduler)
|
Python
| 0.999172 |
@@ -2748,38 +2748,37 @@
ubscribe)%0A%0A @
-static
+class
method%0A def o
@@ -2805,32 +2805,37 @@
span_and_period(
+cls,
duetime, period,
|
05cb079fd4e6b7a9bfd32c1470c9c638af5b7bc9
|
Add comments clarifying implementation choices
|
importlib_metadata/_py39compat.py
|
importlib_metadata/_py39compat.py
|
"""
Compatibility layer with Python 3.8/3.9
"""
from typing import TYPE_CHECKING, Any, Optional, Tuple
if TYPE_CHECKING:
from . import Distribution, EntryPoint
else:
Distribution = EntryPoint = Any
def normalized_name(dist: Distribution) -> Optional[str]:
"""
Honor name normalization for distributions that don't provide ``_normalized_name``.
"""
try:
return dist._normalized_name
except AttributeError:
from . import Prepared
return Prepared.normalize(getattr(dist, "name", None) or dist.metadata['Name'])
def ep_matches(ep: EntryPoint, **params) -> Tuple[EntryPoint, bool]:
"""
Workaround for ``EntryPoint`` objects without the ``matches`` method.
For the sake of convenience, a tuple is returned containing not only the
boolean value corresponding to the predicate evaluation, but also a compatible
``EntryPoint`` object that can be safely used at a later stage.
For example, the following sequences of expressions should be compatible:
# Sequence 1: using the compatibility layer
candidates = (_py39compat.ep_matches(ep, **params) for ep in entry_points)
[ep for ep, predicate in candidates if predicate]
# Sequence 2: using Python 3.9+
[ep for ep in entry_points if ep.matches(**params)]
"""
try:
return ep, ep.matches(**params)
except AttributeError:
from . import EntryPoint
# Reconstruct the EntryPoint object to make sure it is compatible.
_ep = EntryPoint(ep.name, ep.value, ep.group)
return _ep, _ep.matches(**params)
|
Python
| 0 |
@@ -97,17 +97,16 @@
Tuple%0A%0A
-%0A
if TYPE_
@@ -114,16 +114,59 @@
HECKING:
+ # -%3E prevent circular imports on runtime.
%0A fro
@@ -510,16 +510,57 @@
Prepared
+ # -%3E delay to prevent circular imports.
%0A%0A
@@ -1513,16 +1513,57 @@
tryPoint
+ # -%3E delay to prevent circular imports.
%0A%0A
|
faaffde191fe3b8a4d12a94878835d6f80f92548
|
Add --show-score option to rebalance
|
kafka_utils/kafka_cluster_manager/cmds/rebalance.py
|
kafka_utils/kafka_cluster_manager/cmds/rebalance.py
|
# -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
from .command import ClusterManagerCmd
from kafka_utils.util import positive_float
from kafka_utils.util import positive_int
from kafka_utils.util.validation import assignment_to_plan
from kafka_utils.util.validation import validate_plan
DEFAULT_MAX_PARTITION_MOVEMENTS = 1
DEFAULT_MAX_LEADER_CHANGES = 5
class RebalanceCmd(ClusterManagerCmd):
def __init__(self):
super(RebalanceCmd, self).__init__()
self.log = logging.getLogger('ClusterRebalance')
def build_subparser(self, subparsers):
subparser = subparsers.add_parser(
'rebalance',
description='Rebalance cluster by moving partitions across brokers '
'and changing the preferred replica.',
help='This command is used to rebalance a Kafka cluster. Based on '
'the given flags this tool will generate and submit a reassignment '
'plan that will evenly distribute partitions and leaders '
'across the brokers of the cluster. The replication groups option '
'moves the replicas of the same partition to separate replication '
'making the cluster resilient to the failure of one or more zones.'
)
subparser.add_argument(
'--replication-groups',
action='store_true',
help='Evenly distributes replicas over replication-groups.',
)
subparser.add_argument(
'--brokers',
action='store_true',
help='Evenly distributes partitions optimally over brokers'
' with minimal movements for each replication-group.',
)
subparser.add_argument(
'--leaders',
action='store_true',
help='Evenly distributes leaders optimally over brokers.',
)
subparser.add_argument(
'--max-partition-movements',
type=positive_int,
default=DEFAULT_MAX_PARTITION_MOVEMENTS,
help='Maximum number of partition-movements in final set of actions.'
' DEFAULT: %(default)s. RECOMMENDATION: Should be at least max '
'replication-factor across the cluster.',
)
subparser.add_argument(
'--max-leader-changes',
type=positive_int,
default=DEFAULT_MAX_LEADER_CHANGES,
help='Maximum number of actions with leader-only changes.'
' DEFAULT: %(default)s',
)
subparser.add_argument(
'--max-movement-size',
type=positive_float,
default=None,
help='Maximum total size of the partitions moved in the final set'
' of actions. Since each PartitionMeasurer implementation'
' defines its own notion of size, the size unit to use will'
' depend on the selected PartitionMeasurer implementation.'
' DEFAULT: No limit.'
' RECOMMENDATION: Should be at least the maximum partition-size'
' on the cluster.',
)
return subparser
def run_command(self, cluster_topology, cluster_balancer):
"""Get executable proposed plan(if any) for display or execution."""
base_assignment = cluster_topology.assignment
cluster_balancer.rebalance()
assignment = cluster_topology.assignment
if not validate_plan(
assignment_to_plan(assignment),
assignment_to_plan(base_assignment),
):
self.log.error('Invalid latest-cluster assignment. Exiting.')
sys.exit(1)
# Reduce the proposed assignment based on max_partition_movements
# and max_leader_changes
reduced_assignment = self.get_reduced_assignment(
base_assignment,
assignment,
self.args.max_partition_movements,
self.args.max_leader_changes,
)
if reduced_assignment:
self.process_assignment(reduced_assignment)
else:
self.log.info("Cluster already balanced. No actions to perform.")
|
Python
| 0.000001 |
@@ -656,16 +656,120 @@
agerCmd%0A
+from kafka_utils.kafka_cluster_manager.cluster_info.display %5C%0A import display_cluster_topology_stats%0A
from kaf
@@ -3772,32 +3772,201 @@
er.',%0A )%0A
+ subparser.add_argument(%0A '--show-stats',%0A action='store_true',%0A help='Output post-rebalance cluster topology stats.',%0A )%0A
return s
@@ -4169,16 +4169,62 @@
ignment%0A
+ base_score = cluster_balancer.score()%0A
@@ -4300,16 +4300,407 @@
signment
+%0A score = cluster_balancer.score()%0A%0A if self.args.show_stats:%0A display_cluster_topology_stats(cluster_topology, base_assignment)%0A if base_score is not None and score is not None:%0A print('%5CnScore before: %25f' %25 base_score)%0A print('Score after: %25f' %25 score)%0A print('Score improvement: %25f' %25 (score - base_score))
%0A%0A
|
4bd53d96be49c01c04a30d2c064774bac23fc20a
|
Rewrite entry update in DatabaseStorage without explicit update call
|
speedinfo/storage/database/storage.py
|
speedinfo/storage/database/storage.py
|
# coding: utf-8
from django.db import IntegrityError
from django.db.models import ExpressionWrapper, F, FloatField, IntegerField
from django.forms import model_to_dict
from speedinfo.models import ViewProfiler
from speedinfo.storage.base import AbstractStorage
from speedinfo.storage.database.models import Storage
class DatabaseStorage(AbstractStorage):
def add(self, view_name, method, is_anon_call, is_cache_hit, sql_time, sql_count, view_execution_time):
try:
vp, created = Storage.objects.get_or_create(view_name=view_name, method=method)
except IntegrityError:
# IntegrityError raised in the case of concurrent access
# to get_or_create method from another application worker/thread
vp = Storage.objects.get(view_name=view_name, method=method)
Storage.objects.filter(pk=vp.pk).update(
anon_calls=F("anon_calls") + (is_anon_call and 1 or 0),
cache_hits=F("cache_hits") + (is_cache_hit and 1 or 0),
sql_total_time=F("sql_total_time") + sql_time,
sql_total_count=F("sql_total_count") + sql_count,
total_calls=F("total_calls") + 1,
total_time=F("total_time") + view_execution_time,
)
def fetch_all(self, ordering=None):
qs = Storage.objects.annotate(
anon_calls_ratio=ExpressionWrapper(100.0 * F("anon_calls") / F("total_calls"), output_field=FloatField()),
cache_hits_ratio=ExpressionWrapper(100.0 * F("cache_hits") / F("total_calls"), output_field=FloatField()),
sql_count_per_call=ExpressionWrapper(F("sql_total_count") / F("total_calls"), output_field=IntegerField()),
sql_time_ratio=ExpressionWrapper(100.0 * F("sql_total_time") / F("total_time"), output_field=FloatField()),
time_per_call=ExpressionWrapper(F("total_time") / F("total_calls"), output_field=FloatField()),
)
if ordering:
qs = qs.order_by(*ordering)
return [ViewProfiler(**model_to_dict(item)) for item in qs]
def reset(self):
Storage.objects.all().delete()
|
Python
| 0 |
@@ -828,61 +828,11 @@
-Storage.objects.filter(pk=vp.pk).update(%0A
+vp.
anon
@@ -837,17 +837,19 @@
on_calls
-=
+ =
F(%22anon_
@@ -875,33 +875,32 @@
call and 1 or 0)
-,
%0A cac
@@ -884,36 +884,35 @@
1 or 0)%0A
-
+vp.
cache_hits=F(%22ca
@@ -905,17 +905,19 @@
che_hits
-=
+ =
F(%22cache
@@ -951,17 +951,16 @@
1 or 0)
-,
%0A
@@ -952,36 +952,35 @@
1 or 0)%0A
-
+vp.
sql_total_time=F
@@ -977,17 +977,19 @@
tal_time
-=
+ =
F(%22sql_t
@@ -1010,30 +1010,25 @@
sql_time
-,%0A
+%0A
sql_tota
@@ -1015,24 +1015,27 @@
ime%0A
+vp.
sql_total_co
@@ -1037,17 +1037,19 @@
al_count
-=
+ =
F(%22sql_t
@@ -1072,17 +1072,16 @@
ql_count
-,
%0A
@@ -1077,28 +1077,27 @@
unt%0A
-
+vp.
total_calls=
@@ -1095,17 +1095,19 @@
al_calls
-=
+ =
F(%22total
@@ -1118,17 +1118,16 @@
ls%22) + 1
-,
%0A
@@ -1127,20 +1127,19 @@
-
+vp.
total_ti
@@ -1140,17 +1140,19 @@
tal_time
-=
+ =
F(%22total
@@ -1180,17 +1180,16 @@
ion_time
-,
%0A
@@ -1185,24 +1185,32 @@
ime%0A
+vp.save(
)%0A%0A def f
|
f1de47de39129642e748977392c2e348c1d0218c
|
replace SPasswordScimUserV3Controller by SPasswordUserV3Controller
|
keystone_spassword/contrib/spassword/controllers.py
|
keystone_spassword/contrib/spassword/controllers.py
|
#
# Copyright 2015 Telefonica Investigacion y Desarrollo, S.A.U
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Extensions supporting Strong Passwords."""
import copy
import uuid
from keystone import config
from keystone.common import controller
from keystone.common import dependency
from keystone.common import extension
from keystone.common import wsgi
from keystone import exception
from keystone import identity
from keystone.identity.controllers import UserV3
from keystone_scim.contrib.scim.controllers import ScimUserV3Controller
from keystone_scim.contrib.scim import converter as conv
from keystone_spassword.contrib.spassword.checker import CheckPassword
try: from oslo_log import log
except ImportError: from keystone.openstack.common import log
try: from oslo_config import cfg
except ImportError: from oslo.config import cfg
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class SPasswordScimUserV3Controller(ScimUserV3Controller, CheckPassword):
def __init__(self):
super(SPasswordScimUserV3Controller, self).__init__()
def patch_user(self, context, user_id, **kwargs):
scim = self._denormalize(kwargs)
user = conv.user_scim2key(scim)
if CONF.spassword.enabled and 'password' in user:
super(SPasswordScimUserV3Controller, self).strong_check_password(
user['password'])
# TODO: update_user_modification_time()
return super(SPasswordScimUserV3Controller, self).patch_user(context,
user_id,
**kwargs)
def put_user(self, context, user_id, **kwargs):
return self.patch_user(context, user_id, **kwargs)
def create_user(self, context, user):
if CONF.spassword.enabled and 'password' in user:
super(SPasswordScimUserV3Controller, self).strong_check_password(
user['password'])
return super(SPasswordScimUserV3Controller, self).create_user(context,
user=user)
def delete_user(self, context, user_id):
# Delete user from spassword table
LOG.info('deleting user %s scimusercontroller' % user_id)
return super(SPasswordScimUserV3Controller, self).delete_user(context,
user_id)
class SPasswordUserV3Controller(UserV3, CheckPassword):
def __init__(self):
super(SPasswordUserV3Controller, self).__init__()
@controller.protected()
def create_user(self, context, user):
if CONF.spassword.enabled and 'password' in user:
super(SPasswordUserV3Controller, self).strong_check_password(
user['password'])
return super(SPasswordUserV3Controller, self).create_user(context,
user=user)
@controller.protected()
def update_user(self, context, user_id, user):
if CONF.spassword.enabled and 'password' in user:
super(SPasswordUserV3Controller, self).strong_check_password(
user['password'])
return super(SPasswordUserV3Controller, self).update_user(context,
user_id=user_id,
user=user)
@controller.protected()
def delete_user(self, context, user_id):
# Delete user from spassword table
LOG.info('deleting user %s spasswordscimusercontroller' % user_id)
return super(SPasswordScimUserV3Controller, self).delete_user(context,
user_id=user_id)
@controller.protected()
def change_password(self, context, user_id, user):
if CONF.spassword.enabled and 'password' in user:
super(SPasswordUserV3Controller, self).strong_check_password(
user['password'])
return super(SPasswordUserV3Controller, self).change_password(context,
user_id=user_id,
user=user)
def recover_password(self, context, user_id):
"""Perform user password recover procedure."""
if not CONF.spassword.enabled:
raise exception.NotImplemented()
user_info = self.identity_api.get_user(user_id)
LOG.debug('recover password invoked for user %s %s' % (user_info['id'],
user_info['name']))
# Check if user has a email defined
if not 'email' in user_info:
msg = 'User %s %s has no email defined' % (user_info['id'],
user_info['name'])
LOG.error('%s' % msg)
raise exception.Unauthorized(msg)
# Create a new password randomly
new_password = uuid.uuid4().hex
# Set new user password
try:
update_dict = {'password': new_password}
self.identity_api.update_user( user_id, user_ref=update_dict)
except AssertionError:
# authentication failed because of invalid username or password
msg = 'Invalid username or password'
LOG.error('%s' % msg)
raise exception.Unauthorized(msg)
self.send_recovery_password_email(user_info['email'],
new_password)
def send_recovery_password_email(self, user_email, user_password):
import smtplib
TO = [user_email] # must be a list
SUBJECT = "IoT Platform recovery password"
TEXT = "Your new password is %s" % user_password
#
# Prepare actual message
#
mail_headers = ("From: \"%s\" <%s>\r\nTo: %s\r\n"
% (CONF.spassword.smtp_from,
CONF.spassword.smtp_from,
", ".join(TO)))
msg = mail_headers
msg += ("Subject: %s\r\n\r\n" % SUBJECT)
msg += TEXT
#
# Send the mail
#
try:
server = smtplib.SMTP(CONF.spassword.smtp_server,
CONF.spassword.smtp_port)
except smtplib.socket.gaierror:
LOG.error('SMTP socket error')
return False
server.ehlo()
server.starttls()
server.ehlo
try:
server.login(CONF.spassword.smtpuser,
CONF.spassword.smtppassword)
except smtplib.SMTPAuthenticationError:
LOG.error('SMTP authentication error')
return False
try:
server.sendmail(CONF.spassword.smtp_from, TO, msg)
except Exception: # try to avoid catching Exception unless you have to
LOG.error('SMTP authentication error')
return False
finally:
server.quit()
LOG.info('recover password email sent to %s' % user_email)
|
Python
| 0.000008 |
@@ -4399,36 +4399,32 @@
super(SPassword
-Scim
UserV3Controller
@@ -4444,36 +4444,32 @@
e_user(context,%0A
-
|
5cd0ad7e865794401506dbc9358261b5fa020704
|
Move and name region_lookup
|
saau/sections/age/median.py
|
saau/sections/age/median.py
|
import logging
from operator import itemgetter
from matplotlib.cm import get_cmap
import matplotlib as mpl
import cartopy.crs as ccrs
from ...utils.download.abs import get_generic_data, abs_data_to_dataframe
from ..image_provider import ImageProvider
from ...utils.header import render_header_to
DATASETID = 'ABS_CENSUS2011_B02'
FILENAME = 'median_ages.json'
class MedianAgeImageProvider(ImageProvider):
def has_required_data(self):
return self.data_dir_exists(FILENAME)
def obtain_data(self):
data = get_generic_data(
DATASETID,
and_=[
'FREQUENCY.A',
'REGIONTYPE.SA2',
'MEASURE.MAGE'
],
or_=[
'STATE.0',
'STATE.1',
'STATE.2',
'STATE.3',
'STATE.4',
'STATE.5',
'STATE.6',
'STATE.7',
'STATE.8',
'STATE.9'
]
)
assert data['series']
return self.save_json(FILENAME, data)
def build_image(self):
colors = get_cmap('Purples')
age_data = abs_data_to_dataframe(self.load_json(FILENAME))
region_lookup = lambda sa3: self.services.sa3.get(
'SA3_CODE11', int(sa3)
)
age_data = [
(
region_lookup(data_point.REGION),
data_point.Value
)
for _, data_point in age_data.iterrows()
]
values = list(map(itemgetter(1), age_data))
norm = mpl.colors.Normalize(
vmin=min(values),
vmax=max(values)
)
logging.info(
'%d -> %d',
min(values),
max(values)
)
aus_map = self.services.aus_map.get_map()
for shapes, mage in age_data:
aus_map.add_geometries(
[
shape.geometry
for shape in shapes.rec
if shape.geometry
],
crs=ccrs.PlateCarree(),
color=colors(norm(mage))
)
cax = aus_map.figure.add_axes([0.95, 0.2, 0.02, 0.6])
cb = mpl.colorbar.ColorbarBase(
cax,
cmap=colors,
norm=norm,
spacing='props'
)
cb.set_label('Average age')
return render_header_to(
aus_map,
19.25,
[
"<b>MAP</b>",
"SHOWING THE DISTRIBUTION OF",
"<b>MEDIAN AGE</b>",
"<i>Compiled using data from the 2011 Australian Census</i>"
]
)
|
Python
| 0.000001 |
@@ -1084,16 +1084,112 @@
data)%0A%0A
+ def region_lookup(self, sa3):%0A return self.services.sa3.get('SA3_CODE11', int(sa3))%0A%0A
def
@@ -1316,114 +1316,8 @@
E))%0A
-%0A region_lookup = lambda sa3: self.services.sa3.get(%0A 'SA3_CODE11', int(sa3)%0A )%0A%0A
@@ -1363,16 +1363,21 @@
+self.
region_l
|
953d83119005075b9bc59d040389c209208263d5
|
Integrate LLVM at llvm/llvm-project@7354a73945f1
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "7f2b016b820487f2fb69b93e784fff5d8297dea0"
LLVM_SHA256 = "348e586173038ab248e76be34d4a3e5667d56429350150a4a8130fba5a318e05"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
|
Python
| 0.000001 |
@@ -163,132 +163,132 @@
= %227
-f2b016b820487f2fb69b93e784fff5d8297dea0%22%0A LLVM_SHA256 = %22348e586173038ab248e76be34d4a3e5667d56429350150a4a8130fba5a318e05
+354a73945f1c123d66b01f51374ecbdba18fab3%22%0A LLVM_SHA256 = %2273a86e6f9d263a812bfdda5120b8f08467bd8ee39564b75da752854328a72803
%22%0A%0A
|
509a542fd5e3171979fb74aec9226c057d289623
|
Integrate LLVM at llvm/llvm-project@04a5ca862bb9
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "b3a0bed5fb8766dcf27583ab1f73edc6e7232657"
LLVM_SHA256 = "0ee751d5754af930e05cea8b54b061e819e4254e06f64d211e07f2faf3395adf"
tfrt_http_archive(
name = name,
build_file = "//third_party/llvm:BUILD",
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
)
|
Python
| 0.000001 |
@@ -162,133 +162,133 @@
= %22
-b3a0bed5fb8766dcf27583ab1f73edc6e7232657%22%0A LLVM_SHA256 = %220ee751d5754af930e05cea8b54b061e819e4254e06f64d211e07f2faf3395adf
+04a5ca862bb989acdd2729d0991b4e5a104bf244%22%0A LLVM_SHA256 = %2210a0c150c477a36eff25d49f0f50379fddf626a7d87a2b1846fb101173c742c9
%22%0A%0A
|
94fbcf6224624810a30a17cc9bc8d4c1f3458954
|
Integrate LLVM at llvm/llvm-project@5c7b43aa8298
|
third_party/llvm/workspace.bzl
|
third_party/llvm/workspace.bzl
|
"""Provides the repository macro to import LLVM."""
load("//third_party:repo.bzl", "tfrt_http_archive")
def repo(name):
"""Imports LLVM."""
LLVM_COMMIT = "9ba661f91276dd8cc728f9b2e82905b78c0119b4"
LLVM_SHA256 = "f89c033b0e8e6d4e6ff5ce3883aadc82a502b063a830cd685672cec4bea3dfb1"
tfrt_http_archive(
name = name,
sha256 = LLVM_SHA256,
strip_prefix = "llvm-project-" + LLVM_COMMIT,
urls = [
"https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
"https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT),
],
link_files = {
"//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD",
"//third_party/mlir:BUILD": "mlir/BUILD",
"//third_party/mlir:test.BUILD": "mlir/test/BUILD",
},
)
|
Python
| 0.000001 |
@@ -162,133 +162,133 @@
= %22
-9ba661f91276dd8cc728f9b2e82905b78c0119b4%22%0A LLVM_SHA256 = %22f89c033b0e8e6d4e6ff5ce3883aadc82a502b063a830cd685672cec4bea3dfb1
+5c7b43aa8298a389b906d72c792941a0ce57782e%22%0A LLVM_SHA256 = %22e34534a864e2bedaff6811effb757d2eed3a50c9c1e540515ed1568addf1815d
%22%0A%0A
|
fda8088ec3330ec5bc6ea7769c79d2fb9f227728
|
Fix bug with valid hostnames with dashes. I added underscores even though they aren't valid just for good measure
|
salmon/apps/monitor/urls.py
|
salmon/apps/monitor/urls.py
|
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('',
url(r'^$', views.dashboard, name="dashboard"),
url(r'^(?P<name>[\w\.]*)$', views.history, name="history"),
)
|
Python
| 0.000003 |
@@ -160,12 +160,14 @@
me%3E%5B
+-
%5Cw%5C.
+_
%5D*)$
|
8c9adeec07e67c7fbb46709933ada2776722c2ed
|
Remove futile import
|
sqlalchemy_continuum/model_builder.py
|
sqlalchemy_continuum/model_builder.py
|
from copy import copy
import six
import sqlalchemy as sa
from sqlalchemy_utils.functions import primary_keys, declarative_base
from .expression_reflector import ClassExpressionReflector
from .utils import option
from .version import VersionClassBase
class ModelBuilder(object):
"""
VersionedModelBuilder handles the building of Version models based on
parent table attributes and versioning configuration.
"""
def __init__(self, versioning_manager, model):
"""
:param versioning_manager:
VersioningManager object
:param model:
SQLAlchemy declarative model object that acts as a parent for the
built version model
"""
self.manager = versioning_manager
self.model = model
def build_parent_relationship(self):
"""
Builds a relationship between currently built version class and
parent class (the model whose history the currently build version
class represents).
"""
conditions = []
foreign_keys = []
for key, column in sa.inspect(self.model).columns.items():
if column.primary_key:
conditions.append(
getattr(self.model, key)
==
getattr(self.version_class, key)
)
foreign_keys.append(
getattr(self.version_class, key)
)
# We need to check if versions relation was already set for parent
# class.
if not hasattr(self.model, 'versions'):
self.model.versions = sa.orm.relationship(
self.version_class,
primaryjoin=sa.and_(*conditions),
foreign_keys=foreign_keys,
order_by=lambda: getattr(
self.version_class,
option(self.model, 'transaction_column_name')
),
lazy='dynamic',
backref=sa.orm.backref(
'version_parent'
),
viewonly=True
)
def build_transaction_relationship(self, tx_log_class):
"""
Builds a relationship between currently built version class and
Transaction class.
:param tx_log_class: Transaction class
"""
# Only define transaction relation if it doesn't already exist in
# parent class.
backref_name = option(self.model, 'relation_naming_function')(
self.model.__name__
)
transaction_column = getattr(
self.version_class,
option(self.model, 'transaction_column_name')
)
if not hasattr(self.version_class, 'transaction'):
self.version_class.transaction = sa.orm.relationship(
tx_log_class,
primaryjoin=tx_log_class.id == transaction_column,
foreign_keys=[transaction_column],
backref=backref_name
)
else:
setattr(
tx_log_class,
backref_name,
sa.orm.relationship(
self.version_class,
primaryjoin=tx_log_class.id == transaction_column,
foreign_keys=[transaction_column]
)
)
def find_closest_versioned_parent(self):
"""
Finds the closest versioned parent for current parent model.
"""
for class_ in self.model.__bases__:
if class_ in self.manager.version_class_map:
return (self.manager.version_class_map[class_], )
def base_classes(self):
"""
Returns all base classes for history model.
"""
parents = (
self.find_closest_versioned_parent()
or option(self.model, 'base_classes')
or (declarative_base(self.model), )
)
return parents + (VersionClassBase, )
def copy_polymorphism_args(self):
args = {}
if hasattr(self.model, '__mapper_args__'):
arg_names = (
'with_polymorphic',
'polymorphic_identity',
'concrete',
'order_by'
)
for arg in arg_names:
if arg in self.model.__mapper_args__:
args[arg] = (
self.model.__mapper_args__[arg]
)
if 'polymorphic_on' in self.model.__mapper_args__:
column = self.model.__mapper_args__['polymorphic_on']
if isinstance(column, six.string_types):
args['polymorphic_on'] = column
else:
args['polymorphic_on'] = column.key
return args
def inheritance_args(self, table):
"""
Return mapper inheritance args for currently built history model.
"""
args = {}
parent_tuple = self.find_closest_versioned_parent()
if parent_tuple:
# The version classes do not contain foreign keys, hence we need
# to map inheritance condition manually for classes that use
# joined table inheritance
parent = parent_tuple[0]
if parent.__table__.name != table.name:
reflector = ClassExpressionReflector(self.model)
mapper = sa.inspect(self.model)
inherit_condition = reflector(mapper.inherit_condition)
args['inherit_condition'] = sa.and_(
inherit_condition,
'%s.transaction_id = %s_version.transaction_id' % (
parent.__table__.name,
self.model.__table__.name
)
)
args.update(self.copy_polymorphism_args())
return args
def build_model(self, table):
"""
Build history model class.
"""
mapper_args = {}
mapper_args.update(self.inheritance_args(table))
args = {
'__mapper_args__': mapper_args
}
if not sa.inspect(self.model).single:
args['__table__'] = table
return type(
'%sVersion' % self.model.__name__,
self.base_classes(),
args
)
def __call__(self, table, tx_log_class):
"""
Build history model and relationships to parent model, transaction
log model.
"""
# versioned attributes need to be copied for each child class,
# otherwise each child class would share the same __versioned__
# option dict
self.model.__versioned__ = copy(self.model.__versioned__)
self.model.__versioning_manager__ = self.manager
self.version_class = self.build_model(table)
self.build_parent_relationship()
self.build_transaction_relationship(tx_log_class)
self.version_class.__versioning_manager__ = self.manager
self.manager.version_class_map[self.model] = self.version_class
self.manager.parent_class_map[self.version_class] = self.model
return self.version_class
|
Python
| 0.000004 |
@@ -92,22 +92,8 @@
port
- primary_keys,
dec
|
48b79c03b6000cceef1fd16eb478cb77dc461e00
|
Implement module lister
|
salt/modules/ansiblegate.py
|
salt/modules/ansiblegate.py
|
# -*- coding: utf-8 -*-
#
# Author: Bo Maryniuk <[email protected]>
#
# Copyright 2017 SUSE LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import importlib
import yaml
from salt.exceptions import LoaderError
try:
import ansible
import ansible.constants
import ansible.modules
from ansible.plugins import module_loader
except ImportError:
ansible = None
__virtualname__ = 'ansible'
log = logging.getLogger(__name__)
class AnsibleModuleResolver(object):
'''
This class is to resolve all available modules in Ansible.
'''
def __init__(self, opts):
self._opts = opts
self._modules_map = {}
def _get_modules_map(self, path=None):
'''
Get installed Ansible modules
:return:
'''
paths = {}
root = ansible.modules.__path__[0]
if not path:
path = root
for p_el in os.listdir(path):
p_el_path = os.path.join(path, p_el)
if os.path.islink(p_el_path): continue
if os.path.isdir(p_el_path):
paths.update(self._get_modules_map(p_el_path))
else:
if (any(p_el.startswith(elm) for elm in ['__', '.']) or
not p_el.endswith('.py') or
p_el in ansible.constants.IGNORE_FILES):
continue
m_name = p_el.split('.')[0]
als_name = m_name[1:] if m_name.startswith('_') else m_name
paths[als_name] = p_el_path.replace(root, '')
return paths
def load_module(self, module):
'''
Introspect Ansible module.
:param module:
:return:
'''
m_ref = self._modules_map.get(module)
if m_ref is None:
raise LoaderError('Module "{0}" was not found'.format(module))
mod = importlib.import_module('ansible.modules{0}'.format(
'.'.join([elm.split('.')[0] for elm in m_ref.split(os.path.sep)])))
return mod
def resolve(self):
log.debug('Resolving Ansible modules')
self._modules_map = self._get_modules_map()
return self
def install(self):
log.debug('Installing Ansible modules')
return self
_resolver = None
def __virtual__():
'''
Ansible module caller.
:return:
'''
ret = ansible is not None
msg = not ret and "Ansible is not installed on this system" or None
if msg:
log.warning(msg)
else:
global _resolver
_resolver = AnsibleModuleResolver(__opts__).resolve().install()
return ret, msg
def help(module=None, *args):
'''
Display help on Ansible standard module.
:param module:
:return:
'''
if not module:
raise CommandExecutionError('Please tell me what module you want to have a help on. '
'Or call ansible.list to know what is available.')
try:
module = _resolver.load_module(module.split('.')[-1])
except ImportError as err:
raise CommandExecutionError('Module "{0}" is currently not functional on your system.'.format(module))
doc = {}
ret = {}
for docset in module.DOCUMENTATION.split('---'):
try:
docset = yaml.load(docset)
if docset:
doc.update(docset)
except Exception as err:
log.error("Error parsing doc section: {0}".format(err))
if not args:
ret['Available sections on module "{}"'.format(module.__name__.replace('ansible.modules.', ''))] = doc.keys()
else:
for arg in args:
info = doc.get(arg)
if info is not None:
ret[arg] = info
return ret
|
Python
| 0.000001 |
@@ -676,16 +676,31 @@
ort yaml
+%0Aimport fnmatch
%0A%0Afrom s
@@ -732,16 +732,39 @@
derError
+, CommandExecutionError
%0Atry:%0A
@@ -2552,16 +2552,578 @@
rn mod%0A%0A
+ def get_modules_list(self, pattern=None):%0A '''%0A Return module map references.%0A :return:%0A '''%0A if pattern and '*' not in pattern:%0A pattern = '*%7B0%7D*'.format(pattern)%0A modules = %5B%5D%0A for m_name, m_path in self._modules_map.items():%0A m_path = m_path.split('.')%5B0%5D%0A m_name = '.'.join(%5Belm for elm in m_path.split(os.path.sep) if elm%5D)%0A if pattern and fnmatch.fnmatch(m_name, pattern) or not pattern:%0A modules.append(m_name)%0A return sorted(modules)%0A%0A
def
@@ -4822,8 +4822,147 @@
urn ret%0A
+%0A%0Adef list(pattern=None):%0A '''%0A Lists available modules.%0A :return:%0A '''%0A return _resolver.get_modules_list(pattern=pattern)%0A
|
87656428f02134c36c052fe60cfaa25536291cfe
|
Fix syntax error
|
upcloud_api/cloud_manager/storage_mixin.py
|
upcloud_api/cloud_manager/storage_mixin.py
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from upcloud_api import Storage
class StorageManager(object):
"""
Functions for managing Storage disks. Intended to be used as a mixin for CloudManager.
"""
def get_storages(self, storage_type='normal'):
"""
Return a list of Storage objects from the API.
Storage types: public, private, normal, backup, cdrom, template, favorite
"""
res = self.get_request('/storage/' + storage_type)
return Storage._create_storage_objs(res['storages'], cloud_manager=self)
def get_storage(self, storage):
"""
Return a Storage object from the API.
"""
res = self.get_request('/storage/' + str(storage))
return Storage(cloud_manager=self, **res['storage'])
def create_storage(self, size=10, tier='maxiops', title='Storage disk', zone='fi-hel1', backup_rule={}):
"""
Create a Storage object. Returns an object based on the API's response.
"""
body = {
'storage': {
'size': size,
'tier': tier,
'title': title,
'zone': zone
'backup_rule': backup_rule
}
}
res = self.post_request('/storage', body)
return Storage(cloud_manager=self, **res['storage'])
def _modify_storage(self, storage, size, title, backup_rule={}):
body = {'storage': {}}
if size:
body['storage']['size'] = size
if title:
body['storage']['title'] = title
if backup_rule:
body['storage']['backup_rule'] = backup_rule
return self.request('PUT', '/storage/' + str(storage), body)
def modify_storage(self, storage, size, title, backup_rule={}):
"""
Modify a Storage object. Returns an object based on the API's response.
"""
res = self._modify_storage(str(storage), size, title, backup_rule)
return Storage(cloud_manager=self, **res['storage'])
def delete_storage(self, UUID):
"""
Destroy a Storage object.
"""
return self.request('DELETE', '/storage/' + UUID)
def attach_storage(self, server, storage, storage_type, address):
"""
Attach a Storage object to a Server. Return a list of the server's storages.
"""
body = {'storage_device': {}}
if storage:
body['storage_device']['storage'] = str(storage)
if storage_type:
body['storage_device']['type'] = storage_type
if address:
body['storage_device']['address'] = address
url = '/server/{0}/storage/attach'.format(server)
res = self.post_request(url, body)
return Storage._create_storage_objs(res['server']['storage_devices'], cloud_manager=self)
def detach_storage(self, server, address):
"""
Detach a Storage object to a Server. Return a list of the server's storages.
"""
body = {'storage_device': {'address': address}}
url = '/server/{0}/storage/detach'.format(server)
res = self.post_request(url, body)
return Storage._create_storage_objs(res['server']['storage_devices'], cloud_manager=self)
|
Python
| 0.000004 |
@@ -1271,16 +1271,17 @@
e': zone
+,
%0A
|
9a6c74bdb8c7b386f75100ab2fafabae3a5a9997
|
Add utility functions to stix.Entity
|
stix/__init__.py
|
stix/__init__.py
|
# Copyright (c) 2014, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
__version__ = "1.0.1.0"
import json
from StringIO import StringIO
class Entity(object):
"""Base class for all classes in the STIX API."""
def to_obj(self, return_obj=None):
"""Export an object as a binding object representation"""
pass
def from_obj(self, obj):
"""Create an object from a binding object"""
pass
def _get_namespaces(self, ns_dict):
import stix.utils.nsparser as nsparser
import cybox.utils.nsparser as cybox_nsparser
import stix.utils.idgen as idgen
if not ns_dict: ns_dict = {}
xml_ns_dict = {'http://www.w3.org/2001/XMLSchema-instance': 'xsi',
'http://stix.mitre.org/stix-1': 'stix',
'http://stix.mitre.org/common-1': 'stixCommon',
'http://stix.mitre.org/default_vocabularies-1': 'stixVocabs',
idgen.get_id_namespace() : idgen.get_id_namespace_alias()}
namespace_parser = nsparser.NamespaceParser()
all_ns_dict = dict(xml_ns_dict)
ns_set = namespace_parser.get_namespaces(self)
for ns in ns_set:
if ns in ns_dict:
all_ns_dict[ns] = ns_dict[ns]
elif ns.startswith("http://cybox.mitre.org"):
for cybox_ns_tup in cybox_nsparser.NS_LIST:
if ns == cybox_ns_tup[0]:
all_ns_dict[ns] = cybox_ns_tup[1]
elif ns in nsparser.DEFAULT_EXT_TO_PREFIX:
all_ns_dict[ns] = nsparser.DEFAULT_EXT_TO_PREFIX[ns]
else:
all_ns_dict[ns] = nsparser.DEFAULT_STIX_NS_TO_PREFIX[ns]
return all_ns_dict
def _get_schema_locations(self):
import stix.utils.nsparser as nsparser
schemaloc_dict = nsparser.NamespaceParser().get_namespace_schemalocation_dict(self)
return schemaloc_dict
def to_xml(self, include_namespaces=True, ns_dict=None, pretty=True):
"""Export an object as an XML String"""
s = StringIO()
namespace_def = ""
if include_namespaces:
if not ns_dict: ns_dict = {}
all_ns_dict = self._get_namespaces(ns_dict)
schemaloc_dict = self._get_schema_locations()
import stix.utils.nsparser as nsparser
namespace_def = nsparser.NamespaceParser().get_namespace_def_str(all_ns_dict, schemaloc_dict)
if not pretty:
namespace_def = namespace_def.replace('\n\t', ' ')
self.to_obj().export(s, 0, all_ns_dict, pretty_print=pretty, namespacedef_=namespace_def)
return s.getvalue()
def to_json(self):
return json.dumps(self.to_dict())
@staticmethod
def from_dict(dict_repr, return_obj=None):
"""Convert from dict representation to object representation."""
return return_obj
|
Python
| 0.000002 |
@@ -3103,8 +3103,388 @@
rn_obj%0D%0A
+%0D%0A @classmethod%0D%0A def object_from_dict(cls, entity_dict):%0D%0A %22%22%22Convert from dict representation to object representation.%22%22%22%0D%0A return cls.from_dict(entity_dict).to_obj()%0D%0A%0D%0A @classmethod%0D%0A def dict_from_object(cls, entity_obj):%0D%0A %22%22%22Convert from object representation to dict representation.%22%22%22%0D%0A return cls.from_obj(entity_obj).to_dict()%0D%0A
|
dde76066d3c7ecbc4408104cb212c92d532e50ba
|
Fix undefined info error and accept HTTP 201 response code (#2643)
|
lib/ansible/modules/extras/notification/campfire.py
|
lib/ansible/modules/extras/notification/campfire.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: campfire
version_added: "1.2"
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
options:
subscription:
description:
- The subscription name to use.
required: true
token:
description:
- API token.
required: true
room:
description:
- Room number to which the message should be sent.
required: true
msg:
description:
- The message body.
required: true
notify:
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ ]
author: "Adam Garside (@fabulops)"
'''
EXAMPLES = '''
- campfire: subscription=foo token=12345 room=123 msg="Task completed."
- campfire: subscription=foo token=12345 room=123 notify=loggins
msg="Task completed ... with feeling."
'''
import cgi
def main():
module = AnsibleModule(
argument_spec=dict(
subscription=dict(required=True),
token=dict(required=True, no_log=True),
room=dict(required=True),
msg=dict(required=True),
notify=dict(required=False,
choices=["56k", "bell", "bezos", "bueller",
"clowntown", "cottoneyejoe",
"crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama",
"greatjob", "greyjoy", "guarantee",
"heygirl", "horn", "horror",
"inconceivable", "live", "loggins",
"makeitso", "noooo", "nyan", "ohmy",
"ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret",
"sexyback", "story", "tada", "tmyk",
"trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah",
"yodel"]),
),
supports_check_mode=False
)
subscription = module.params["subscription"]
token = module.params["token"]
room = module.params["room"]
msg = module.params["msg"]
notify = module.params["notify"]
URI = "https://%s.campfirenow.com" % subscription
NSTR = "<message><type>SoundMessage</type><body>%s</body></message>"
MSTR = "<message><body>%s</body></message>"
AGENT = "Ansible/1.2"
# Hack to add basic auth username and password the way fetch_url expects
module.params['url_username'] = token
module.params['url_password'] = 'X'
target_url = '%s/room/%s/speak.xml' % (URI, room)
headers = {'Content-Type': 'application/xml',
'User-agent': AGENT}
# Send some audible notification if requested
if notify:
response, info = fetch_url(module, target_url, data=NSTR % cgi.escape(notify), headers=headers)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(notify, info['status']))
# Send the message
response, info = fetch_url(module, target_url, data=MSTR %cgi.escape(msg), headers=headers)
if info['status'] != 200:
module.fail_json(msg="unable to send msg: '%s', campfire api"
" returned error code: '%s'" %
(msg, info['status']))
module.exit_json(changed=True, room=room, msg=msg, notify=notify)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
|
Python
| 0 |
@@ -4351,32 +4351,36 @@
eaders=headers)%0A
+
if info%5B'sta
@@ -4381,32 +4381,47 @@
o%5B'status'%5D
-!= 200:%0A
+not in %5B200, 201%5D:%0A
modu
@@ -4498,32 +4498,36 @@
+
%22 returned error
@@ -4533,32 +4533,36 @@
r code: '%25s'%22 %25%0A
+
@@ -4746,14 +4746,25 @@
s'%5D
-!= 200
+not in %5B200, 201%5D
:%0A
|
0efe8e9cfbd3a5d3319553aabf4f0dd17fa53d33
|
fix license test
|
awx/main/tests/functional/api/test_settings.py
|
awx/main/tests/functional/api/test_settings.py
|
# Copyright (c) 2016 Ansible, Inc.
# All Rights Reserved.
# Python
import pytest
# Django
from django.core.urlresolvers import reverse
# AWX
from awx.conf.models import Setting
@pytest.mark.django_db
def test_license_cannot_be_removed_via_system_settings(get, put, patch, delete, admin, enterprise_license):
url = reverse('api:setting_singleton_detail', args=('system',))
response = get(url, user=admin, expect=200)
assert not response.data['LICENSE']
Setting.objects.create(key='LICENSE', value=enterprise_license)
response = get(url, user=admin, expect=200)
assert response.data['LICENSE']
put(url, user=admin, data=response.data, expect=200)
response = get(url, user=admin, expect=200)
assert response.data['LICENSE']
patch(url, user=admin, data={}, expect=200)
response = get(url, user=admin, expect=200)
assert response.data['LICENSE']
delete(url, user=admin, expect=204)
response = get(url, user=admin, expect=200)
assert response.data['LICENSE']
|
Python
| 0 |
@@ -74,16 +74,26 @@
t pytest
+%0Aimport os
%0A%0A# Djan
@@ -184,16 +184,205 @@
etting%0A%0A
+'''%0AEnsures that tests don't pick up dev container license file%0A'''%[email protected]%0Adef mock_no_license_file(mocker):%0A os.environ%5B'AWX_LICENSE_FILE'%5D = '/does_not_exist'%0A return None%0A
%0A@pytest
@@ -452,16 +452,38 @@
ettings(
+mock_no_license_file,
get, put
@@ -527,16 +527,17 @@
cense):%0A
+%0A
url
|
f5e62a0611eca453696d9a8b5fc3ee4f3941a297
|
update junos_template module
|
lib/ansible/modules/network/junos/junos_template.py
|
lib/ansible/modules/network/junos/junos_template.py
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: junos_template
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage configuration on remote devices running Junos
description:
- This module will load a candidate configuration
from a template file onto a remote device running Junos. The
module will return the differences in configuration if the diff
option is specified on the Ansible command line
extends_documentation_fragment: junos
options:
src:
description:
- The path to the config source. The source can be either a
file with config or a template that will be merged during
runtime. By default the task will search for the source
file in role or playbook root folder in templates directory.
required: true
default: null
backup:
description:
- When this argument is configured true, the module will backup
the configuration from the node prior to making any changes.
The backup file will be written to backup_{{ hostname }} in
the root of the playbook directory.
required: false
default: false
choices: ["true", "false"]
confirm:
description:
- The C(confirm) argument will configure a time out value for
the commit to be confirmed before it is automatically
rolled back. If the C(confirm) argument is set to False, this
argument is silently ignored. If the value for this argument
is set to 0, the commit is confirmed immediately.
required: false
default: 0
comment:
description:
- The C(comment) argument specifies a text string to be used
when committing the configuration. If the C(confirm) argument
is set to False, this argument is silently ignored.
required: false
default: configured by junos_template
action:
description:
- The C(action) argument specifies how the module will apply changes.
required: false
default: merge
choices: ['merge', 'overwrite', 'replace']
version_added: "2.2"
config_format:
description:
- The C(format) argument specifies the format of the configuration
template specified in C(src). If the format argument is not
specified, the module will attempt to infer the configuration
format based of file extension. Files that end in I(xml) will set
the format to xml. Files that end in I(set) will set the format
to set and all other files will default the format to text.
required: false
default: null
choices: ['text', 'xml', 'set']
requirements:
- junos-eznc
notes:
- This module requires the netconf system service be enabled on
the remote device being managed
"""
EXAMPLES = """
- junos_template:
src: config.j2
comment: update system config
- name: replace config hierarchy
src: config.j2
action: replace
- name: overwrite the config
src: config.j2
action: overwrite
"""
DEFAULT_COMMENT = 'configured by junos_template'
def main():
argument_spec = dict(
src=dict(required=True, type='path'),
confirm=dict(default=0, type='int'),
comment=dict(default=DEFAULT_COMMENT),
action=dict(default='merge', choices=['merge', 'overwrite', 'replace']),
config_format=dict(choices=['text', 'set', 'xml']),
backup=dict(default=False, type='bool'),
transport=dict(default='netconf', choices=['netconf'])
)
module = get_module(argument_spec=argument_spec,
supports_check_mode=True)
comment = module.params['comment']
confirm = module.params['confirm']
commit = not module.check_mode
action = module.params['action']
src = module.params['src']
fmt = module.params['config_format']
if action == 'overwrite' and fmt == 'set':
module.fail_json(msg="overwrite cannot be used when format is "
"set per junos documentation")
results = dict(changed=False)
results['_backup'] = str(module.get_config()).strip()
diff = module.load_config(src, action=action, comment=comment,
format=fmt, commit=commit, confirm=confirm)
if diff:
results['changed'] = True
results['diff'] = dict(prepared=diff)
module.exit_json(**results)
from ansible.module_utils.basic import *
from ansible.module_utils.junos import *
if __name__ == '__main__':
main()
|
Python
| 0 |
@@ -3626,16 +3626,69 @@
rite%0A%22%22%22
+%0Afrom ansible.module_utils.junos import NetworkModule
%0A%0ADEFAUL
@@ -4185,13 +4185,16 @@
e =
-get_m
+NetworkM
odul
@@ -4224,16 +4224,19 @@
t_spec,%0A
+
@@ -4736,16 +4736,23 @@
(module.
+config.
get_conf
@@ -4784,16 +4784,23 @@
module.
+config.
load_con
@@ -5029,91 +5029,8 @@
)%0A%0A%0A
-from ansible.module_utils.basic import *%0Afrom ansible.module_utils.junos import *%0A%0A
if _
|
c91e7dcc969485644d8e26c459c894925b3f0720
|
add in fasta format
|
scripts/dump_biodatabase.py
|
scripts/dump_biodatabase.py
|
#!/usr/bin/env python
from getpass import getpass
from BioSQL import BioSeqDatabase
from common import standard_options, generate_placeholders, chunks, extract_feature_sql
def get_seqfeature_for_db(server, biodb):
''' find all seqfeatures that have the given value for the qualifier
returns a list of seqfeature_id
'''
sql = "SELECT qv.seqfeature_id FROM seqfeature_qualifier_value qv join seqfeature s using(seqfeature_id) join bioentry b using(bioentry_id) join biodatabase bd using(biodatabase_id) WHERE bd.name = %s"
return server.adaptor.execute_and_fetchall(sql, (biodb,))
def main(args):
server = BioSeqDatabase.open_database(driver=args.driver, db=args.database, user=args.user, host=args.host, passwd=args.password)
seqfeature_ids = get_seqfeature_for_db(server, args.database_name)
if args.output_format == 'feat-prot':
extract_feature_sql(server, seqfeature_ids, type=['CDS'], translate=True )
elif args.output_format == 'feat-nucl':
extract_feature_sql(server, seqfeature_ids )
if __name__ == "__main__":
parser = standard_options()
parser.add_argument('-D', '--database-name', help='namespace of the database that you want to add into', dest='database_name', required=True)
parser.add_argument('-o', '--output_format', help='output format of the selected sequences', choices=['feat-prot', 'feat-nucl'], default='feat-prot')
args = parser.parse_args()
if args.password is None:
args.password = getpass("Please enter the password for user " + \
args.user + " on database " + args.database)
main(args)
|
Python
| 0.999986 |
@@ -15,16 +15,27 @@
python%0A
+import sys%0A
from get
@@ -760,16 +760,232 @@
sword)%0A%0A
+ if args.output_format == 'fasta':%0A from Bio import SeqIO%0A db = server%5Bargs.database_name%5D%0A for rec in db.values():%0A SeqIO.write(rec, sys.stdout, args.output_format)%0A else:%0A%0A
seqf
@@ -1048,24 +1048,28 @@
_name)%0A%0A
+
+
if args.outp
@@ -1094,16 +1094,20 @@
-prot':%0A
+
@@ -1185,16 +1185,20 @@
e )%0A
+
+
elif arg
@@ -1229,16 +1229,20 @@
-nucl':%0A
+
@@ -1623,16 +1623,25 @@
at-nucl'
+, 'fasta'
%5D, defau
|
4a9e34a57476c92a4147f9ecafc357a681f1a19a
|
Add wrapper for testing fixing functionality
|
scripts/flaskext_migrate.py
|
scripts/flaskext_migrate.py
|
# Script which modifies source code away from the deprecated "flask.ext"
# format. Does not yet fully support imports in the style:
#
# "import flask.ext.foo"
#
# these are converted to "import flask_foo" in the
# main import statement, but does not handle function calls in the source.
#
# Run in the terminal by typing: `python flaskext_migrate.py <source_file.py>`
#
# Author: Keyan Pishdadian 2015
from redbaron import RedBaron
import sys
def read_source(input_file):
"""Parses the input_file into a RedBaron FST."""
with open(input_file, "r") as source_code:
red = RedBaron(source_code.read())
return red
def write_source(red, input_file):
"""Overwrites the input_file once the FST has been modified."""
with open(input_file, "w") as source_code:
source_code.write(red.dumps())
def fix_imports(red):
"""Wrapper which fixes "from" style imports and then "import" style."""
red = fix_standard_imports(red)
red = fix_from_imports(red)
return red
def fix_from_imports(red):
"""
Converts "from" style imports to not use "flask.ext".
Handles:
Case 1: from flask.ext.foo import bam --> from flask_foo import bam
Case 2: from flask.ext import foo --> import flask_foo as foo
"""
from_imports = red.find_all("FromImport")
for x in range(len(from_imports)):
values = from_imports[x].value
if (values[0].value == 'flask') and (values[1].value == 'ext'):
# Case 1
if len(from_imports[x].value) == 3:
package = values[2].value
modules = from_imports[x].modules()
r = "{}," * len(modules)
from_imports[x].replace("from flask_%s import %s"
% (package, r.format(*modules)[:-1]))
# Case 2
else:
module = from_imports[x].modules()[0]
from_imports[x].replace("import flask_%s as %s"
% (module, module))
return red
def fix_standard_imports(red):
"""
Handles import modification in the form:
import flask.ext.foo" --> import flask_foo
Does not modify function calls elsewhere in the source outside of the
original import statement.
"""
imports = red.find_all("ImportNode")
for x in range(len(imports)):
values = imports[x].value
try:
if (values[x].value[0].value == 'flask' and
values[x].value[1].value == 'ext'):
package = values[x].value[2].value
imports[x].replace("import flask_%s" % package)
except IndexError:
pass
return red
if __name__ == "__main__":
input_file = sys.argv[1]
ast = read_source(input_file)
new_ast = fix_imports(ast)
write_source(new_ast, input_file)
|
Python
| 0 |
@@ -2685,63 +2685,28 @@
d%0A%0A%0A
-if __name__ == %22__main__%22:%0A input_file = sys.argv%5B1%5D
+def fix(input_file):
%0A
@@ -2793,20 +2793,97 @@
ew_ast, input_file)%0A
+%0Aif __name__ == %22__main__%22:%0A input_file = sys.argv%5B1%5D%0A fix(input_file)%0A
|
5264a58ca8735ee59753a5b621b4559475bfd701
|
Allow writing roster file
|
saltcontainers/factories.py
|
saltcontainers/factories.py
|
import os
import py
import yaml
import string
import logging
import tarfile
import factory
import factory.fuzzy
from docker import Client
from models import ContainerModel, MasterModel, MinionModel
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class BaseFactory(factory.Factory):
class Meta:
model = dict
strategy = factory.BUILD_STRATEGY
class DockerClientFactory(factory.StubFactory):
@classmethod
def stub(cls, **kwargs):
return Client(base_url='unix://var/run/docker.sock')
class SaltConfigFactory(BaseFactory):
tmpdir = None
root = factory.LazyAttribute(lambda o: o.tmpdir.ensure_dir(o.factory_parent.name))
conf_path = factory.LazyAttribute(
lambda o: o.tmpdir / '{0}.conf.tar'.format(o.factory_parent.name))
conf_type = None
config = {}
pillar = {}
sls = {}
id = factory.fuzzy.FuzzyText(length=5, prefix='id_', chars=string.ascii_letters)
@factory.post_generation
def extra_configs(obj, create, extracted, **kwargs):
if extracted:
config_path = obj['root'] / '{}.d'.format(obj['conf_type'])
config_path.ensure_dir()
for name, config in extracted.items():
config_file = config_path / '{0}.conf'.format(name)
config_file.write(yaml.safe_dump(config, default_flow_style=False))
@factory.post_generation
def post(obj, create, extracted, **kwargs):
config_path = obj['root'] / '{}.d'.format(obj['conf_type'])
config_path.ensure_dir()
config_file = obj['root'] / obj['conf_type']
main_config = {
'include': '{0}.d/*'.format(obj['conf_type'])
}
if obj['conf_type'] in ['minion', 'proxy']:
main_config['id'] = obj['id']
config_file.write(
yaml.safe_dump(main_config, default_flow_style=False))
config_path = obj['root'] / '{}.d'.format(obj['conf_type'])
for name, config in obj['config'].items():
config_file = config_path / '{0}.conf'.format(name)
config_file.write(yaml.safe_dump(config, default_flow_style=False))
pillar_path = obj['root'].ensure_dir('pillar')
for name, content in obj['pillar'].items():
sls_file = pillar_path / '{0}.sls'.format(name)
sls_file.write(yaml.safe_dump(content, default_flow_style=False))
sls_path = obj['root'].ensure_dir('sls')
for name, source in obj['sls'].items():
sls_file = sls_path / '{0}.sls'.format(name)
sls_file.write(py.path.local(source).read())
class MasterSaltConfigFactory(SaltConfigFactory):
@factory.post_generation
def apply_states(obj, create, extracted, **kwargs):
if extracted:
destination = 'masterless'
config_path = obj['root'] / 'minion.d'
config_path.ensure_dir()
(config_path / 'masterless.conf').write(
yaml.safe_dump(
{
'file_client': 'local',
'file_roots': {
'base': ["/etc/salt/{}".format(destination)]
},
'pillar_roots': {'base': ["/etc/salt/pillar"]}
},
default_flow_style=False
)
)
sls_path = obj['root'].ensure_dir(destination)
for name, source in extracted.items():
sls_file = sls_path / '{0}.sls'.format(name)
sls_file.write(py.path.local(source).read())
class ContainerConfigFactory(BaseFactory):
name = factory.fuzzy.FuzzyText(
length=5, prefix='container_', chars=string.ascii_letters)
salt_config = factory.SubFactory(SaltConfigFactory)
image = None
entrypoint = '/bin/bash'
environment = dict()
tty = True
stdin_open = True
working_dir = "/salt-toaster/"
ports = [4000, 4506]
docker_client = None
@factory.lazy_attribute
def volumes(self):
volumes = [os.getcwd()]
return volumes
@factory.lazy_attribute
def host_config(self):
params = dict(
port_bindings={},
binds={
os.getcwd(): {
'bind': "/salt-toaster/",
'mode': 'ro'
}
}
)
return self.docker_client.create_host_config(**params)
class ContainerFactory(BaseFactory):
config = factory.SubFactory(ContainerConfigFactory)
ip = None
class Meta:
model = ContainerModel
@classmethod
def build(cls, **kwargs):
obj = super(ContainerFactory, cls).build(**kwargs)
assert obj['config']['image']
docker_client = obj['config']['docker_client']
docker_client.create_container(
**{
k: obj['config'][k] for k in obj['config'].keys()
if k not in ['salt_config', 'docker_client']
}
)
docker_client.start(obj['config']['name'])
data = docker_client.inspect_container(obj['config']['name'])
obj['ip'] = data['NetworkSettings']['IPAddress']
try:
message = "{0}: {1}".format(
obj['config']['salt_config']['conf_type'],
obj.run('salt --version').strip())
logger.info(message)
except TypeError:
pass
return obj
class SaltFactory(BaseFactory):
container = factory.SubFactory(ContainerFactory)
@classmethod
def build(cls, **kwargs):
obj = super(SaltFactory, cls).build(**kwargs)
docker_client = obj['container']['config']['docker_client']
conf_path = obj['container']['config']['salt_config']['conf_path']
with tarfile.open(conf_path.strpath, mode='w') as archive:
root = obj['container']['config']['salt_config']['root']
for item in root.listdir():
archive.add(
item.strpath,
arcname=item.strpath.replace(root.strpath, '.'))
with conf_path.open('rb') as f:
docker_client.put_archive(
obj['container']['config']['name'], '/etc/salt', f.read())
res = docker_client.exec_create(
obj['container']['config']['name'], obj['cmd']
)
output = docker_client.exec_start(res['Id'])
assert 'executable file not found' not in output
return obj
class MasterFactory(SaltFactory):
id = factory.LazyAttribute(lambda o: o.container['config']['salt_config']['id'])
cmd = 'salt-master -d -l debug'
container = factory.SubFactory(
ContainerFactory,
config__salt_config=factory.SubFactory(MasterSaltConfigFactory)
)
class Meta:
model = MasterModel
@classmethod
def build(cls, **kwargs):
obj = super(MasterFactory, cls).build(**kwargs)
obj['container'].run("salt-call --local state.apply")
return obj
class MinionFactory(SaltFactory):
id = factory.LazyAttribute(lambda o: o.container['config']['salt_config']['id'])
cmd = 'salt-minion -d -l debug'
class Meta:
model = MinionModel
|
Python
| 0.000001 |
@@ -3585,24 +3585,248 @@
e).read())%0A%0A
+ @factory.post_generation%0A def roster(obj, create, extracted, **kwargs):%0A if extracted:%0A roster = obj%5B'root'%5D / 'roster'%0A roster.write(yaml.safe_dump(extracted, default_flow_style=False))%0A%0A
%0Aclass Conta
|
95d8f915e4aee6fbab4ca741197a3563eb3a5ff2
|
bump version to 0.4
|
aldryn_bootstrap3/__init__.py
|
aldryn_bootstrap3/__init__.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
__version__ = '0.3'
|
Python
| 0 |
@@ -96,7 +96,7 @@
'0.
-3
+4
'%0A
|
bf9c2782bde285107960eb2de8d746ec84c68477
|
add test
|
aligot/tests/test_notebook.py
|
aligot/tests/test_notebook.py
|
# coding: utf-8
from django.test import TestCase
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
from rest_framework import status
from ..models import NoteBook, User
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
class TestNoteBookApi(TestCase):
def setUp(self):
self.client = APIClient()
self.user = User.objects.create(username='user', password='pass')
self.client.force_authenticate(user=self.user)
def test_create_without_params(self):
self.assertEquals(status.HTTP_400_BAD_REQUEST, self.client.post(reverse('notebook-list')).status_code)
self.assertEquals(0, NoteBook.objects.count())
def test_create(self):
response = self.client.post(
reverse('notebook-list'),
{'title': 'a title', 'created_by': reverse('user-detail', args=[self.user.id])}
)
self.assertEquals(status.HTTP_201_CREATED, response.status_code, response.content)
self.assertEquals(1, NoteBook.objects.count())
def test_update(self):
notebook = NoteBook.objects.create(title='a title', created_by=self.user)
self.assertEquals(1, NoteBook.objects.count())
response = self.client.put(
reverse('notebook-detail', args=[notebook.id]),
{'title': 'new title', 'created_by': reverse('user-detail', args=[self.user.id])}
)
self.assertEquals(status.HTTP_200_OK, response.status_code, response.content)
self.assertEquals(1, NoteBook.objects.count())
self.assertEquals('new title', NoteBook.objects.all()[0].title)
def test_delete(self):
notebook = NoteBook.objects.create(title='a title', created_by=self.user)
self.assertEquals(1, NoteBook.objects.count())
response = self.client.delete(reverse('notebook-detail', args=[notebook.id]))
self.assertEquals(status.HTTP_204_NO_CONTENT, response.status_code, response.content)
self.assertEquals(0, NoteBook.objects.count())
def test_get(self):
notebook = NoteBook.objects.create(title='a title', created_by=self.user)
self.assertEquals(1, NoteBook.objects.count())
response = self.client.get(reverse('notebook-detail', args=[notebook.id]))
self.assertEquals(status.HTTP_200_OK, response.status_code, response.content)
self.assertEquals('a title', response.data['title'], response.data)
def test_get_all(self):
NoteBook.objects.create(title='notebook 1', created_by=self.user)
NoteBook.objects.create(title='notebook 2', created_by=self.user)
response = self.client.get(reverse('notebook-list'))
self.assertEquals(status.HTTP_200_OK, response.status_code, response.content)
self.assertEquals(2, len(response.data))
self.assertEquals('notebook 1', response.data[0]['title'])
self.assertEquals('notebook 2', response.data[1]['title'])
class TestNoteBookApiWithDifferentUser(TestCase):
def setUp(self):
self.client = APIClient()
self.user1 = User.objects.create(username='user1', password='pass')
self.user2 = User.objects.create(username='user2', password='pass')
self.client.force_authenticate(user=self.user1)
self.url = reverse('notebook-list')
def test_get_all(self):
NoteBook.objects.create(title='notebook 1', created_by=self.user1)
NoteBook.objects.create(title='notebook 2', created_by=self.user1)
NoteBook.objects.create(title='notebook 3', created_by=self.user2)
response = self.client.get(reverse('notebook-list'))
self.assertEquals(status.HTTP_200_OK, response.status_code, response.content)
self.assertEquals(2, len(response.data))
self.assertEquals('notebook 1', response.data[0]['title'])
self.assertEquals('notebook 2', response.data[1]['title'])
|
Python
| 0.000002 |
@@ -3877,28 +3877,315 @@
response.data%5B1%5D%5B'title'%5D)%0A
+%0A def test_get(self):%0A notebook = NoteBook.objects.create(title='notebook 1', created_by=self.user2)%0A response = self.client.get(reverse('notebook-detail', args=%5Bnotebook.id%5D))%0A self.assertEquals(status.HTTP_403_FORBIDDEN, response.status_code, response.content)%0A
|
ae652c8078b0cbde57934ffaf9ffce0e4a18e99e
|
Update generatejson.py
|
ingest/autoingest/generatejson.py
|
ingest/autoingest/generatejson.py
|
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
import requests
import os
import requests
import ndio.utils.autoingest as AI
SITE_HOST = "http://openconnecto.me"
def main():
ai = AI.AutoIngest()
"""
Edit the below values, type and default information can be found on the ingesting page of the ndio docs page.
"""
dataset_name='hbp16' #(type=str, help='Name of Dataset')
imagesize=(7389,6572,50) #(type=int[], help='Image size (X,Y,Z)')
voxelres=(1.0,1.0,1.0) #(type=float[], help='Voxel scale (X,Y,Z)')
offset=(0,0,0) #(type=int[], default=[0, 0, 0], help='Image Offset in X,Y,Z')
timerange=(0,0) #(type=int[], default=[0, 0], help='Time Dimensions')
scalinglevels=3 #(type=int, default=0, help='Required Scaling levels/ Zoom out levels')
scaling=0 #(type=int, default=0, help='Type of Scaling - Isotropic or Normal')
channel_name='image' #(type=str, help='Name of Channel. Has to be unique in the same project. User Defined.')
datatype='uint8' #(type=str, help='Channel Datatype')
channel_type='Image' #(type=enum, help='Type of channel - image, annotation, timeseries, probmap')
exceptions=0 #(type=int, default=0, help='Exceptions')
resolution=0 #(type=int, default=0, help='Start Resolution')
windowrange=(0,0) #(type=int[], default=[0, 0], help='Window clamp function for 16-bit channels with low max value of pixels')
readonly=0 #(type=int, default=0, help='Read-only Channel or Not. You can remotely post to channel if it is not readonly and overwrite data')
data_url= 'http://neurodata-public.s3.amazonaws.com' #(type=str, help='This url points to the root directory of the files. Dropbox is not an acceptable HTTP Server.')
file_format='SLICE' #(type=str, help='This is overal the file format type. For now we support only Slice stacks and CATMAID tiles.')
file_type='jpg' #(type=str, help='This is the specific file format type (tiff, tif, png))
project_name='' #(type=str, help='Name of Project. Has to be unique in OCP. User Defined')
token_name='' #(type=str, default='', help='Token Name. User Defined')
public=0 #(type=int, default=0, help='Make your project publicly visible')
metadata="" #(type=Any, default='', help='Any metadata as appropriate from the LIMS schema')
#Adds data set information
ai.add_dataset(dataset_name, imagesize, voxelres, offset, timerange, scalinglevels, scaling)
#Adds project information
ai.add_project(project_name, token_name, public)
#Adds a channel
ai.add_channel(channel_name, datatype, channel_type, data_url, file_format, file_type, exceptions,
resolution, windowrange, readonly)
"""
If you wish to add additional channels to the object, simply call the
add_channel function for as many channels as you have
"""
#Adds metada
ai.add_metadata(metadata)
"""
EDIT ABOVE HERE
"""
#Uncomment this line if you wish to get a json file names file_name
#ai.output_json("ocp.json")
#Post the data
ai.post_data(SITE_HOST)
if __name__ == "__main__":
main()
|
Python
| 0.000012 |
@@ -693,18 +693,17 @@
dio.
-utils.auto
+remote.nd
inge
@@ -708,17 +708,17 @@
gest as
-A
+N
I%0ASITE_H
@@ -770,20 +770,18 @@
-a
+n
i =
-AI.Auto
+NI.ND
Inge
@@ -3108,25 +3108,25 @@
rmation%0A
-a
+n
i.add_datase
@@ -3236,25 +3236,25 @@
rmation%0A
-a
+n
i.add_projec
@@ -3310,25 +3310,25 @@
channel%0A
-a
+n
i.add_channe
@@ -3631,17 +3631,17 @@
ada%0A
-a
+n
i.add_me
@@ -3823,17 +3823,17 @@
ata%0A
-a
+n
i.post_d
|
de0fd677d94b7fb8b044fa597b687dba0f3e1c0e
|
Test coercions for generic type constructors
|
blaze/datashape/tests/test_type_constructor.py
|
blaze/datashape/tests/test_type_constructor.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from blaze import error
from blaze.datashape import unify_simple, promote, coerce, dshapes, coretypes as T
#------------------------------------------------------------------------
# Test data
#------------------------------------------------------------------------
Complex = T.TypeConstructor('Complex', 1, [{'coercible': True}])
t1 = Complex(T.int64)
t2 = Complex(T.int64)
t3 = Complex(T.int32)
RigidComplex = T.TypeConstructor('Complex', 1, [{'coercible': False}])
rt1 = RigidComplex(T.int64)
rt2 = RigidComplex(T.int64)
rt3 = RigidComplex(T.int32)
#------------------------------------------------------------------------
# Tests
#------------------------------------------------------------------------
class TestTypeConstructors(unittest.TestCase):
def test_equality(self):
self.assertEqual(t1, t2)
self.assertNotEqual(t1, t3)
def test_unification_concrete(self):
self.assertEqual(unify_simple(t1, t2), t1)
def test_unification_typevar(self):
tvar = Complex(T.TypeVar('A'))
self.assertEqual(unify_simple(t1, tvar), t1)
def test_promotion(self):
self.assertEqual(promote(t1, t2), t1)
self.assertEqual(promote(t1, t3), t1)
self.assertEqual(promote(t3, t2), t1)
self.assertEqual(promote(rt1, rt2), rt1)
class TestErrors(unittest.TestCase):
def test_promotion_error(self):
self.assertRaises(error.UnificationError, promote, rt1, rt3)
if __name__ == '__main__':
# TestTypeConstructors('test_unification').debug()
unittest.main()
|
Python
| 0 |
@@ -1405,16 +1405,182 @@
, rt1)%0A%0A
+ def test_coercion(self):%0A self.assertEqual(coerce(t1, t2), 0)%0A self.assertGreater(coerce(t3, t2), 0)%0A self.assertEqual(coerce(rt1, rt2), 0)%0A%0A
%0Aclass T
@@ -1747,63 +1747,8 @@
_':%0A
- # TestTypeConstructors('test_unification').debug()%0A
|
7da15f2e16c95a4be179a1cc1efd108dbaaa3be9
|
Update forward_ZMQ_Angle.py
|
ProBot_BeagleBone/forward_ZMQ_Angle.py
|
ProBot_BeagleBone/forward_ZMQ_Angle.py
|
import zmq
def main():
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5583")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5584")
zmq.device(zmq.FORWARDER, frontend, backend)
except Exception, e:
print e
print "bringing down zmq device"
finally:
pass
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
|
Python
| 0.000001 |
@@ -1,8 +1,27 @@
+#!/usr/bin/python%0A%0A
import z
|
c01f7fb0c9bda24efbd1d1597350a77d03047027
|
Add Grid and Align support
|
wytch/builder.py
|
wytch/builder.py
|
# The MIT License (MIT)
#
# Copyright (c) 2015 Josef Gajdusek
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from wytch import view, colors
class Builder:
def __init__(self, view, parent = None):
self.view = view
self.parent = parent
self.nested = []
def add(self, c):
self.nested.append(Builder(c, parent = self))
return self
def labels(self, strs, fg = colors.WHITE, bg = colors.BLACK):
for s in strs:
self.add(view.Label(s, fg = fg, bg = bg))
return self
def spacer(self, width = 0, height = 0):
return self.add(view.Spacer(width = width, height = height))
def hline(self, title = None):
return self.add(view.HLine(title = title))
def nest(self, cont):
#self.view.add_child(cont)
ret = Builder(cont, parent = self)
self.nested.append(ret)
return ret
def vertical(self, width = 0):
return self.nest(view.Vertical(width = width))
def horizontal(self, height = 0):
return self.nest(view.Horizontal(height = height))
def box(self, title = None):
return self.nest(view.Box(title = title))
def endall(self):
self.end()
if self.parent:
self.parent.endall()
def end(self):
return self.parent
def __enter__(self):
return self
def __exit__(self, extype, exval, trace):
for b in self.nested:
b.__exit__(extype, exval, trace)
if self.parent:
self.parent.view.add_child(self.view)
return self.parent
|
Python
| 0 |
@@ -1909,16 +1909,309 @@
rn ret%0A%0A
+ def align(self, halign = view.HOR_MID, valign = view.VER_MID):%0A return self.nest(view.Align(halign = halign, valign = valign))%0A%0A def grid(self, width, height):%0A ret = GridBuilder(view.Grid(width, height), parent = self)%0A self.nested.append(ret)%0A return ret%0A%0A
def
@@ -2869,28 +2869,520 @@
%0A return self.parent%0A
+%0Aclass GridBuilder(Builder):%0A%0A def __init__(self, view, parent = None):%0A super(GridBuilder, self).__init__(view, parent = parent)%0A self.atx = 0%0A self.aty = 0%0A%0A def add(self, c = None, rowspan = 1, colspan = 1):%0A if c:%0A self.view.set(self.atx, self.aty, c,%0A rowspan = rowspan, colspan = colspan)%0A self.atx += 1%0A if self.atx %3E= self.view.width:%0A self.atx = 0%0A self.aty += 1%0A return self%0A
|
a7ac41830ac0472442069deead739ddd4c137be3
|
add future import for print
|
examples/receive_notify.py
|
examples/receive_notify.py
|
#!/usr/bin/env python3
# This is just a toy, real code would check that the received message
# really was a NOTIFY, and otherwise handle errors.
import socket
import dns.flags
import dns.message
import dns.rdataclass
import dns.rdatatype
address = '127.0.0.1'
port = 53535
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.bind((address, port))
while True:
(wire, address) = s.recvfrom(512)
notify = dns.message.from_wire(wire)
soa = notify.find_rrset(notify.answer, notify.question[0].name,
dns.rdataclass.IN, dns.rdatatype.SOA)
# Do something with the SOA RR here
print('The serial number for', soa.name, 'is', soa[0].serial)
response = dns.message.make_response(notify)
response.flags |= dns.flags.AA
wire = response.to_wire(response)
s.sendto(wire, address)
|
Python
| 0 |
@@ -140,16 +140,55 @@
rrors.%0A%0A
+from __future__ import print_function%0A%0A
import s
|
6e313af1512de3520f9e6b91487c8a06eb2c5ee2
|
Fix copy/paste documentation return type.
|
yagocd/client.py
|
yagocd/client.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# The MIT License
#
# Copyright (c) 2016 Grigory Chernyshev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import copy
from yagocd.session import Session
from yagocd.resources.agent import AgentManager
from yagocd.resources.user import UserManager
from yagocd.resources.material import MaterialManager
from yagocd.resources.stage import StageManager
from yagocd.resources.pipeline import PipelineManager
from yagocd.resources.property import PropertyManager
from yagocd.resources.configuration import ConfigurationManager
from yagocd.resources.feed import FeedManager
class Client(object):
"""
Main class of the package, that gives user access to Go REST API methods.
"""
DEFAULT_OPTIONS = {
'context_path': 'go/',
'api_path': 'api/',
'verify': True,
'headers': {
'Accept': 'application/vnd.go.cd.v1+json',
}
}
def __init__(self, server='http://localhost:8153', auth=None, options=None):
"""
Construct a GOCD client instance.
:param server: url of the Go server
:param auth: authorization, that will be passed to requests.
Could tuple of (username, password) for basic authentication.
:param options: dictionary of additional options.
* context_path -- server context path to use (default is ``go/``)
* api_path -- api endpoint to use. By default ``api/`` will be used, but in some cases this will be
overwritten by some managers, because of API.
* verify -- verify SSL certs. Defaults to ``True``.
* headers -- default headers for requests (default is ``'Accept': 'application/vnd.go.cd.v1+json'``)
"""
options = {} if options is None else options
options['server'] = server
merged = copy.deepcopy(self.DEFAULT_OPTIONS)
merged.update(options)
self._session = Session(auth, merged)
@property
def agents(self):
"""
Property for accessing ``AgentManager`` instance, which is used to manage agents.
:rtype: yagocd.resources.agent.AgentManager
"""
return AgentManager(session=self._session)
@property
def users(self):
"""
Property for accessing ``UserManager`` instance, which is used to manage users.
:rtype: yagocd.resources.agent.UserManager
"""
return UserManager(session=self._session)
@property
def materials(self):
"""
Property for accessing ``MaterialManager`` instance, which is used to manage materials.
:rtype: yagocd.resources.agent.MaterialManager
"""
return MaterialManager(session=self._session)
@property
def pipelines(self):
"""
Property for accessing ``PipelineManager`` instance, which is used to manage pipelines.
:rtype: yagocd.resources.agent.PipelineManager
"""
return PipelineManager(session=self._session)
@property
def stages(self):
"""
Property for accessing ``StageManager`` instance, which is used to manage stages.
:rtype: yagocd.resources.agent.StageManager
"""
return StageManager(session=self._session)
@property
def properties(self):
"""
Property for accessing ``PropertyManager`` instance, which is used to manage properties of the jobs.
:rtype: yagocd.resources.agent.PropertyManager
"""
return PropertyManager(session=self._session)
@property
def configurations(self):
"""
Property for accessing ``ConfigurationManager`` instance, which is used to manage configurations.
:rtype: yagocd.resources.agent.ConfigurationManager
"""
return ConfigurationManager(session=self._session)
@property
def feeds(self):
"""
Property for accessing ``FeedManager`` instance, which is used to manage feeds.
:rtype: yagocd.resources.agent.FeedManager
"""
return FeedManager(session=self._session)
if __name__ == '__main__':
pass
|
Python
| 0 |
@@ -3619,21 +3619,20 @@
sources.
-agent
+user
.UserMan
@@ -3880,21 +3880,24 @@
sources.
-agent
+material
.Materia
@@ -4153,21 +4153,24 @@
sources.
-agent
+pipeline
.Pipelin
@@ -4417,21 +4417,21 @@
sources.
+st
age
-nt
.StageMa
@@ -4695,21 +4695,24 @@
sources.
-agent
+property
.Propert
@@ -4983,21 +4983,29 @@
sources.
-agent
+configuration
.Configu
@@ -5259,21 +5259,20 @@
sources.
-agent
+feed
.FeedMan
|
ecf679f5ed805e1286bd6cb8a4bada57c9bb9710
|
use ldap sync function
|
addons/users_ldap/users_ldap.py
|
addons/users_ldap/users_ldap.py
|
##############################################################################
#
# Copyright (c) 2004-2007 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: account.py 1005 2005-07-25 08:41:42Z nicoe $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import fields,osv
from service import security
import pooler
try:
import ldap
except ImportError:
import netsvc
logger = netsvc.Logger()
logger.notifyChannel("init", netsvc.LOG_ERROR, "could not import ldap!")
class res_company(osv.osv):
_inherit = "res.company"
_columns = {
'ldap_server': fields.char('LDAP Server address', size=64),
'ldap_binddn': fields.char('LDAP binddn', size=64),
'ldap_password': fields.char('LDAP password', size=64),
'ldap_filter': fields.char('LDAP filter', size=64),
'ldap_base': fields.char('LDAP base', size=64),
}
res_company()
def ldap_login(oldfnc):
def _ldap_login(db, login, passwd):
cr = pooler.get_db(db).cursor()
module_obj = pooler.get_pool(cr.dbname).get('ir.module.module')
module_ids = module_obj.search(cr, 1, [('name', '=', 'users_ldap')])
if module_ids:
state = module_obj.read(cr, 1, module_ids, ['state'])[0]['state']
if state in ('installed', 'to upgrade', 'to remove'):
cr.execute("select id, name, ldap_server, ldap_binddn, ldap_password, ldap_filter, ldap_base from res_company where ldap_server != '' and ldap_binddn != ''")
for res_company in cr.dictfetchall():
try:
l = ldap.open(res_company['ldap_server'])
if l.simple_bind(res_company['ldap_binddn'], res_company['ldap_password']):
base = res_company['ldap_base']
scope = ldap.SCOPE_SUBTREE
filter = res_company['ldap_filter']%(login,)
retrieve_attributes = None
result_id = l.search(base, scope, filter, retrieve_attributes)
timeout = 60
result_type, result_data = l.result(result_id, timeout)
if not result_data:
continue
if result_type == ldap.RES_SEARCH_RESULT and len(result_data) == 1:
dn=result_data[0][0]
name=result_data[0][1]['cn']
if l.bind(dn, passwd):
cr.execute("select id from res_users where login=%s",(login.encode('utf-8'),))
res = cr.fetchone()
if res:
cr.close()
return res[0]
users_obj = pooler.get_pool(cr.dbname).get('res.users')
action_obj = pooler.get_pool(cr.dbname).get('ir.actions.actions')
action_id = action_obj.search(cr, 1, [('usage', '=', 'menu')])[0]
res = users_obj.create(cr, 1, {'name': name, 'login': login.encode('utf-8'), 'company_id': res_company['id'], 'action_id': action_id})
cr.commit()
cr.close()
return res
except Exception, e:
continue
cr.close()
return oldfnc(db, login, passwd)
return _ldap_login
security.login = ldap_login(security.login)
def ldap_check(oldfnc):
def _ldap_check(db, uid, passwd):
if security._uid_cache.has_key(uid) and (security._uid_cache[uid]==passwd):
return True
cr = pooler.get_db(db).cursor()
module_obj = pooler.get_pool(cr.dbname).get('ir.module.module')
module_ids = module_obj.search(cr, 1, [('name', '=', 'users_ldap')])
if module_ids:
state = module_obj.read(cr, 1, module_ids, ['state'])[0]['state']
if state in ('installed', 'to upgrade', 'to remove'):
users_obj = pooler.get_pool(cr.dbname).get('res.users')
user = users_obj.browse(cr, 1, uid)
if user and user.company_id.ldap_server and user.company_id.ldap_binddn:
company = user.company_id
try:
l = ldap.open(company.ldap_server)
if l.simple_bind(company.ldap_binddn, company.ldap_password):
base = company['ldap_base']
scope = ldap.SCOPE_SUBTREE
filter = company['ldap_filter']%(user.login,)
retrieve_attributes = None
result_id = l.search(base, scope, filter, retrieve_attributes)
timeout = 60
result_type, result_data = l.result(result_id, timeout)
if result_data and result_type == ldap.RES_SEARCH_RESULT and len(result_data) == 1:
dn=result_data[0][0]
name=result_data[0][1]['cn']
if l.bind(dn, passwd):
security._uid_cache[uid] = passwd
cr.close()
return True
except Exception, e:
pass
cr.close()
return oldfnc(db, uid, passwd)
return _ldap_check
security.check = ldap_check(security.check)
|
Python
| 0.000002 |
@@ -2614,16 +2614,18 @@
ple_bind
+_s
(res_com
@@ -2854,32 +2854,34 @@
lt_id = l.search
+_s
(base, scope, fi
@@ -3184,32 +3184,34 @@
%09%09%09%09%09%09%09if l.bind
+_s
(dn, passwd):%0A%09%09
@@ -4709,16 +4709,18 @@
ple_bind
+_s
(company
@@ -4940,16 +4940,18 @@
l.search
+_s
(base, s
@@ -5242,16 +5242,18 @@
f l.bind
+_s
(dn, pas
|
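
The diff above swaps the asynchronous python-ldap calls (simple_bind, search, bind) for their synchronous counterparts (simple_bind_s, search_s, bind_s), which block until the server answers and so need no result()/timeout loop. A rough standalone sketch of that synchronous flow follows; the server URI, bind DN, base DN and filter are placeholders, and it assumes the python-ldap package plus a reachable directory.

import ldap

def ldap_authenticate(uri, bind_dn, bind_pw, base_dn, login, password):
    # ldap.initialize() is the modern replacement for the deprecated ldap.open()
    conn = ldap.initialize(uri)
    # the *_s variants are synchronous: they return only once the server has replied
    conn.simple_bind_s(bind_dn, bind_pw)
    results = conn.search_s(base_dn, ldap.SCOPE_SUBTREE,
                            "(uid=%s)" % login, None)
    if len(results) != 1:
        return False
    user_dn = results[0][0]
    try:
        # re-bind as the located user to check the supplied password
        conn.simple_bind_s(user_dn, password)
        return True
    except ldap.INVALID_CREDENTIALS:
        return False
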
3c28ff454fc2209c2df18e94e5650da35ff86c10
|
Remove unused mako option
|
adhocracy/config/environment.py
|
adhocracy/config/environment.py
|
"""Pylons environment configuration"""
import os
import time
import traceback
from mako.lookup import TemplateLookup
from paste.deploy.converters import asbool
from pylons import tmpl_context as c
from pylons.error import handle_mako_error
from pylons.configuration import PylonsConfig
import sqlalchemy
from sqlalchemy import engine_from_config
from sqlalchemy.interfaces import ConnectionProxy
import adhocracy.lib.app_globals as app_globals
import adhocracy.lib.helpers
from adhocracy.config.routing import make_map
from adhocracy.model import init_model
from adhocracy.lib.search import init_search
from adhocracy.lib.democracy import init_democracy
from adhocracy.lib.util import create_site_subdirectory
from adhocracy.lib import init_site
def load_environment(global_conf, app_conf, with_db=True):
"""Configure the Pylons environment via the ``pylons.config``
object
"""
# Pylons paths
conf_copy = global_conf.copy()
conf_copy.update(app_conf)
site_templates = create_site_subdirectory('templates', app_conf=conf_copy)
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
paths = dict(root=root,
controllers=os.path.join(root, 'controllers'),
static_files=os.path.join(root, 'static'),
templates=[site_templates,
os.path.join(root, 'templates')])
# Initialize config with the basic options
config = PylonsConfig()
config.init_app(global_conf, app_conf, package='adhocracy', paths=paths)
config['routes.map'] = make_map(config)
config['pylons.app_globals'] = app_globals.Globals(config)
config['pylons.h'] = adhocracy.lib.helpers
# Create the Mako TemplateLookup, with the default auto-escaping
config['pylons.app_globals'].mako_lookup = TemplateLookup(
directories=paths['templates'],
error_handler=handle_mako_error,
module_directory=os.path.join(app_conf['cache_dir'], 'templates'),
input_encoding='utf-8', default_filters=['escape'],
imports=['from markupsafe import escape'])
config['pylons.strict_tmpl_context'] = False
# Setup the SQLAlchemy database engine
engineOpts = {}
if asbool(config.get('adhocracy.debug.sql', False)):
engineOpts['connectionproxy'] = TimerProxy()
# Work around a bug in sqlite and sqlalchemy<0.7
# See https://github.com/Pylons/pyramid/issues/174
if tuple(map(int, sqlalchemy.__version__.split('.'))) < (0,7,0) and config['sqlalchemy.url'].startswith('sqlite:'):
engineOpts['poolclass'] = sqlalchemy.pool.NullPool
engine = engine_from_config(config, 'sqlalchemy.', **engineOpts)
init_model(engine)
# CONFIGURATION OPTIONS HERE (note: all config options will override
# any Pylons config options)
init_site(config)
if with_db:
init_search()
init_democracy()
return config
class TimerProxy(ConnectionProxy):
'''
A timing proxy with code borrowed from spline and
pyramid_debugtoolbar. This will work for sqlalchemy 0.6,
but not 0.7. pyramid_debugtoolbar works for 0.7.
'''
def cursor_execute(self, execute, cursor, statement, parameters, context,
executemany):
start_time = time.time()
try:
return execute(cursor, statement, parameters, context)
finally:
duration = time.time() - start_time
# Find who spawned this query. Rewind up the stack until we
# escape from sqlalchemy code -- including this file, which
# contains proxy stuff
caller = '(unknown)'
for frame_file, frame_line, frame_func, frame_code in \
reversed(traceback.extract_stack()):
if __file__.startswith(frame_file) \
or '/sqlalchemy/' in frame_file:
continue
# OK, this is it
caller = "{0}:{1} in {2}".format(
frame_file, frame_line, frame_func)
break
# save interesting information for presentation later
try:
if not c.pdtb_sqla_queries:
c.pdtb_sqla_queries = []
queries = c.pdtb_sqla_queries
query_data = {
'duration': duration,
'statement': statement,
'parameters': parameters,
'context': context,
'caller': caller,
}
queries.append(query_data)
except TypeError:
# happens when sql is emitted before pylons has started
# or outside of a request
pass
|
Python
| 0 |
@@ -2012,87 +2012,8 @@
f-8'
-, default_filters=%5B'escape'%5D,%0A imports=%5B'from markupsafe import escape'%5D
)%0A%0A
|
c8120d91330e1ac2ea023778f25ebab826637388
|
fix broken build
|
internal/rollup/rollup_bundle.bzl
|
internal/rollup/rollup_bundle.bzl
|
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for production rollup bundling.
"""
load("//internal:collect_es6_sources.bzl", "collect_es6_sources")
load("//internal/common:module_mappings.bzl", "get_module_mappings")
_ROLLUP_MODULE_MAPPINGS_ATTR = "rollup_module_mappings"
def _rollup_module_mappings_aspect_impl(target, ctx):
mappings = get_module_mappings(target.label, ctx.rule.attr)
return struct(rollup_module_mappings = mappings)
rollup_module_mappings_aspect = aspect(
_rollup_module_mappings_aspect_impl,
attr_aspects = ["deps"],
)
def write_rollup_config(ctx, plugins=[]):
config = ctx.actions.declare_file("_%s.rollup.conf.js" % ctx.label.name)
# build_file_path includes the BUILD.bazel file, transform here to only include the dirname
buildFileDirname = "/".join(ctx.build_file_path.split("/")[:-1])
mappings = dict()
all_deps = ctx.attr.deps + ctx.attr.srcs
for dep in all_deps:
if hasattr(dep, _ROLLUP_MODULE_MAPPINGS_ATTR):
for k, v in getattr(dep, _ROLLUP_MODULE_MAPPINGS_ATTR).items():
if k in mappings and mappings[k] != v:
fail(("duplicate module mapping at %s: %s maps to both %s and %s" %
(dep.label, k, mappings[k], v)), "deps")
mappings[k] = v
ctx.actions.expand_template(
output = config,
template = ctx.file._rollup_config_tmpl,
substitutions = {
"TMPL_bin_dir_path": ctx.bin_dir.path,
"TMPL_workspace_name": ctx.workspace_name,
"TMPL_build_file_dirname": buildFileDirname,
"TMPL_label_name": ctx.label.name,
"TMPL_module_mappings": str(mappings),
"TMPL_additional_plugins": ",\n".join(plugins),,
})
return config
def run_rollup(ctx, config, output):
entryPoint = "/".join([ctx.workspace_name, ctx.attr.entry_point])
args = ctx.actions.args()
args.add(["--config", config.path])
args.add(["--output.file", output.path])
args.add(["--input", entryPoint])
es6_sources = collect_es6_sources(ctx)
ctx.action(
executable = ctx.executable._rollup,
inputs = es6_sources + ctx.files.node_modules + [config],
outputs = [output],
arguments = [args]
)
def run_tsc(ctx, input, output):
args = ctx.actions.args()
args.add(["--target", "es5"])
args.add("--allowJS")
args.add(input.path)
args.add(["--outFile", output.path])
ctx.action(
executable = ctx.executable._tsc,
inputs = [input],
outputs = [output],
arguments = [args]
)
def run_uglify(ctx, input, output, debug = False):
config = ctx.actions.declare_file("_%s%s.uglify.json" % (
ctx.label.name, ".debug" if debug else ""))
ctx.actions.expand_template(
output = config,
template = ctx.file._uglify_config_tmpl,
substitutions = {
"TMPL_mangle": "false" if debug else "true"
},
)
args = ctx.actions.args()
args.add(input.path)
args.add(["--config-file", config.path])
args.add(["--output", output.path])
if debug:
args.add("--beautify")
ctx.action(
executable = ctx.executable._uglify,
inputs = [input, config],
outputs = [output],
arguments = [args]
)
def _rollup_bundle(ctx):
rollup_config = write_rollup_config(ctx)
run_rollup(ctx, rollup_config, ctx.outputs.build_es6)
run_tsc(ctx, ctx.outputs.build_es6, ctx.outputs.build_es5)
run_uglify(ctx, ctx.outputs.build_es5, ctx.outputs.build_es5_min)
run_uglify(ctx, ctx.outputs.build_es5, ctx.outputs.build_es5_min_debug, debug = True)
return DefaultInfo(files=depset([ctx.outputs.build_es5_min]))
ROLLUP_ATTRS = {
"entry_point": attr.string(mandatory = True),
"srcs": attr.label_list(allow_files = [".js"]),
"deps": attr.label_list(aspects = [rollup_module_mappings_aspect]),
"node_modules": attr.label(default = Label("@//:node_modules")),
"_rollup": attr.label(
executable = True,
cfg="host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:rollup")),
"_tsc": attr.label(
executable = True,
cfg="host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:tsc")),
"_uglify": attr.label(
executable = True,
cfg="host",
default = Label("@build_bazel_rules_nodejs//internal/rollup:uglify")),
"_rollup_config_tmpl": attr.label(
default = Label("@build_bazel_rules_nodejs//internal/rollup:rollup.config.js"),
allow_files = True,
single_file = True),
"_uglify_config_tmpl": attr.label(
default = Label("@build_bazel_rules_nodejs//internal/rollup:uglify.config.json"),
allow_files = True,
single_file = True),
}
ROLLUP_OUTPUTS = {
"build_es6": "%{name}.es6.js",
"build_es5": "%{name}.js",
"build_es5_min": "%{name}.min.js",
"build_es5_min_debug": "%{name}.min_debug.js",
}
rollup_bundle = rule(
implementation = _rollup_bundle,
attrs = ROLLUP_ATTRS,
outputs = ROLLUP_OUTPUTS,
)
|
Python
| 0.000001 |
@@ -2243,17 +2243,16 @@
lugins),
-,
%0A %7D
|
9bbe8057a627ba81282a76de94e57ca0b0e02b89
|
change default port
|
backend/src/gosa/backend/plugins/foreman/gosa_integration.py
|
backend/src/gosa/backend/plugins/foreman/gosa_integration.py
|
#!/usr/bin/env python3
"""
Foreman / GOsa3 integration to send hook events data to GOsa3
"""
import hmac
import sys
import requests
import json
#. /etc/sysconfig/foreman-gosa
# Gosa settings
GOSA_SERVER = "http://localhost"
GOSA_PORT = 8000
HTTP_X_HUB_SENDER = "foreman-hook"
SECRET = "e540f417-4c36-4e5d-b78a-4d36f51727ec"
HOOK_TEMP_DIR = "/usr/share/foreman/tmp"
# HOOK_EVENT = update, create, before_destroy etc.
# HOOK_OBJECT = to_s representation of the object, e.g. host's fqdn
HOOK_EVENT, HOOK_OBJECT = (sys.argv[1], sys.argv[2])
payload = json.loads(sys.stdin.read())
# add event + object to payload
payload = json.dumps({
"event": HOOK_EVENT,
"object": HOOK_OBJECT,
"data": payload
}).encode('utf-8')
signature_hash = hmac.new(bytes(SECRET, 'ascii'), msg=payload, digestmod="sha512")
signature = 'sha1=' + signature_hash.hexdigest()
headers = {
'Content-Type': 'application/vnd.foreman.hookevent+json',
'HTTP_X_HUB_SENDER': HTTP_X_HUB_SENDER,
'HTTP_X_HUB_SIGNATURE': signature
}
requests.post("%s:%s/hooks" % (GOSA_SERVER, GOSA_PORT), data=payload, headers=headers)
|
Python
| 0.000001 |
@@ -232,17 +232,17 @@
ORT = 80
-0
+5
0%0AHTTP_X
|
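
The hook script above authenticates its payload with an HMAC before posting it to GOsa. Below is a minimal sketch of the same sign-and-verify pattern, assuming sender and receiver share the same SECRET; the sketch keeps the digest algorithm and header prefix consistent (the original labels a SHA-512 digest with 'sha1='), and uses hmac.compare_digest so the check runs in constant time.

import hashlib
import hmac
import json

SECRET = b"replace-with-shared-secret"  # placeholder; must match on both ends

def sign(payload):
    # HMAC-SHA512 over the raw request body
    digest = hmac.new(SECRET, msg=payload, digestmod=hashlib.sha512).hexdigest()
    return "sha512=" + digest

def verify(payload, signature_header):
    # constant-time comparison avoids leaking information through timing
    return hmac.compare_digest(sign(payload), signature_header)

body = json.dumps({"event": "create", "object": "host.example.org"}).encode("utf-8")
header = sign(body)
assert verify(body, header)
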
94b216fb8c15db7228e54e35058c7143b02d103f
|
prepare 1.1.1 bugfix release, from now on tests for new features..
|
cmsplugin_blog/__init__.py
|
cmsplugin_blog/__init__.py
|
# -*- coding: utf-8 -*-
VERSION = (1, 1, 0, 'post', 0)
def get_version(): # pragma: no cover
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s %s' % (version, VERSION[3], VERSION[4])
return version
|
Python
| 0 |
@@ -35,17 +35,17 @@
(1, 1,
-0
+1
, 'post'
|
a41f4f6554a210da718606b4d438c83d2be5a90a
|
Add reboot and destroy methods.
|
libcloud/compute/drivers/ninefold.py
|
libcloud/compute/drivers/ninefold.py
|
import base64
import hashlib
import hmac
import urllib
try:
import json
except:
import simplejson as json
from libcloud.common.base import ConnectionUserAndKey, Response
from libcloud.compute.base import Node, NodeDriver, NodeImage, NodeLocation, \
NodeSize
from libcloud.compute.providers import Provider
from libcloud.compute.types import MalformedResponseError, NodeState
class NinefoldComputeResponse(Response):
def parse_body(self):
try:
body = json.loads(self.body)
except:
raise MalformedResponseError(
"Failed to parse JSON",
body=self.body,
driver=NinefoldNodeDriver)
return body
class NinefoldComputeConnection(ConnectionUserAndKey):
host = 'api.ninefold.com'
responseCls = NinefoldComputeResponse
def add_default_params(self, params):
params['apiKey'] = self.user_id
params['response'] = 'json'
return params
def pre_connect_hook(self, params, headers):
signature = [(k.lower(), v) for k, v in params.items()]
signature.sort(key=lambda x: x[0])
signature = urllib.urlencode(signature)
signature = signature.lower().replace('+', '%20')
signature = hmac.new(self.key, msg=signature, digestmod=hashlib.sha1)
params['signature'] = base64.b64encode(signature.digest())
return params, headers
class NinefoldNodeDriver(NodeDriver):
API_PATH = '/compute/v1.0/'
NODE_STATE_MAP = {
'Running': NodeState.RUNNING,
'Starting': NodeState.REBOOTING,
'Stopped': NodeState.TERMINATED,
'Stopping': NodeState.TERMINATED
}
type = Provider.NINEFOLD
name = 'Ninefold'
connectionCls = NinefoldComputeConnection
def _api_request(self, command, **kwargs):
kwargs['command'] = command
result = self.connection.request(self.API_PATH, params=kwargs).object
command = command.lower() + 'response'
if command not in result:
raise MalformedResponseError(
"Unknown response format",
body=result.body,
driver=NinefoldNodeDriver)
return result[command]
def list_images(self, location=None):
args = {
'templatefilter': 'executable'
}
if location is not None:
args['zoneid'] = location.id
imgs = self._api_request('listTemplates', **args)
images = []
for img in imgs['template']:
images.append(NodeImage(img['id'], img['name'], self, {
'hypervisor': img['hypervisor'],
'format': img['format'],
'os': img['ostypename'],
}))
return images
def list_locations(self):
locs = self._api_request('listZones')
locations = []
for loc in locs['zone']:
locations.append(NodeLocation(loc['id'], loc['name'], 'AU', self))
return locations
def list_nodes(self):
vms = self._api_request('listVirtualMachines')
addrs = self._api_request('listPublicIpAddresses')
public_ips = {}
for addr in addrs['publicipaddress']:
if 'virtualmachineid' not in addr:
continue
vm_id = addr['virtualmachineid']
if vm_id not in public_ips:
public_ips[vm_id] = []
public_ips[vm_id].append(addr['ipaddress'])
nodes = []
for vm in vms['virtualmachine']:
nodes.append(Node(id=vm['id'],
name=vm.get('displayname', None),
state=self.NODE_STATE_MAP[vm['state']],
public_ip=public_ips.get(vm['id'], []),
private_ip=[x['ipaddress'] for x in vm['nic']],
driver=self))
return nodes
def list_sizes(self, location=None):
szs = self._api_request('listServiceOfferings')
sizes = []
for sz in szs['serviceoffering']:
sizes.append(NodeSize(sz['id'], sz['name'], sz['memory'], 0, 0,
0, self))
return sizes
|
Python
| 0 |
@@ -34,16 +34,28 @@
rt hmac%0A
+import time%0A
import u
@@ -60,16 +60,16 @@
urllib%0A
-
%0Atry:%0A
@@ -1710,16 +1710,97 @@
ED%0A %7D
+%0A JOB_STATUS_MAP = %7B%0A 0: None,%0A 1: True,%0A 2: False,%0A %7D
%0A%0A ty
@@ -2327,16 +2327,277 @@
mmand%5D%0A%0A
+ def _job_result(self, job_id):%0A result = %7B%7D%0A while result.get('jobstatus', 0) == 0:%0A time.sleep(1)%0A result = self._api_request('queryAsyncJobResult', jobid=job_id)%0A return self.JOB_STATUS_MAP%5Bresult%5B'jobstatus'%5D%5D%0A%0A
def
@@ -4551,32 +4551,32 @@
0, self))%0A
-
return s
@@ -4575,12 +4575,323 @@
return sizes
+%0A%0A def destroy_node(self, node):%0A result = self._api_request('destroyVirtualMachine', id=node.id)%0A return self._job_result(result%5B'jobid'%5D)%0A%0A def reboot_node(self, node):%0A result = self._api_request('rebootVirtualMachine', id=node.id)%0A return self._job_result(result%5B'jobid'%5D)%0A
|
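
The pre_connect_hook in the driver above implements CloudStack-style request signing: lower-case and sort the query parameters, URL-encode them with %20 for spaces, HMAC-SHA1 the canonical string, then base64-encode the digest. A Python 3 sketch of the same idea is shown below; it is not a drop-in CloudStack client, and the key names and command are purely illustrative.

import base64
import hashlib
import hmac
from urllib.parse import urlencode, quote

def sign_request(params, api_key, secret_key):
    params = dict(params, apiKey=api_key, response="json")
    # canonical string: lower-cased keys, sorted, URL-encoded with %20 for spaces
    pairs = sorted((k.lower(), v) for k, v in params.items())
    canonical = urlencode(pairs, quote_via=quote).lower()
    digest = hmac.new(secret_key.encode(), canonical.encode(), hashlib.sha1).digest()
    params["signature"] = base64.b64encode(digest).decode()
    return params

signed = sign_request({"command": "listVirtualMachines"}, "my-api-key", "my-secret")
print(sorted(signed))  # the original parameters plus a 'signature' entry
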
dc238c3a2295a440ebeb538b59871b1fef1b5cd7
|
fix failing unit test
|
sass_processor/processor.py
|
sass_processor/processor.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import os
import json
import subprocess
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.template import Context
from django.templatetags.static import PrefixNode
from django.utils.encoding import force_bytes
from django.utils.six.moves.urllib.parse import quote, urljoin
from sass_processor.utils import get_setting
from .storage import SassFileStorage, find_file
from .apps import APPS_INCLUDE_DIRS
try:
import sass
except ImportError:
sass = None
try:
FileNotFoundError
except NameError:
FileNotFoundError = IOError
class SassProcessor(object):
storage = SassFileStorage()
include_paths = list(getattr(settings, 'SASS_PROCESSOR_INCLUDE_DIRS', []))
try:
sass_precision = int(settings.SASS_PRECISION)
except (AttributeError, TypeError, ValueError):
sass_precision = None
sass_output_style = getattr(
settings,
'SASS_OUTPUT_STYLE',
'nested' if settings.DEBUG else 'compressed')
processor_enabled = getattr(settings, 'SASS_PROCESSOR_ENABLED', settings.DEBUG)
sass_extensions = ('.scss', '.sass')
node_modules_dir = dict(getattr(settings, 'STATICFILES_DIRS', [])).get('node_modules')
node_npx_path = getattr(settings, 'NODE_NPX_PATH', 'npx')
def __init__(self, path=None):
self._path = path
def __call__(self, path):
basename, ext = os.path.splitext(path)
filename = find_file(path)
if filename is None:
raise FileNotFoundError("Unable to locate file {path}".format(path=path))
if ext not in self.sass_extensions:
# return the given path, since it ends neither in `.scss` nor in `.sass`
return path
# compare timestamp of sourcemap file with all its dependencies, and check if we must recompile
css_filename = basename + '.css'
if not self.processor_enabled:
return css_filename
sourcemap_filename = css_filename + '.map'
if find_file(css_filename) and self.is_latest(sourcemap_filename):
return css_filename
# with offline compilation, raise an error, if css file could not be found.
if sass is None:
msg = "Offline compiled file `{}` is missing and libsass has not been installed."
raise ImproperlyConfigured(msg.format(css_filename))
# add a function to be used from inside SASS
custom_functions = {'get-setting': get_setting}
# otherwise compile the SASS/SCSS file into .css and store it
sourcemap_url = self.storage.url(sourcemap_filename)
compile_kwargs = {
'filename': filename,
'source_map_filename': sourcemap_url,
'include_paths': self.include_paths + APPS_INCLUDE_DIRS,
'custom_functions': custom_functions,
}
if self.sass_precision:
compile_kwargs['precision'] = self.sass_precision
if self.sass_output_style:
compile_kwargs['output_style'] = self.sass_output_style
content, sourcemap = sass.compile(**compile_kwargs)
if os.path.isdir(self.node_modules_dir or ''):
os.environ['NODE_PATH'] = self.node_modules_dir
try:
proc = subprocess.Popen([self.node_npx_path, 'postcss', '--use autoprefixer', '--no-map'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
proc.stdin.write(force_bytes(content))
proc.stdin.close()
content = proc.stdout.read()
proc.wait()
except (FileNotFoundError, BrokenPipeError):
content = force_bytes(content)
sourcemap = force_bytes(sourcemap)
if self.storage.exists(css_filename):
self.storage.delete(css_filename)
self.storage.save(css_filename, ContentFile(content))
if self.storage.exists(sourcemap_filename):
self.storage.delete(sourcemap_filename)
self.storage.save(sourcemap_filename, ContentFile(sourcemap))
return css_filename
def resolve_path(self, context=None):
if context is None:
context = Context()
return self._path.resolve(context)
def is_sass(self):
_, ext = os.path.splitext(self.resolve_path())
return ext in self.sass_extensions
def is_latest(self, sourcemap_filename):
sourcemap_file = find_file(sourcemap_filename)
if not sourcemap_file or not os.path.isfile(sourcemap_file):
return False
sourcemap_mtime = os.stat(sourcemap_file).st_mtime
with io.open(sourcemap_file, 'r') as fp:
sourcemap = json.load(fp)
for srcfilename in sourcemap.get('sources'):
components = os.path.normpath(srcfilename).split('/')
srcfilename = ''.join([os.path.sep + c for c in components if c != os.path.pardir])
if not os.path.isfile(srcfilename) or os.stat(srcfilename).st_mtime > sourcemap_mtime:
# at least one of the source is younger that the sourcemap referring it
return False
return True
@classmethod
def handle_simple(cls, path):
if apps.is_installed('django.contrib.staticfiles'):
from django.contrib.staticfiles.storage import staticfiles_storage
return staticfiles_storage.url(path)
else:
return urljoin(PrefixNode.handle_simple('STATIC_URL'), quote(path))
_sass_processor = SassProcessor()
def sass_processor(filename):
path = _sass_processor(filename)
return SassProcessor.handle_simple(path)
|
Python
| 0.000001 |
@@ -1284,99 +1284,8 @@
s')%0A
- node_modules_dir = dict(getattr(settings, 'STATICFILES_DIRS', %5B%5D)).get('node_modules')%0A
@@ -1399,24 +1399,231 @@
_path = path
+%0A nmd = %5Bd%5B1%5D for d in getattr(settings, 'STATICFILES_DIRS', %5B%5D)%0A if isinstance(d, (list, tuple)) and d%5B0%5D == 'node_modules'%5D%0A self.node_modules_dir = nmd%5B0%5D if len(nmd) else None
%0A%0A def __
|
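
For context on the fix above: Django's STATICFILES_DIRS may mix plain path strings with (prefix, path) tuples, so coercing the whole list to a dict breaks as soon as a plain entry is present — hence the diff's switch to filtering only tuple entries whose prefix matches. A standalone sketch of that lookup, with made-up paths:

def find_prefixed_dir(staticfiles_dirs, prefix):
    """Return the path registered under `prefix`, ignoring plain string entries."""
    matches = [entry[1] for entry in staticfiles_dirs
               if isinstance(entry, (list, tuple)) and entry[0] == prefix]
    return matches[0] if matches else None

STATICFILES_DIRS = [
    "/srv/project/static",                          # plain entry, no prefix
    ("node_modules", "/srv/project/node_modules"),  # prefixed entry
]

print(find_prefixed_dir(STATICFILES_DIRS, "node_modules"))  # /srv/project/node_modules
print(find_prefixed_dir(STATICFILES_DIRS, "bower"))         # None
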
5a1212a8239212db71cdd7ec0d67e590df63d982
|
Stop sleeping, why ? let system work.
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/GPDBStorageBaseTestCase.py
|
src/test/tinc/tincrepo/mpp/gpdb/tests/storage/GPDBStorageBaseTestCase.py
|
"""
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import sys
import glob
from time import sleep
import tinctest
from tinctest.lib import local_path
from gppylib.commands.base import Command
from mpp.lib.PSQL import PSQL
from tinctest.lib import local_path, Gpdiff
from mpp.lib.filerep_util import Filerepe2e_Util
from mpp.lib.gprecoverseg import GpRecover
from mpp.lib.gpstart import GpStart
from mpp.lib.gpstop import GpStop
from mpp.lib.config import GPDBConfig
from mpp.lib.gpfilespace import Gpfilespace
from mpp.lib.gpdbverify import GpdbVerify
from mpp.models import MPPTestCase
from mpp.gpdb.tests.storage.lib.dbstate import DbStateClass
from mpp.gpdb.tests.storage.lib.common_utils import *
class GPDBStorageBaseTestCase():
'''
    Base Class for Storage test-suites like Crash Recovery,
Pg_Two_Phase, sub_transaction_limit_removal
'''
def __init__(self, config=None):
if config is not None:
self.config = config
else:
self.config = GPDBConfig()
self.filereputil = Filerepe2e_Util()
self.gprecover = GpRecover(self.config)
self.gpstop = GpStop()
self.gpstart = GpStart()
self.gpfile = Gpfilespace(self.config)
self.gpverify = GpdbVerify(config=self.config)
self.dbstate = DbStateClass('run_validation', self.config)
self.port = os.getenv('PGPORT')
def invoke_fault(self, fault_name, type, role='mirror', port=None, occurence=None, sleeptime=None, seg_id=None):
''' Reset the fault and then issue the fault with the given type'''
self.filereputil.inject_fault(f=fault_name, y='reset', r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
self.filereputil.inject_fault(f=fault_name, y=type, r=role, p=port , o=occurence, sleeptime=sleeptime, seg_id=seg_id)
tinctest.logger.info('Successfully injected fault_name : %s fault_type : %s occurence : %s ' % (fault_name, type, occurence))
sleep(30)
def start_db(self):
'''Gpstart '''
rc = self.gpstart.run_gpstart_cmd()
if not rc:
raise Exception('Failed to start the cluster')
tinctest.logger.info('Started the cluster successfully')
def stop_db(self):
''' Gpstop and dont check for rc '''
cmd = Command('Gpstop_a', 'gpstop -a')
tinctest.logger.info('Executing command: gpstop -a')
cmd.run()
def get_trigger_status(self, trigger_count,max_cnt=50):
'''Compare the pg_stat_activity count with the total number of trigger_sqls executed '''
psql_count=0
for i in range(1,trigger_count):
psql_count = PSQL.run_sql_command('select count(*) from pg_stat_activity;', flags='-q -t', dbname='postgres')
sleep(1)
tinctest.logger.info('Count of trigger sqls %s And it should be %s' % (psql_count, trigger_count))
if psql_count < trigger_count :
tinctest.logger.info('coming to the if loop in get_trigger_status')
return False
return True
def check_trigger_sql_hang(self, test_dir):
'''
@param ddl_type : create/drop
@param fault_type : commit/abort/end_prepare_two_phase_sleep
@description : Return the status of the trigger sqls: whether they are waiting on the fault
Since gpfaultinjector has no way to check if all the sqls are triggered, we are using
a count(*) on pg_stat_activity and compare the total number of trigger_sqls
'''
trigger_dir = local_path('%s_tests/trigger_sql/' % (test_dir))
trigger_count = len(glob.glob1(trigger_dir,"*.ans"))
return self.get_trigger_status(trigger_count)
def get_items_list(test_file):
''' Get file contents to a list '''
with open(test_file, 'r') as f:
test_list = [line.strip() for line in f]
return test_list
def validate_sql(filename):
''' Compare the out and ans files '''
out_file = local_path(filename.replace(".sql", ".out"))
ans_file = local_path(filename.replace('.sql' , '.ans'))
assert Gpdiff.are_files_equal(out_file, ans_file)
def run_sql(filename, verify=True):
''' Run the provided sql and validate it '''
out_file = local_path(filename.replace(".sql", ".out"))
PSQL.run_sql_file(sql_file = filename, out_file = out_file)
if verify == True:
validate_sql(filename)
|
Python
| 0 |
@@ -2596,26 +2596,8 @@
ce))
-%0A sleep(30)
%0A%0A
|
d03513e41ee1cc0edcd696c5ed08274db4f782bd
|
add editor for page icons
|
cmsplugin_cascade/admin.py
|
cmsplugin_cascade/admin.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from django.contrib import admin
from django.db.models import Q
from django.http import JsonResponse, HttpResponseForbidden
from django.utils.translation import get_language_from_request
from cms.models.pagemodel import Page
from cms.extensions import PageExtensionAdmin
from cmsplugin_cascade.models import CascadePage
from cmsplugin_cascade.link.forms import format_page_link
@admin.register(CascadePage)
class CascadePageAdmin(PageExtensionAdmin):
def get_urls(self):
urls = [
url(r'^get_page_sections/$', lambda: None, name='get_page_sections'), # just to reverse
url(r'^get_page_sections/(?P<page_pk>\d+)$',
self.admin_site.admin_view(self.get_page_sections)),
url(r'^published_pages/$', self.get_published_pagelist, name='get_published_pagelist'),
]
urls.extend(super(CascadePageAdmin, self).get_urls())
return urls
def get_page_sections(self, request, page_pk=None):
choices = []
try:
for key, val in self.model.objects.get(extended_object_id=page_pk).glossary['element_ids'].items():
choices.append((key, val))
except (self.model.DoesNotExist, KeyError):
pass
return JsonResponse({'element_ids': choices})
def get_published_pagelist(self, request, *args, **kwargs):
"""
This view is used by the SearchLinkField as the user types to feed the autocomplete drop-down.
"""
if not request.is_ajax():
return HttpResponseForbidden()
query_term = request.GET.get('term','').strip('/')
language = get_language_from_request(request)
matching_published_pages = Page.objects.published().public().filter(
Q(title_set__title__icontains=query_term, title_set__language=language)
| Q(title_set__path__icontains=query_term, title_set__language=language)
| Q(title_set__menu_title__icontains=query_term, title_set__language=language)
| Q(title_set__page_title__icontains=query_term, title_set__language=language)
).distinct().order_by('title_set__title').iterator()
data = {'results': []}
for page in matching_published_pages:
title = page.get_title(language=language)
path = page.get_absolute_url(language=language)
data['results'].append({
'id': page.pk,
'text': format_page_link(title, path),
})
if len(data['results']) > 15:
break
return JsonResponse(data)
|
Python
| 0.000001 |
@@ -544,16 +544,91 @@
Admin):%0A
+ def get_fields(self, request, obj=None):%0A return %5B'icon_font'%5D%0A%0A
def
|
0089ed6473d898ee3aa766e78a8e1324f89cd436
|
Fix Python 2.5 compatibility issue.
|
libcloud/loadbalancer/drivers/elb.py
|
libcloud/loadbalancer/drivers/elb.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'ElasticLBDriver'
]
from libcloud.utils.xml import findtext, findall
from libcloud.loadbalancer.types import State
from libcloud.loadbalancer.base import Driver, LoadBalancer, Member
from libcloud.common.aws import AWSGenericResponse, SignedAWSConnection
VERSION = '2012-06-01'
HOST = 'elasticloadbalancing.%s.amazonaws.com'
ROOT = '/%s/' % (VERSION)
NS = 'http://elasticloadbalancing.amazonaws.com/doc/%s/' % (VERSION, )
class ELBResponse(AWSGenericResponse):
"""
Amazon ELB response class.
"""
namespace = NS
class ELBConnection(SignedAWSConnection):
version = VERSION
host = HOST
responseCls = ELBResponse
class ElasticLBDriver(Driver):
name = 'ELB'
website = 'http://aws.amazon.com/elasticloadbalancing/'
connectionCls = ELBConnection
def __init__(self, access_id, secret, region):
super(ElasticLBDriver, self).__init__(access_id, secret)
self.region = region
self.connection.host = HOST % (region)
def list_protocols(self):
return ['tcp', 'ssl', 'http', 'https']
def list_balancers(self):
params = {'Action': 'DescribeLoadBalancers'}
data = self.connection.request(ROOT, params=params).object
return self._to_balancers(data)
def create_balancer(self, name, port, protocol, algorithm, members,
ex_members_availability_zones=None):
if ex_members_availability_zones is None:
ex_members_availability_zones = ['a']
params = {
'Action': 'CreateLoadBalancer',
'LoadBalancerName': name,
'Listeners.member.1.InstancePort': str(port),
'Listeners.member.1.InstanceProtocol': protocol.upper(),
'Listeners.member.1.LoadBalancerPort': str(port),
'Listeners.member.1.Protocol': protocol.upper(),
}
for i, z in enumerate(ex_members_availability_zones, 1):
zone = '-'.join((self.region, z))
params['AvailabilityZones.member.%d' % i] = zone
data = self.connection.request(ROOT, params=params).object
balancer = LoadBalancer(
id=name,
name=name,
state=State.PENDING,
ip=findtext(element=data, xpath='DNSName', namespace=NS),
port=port,
driver=self.connection.driver
)
balancer._members = []
return balancer
def destroy_balancer(self, balancer):
params = {
'Action': 'DeleteLoadBalancer',
'LoadBalancerName': balancer.id
}
self.connection.request(ROOT, params=params)
return True
def get_balancer(self, balancer_id):
params = {
'Action': 'DescribeLoadBalancers',
'LoadBalancerNames.member.1': balancer_id
}
data = self.connection.request(ROOT, params=params).object
return self._to_balancers(data)[0]
def balancer_attach_compute_node(self, balancer, node):
params = {
'Action': 'RegisterInstancesWithLoadBalancer',
'LoadBalancerName': balancer.id,
'Instances.member.1.InstanceId': node.id
}
self.connection.request(ROOT, params=params)
balancer._members.append(Member(node.id, None, None, balancer=self))
def balancer_detach_member(self, balancer, member):
params = {
'Action': 'DeregisterInstancesFromLoadBalancer',
'LoadBalancerName': balancer.id,
'Instances.member.1.InstanceId': member.id
}
self.connection.request(ROOT, params=params)
balancer._members = [m for m in balancer._members if m.id != member.id]
return True
def balancer_list_members(self, balancer):
return balancer._members
def _to_balancers(self, data):
xpath = 'DescribeLoadBalancersResult/LoadBalancerDescriptions/member'
return [self._to_balancer(el)
for el in findall(element=data, xpath=xpath, namespace=NS)]
def _to_balancer(self, el):
name = findtext(element=el, xpath='LoadBalancerName', namespace=NS)
dns_name = findtext(el, xpath='DNSName', namespace=NS)
port = findtext(el, xpath='LoadBalancerPort', namespace=NS)
balancer = LoadBalancer(
id=name,
name=name,
state=State.UNKNOWN,
ip=dns_name,
port=port,
driver=self.connection.driver
)
xpath = 'Instances/member/InstanceId'
members = findall(element=el, xpath=xpath, namespace=NS)
balancer._members = []
for m in members:
balancer._members.append(Member(m.text, None, None,
balancer=balancer))
return balancer
|
Python
| 0.999784 |
@@ -2702,11 +2702,8 @@
ones
-, 1
):%0A
@@ -2798,17 +2798,23 @@
r.%25d' %25
-i
+(i + 1)
%5D = zone
|
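
The change above is needed because enumerate() only gained its start argument in Python 2.6, so on 2.5 the one-based member index has to be computed by hand. A tiny illustration of the two equivalent spellings, run on a modern interpreter:

zones = ["a", "b", "c"]

# Python 2.6+ / 3.x: enumerate can start counting at 1
with_start = {"AvailabilityZones.member.%d" % i: z for i, z in enumerate(zones, 1)}

# Python 2.5-safe spelling: start at 0 and shift the index manually
manual = {"AvailabilityZones.member.%d" % (i + 1): z for i, z in enumerate(zones)}

assert with_start == manual
print(with_start)  # {'AvailabilityZones.member.1': 'a', ...}
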
28333726030c99e788480ea334a29ecb5d79e66f
|
Allow deletion of *.mp4 timelapse files
|
src/octoprint/server/api/timelapse.py
|
src/octoprint/server/api/timelapse.py
|
# coding=utf-8
from __future__ import absolute_import
__author__ = "Gina Häußge <[email protected]>"
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
__copyright__ = "Copyright (C) 2014 The OctoPrint Project - Released under terms of the AGPLv3 License"
import os
from flask import request, jsonify, url_for, make_response
from werkzeug.utils import secure_filename
import octoprint.timelapse
import octoprint.util as util
from octoprint.settings import settings, valid_boolean_trues
from octoprint.server import admin_permission, printer
from octoprint.server.util.flask import redirect_to_tornado, restricted_access, get_json_command_from_request
from octoprint.server.api import api
from octoprint.server import NO_CONTENT
#~~ timelapse handling
@api.route("/timelapse", methods=["GET"])
def getTimelapseData():
timelapse = octoprint.timelapse.current
config = {"type": "off"}
if timelapse is not None and isinstance(timelapse, octoprint.timelapse.ZTimelapse):
config["type"] = "zchange"
config["postRoll"] = timelapse.post_roll
config["fps"] = timelapse.fps
elif timelapse is not None and isinstance(timelapse, octoprint.timelapse.TimedTimelapse):
config["type"] = "timed"
config["postRoll"] = timelapse.post_roll
config["fps"] = timelapse.fps
config.update({
"interval": timelapse.interval
})
files = octoprint.timelapse.get_finished_timelapses()
for file in files:
file["url"] = url_for("index") + "downloads/timelapse/" + file["name"]
result = dict(config=config,
files=files)
if "unrendered" in request.values and request.values["unrendered"] in valid_boolean_trues:
result.update(unrendered=octoprint.timelapse.get_unrendered_timelapses())
return jsonify(result)
@api.route("/timelapse/<filename>", methods=["GET"])
def downloadTimelapse(filename):
return redirect_to_tornado(request, url_for("index") + "downloads/timelapse/" + filename)
@api.route("/timelapse/<filename>", methods=["DELETE"])
@restricted_access
def deleteTimelapse(filename):
if util.is_allowed_file(filename, ["mpg"]):
timelapse_folder = settings().getBaseFolder("timelapse")
full_path = os.path.realpath(os.path.join(timelapse_folder, filename))
if full_path.startswith(timelapse_folder) and os.path.exists(full_path):
os.remove(full_path)
return getTimelapseData()
@api.route("/timelapse/unrendered/<name>", methods=["DELETE"])
@restricted_access
def deleteUnrenderedTimelapse(name):
octoprint.timelapse.delete_unrendered_timelapse(name)
return NO_CONTENT
@api.route("/timelapse/unrendered/<name>", methods=["POST"])
@restricted_access
def processUnrenderedTimelapseCommand(name):
# valid file commands, dict mapping command name to mandatory parameters
valid_commands = {
"render": []
}
command, data, response = get_json_command_from_request(request, valid_commands)
if response is not None:
return response
if command == "render":
if printer.is_printing() or printer.is_paused():
return make_response("Printer is currently printing, cannot render timelapse", 409)
octoprint.timelapse.render_unrendered_timelapse(name)
return NO_CONTENT
@api.route("/timelapse", methods=["POST"])
@restricted_access
def setTimelapseConfig():
if "type" in request.values:
config = {
"type": request.values["type"],
"postRoll": 0,
"fps": 25,
"options": {}
}
if "postRoll" in request.values:
try:
postRoll = int(request.values["postRoll"])
except ValueError:
return make_response("Invalid value for postRoll: %r" % request.values["postRoll"], 400)
else:
if postRoll >= 0:
config["postRoll"] = postRoll
else:
return make_response("Invalid value for postRoll: %d" % postRoll, 400)
if "fps" in request.values:
try:
fps = int(request.values["fps"])
except ValueError:
return make_response("Invalid value for fps: %r" % request.values["fps"], 400)
else:
if fps > 0:
config["fps"] = fps
else:
return make_response("Invalid value for fps: %d" % fps, 400)
if "interval" in request.values:
config["options"] = {
"interval": 10
}
try:
interval = int(request.values["interval"])
except ValueError:
return make_response("Invalid value for interval: %r" % request.values["interval"])
else:
if interval > 0:
config["options"]["interval"] = interval
else:
return make_response("Invalid value for interval: %d" % interval)
if admin_permission.can() and "save" in request.values and request.values["save"] in valid_boolean_trues:
octoprint.timelapse.configure_timelapse(config, True)
else:
octoprint.timelapse.configure_timelapse(config)
return getTimelapseData()
|
Python
| 0 |
@@ -2091,16 +2091,31 @@
, %5B%22mpg%22
+, %22mpeg%22, %22mp4%22
%5D):%0A%09%09ti
|
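
The delete endpoint above pairs an extension allow-list with a realpath containment check so a crafted filename such as ../../etc/passwd cannot reach outside the timelapse folder. A self-contained sketch of that guard follows; the folder path and extension set are placeholders rather than OctoPrint's actual configuration.

import os

ALLOWED_EXTENSIONS = {"mpg", "mpeg", "mp4"}  # mirrors the extended allow-list in the diff

def safe_delete(base_folder, filename):
    ext = os.path.splitext(filename)[1].lstrip(".").lower()
    if ext not in ALLOWED_EXTENSIONS:
        return False
    base_folder = os.path.realpath(base_folder)
    full_path = os.path.realpath(os.path.join(base_folder, filename))
    # refuse anything that resolves outside the base folder (e.g. "../" tricks)
    if not full_path.startswith(base_folder + os.sep):
        return False
    if os.path.exists(full_path):
        os.remove(full_path)
        return True
    return False
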
77d8f11277e3b006c9f9137a35291892a73156f2
|
format python code
|
alexBot/cogs/games_reposting.py
|
alexBot/cogs/games_reposting.py
|
import logging
from typing import Dict
import discord
from discord import PartialEmoji
from discord.ext import commands
from discord.message import Message
from discord.webhook import AsyncWebhookAdapter, WebhookMessage
from emoji_data import EmojiSequence
from ..tools import Cog
log = logging.getLogger(__name__)
class GamesReposting(Cog):
def __init__(self, bot: "Bot"):
super().__init__(bot)
self.linked: Dict[int, WebhookMessage] = {}
self.webhook = discord.Webhook.from_url(
self.bot.config.nerdiowo_announcements_webhook, adapter=AsyncWebhookAdapter(session=self.bot.session)
)
@Cog.listener()
async def on_message(self, message: discord.Message):
if message.channel.category_id == 896853287108759615:
additional_content = [await x.to_file() for x in message.attachments]
msg = await self.webhook.send(
content=message.content,
wait=True,
username=message.author.name,
avatar_url=message.author.avatar_url,
files=additional_content,
embeds=message.embeds,
)
self.linked[message.id] = msg
@Cog.listener()
async def on_message_edit(self, before: Message, after: Message):
if before.id in self.linked:
if before.content != after.content:
await self.linked[before.id].edit(content=after.content)
def setup(bot):
bot.add_cog(GamesReposting(bot))
|
Python
| 0.000174 |
@@ -33,17 +33,16 @@
t Dict%0A%0A
-%0A
import d
|
935f094d36ee3a800def4256a4acbf5c795e6071
|
Fix a typo.
|
tensorflow_datasets/testing/mocking.py
|
tensorflow_datasets/testing/mocking.py
|
# coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mock util for tfds.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import random
from absl.testing import absltest
import numpy as np
import tensorflow as tf
from tensorflow_datasets.core import features as features_lib
@contextlib.contextmanager
def mock_data(num_examples=1, as_dataset_fn=None, data_dir=None):
"""Mock tfds to generate random data.
This function requires the true metadata files (dataset_info.json, label.txt,
vocabulary files) to be stored in `data_dir/dataset_name/version`, as they
would be for the true dataset.
The actual examples will be randomly generated using
`builder.info.features.get_tensor_info()`.
Download and prepare step will be skipped.
Warning: As the mocked builder will use the true metadata (label names,...),
the `info.split['train'].num_examples` won't match `len(list(ds_train))`.
Usage (automated):
```
with mock_data(num_examples=5):
ds = tfds.load('some_dataset', split='train')
for ex in ds: # ds will yield randomly generated examples.
ex
```
The examples will be deterministically generated. Train and test split will
yield the same examples.
If you want more fine grain control over the generated examples, you can
manually overwrite the `DatasetBuilder._as_dataset` method.
Usage (manual):
```
def as_dataset(self, *args, **kwargs):
return tf.data.Dataset.from_generator(
lambda: ({
'image': np.ones(shape=(28, 28, 1), dtype=np.uint8),
'label': i % 10,
} for i in range(num_examples)),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
with mock_data(as_dataset_fn=as_dataset):
ds = tfds.load('some_dataset', split='train')
for ex in ds: # ds will yield the fake data example of 'as_dataset'.
ex
```
Args:
num_examples: `int`, the number of fake example to generate.
as_dataset_fn: if provided, will replace the default random example
generator. This function mock the `FileAdapterBuilder._as_dataset`
data_dir: `str`, `data_dir` folder from where to load the metadata.
Will overwrite `data_dir` kwargs from `tfds.load`.
Yields:
None
"""
def mock_download_and_prepare(self, *args, **kwargs):
del args
del kwargs
if not tf.io.gfile.exists(self._data_dir): # pylint: disable=protected-access
raise ValueError(
'TFDS has been mocked, but metadata files where not found in {}. '
'You should copy the real metadata files, so that the dataset '
'can be loaded properly, or set the data_dir kwarg of'
'tfds.testing.mock_tfds(data_dir=...).'
''.format(self._data_dir) # pylint: disable=protected-access
)
def mock_as_dataset(self, *args, **kwargs):
"""Function which overwrite builder._as_dataset."""
del args
del kwargs
ds = tf.data.Dataset.from_generator(
# `from_generator` takes a callable with signature () -> iterable
# Recreating a new generator each time ensure that all pipelines are
# using the same examples
lambda: RandomFakeGenerator(builder=self, num_examples=num_examples),
output_types=self.info.features.dtype,
output_shapes=self.info.features.shape,
)
return ds
if not as_dataset_fn:
as_dataset_fn = mock_as_dataset
if not data_dir:
data_dir = os.path.join(os.path.dirname(__file__), 'metadata')
download_and_prepare_path = 'tensorflow_datasets.core.dataset_builder.DatasetBuilder.download_and_prepare'
as_dataset_path = 'tensorflow_datasets.core.dataset_builder.FileAdapterBuilder._as_dataset'
data_dir_path = 'tensorflow_datasets.core.constants.DATA_DIR'
with absltest.mock.patch(as_dataset_path, as_dataset_fn), \
absltest.mock.patch(
download_and_prepare_path, mock_download_and_prepare), \
absltest.mock.patch(data_dir_path, data_dir):
yield
class RandomFakeGenerator(object):
"""Generator of fake examples randomly and deterministically generated."""
def __init__(self, builder, num_examples, seed=0):
self._rgn = np.random.RandomState(seed) # Could use the split name as seed
self._builder = builder
self._num_examples = num_examples
def _generate_random_array(self, feature, tensor_info):
"""Generates a random tensor for a single feature."""
    # TODO(tfds): Could improve the fake generation:
# * Use the feature statistics (min, max)
# * For Sequence features
# * For Text
shape = [ # Fill dynamic shape with random values
self._rgn.randint(5, 50) if s is None else s
for s in tensor_info.shape
]
if isinstance(feature, features_lib.ClassLabel):
max_value = feature.num_classes
elif isinstance(feature, features_lib.Text) and feature.vocab_size:
max_value = feature.vocab_size
else:
max_value = 255
# Generate some random values, depending on the dtype
if tensor_info.dtype.is_integer:
return self._rgn.randint(0, max_value, shape)
elif tensor_info.dtype.is_floating:
return self._rgn.random_sample(shape)
elif tensor_info.dtype == tf.string:
return ''.join(
random.choice(' abcdefghij') for _ in range(random.randint(10, 20)))
else:
raise ValueError('Fake generation not supported for {}'.format(
tensor_info.dtype))
def _generate_example(self):
"""Generate the next example."""
root_feature = self._builder.info.features
flat_features = root_feature._flatten(root_feature) # pylint: disable=protected-access
flat_tensor_info = root_feature._flatten(root_feature.get_tensor_info()) # pylint: disable=protected-access
flat_np = [
self._generate_random_array(feature, tensor_info)
for feature, tensor_info in zip(flat_features, flat_tensor_info)
]
return root_feature._nest(flat_np) # pylint: disable=protected-access
def __iter__(self):
"""Yields all fake examples."""
for _ in range(self._num_examples):
yield self._generate_example()
|
Python
| 0.999999 |
@@ -3158,17 +3158,16 @@
files w
-h
ere not
|
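
The mock above replaces the builder's _as_dataset with a generator seeded from a fixed RandomState, which is what makes every pipeline see identical fake examples. Below is a trimmed-down sketch of that determinism using only NumPy — no TensorFlow, and the 28x28 image shape is invented for the example.

import numpy as np

class FakeExampleGenerator(object):
    """Yields the same pseudo-random examples every time it is constructed."""

    def __init__(self, num_examples, seed=0):
        self._rng = np.random.RandomState(seed)  # fixed seed => reproducible stream
        self._num_examples = num_examples

    def __iter__(self):
        for _ in range(self._num_examples):
            yield {
                "image": self._rng.randint(0, 255, size=(28, 28, 1), dtype=np.uint8),
                "label": int(self._rng.randint(0, 10)),
            }

a = [ex["label"] for ex in FakeExampleGenerator(num_examples=3)]
b = [ex["label"] for ex in FakeExampleGenerator(num_examples=3)]
assert a == b  # two generators with the same seed produce identical examples
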
cdb7c87fd133b6e99916919b525e9d277a3913dd
|
Fix a typo.
|
libcloud/test/compute/test_ikoula.py
|
libcloud/test/compute/test_ikoula.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from libcloud.compute.drivers.ikoula import IkoulaNodeDriver
from libcloud.test.compute.test_cloudstack import CloudStackCommonTestCase
from libcloud.test import unittest
class ExoscaleNodeDriverTestCase(CloudStackCommonTestCase, unittest.TestCase):
driver_klass = IkoulaNodeDriver
if __name__ == '__main__':
sys.exit(unittest.main())
|
Python
| 0.999957 |
@@ -971,16 +971,14 @@
ass
-Exoscale
+Ikoula
Node
|
8c70752c87eb0519150e7cf17b146c97847b1460
|
add new preview-graylog ip to reversedns.py
|
modules/nagios/files/reversedns.py
|
modules/nagios/files/reversedns.py
|
#!/usr/bin/env python
import socket
import sys
if sys.argv[1] == "ip-10-236-86-54.eu-west-1.compute.internal":
print "frontend.production.alphagov.co.uk"
exit(0)
if sys.argv[1] == "ip-10-250-157-37.eu-west-1.compute.internal":
print "static.production.alphagov.co.uk"
exit(0)
if sys.argv[1] == "ip-10-53-54-49.eu-west-1.compute.internal":
print "frontend.cluster"
exit(0)
if sys.argv[1] == "ip-10-54-182-112.eu-west-1.compute.internal":
print "signonotron.production.alphagov.co.uk"
exit(0)
# hack for the change to whitehalls host not being done correctly
if sys.argv[1] == "ip-10-229-67-207.eu-west-1.compute.internal":
# print "ip-10-224-50-207.eu-west-1.compute.internal"
print "whitehall.production.alphagov.co.uk"
exit(0)
if sys.argv[1] == "ip-10-236-86-54.eu-west-1.compute.internal":
print "frontend.production.alphagov.co.uk"
# hacks to pickup correct graphs, due to local hosts and ganglia name mismatch
if sys.argv[1] in ['ip-10-54-182-112.eu-west-1.compute.internal', 'ip-10-236-86-54.eu-west-1.compute.internal', 'ip-10-250-157-37.eu-west-1.compute.internal', 'ip-10-53-54-49.eu-west-1.compute.internal']:
print sys.argv[1]
exit(0)
try:
print socket.gethostbyaddr(sys.argv[1])[0]
except:
print sys.argv[1]
|
Python
| 0 |
@@ -1212,16 +1212,63 @@
nternal'
+, 'ip-10-32-31-104.eu-west-1.compute.internal'
%5D:%0A
|
56c95eff825b58798515c619a101a2435e5d136f
|
Disable NaCl inbrowser_crash_in_syscall_test
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
if not is_integration_bot and sys.platform == 'darwin':
# TODO: Reenable.
sys.stdout.write('Skipping nacl_integration, see http://crbug.com/100518\n')
return
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# TODO(ncbray): Reenable when this issue is resolved.
# http://code.google.com/p/nativeclient/issues/detail?id=2091
tests_to_disable.append('run_ppapi_bad_browser_test')
# This thread safety stress test is flaky on at least Windows.
# See http://code.google.com/p/nativeclient/issues/detail?id=2124
# TODO(mseaborn): Reenable when this issue is resolved.
tests_to_disable.append('run_ppapi_ppb_var_browser_test')
# The behavior of the URLRequest changed slightly and this test needs to be
# updated. http://code.google.com/p/chromium/issues/detail?id=94352
tests_to_disable.append('run_ppapi_ppb_url_request_info_browser_test')
# This test failed and caused the build's gatekeep to close the tree.
# http://code.google.com/p/chromium/issues/detail?id=96434
tests_to_disable.append('run_ppapi_example_post_message_test')
# TODO(ncbray) why did these tests flake?
# http://code.google.com/p/nativeclient/issues/detail?id=2230
tests_to_disable.extend([
'run_pm_manifest_file_chrome_browser_test',
'run_srpc_basic_chrome_browser_test',
'run_srpc_hw_data_chrome_browser_test',
'run_srpc_hw_chrome_browser_test',
'run_srpc_manifest_file_chrome_browser_test',
'run_srpc_nameservice_chrome_browser_test',
'run_srpc_nrd_xfer_chrome_browser_test',
'run_no_fault_pm_nameservice_chrome_browser_test',
'run_fault_pm_nameservice_chrome_browser_test',
'run_fault_pq_os_pm_nameservice_chrome_browser_test',
'run_fault_pq_dep_pm_nameservice_chrome_browser_test',
])
if sys.platform == 'darwin':
# The following test is failing on Mac OS X 10.5. This may be
# because of a kernel bug that we might need to work around.
# See http://code.google.com/p/nativeclient/issues/detail?id=1835
# TODO(mseaborn): Remove this when the issue is resolved.
tests_to_disable.append('run_async_messaging_test')
# The following test fails on debug builds of Chromium.
# See http://code.google.com/p/nativeclient/issues/detail?id=2077
# TODO(mseaborn): Remove this when the issue is resolved.
tests_to_disable.append('run_ppapi_example_font_test')
script_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.dirname(script_dir)
chrome_dir = os.path.dirname(test_dir)
src_dir = os.path.dirname(chrome_dir)
nacl_integration_script = os.path.join(
src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
'--disable_tests=%s' % ','.join(tests_to_disable)] + args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
|
Python
| 0.000408 |
@@ -2817,16 +2817,242 @@
%5D)%0A%0A
+ # TODO(mcgrathr): Reenable when resolved.%0A # Was seen to fail repeatedly on Windows.%0A # http://code.google.com/p/nativeclient/issues/detail?id=2173%0A tests_to_disable.append('run_inbrowser_crash_in_syscall_test')%0A%0A
if sys
|
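The entry above appends another test name to tests_to_disable, and the script hands the whole list to buildbot_chrome_nacl_stage.py as a single comma-separated flag. A tiny illustration of how that flag is assembled (the test names are only examples):
tests_to_disable = ['run_ppapi_bad_browser_test',
                    'run_inbrowser_crash_in_syscall_test']
flag = '--disable_tests=%s' % ','.join(tests_to_disable)
# prints: --disable_tests=run_ppapi_bad_browser_test,run_inbrowser_crash_in_syscall_test
print(flag)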
0b6f693bd00f01d7e4ff576690bb1a5880fb80d7
|
Bump version number
|
VMEncryption/main/Common.py
|
VMEncryption/main/Common.py
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
class CommonVariables:
utils_path_name = 'Utils'
extension_name = 'AzureDiskEncryptionForLinuxTest'
extension_version = '0.1.0.999141'
extension_type = extension_name
extension_media_link = 'https://amextpaas.blob.core.windows.net/prod/' + extension_name + '-' + str(extension_version) + '.zip'
extension_label = 'Windows Azure VMEncryption Extension for Linux IaaS'
extension_description = extension_label
"""
disk/file system related
"""
sector_size = 512
luks_header_size = 4096 * 512
default_block_size = 52428800
min_filesystem_size_support = 52428800 * 3
#TODO for the sles 11, we should use the ext3
default_file_system = 'ext4'
default_mount_name = 'encrypted_disk'
dev_mapper_root = '/dev/mapper/'
disk_by_id_root = '/dev/disk/by-id'
BekVolumeFileSystem = 'vfat'
"""
parameter key names
"""
PassphraseFileNameKey = 'BekFileName'
KeyEncryptionKeyURLKey = 'KeyEncryptionKeyURL'
KeyVaultURLKey = 'KeyVaultURL'
AADClientIDKey = 'AADClientID'
KeyEncryptionAlgorithmKey = 'KeyEncryptionAlgorithm'
DiskFormatQuerykey = "DiskFormatQuery"
PassphraseKey = 'Passphrase'
BekVolumeFileSystemKey = "BekVolumeFileSystem"
"""
value for VolumeType could be OS or Data
"""
VolumeTypeKey = 'VolumeType'
AADClientSecretKey = 'AADClientSecret'
SecretUriKey = 'SecretUri'
VolumeTypeOS = 'OS'
VolumeTypeData = 'Data'
VolumeTypeAll = 'All'
SupportedVolumeTypes = [ VolumeTypeOS, VolumeTypeData, VolumeTypeAll ]
"""
command types
"""
EnableEncryption = 'EnableEncryption'
EnableEncryptionFormat = 'EnableEncryptionFormat'
DisableEncryption = 'DisableEncryption'
"""
encryption config keys
"""
EncryptionEncryptionOperationKey = 'EncryptionOperation'
EncryptionDecryptionOperationKey = 'DecryptionOperation'
EncryptionVolumeTypeKey = 'VolumeType'
EncryptionDiskFormatQueryKey = 'DiskFormatQuery'
"""
crypt ongoing item config keys
"""
OngoingItemMapperNameKey = 'MapperName'
OngoingItemHeaderFilePathKey = 'HeaderFilePath'
OngoingItemOriginalDevNamePathKey = 'DevNamePath'
OngoingItemOriginalDevPathKey = 'DevicePath'
OngoingItemPhaseKey = 'Phase'
OngoingItemHeaderSliceFilePathKey = 'HeaderSliceFilePath'
OngoingItemFileSystemKey = 'FileSystem'
OngoingItemMountPointKey = 'MountPoint'
OngoingItemDeviceSizeKey = 'Size'
OngoingItemCurrentSliceIndexKey = 'CurrentSliceIndex'
OngoingItemFromEndKey = 'FromEnd'
OngoingItemCurrentDestinationKey = 'CurrentDestination'
OngoingItemCurrentTotalCopySizeKey = 'CurrentTotalCopySize'
OngoingItemCurrentLuksHeaderFilePathKey = 'CurrentLuksHeaderFilePath'
OngoingItemCurrentSourcePathKey = 'CurrentSourcePath'
OngoingItemCurrentBlockSizeKey = 'CurrentBlockSize'
"""
    encryption phase definitions
"""
EncryptionPhaseBackupHeader = 'BackupHeader'
EncryptionPhaseCopyData = 'EncryptingData'
EncryptionPhaseRecoverHeader = 'RecoverHeader'
EncryptionPhaseEncryptDevice = 'EncryptDevice'
EncryptionPhaseDone = 'Done'
"""
decryption phase constants
"""
DecryptionPhaseCopyData = 'DecryptingData'
DecryptionPhaseDone = 'Done'
"""
logs related
"""
InfoLevel = 'Info'
WarningLevel = 'Warning'
ErrorLevel = 'Error'
"""
error codes
"""
extension_success_status = 'success'
extension_error_status = 'error'
process_success = 0
success = 0
os_not_supported = 1
luks_format_error = 2
scsi_number_not_found = 3
device_not_blank = 4
environment_error = 5
luks_open_error = 6
mkfs_error = 7
folder_conflict_error = 8
mount_error = 9
mount_point_not_exists = 10
passphrase_too_long_or_none = 11
parameter_error = 12
create_encryption_secret_failed = 13
encrypttion_already_enabled = 14
passphrase_file_not_found = 15
command_not_support = 16
volue_type_not_support = 17
copy_data_error = 18
encryption_failed = 19
tmpfs_error = 20
backup_slice_file_error = 21
unmount_oldroot_error = 22
unknown_error = 100
class TestHooks:
search_not_only_ide = False
use_hard_code_passphrase = False
hard_code_passphrase = "Quattro!"
class DeviceItem(object):
def __init__(self):
#NAME,TYPE,FSTYPE,MOUNTPOINT,LABEL,UUID,MODEL
self.name = None
self.type = None
self.file_system = None
self.mount_point = None
self.label = None
self.uuid = None
self.model = None
self.size = None
def __str__(self):
return "name:" + str(self.name) + " type:" + str(self.type) + " fstype:" + str(self.file_system) + " mountpoint:" + str(self.mount_point) + " label:" + str(self.label) + " model:" + str(self.model)
class CryptItem(object):
def __init__(self):
self.mapper_name = None
self.dev_path = None
self.mount_point = None
self.file_system = None
self.luks_header_path = None
self.uses_cleartext_key = None
def __str__(self):
return ("name: " + str(self.mapper_name) + " dev_path:" + str(self.dev_path) +
" mount_point:" + str(self.mount_point) + " file_system:" + str(self.file_system) +
" luks_header_path:" + str(self.luks_header_path) +
" uses_cleartext_key:" + str(self.uses_cleartext_key))
|
Python
| 0.000002 |
@@ -798,17 +798,17 @@
.0.99914
-1
+2
'%0A ex
|
b5b4c1f5b72494e00064b36f2ee1c53d1b5c2aca
|
Revert 83430 - NaCl: Re-enable tests, since they pass on the trybotsBUG=noneTEST=nacl_integrationReview URL: http://codereview.chromium.org/6904067 [email protected] Review URL: http://codereview.chromium.org/6902132
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
chrome/test/nacl_test_injection/buildbot_nacl_integration.py
|
#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main():
script_dir = os.path.dirname(os.path.abspath(__file__))
test_dir = os.path.dirname(script_dir)
chrome_dir = os.path.dirname(test_dir)
src_dir = os.path.dirname(chrome_dir)
nacl_integration_script = os.path.join(
src_dir, 'native_client/build/buildbot_chrome_nacl_stage.py')
cmd = [sys.executable, nacl_integration_script] + sys.argv[1:]
print cmd
subprocess.check_call(cmd)
if __name__ == '__main__':
Main()
|
Python
| 0.000001 |
@@ -231,16 +231,560 @@
Main():%0A
+ # TODO(ncbray): figure out why this is failing on windows and enable.%0A if (sys.platform in %5B'win32', 'cygwin'%5D and%0A 'xp-nacl-chrome' not in os.environ.get('PWD', '')): return%0A # TODO(ncbray): figure out why this is failing on mac and re-enable.%0A if (sys.platform == 'darwin' and%0A 'mac-nacl-chrome' not in os.environ.get('PWD', '')): return%0A # TODO(ncbray): figure out why this is failing on some linux trybots.%0A if (sys.platform in %5B'linux', 'linux2'%5D and%0A 'hardy64-nacl-chrome' not in os.environ.get('PWD', '')): return%0A%0A
script
|
bbfa0b6fc8e4bb851528084d147ee898fd5af06a
|
fix path issues
|
experimentator/__main__.py
|
experimentator/__main__.py
|
"""experimentator
Usage:
exp run [options] <exp-file> (--next <level> [--not-finished] | (<level> <n>)...)
exp resume [options] <exp-file> <level> [<n> (<level> <n>)...]
exp export <exp-file> <data-file>
exp -h | --help
exp --version
Options:
--not-finished Run the first <level> that hasn't finished (rather than first that hasn't started).
--demo Don't save data.
--debug Set logging level to DEBUG.
--skip-parents Don't call start and end callbacks of parent levels.
-h, --help Show full help.
--version Print the installed version number of experimentator.
Commands:
run <exp-file> --next <level> Runs the first <level> that hasn't started. E.g.:
exp run exp1.dat --next session
run <exp-file> (<level> <n>)... Runs the section specified by any number of <level> <n> pairs. E.g.:
exp run exp1.dat participant 3 session 1
resume <exp-file> <level> Resume the first section at <level> that has been started but not finished.
resume <exp-file> (<level> <n>)... Resume the section specified by any number of <level> <n> pairs. The specified
section must have been started but not finished. E.g.:
exp resume exp1.dat participant 2 session 2
export <exp-file> <data-file> Export the data in <exp-file> to csv format as <data-file>.
Note: This will not produce readable csv files for experiments with
results in multi-element data structures (e.g., timeseries, dicts).
"""
import sys
import os.path
import logging
from docopt import docopt
from schema import Schema, Use, And, Or, Optional
from experimentator import __version__, load_experiment, run_experiment_section, export_experiment_data
def main(args=None):
scheme = Schema({Optional('--debug'): bool,
Optional('--demo'): bool,
Optional('--help'): bool,
Optional('--next'): bool,
Optional('--not-finished'): bool,
Optional('--skip-parents'): bool,
Optional('--version'): bool,
Optional('<data-file>'): Or(lambda x: x is None, os.path.exists),
Optional('<exp-file>'): Or(lambda x: x is None, os.path.exists),
Optional('<level>'): [str],
Optional('<n>'): [And(Use(int), lambda n: n > 0)],
Optional('export'): bool,
Optional('resume'): bool,
Optional('run'): bool,
})
options = scheme.validate(docopt(__doc__, argv=args, version=__version__))
if options['--debug']:
logging.basicConfig(level=logging.DEBUG)
if options['run'] or options['resume']:
exp = load_experiment(options['<exp-file>'])
kwargs = {'demo': options['--demo'],
'parent_callbacks': not options['--skip-parents'],
'resume': options['resume'],
}
if options['--next']:
kwargs.update(section_obj=exp.find_first_not_run(
options['<level>'][0], by_started=options['--not-finished']))
elif options['resume'] and not options['<n>']:
kwargs.update(section_obj=exp.find_first_partially_run(options['<level>'][0]))
else:
kwargs.update(zip(options['<level>'], options['<n>']))
run_experiment_section(exp, **kwargs)
elif options['export']:
export_experiment_data(options['<exp-file>'], options['<data-file>'])
if __name__ == '__main__':
main(sys.argv)
sys.exit(0)
|
Python
| 0.000001 |
@@ -1745,21 +1745,16 @@
mport os
-.path
%0Aimport
@@ -1965,16 +1965,105 @@
=None):%0A
+ # I can't figure out why but this is necessary.%0A sys.path.insert(0, os.getcwd())%0A%0A
sche
@@ -3853,67 +3853,4 @@
'%5D)%0A
-%0Aif __name__ == '__main__':%0A main(sys.argv)%0A sys.exit(0)%0A
|
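The path fix above puts the current working directory at the front of sys.path before the experiment file is loaded, presumably so that modules living in the invocation directory (for example the module that defined a pickled experiment's classes) stay importable when the command is run from an installed entry point. A minimal sketch of the same idea; the function and file names are hypothetical, not part of experimentator:
import os
import pickle
import sys

def load_experiment_file(path):
    # Putting the working directory first lets pickle re-import the module
    # that originally defined the experiment's classes.
    sys.path.insert(0, os.getcwd())
    with open(path, 'rb') as data_file:
        return pickle.load(data_file)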
ebad0c3f8cc65b372dee5aa2ef4ce41ec03986cd
|
use CallRecorder in locabuild tests
|
rhcephpkg/tests/test_localbuild.py
|
rhcephpkg/tests/test_localbuild.py
|
import os
import re
import pytest
from rhcephpkg import Localbuild
from rhcephpkg.localbuild import setup_pbuilder_cache
class TestLocalbuild(object):
def setup_method(self, method):
""" Reset last_cmd before each test. """
self.last_cmd = None
def fake_check_call(self, cmd):
""" Store cmd, in order to verify it later. """
self.last_cmd = cmd
return 0
@pytest.mark.parametrize('args,expected', [
(['localbuild'], '--git-dist=trusty'),
(['localbuild', '--dist', 'trusty'], '--git-dist=trusty'),
(['localbuild', '--dist', 'xenial'], '--git-dist=xenial'),
])
def test_localbuild(self, args, expected, monkeypatch):
monkeypatch.setattr('subprocess.check_call', self.fake_check_call)
monkeypatch.setattr('rhcephpkg.Localbuild._get_j_arg',
lambda *a: '-j2')
localbuild = Localbuild(args)
localbuild.main()
assert self.last_cmd == ['gbp', 'buildpackage', expected,
'--git-arch=amd64', '--git-verbose',
'--git-pbuilder', '-j2', '-us', '-uc']
def test_missing_arg(self):
localbuild = Localbuild(('localbuild', '--dist'))
with pytest.raises(SystemExit) as e:
localbuild.main()
assert 'Specify a distro to --dist' in str(e.value)
class TestGetJArg(object):
""" Test private _get_j_arg() function """
@pytest.mark.parametrize('cpus,ram,expected', [
(2, 2, '-j1'),
(2, 8, '-j2'),
(2, 16, '-j2'),
(2, 32, '-j2'),
(4, 8, '-j2'),
(4, 16, '-j4'),
(4, 32, '-j4'),
(8, 8, '-j2'),
(8, 16, '-j4'),
(8, 32, '-j8'),
])
def test_get_j_arg(self, cpus, ram, expected):
localbuild = Localbuild([])
result = localbuild._get_j_arg(cpus=cpus, total_ram_gb=ram)
assert result == expected
def test_get_j_arg_live(self):
localbuild = Localbuild([])
# Rather than calculating the amount of RAM on this system and
# basically re-implementing the entire code here to get the exact
# expected result, just pattern-match for basic sanity.
result = localbuild._get_j_arg(cpus=1)
assert re.match('-j\d+$', result)
class TestSetupPbuilderCache(object):
def setup_method(self, method):
""" Reset cmds before each test. """
self.cmds = []
def fake_sudo_rm(self, cmd):
""" Fake "sudo rm <foo>" command. """
for filename in cmd[2:]:
if os.path.exists(filename):
os.remove(filename)
def fake_check_call(self, cmd):
""" Store cmd, in order to verify it later. """
self.cmds.append(cmd)
# and fake "sudo rm"...
if cmd[0] == 'sudo' and cmd[1] == 'rm':
self.fake_sudo_rm(cmd)
return 0
@pytest.fixture(autouse=True)
def patch_subprocess(self, monkeypatch):
""" Monkeypatch subprocess for each test. """
monkeypatch.setattr('subprocess.check_call', self.fake_check_call)
@pytest.fixture
def tmpcache(self, tmpdir):
""" Fake pbuilder cache file in a tmpdir. """
cache = tmpdir.join('base-trusty-amd64.tgz')
cache.write('testcachedata')
return cache
def test_exists(self, tmpcache):
pbuilder_cache = str(tmpcache)
setup_pbuilder_cache(pbuilder_cache, 'trusty')
assert self.cmds == []
def test_no_exist(self):
pbuilder_cache = '/noexist/base-trusty-amd64.tgz'
setup_pbuilder_cache(pbuilder_cache, 'trusty')
expected = ['sudo', 'pbuilder', 'create', '--debootstrapopts',
'--variant=buildd', '--basetgz', pbuilder_cache,
'--distribution', 'trusty']
assert self.cmds == [expected]
def test_zero_length(self, tmpcache):
tmpcache.write('')
pbuilder_cache = str(tmpcache)
setup_pbuilder_cache(pbuilder_cache, 'trusty')
rm = ['sudo', 'rm', pbuilder_cache]
create = ['sudo', 'pbuilder', 'create', '--debootstrapopts',
'--variant=buildd', '--basetgz', pbuilder_cache,
'--distribution', 'trusty']
assert self.cmds == [rm, create]
|
Python
| 0 |
@@ -118,293 +118,85 @@
che%0A
-%0A%0Aclass TestLocalbuild(object):%0A%0A def setup_method(self, method):%0A %22%22%22 Reset last_cmd before each test. %22%22%22%0A self.last_cmd = None%0A%0A def fake_check_call(self, cmd):%0A %22%22%22 Store cmd, in order to verify it later. %22%22%22%0A self.last_cmd = cmd%0A return 0%0A
+from rhcephpkg.tests.util import CallRecorder%0A%0A%0Aclass TestLocalbuild(object):
%0A
@@ -480,32 +480,66 @@
, monkeypatch):%0A
+ recorder = CallRecorder()%0A
monkeypa
@@ -575,36 +575,24 @@
_call',
-self.fake_check_call
+recorder
)%0A
@@ -777,21 +777,21 @@
ert
-self.last_cmd
+recorder.args
==
|
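The rewritten test monkeypatches subprocess.check_call with a CallRecorder imported from rhcephpkg.tests.util; that helper is not shown in this entry. A recorder compatible with the assertion above (recorder.args holding the last recorded command) could be as small as the following sketch, which is an assumption rather than the project's actual implementation:
class CallRecorder(object):
    """ Callable test double that stands in for subprocess.check_call. """
    def __init__(self):
        self.args = None    # last command passed to the fake call
        self.calls = 0      # number of times the fake was invoked

    def __call__(self, cmd, *args, **kwargs):
        self.args = cmd
        self.calls += 1
        return 0            # mimic a successful check_call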
0bf63976303515d0702f0a29d14364825a67053a
|
use pid for msgid to avoid duplicate message error
|
mpcontribs-api/mpcontribs/api/notebooks/__init__.py
|
mpcontribs-api/mpcontribs/api/notebooks/__init__.py
|
# -*- coding: utf-8 -*-
from tornado.escape import json_encode, json_decode, url_escape
from websocket import create_connection
from notebook.utils import url_path_join
from notebook.gateway.managers import GatewayClient
def run_cells(kernel_id, cid, cells):
print(f"running {cid} on {kernel_id}")
gw_client = GatewayClient.instance()
url = url_path_join(
gw_client.ws_url, gw_client.kernels_endpoint, url_escape(kernel_id), "channels",
)
outputs = {}
ws = create_connection(url)
for idx, cell in enumerate(cells):
if cell["cell_type"] == "code":
ws.send(
json_encode(
{
"header": {
"username": cid,
"version": "5.3",
"session": "",
"msg_id": f"{cid}-{idx}",
"msg_type": "execute_request",
},
"parent_header": {},
"channel": "shell",
"content": {
"code": cell["source"],
"silent": False,
"store_history": False,
"user_expressions": {},
"allow_stdin": False,
"stop_on_error": True,
},
"metadata": {},
"buffers": {},
}
)
)
outputs[idx] = []
status = None
while status is None or status == "busy" or not len(outputs[idx]):
msg = ws.recv()
msg = json_decode(msg)
msg_type = msg["msg_type"]
if msg_type == "status":
status = msg["content"]["execution_state"]
elif msg_type in ["stream", "display_data", "execute_result"]:
# display_data/execute_result required fields:
# "output_type", "data", "metadata"
# stream required fields: "output_type", "name", "text"
output = msg["content"]
output.pop("transient", None)
output["output_type"] = msg_type
msg_idx = msg["parent_header"]["msg_id"].split("-", 1)[1]
outputs[int(msg_idx)].append(output)
elif msg_type == "error":
tb = msg["content"]["traceback"]
raise ValueError(tb)
ws.close()
return outputs
|
Python
| 0 |
@@ -17,16 +17,26 @@
f-8 -*-%0A
+import os%0A
from tor
@@ -890,16 +890,30 @@
d%7D-%7Bidx%7D
+-%7Bos.getpid()%7D
%22,%0A
@@ -2436,11 +2436,8 @@
(%22-%22
-, 1
)%5B1%5D
|
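The fix above makes each execute_request's msg_id unique per worker process by appending os.getpid(), avoiding the duplicate-message error when several processes drive the same kernel. A small illustration of building such an id; the helper name is hypothetical, but the cid-idx-pid format matches the code above, so the cell index can still be recovered with split("-")[1]:
import os

def make_msg_id(cid, idx):
    # cid and idx identify the contribution and cell; the pid keeps ids
    # unique across concurrent worker processes.
    return f"{cid}-{idx}-{os.getpid()}"

msg_id = make_msg_id("5f3d", 0)          # e.g. "5f3d-0-4711"
cell_index = int(msg_id.split("-")[1])   # -> 0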
abb28a62ae8bf6ca95e3cb4419abc6a1bf161f5b
|
Remove outdated notes
|
scopus/utils/get_content.py
|
scopus/utils/get_content.py
|
import os
import requests
from scopus import exception
from scopus.utils import config
errors = {400: exception.Scopus400Error, 401: exception.Scopus401Error,
404: exception.Scopus404Error, 429: exception.Scopus429Error,
500: exception.Scopus500Error}
def download(url, params=None, accept="xml"):
"""Helper function to download a file and return its content.
Parameters
----------
url : string
The URL to be parsed.
params : dict (optional)
Dictionary containing query parameters. For required keys
and accepted values see e.g.
https://api.elsevier.com/documentation/AuthorRetrievalAPI.wadl
accept : str (optional, default=xml)
mime type of the file to be downloaded. Accepted values are json,
atom+xml, xml.
Raises
------
ScopusHtmlError
If the status of the response is not ok.
ValueError
If the accept parameter is not one of the accepted values.
Returns
-------
resp : byte-like object
The content of the file, which needs to be serialized.
Notes
-----
Loads the Authentication creditation into scopus namespace on first run.
If there is a config file, which must contain InstToken, it is given
preference. Alternatively it loads the API key from my_scopus.py file.
"""
# Value check
accepted = ("json", "xml", "atom+xml")
if accept.lower() not in accepted:
raise ValueError('accept parameter must be one of ' +
', '.join(accepted))
# Get credentials
key = config.get('Authentication', 'APIKey')
header = {'X-ELS-APIKey': key}
if config.has_option('Authentication', 'InstToken'):
token = config.get('Authentication', 'InstToken')
header.update({'X-ELS-APIKey': key, 'X-ELS-Insttoken': token})
header.update({'Accept': 'application/{}'.format(accept)})
# Perform request
resp = requests.get(url, headers=header, params=params)
# Raise error if necessary
try:
reason = resp.reason.upper() + " for url: " + url
raise errors[resp.status_code](reason)
except KeyError: # Exception not specified in scopus
resp.raise_for_status() # Will pass when everything is ok
return resp
def get_content(qfile, refresh, *args, **kwds):
"""Helper function to read file content as xml. The file is cached
in a subfolder of ~/.scopus/.
Parameters
----------
qfile : string
The name of the file to be created.
refresh : bool
Whether the file content should be refreshed if it exists.
*args, **kwds :
Arguments and keywords to be passed on to download().
Returns
-------
content : str
The content of the file.
"""
if not refresh and os.path.exists(qfile):
with open(qfile, 'rb') as f:
content = f.read()
else:
content = download(*args, **kwds).text.encode('utf-8')
with open(qfile, 'wb') as f:
f.write(content)
return content
|
Python
| 0.000002 |
@@ -381,16 +381,17 @@
ontent.%0A
+%0A
Para
@@ -1098,255 +1098,8 @@
zed.
-%0A%0A Notes%0A -----%0A Loads the Authentication creditation into scopus namespace on first run.%0A If there is a config file, which must contain InstToken, it is given%0A preference. Alternatively it loads the API key from my_scopus.py file.
%0A
|
d7a1b80659995b867c6e0d470a33fce8970ae7c7
|
Clean up Last.fm scraper import lines
|
scrapers/examples/lastfm.py
|
scrapers/examples/lastfm.py
|
scheduler = globals()['scheduler']
def scrape_lastfm():
# Scraper config:
# Enter your own Last.fm username here.
USERNAME = 'trickybeta'
# Get your app's API key from http://www.last.fm/api/accounts.
API_KEY = 'c0ffeecafefacade'
# Tracks to retrieve per page request. Max 200.
# Recommended: 200 for the first run, 10 after that.
# Leaving this at 200 makes Last.fm requests really slow.
PER_PAGE = 200
# Where to store your Last.fm track info.
DB_TABLE = 'lastfm_tracks'
# Scraper body begins here.
import config
import collections
import dataset
import requests
api_url = ('http://ws.audioscrobbler.com/2.0/?method=user.getrecenttracks&'
'user=%s&api_key=%s&format=json&page=%s&limit=%s')
def recent_tracks(user, page, limit):
"""
Get the most recent tracks from `user` using `api_key`.
Start at page `page` and limit results to `limit`.
"""
return requests.get(api_url % (user, API_KEY, page, limit)).json()
def flatten(d, parent_key=''):
"""
From http://stackoverflow.com/a/6027615/254187.
Modified to strip # symbols from dict keys.
"""
items = []
for k, v in d.items():
new_key = parent_key + '_' + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(flatten(v, new_key).items())
else:
# Strip pound symbols from column names
new_key = new_key.replace('#', '')
items.append((new_key, v))
return dict(items)
def process_track(track):
"""
Removes `image` keys from track data.
Replaces empty strings for values with None.
"""
if 'image' in track:
del track['image']
flattened = flatten(track)
for key, val in flattened.items():
if val == '':
flattened[key] = None
return flattened
def add_page_tracks(page, table):
"""
Iterate through all tracks on a page and add each track to the given
table. Skip tracks that already exist.
Returns a dict with metadata:
{
'num_added': The number of tracks added to the table.
'num_skipped': The number of tracks on the page that already
existed in the table and were skipped.
'num_invalid': The number of tracks without a `date_uts` property.
}
"""
num_added = 0
num_skipped = 0
num_invalid = 0
try:
for raw_track in page['recenttracks']['track']:
track = process_track(raw_track)
if 'date_uts' not in track:
num_invalid += 1
elif table.find_one(date_uts=track['date_uts']):
num_skipped += 1
else:
table.insert(track)
num_added += 1
except Exception:
print(page)
return {'num_added': num_added,
'num_skipped': num_skipped,
'num_invalid': num_invalid}
def scrape_page(page_num):
"""
Scrape the page at the given page number and return the results of
adding all tracks from add_page_tracks.
"""
return recent_tracks(USERNAME, page_num, PER_PAGE)
print('Querying Last.fm...')
db = dataset.connect(config.DB_URI)
tracks = db[DB_TABLE]
# We need to get the first page so we can find out how many total pages
# there are in our listening history.
print('Page', 1, 'of <unknown>')
page = scrape_page(0)
total_pages = int(page['recenttracks']['@attr']['totalPages'])
results = add_page_tracks(page, tracks)
tracks_added = results['num_added']
total_tracks_added = tracks_added
tracks_were_added = tracks_added > 0
print('%s track(s) added.' % tracks_added)
for page_num in range(1, total_pages + 1):
while tracks_were_added:
print('Page', page_num + 1, 'of', total_pages)
page = scrape_page(page_num)
results = add_page_tracks(page, tracks)
tracks_added = results['num_added']
total_tracks_added += tracks_added
tracks_were_added = tracks_added > 0
print('%s track(s) added.' % tracks_added)
# Confirm our tracks were inserted into the database
print('Done! %s track(s) added.' % total_tracks_added)
# Last.fm allows 1 request per second.
scheduler.every(1).minutes.do(scrape_lastfm)
|
Python
| 0 |
@@ -565,16 +565,17 @@
config%0A
+%0A
impo
@@ -589,16 +589,17 @@
ections%0A
+%0A
impo
|
b8fd3a256062a5f7740480bb5b844560e3a47f6d
|
Add more log on test_versioning
|
nuxeo-drive-client/nxdrive/tests/test_versioning.py
|
nuxeo-drive-client/nxdrive/tests/test_versioning.py
|
import time
from nxdrive.tests.common import TEST_WORKSPACE_PATH
from nxdrive.tests.common import OS_STAT_MTIME_RESOLUTION
from nxdrive.tests.common_unit_test import UnitTestCase
class TestVersioning(UnitTestCase):
def test_versioning(self):
# Call the Nuxeo operation to set the versioning delay to 30 seconds
self.versioning_delay = OS_STAT_MTIME_RESOLUTION * 30
self.root_remote_client.execute(
"NuxeoDrive.SetVersioningOptions",
delay=str(self.versioning_delay))
local = self.local_client_1
self.engine_1.start()
# Create a file as user 2
self.remote_document_client_2.make_file('/', 'Test versioning.txt', "This is version 0")
self.assertTrue(self.remote_document_client_2.exists('/Test versioning.txt'))
doc = self.root_remote_client.fetch(TEST_WORKSPACE_PATH + '/Test versioning.txt')
self._assert_version(doc, 0, 0)
# Synchronize it for user 1
self.wait_sync(wait_for_async=True)
self.assertTrue(local.exists('/Test versioning.txt'))
# Update it as user 1 => should be versioned
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/Test versioning.txt', "Modified content")
self.wait_sync()
doc = self.root_remote_client.fetch(
TEST_WORKSPACE_PATH + '/Test versioning.txt')
self._assert_version(doc, 0, 1)
# Update it as user 1 => should NOT be versioned
# since the versioning delay is not passed by
time.sleep(OS_STAT_MTIME_RESOLUTION)
local.update_content('/Test versioning.txt', "Content twice modified")
self.wait_sync()
doc = self.root_remote_client.fetch(
TEST_WORKSPACE_PATH + '/Test versioning.txt')
self._assert_version(doc, 0, 1)
# Wait for versioning delay expiration then update it as user 1 after
# => should be versioned since the versioning delay is passed by
time.sleep(self.versioning_delay + 2.0)
local.update_content('/Test versioning.txt', "Updated again!!")
self.wait_sync()
doc = self.root_remote_client.fetch(
TEST_WORKSPACE_PATH + '/Test versioning.txt')
self._assert_version(doc, 0, 2)
def test_version_restore(self):
remote_client = self.remote_document_client_1
local_client = self.local_client_1
self.engine_1.start()
# Create a remote doc
doc = remote_client.make_file(self.workspace, 'Document to restore.txt', content="Initial content.")
self.wait_sync(wait_for_async=True)
self.assertTrue(local_client.exists('/Document to restore.txt'))
self.assertEquals(local_client.get_content('/Document to restore.txt'),
"Initial content.")
# Create version 1.0, update content, then restore version 1.0
remote_client.create_version(doc, 'Major')
remote_client.update_content(doc, "Updated content.")
self.wait_sync(wait_for_async=True)
self.assertEquals(local_client.get_content('/Document to restore.txt'),
"Updated content.")
version_uid = remote_client.get_versions(doc)[0][0]
remote_client.restore_version(version_uid)
self.wait_sync(wait_for_async=True)
self.assertEquals(local_client.get_content('/Document to restore.txt'),
"Initial content.")
def _assert_version(self, doc, major, minor):
self.assertEquals(doc['properties']['uid:major_version'], major)
self.assertEquals(doc['properties']['uid:minor_version'], minor)
|
Python
| 0 |
@@ -173,16 +173,90 @@
estCase%0A
+from nxdrive.logging_config import get_logger%0A%0Alog = get_logger(__name__)%0A
%0A%0Aclass
@@ -2056,46 +2056,206 @@
-time.sleep(self.versioning_delay + 2.0
+log.debug(%22wait for %25d to end the versioning grace%22, (self.versioning_delay + 2.0))%0A time.sleep(self.versioning_delay + 2.0)%0A log.debug(%22will now update content of Test versioning.txt%22
)%0A
|
a1f11605660836c87c47c3f272b4f8471f831eb8
|
e.message is depracted
|
mopidy/frontends/mpd/dispatcher.py
|
mopidy/frontends/mpd/dispatcher.py
|
import logging
import re
from pykka import ActorDeadError
from pykka.registry import ActorRegistry
from mopidy import settings
from mopidy.backends.base import Backend
from mopidy.frontends.mpd import exceptions
from mopidy.frontends.mpd.protocol import mpd_commands, request_handlers
# Do not remove the following import. The protocol modules must be imported to
# get them registered as request handlers.
# pylint: disable = W0611
from mopidy.frontends.mpd.protocol import (audio_output, command_list,
connection, current_playlist, empty, music_db, playback, reflection,
status, stickers, stored_playlists)
# pylint: enable = W0611
from mopidy.mixers.base import BaseMixer
from mopidy.utils import flatten
logger = logging.getLogger('mopidy.frontends.mpd.dispatcher')
class MpdDispatcher(object):
"""
The MPD session feeds the MPD dispatcher with requests. The dispatcher
finds the correct handler, processes the request and sends the response
back to the MPD session.
"""
def __init__(self, session=None):
self.authenticated = False
self.command_list = False
self.command_list_ok = False
self.command_list_index = None
self.context = MpdContext(self, session=session)
def handle_request(self, request, current_command_list_index=None):
"""Dispatch incoming requests to the correct handler."""
self.command_list_index = current_command_list_index
response = []
filter_chain = [
self._catch_mpd_ack_errors_filter,
self._authenticate_filter,
self._command_list_filter,
self._add_ok_filter,
self._call_handler_filter,
]
return self._call_next_filter(request, response, filter_chain)
def _call_next_filter(self, request, response, filter_chain):
if filter_chain:
next_filter = filter_chain.pop(0)
return next_filter(request, response, filter_chain)
else:
return response
### Filter: catch MPD ACK errors
def _catch_mpd_ack_errors_filter(self, request, response, filter_chain):
try:
return self._call_next_filter(request, response, filter_chain)
except exceptions.MpdAckError as mpd_ack_error:
if self.command_list_index is not None:
mpd_ack_error.index = self.command_list_index
return [mpd_ack_error.get_mpd_ack()]
### Filter: authenticate
def _authenticate_filter(self, request, response, filter_chain):
if self.authenticated:
return self._call_next_filter(request, response, filter_chain)
elif settings.MPD_SERVER_PASSWORD is None:
self.authenticated = True
return self._call_next_filter(request, response, filter_chain)
else:
command_name = request.split(' ')[0]
command_names_not_requiring_auth = [
command.name for command in mpd_commands
if not command.auth_required]
if command_name in command_names_not_requiring_auth:
return self._call_next_filter(request, response, filter_chain)
else:
raise exceptions.MpdPermissionError(command=command_name)
### Filter: command list
def _command_list_filter(self, request, response, filter_chain):
if self._is_receiving_command_list(request):
self.command_list.append(request)
return []
else:
response = self._call_next_filter(request, response, filter_chain)
if (self._is_receiving_command_list(request) or
self._is_processing_command_list(request)):
if response and response[-1] == u'OK':
response = response[:-1]
return response
def _is_receiving_command_list(self, request):
return (self.command_list is not False
and request != u'command_list_end')
def _is_processing_command_list(self, request):
return (self.command_list_index is not None
and request != u'command_list_end')
### Filter: add OK
def _add_ok_filter(self, request, response, filter_chain):
response = self._call_next_filter(request, response, filter_chain)
if not self._has_error(response):
response.append(u'OK')
return response
def _has_error(self, response):
return response and response[-1].startswith(u'ACK')
### Filter: call handler
def _call_handler_filter(self, request, response, filter_chain):
try:
response = self._format_response(self._call_handler(request))
return self._call_next_filter(request, response, filter_chain)
except ActorDeadError as e:
logger.warning(u'Tried to communicate with dead actor.')
raise exceptions.MpdSystemError(e.message)
def _call_handler(self, request):
(handler, kwargs) = self._find_handler(request)
return handler(self.context, **kwargs)
def _find_handler(self, request):
for pattern in request_handlers:
matches = re.match(pattern, request)
if matches is not None:
return (request_handlers[pattern], matches.groupdict())
command_name = request.split(' ')[0]
if command_name in [command.name for command in mpd_commands]:
raise exceptions.MpdArgError(u'incorrect arguments',
command=command_name)
raise exceptions.MpdUnknownCommand(command=command_name)
def _format_response(self, response):
formatted_response = []
for element in self._listify_result(response):
formatted_response.extend(self._format_lines(element))
return formatted_response
def _listify_result(self, result):
if result is None:
return []
if isinstance(result, set):
return flatten(list(result))
if not isinstance(result, list):
return [result]
return flatten(result)
def _format_lines(self, line):
if isinstance(line, dict):
return [u'%s: %s' % (key, value) for (key, value) in line.items()]
if isinstance(line, tuple):
(key, value) = line
return [u'%s: %s' % (key, value)]
return [line]
class MpdContext(object):
"""
This object is passed as the first argument to all MPD command handlers to
give the command handlers access to important parts of Mopidy.
"""
#: The current :class:`MpdDispatcher`.
dispatcher = None
#: The current :class:`mopidy.frontends.mpd.session.MpdSession`.
session = None
def __init__(self, dispatcher, session=None):
self.dispatcher = dispatcher
self.session = session
self._backend = None
self._mixer = None
@property
def backend(self):
"""
The backend. An instance of :class:`mopidy.backends.base.Backend`.
"""
if self._backend is not None:
return self._backend
backend_refs = ActorRegistry.get_by_class(Backend)
assert len(backend_refs) == 1, 'Expected exactly one running backend.'
self._backend = backend_refs[0].proxy()
return self._backend
@property
def mixer(self):
"""
The mixer. An instance of :class:`mopidy.mixers.base.BaseMixer`.
"""
if self._mixer is not None:
return self._mixer
mixer_refs = ActorRegistry.get_by_class(BaseMixer)
assert len(mixer_refs) == 1, 'Expected exactly one running mixer.'
self._mixer = mixer_refs[0].proxy()
return self._mixer
|
Python
| 0.999054 |
@@ -4902,16 +4902,8 @@
or(e
-.message
)%0A%0A
|
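The final diff drops the exception's .message attribute, which was deprecated in Python 2.6 and no longer exists in Python 3; formatting the exception object itself is the portable way to get the error text. A short illustration of the portable pattern, with names that are illustrative only:
try:
    raise RuntimeError('actor is dead')
except RuntimeError as error:
    # str(error) works on both Python 2 and 3, unlike error.message.
    detail = str(error)
    print('Tried to communicate with dead actor: %s' % detail)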