function (string, lengths 11-56k) | repo_name (string, lengths 5-60) | features (sequence) |
---|---|---|
def _get_qos_queue(self, context, id):
try:
query = self._model_query(context, QosQueue)
qos_queue = query.filter(QosQueue.id == id).one()
except exc.NoResultFound:
raise ext_qos.QosQueueNotFound(id=id)
return qos_queue | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _aggregate_rate_of_qos_queue(self, qos_queue):
if qos_queue.subqueues:
return reduce(
lambda x, y: x + y, [q.rate for q in qos_queue.subqueues])
else:
return 0 | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _check_queue_in_qos(self, qos_id, qos_queue):
if qos_id != qos_queue.qos_id:
raise ext_qos.QosQueueNotInQos(
qos_id=qos_id,
qos_queue_id=qos_queue.qos_id
) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def allocate_tc_class_for_queue(context, qos_queue):
if qos_queue.tc_class:
return
tc_class = None
with context.session.begin(subtransactions=True):
try:
if qos_queue.id == qos_queue.qos.default_queue_id:
tc_class = 65534
else:
tc_class = QosDb._try_allocate_new_tc_class(
context, qos_queue.qos)
except ext_qos.AllocateTCClassFailure:
QosDb._rebuild_tc_class_range(context, qos_queue.qos)
if not tc_class:
tc_class = QosDb._try_allocate_new_tc_class(
context, qos_queue.qos)
qos_queue.update({'tc_class': tc_class}) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _try_allocate_new_tc_class(context, qos):
select_range = context.session.query(
QosTcClassRange).join(Qos).with_lockmode('update').first()
if select_range:
new_tc_class = select_range['first']
if select_range['first'] == select_range['last']:
LOG.debug("No more free tc class id in this slice, deleting "
"range.")
context.session.delete(select_range)
else:
# Increase the first class id in this range
select_range['first'] = new_tc_class + 1
return new_tc_class
raise ext_qos.AllocateTCClassFailure(qos_id=qos.id) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _rebuild_tc_class_range(context, qos):
LOG.debug("Rebuilding tc class range for qos: %s", qos.id)
used_classes = sorted(
[1, 65534] +
[queue.tc_class for queue in qos.queues
if queue.tc_class is not None])
for index in range(len(used_classes) - 1):
if used_classes[index] + 1 < used_classes[index + 1]:
tc_class_range = QosTcClassRange(
qos_id=qos.id,
first=used_classes[index] + 1,
last=used_classes[index+1] - 1)
context.session.add(tc_class_range) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
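The gap computation in `_rebuild_tc_class_range` can be checked in isolation. A minimal sketch under the assumption of plain integer class ids rather than DB objects; the helper name is hypothetical:

```python
# Free tc class ids are the gaps between consecutive used ids, with 1 and 65534
# always treated as used (the root class and the default queue class above).
def free_tc_class_ranges(used_tc_classes):
    used = sorted([1, 65534] + list(used_tc_classes))
    ranges = []
    for index in range(len(used) - 1):
        if used[index] + 1 < used[index + 1]:
            ranges.append((used[index] + 1, used[index + 1] - 1))
    return ranges

# free_tc_class_ranges([2, 5]) -> [(3, 4), (6, 65533)]
```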
def create_qos_queue_bulk(self, context, qos_queue):
return self._create_bulk('qos_queue', context, qos_queue) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def update_qos_queue(self, context, id, qos_queue):
qos_queue = qos_queue['qos_queue']
with context.session.begin(subtransactions=True):
qos_queue_db = self._get_qos_queue(context, id)
if id == qos_queue_db.qos.default_queue_id:
raise ext_qos.QosQueueCannotEditDefault(
qos_id=qos_queue_db.qos_id,
qos_queue_id=id)
new_rate = qos_queue.get('rate', qos_queue_db.rate)
rate_delta = new_rate - qos_queue_db.rate
if rate_delta:
if qos_queue_db.subqueues:
# Check that the new rate can afford its subqueues' needs
self._check_qos_queue_rate(qos_queue_db, 0, new_rate)
if qos_queue_db.parent_queue:
# Check parent queue can afford the delta
self._check_qos_queue_rate(qos_queue_db.parent_queue,
rate_delta)
else:
# Check parent qos can afford the delta
self._check_qos_rate(qos_queue_db.qos, rate_delta)
new_ceil = qos_queue.get('ceil', qos_queue_db.ceil)
# New ceil should not exceed its parent's ceil
if qos_queue_db.parent_queue:
new_ceil = min(new_ceil, qos_queue_db.parent_queue.ceil)
else:
new_ceil = min(new_ceil, qos_queue_db.qos.rate)
if new_ceil < qos_queue_db.ceil:
# Ceil changed to a smaller value
self._set_ceil_for_queues(new_ceil, qos_queue_db.subqueues)
qos_queue_db.update(qos_queue)
return self._make_qos_queue_dict(qos_queue_db) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def get_qos_queues(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker)
return self._get_collection(
context, QosQueue, self._make_qos_queue_dict,
filters=filters, fields=fields, sorts=sorts,
limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def get_qos_queue(self, context, id, fields=None):
qos_queue = self._get_qos_queue(context, id)
return self._make_qos_queue_dict(qos_queue, fields) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _make_qos_filter_dict(self, qos_filter, fields=None):
res = {'id': qos_filter.id,
'tenant_id': qos_filter.tenant_id,
'qos_id': qos_filter.qos_id,
'queue_id': qos_filter.queue_id,
'prio': qos_filter.prio,
'protocol': qos_filter.protocol,
'src_port': qos_filter.src_port,
'dst_port': qos_filter.dst_port,
'src_addr': qos_filter.src_addr,
'dst_addr': qos_filter.dst_addr}
if qos_filter.custom_match is not None:
res.update({'custom_match': qos_filter.custom_match})
return self._fields(res, fields) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def create_qos_filter_bulk(self, context, qos_filter):
return self._create_bulk('qos_filter', context, qos_filter) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def update_qos_filter(self, context, id, qos_filter):
qos_filter = qos_filter['qos_filter']
new_prio = qos_filter.get('prio', None)
with context.session.begin(subtransactions=True):
qos_filter_db = self._get_qos_filter(context, id)
if qos_filter.get('queue_id', None) is not None:
qos_queue_db = self._get_qos_queue(context,
qos_filter['queue_id'])
self._check_queue_in_qos(qos_filter_db.qos_id, qos_queue_db)
if qos_queue_db.subqueues:
raise ext_qos.QosQueueHasSub(qos_queue_id=qos_queue_db.id)
if new_prio is not None and new_prio != qos_filter_db.prio:
if self._same_prio_filter_in_qos(qos_filter_db.qos, new_prio):
raise ext_qos.QosDuplicateFilterPrioValue(prio=new_prio)
qos_filter_db.update(qos_filter)
return self._make_qos_filter_dict(qos_filter_db) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def get_qos_filters(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = self._get_marker_obj(context, 'qos_filter', limit, marker)
return self._get_collection(
context, QosFilter, self._make_qos_filter_dict,
filters=filters, fields=fields, sorts=sorts,
limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def get_qos_filter(self, context, id, fields=None):
qos_filter = self._get_qos_filter(context, id)
return self._make_qos_filter_dict(qos_filter, fields) | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _is_owner_floatingip(device_owner):
return device_owner == n_constants.DEVICE_OWNER_FLOATINGIP | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _make_qos_filter_dict_for_agent(self, qos_filter):
qos_filter_dict = {'prio': qos_filter.prio,
'src_addr': qos_filter.src_addr,
'dst_addr': qos_filter.dst_addr}
if qos_filter.protocol:
qos_filter_dict['protocol'] = qos_filter.protocol
if qos_filter.src_port:
qos_filter_dict['src_port'] = qos_filter.src_port
if qos_filter.dst_port:
qos_filter_dict['dst_port'] = qos_filter.dst_port
return qos_filter_dict | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def _get_qos_conf_scheme(self, context, qos):
root_class = {'rate': qos.rate, 'ceil': qos.rate, 'subclasses': [],
'prio': 0}
if qos.burst:
root_class['burst'] = qos.burst
if qos.cburst:
root_class['cburst'] = qos.cburst
scheme = {}
effective_queues = {}
for queue in qos.queues:
if queue.attached_filters or queue.id == qos.default_queue_id:
_queue = queue
while _queue is not None:
try:
self.allocate_tc_class_for_queue(context, _queue)
except ext_qos.AllocateTCClassFailure:
LOG.warn("Failed to allocate tc class for queue %s.",
_queue.id)
# Don't apply any qos scheme for this qos because the
# number of queues exceeds what Linux supports (65535).
# However, this should RARELY happen.
return None
if _queue.id not in effective_queues:
effective_queues[_queue.id] = _queue
_queue = _queue.parent_queue
else:
# Current queue and its ancestors are all recorded.
_queue = None
for queue in effective_queues.values():
scheme[queue.tc_class] = self._make_qos_queue_dict_for_agent(queue)
if queue.parent_queue is None:
root_class['subclasses'].append(queue.tc_class)
# Add root class
scheme[1] = root_class
return scheme | eayunstack/neutron-qos | [
2,
4,
2,
1,
1431059236
] |
def parallelwrite(slicenumber): | openconnectome/open-connectome | [
39,
12,
39,
99,
1302639682
] |
def run():
flypool = multiprocessing.Pool(totalprocs)
flypool.map(parallelwrite, totalslices, 16) | openconnectome/open-connectome | [
39,
12,
39,
99,
1302639682
] |
def compute(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def test_save_to_tmp_dir(data_adv):
pytest.importorskip('zarr')
cube = DaskSpectralCube.read(data_adv)
cube_new = cube.sigma_clip_spectrally(3, save_to_tmp_dir=True)
# The following test won't necessarily always work in the future since the name
# is not really guaranteed, but this is pragmatic enough for now
assert cube_new._data.name.startswith('from-zarr') | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def test_statistics(data_adv):
cube = DaskSpectralCube.read(data_adv).rechunk(chunks=(1, 2, 3))
stats = cube.statistics()
assert_quantity_allclose(stats['npts'], 24)
assert_quantity_allclose(stats['mean'], 0.4941651776136591 * u.K)
assert_quantity_allclose(stats['sigma'], 0.3021908870982011 * u.K)
assert_quantity_allclose(stats['sum'], 11.85996426272782 * u.K)
assert_quantity_allclose(stats['sumsq'], 7.961125988022091 * u.K ** 2)
assert_quantity_allclose(stats['min'], 0.0363300285196364 * u.K)
assert_quantity_allclose(stats['max'], 0.9662900439556562 * u.K)
assert_quantity_allclose(stats['rms'], 0.5759458158839716 * u.K) | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def test_statistics_consistency_casa(data_adv, tmp_path):
# Similar to test_statistics but compares to CASA directly.
cube = DaskSpectralCube.read(data_adv)
stats = cube.statistics()
make_casa_testimage(data_adv, tmp_path / 'casa.image')
ia = casatools.image()
ia.open(str(tmp_path / 'casa.image'))
stats_casa = ia.statistics()
ia.close()
for key in stats:
if isinstance(stats[key], u.Quantity):
value = stats[key].value
else:
value = stats[key]
assert_allclose(value, stats_casa[key]) | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def sum_blocks_spectral(data_chunk):
return data_chunk.sum(0) | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def test_apply_function_parallel_spectral_noncube_withblockinfo(data_adv):
'''
Test receiving block_info information from da.map_blocks so we can place
the chunk's location in the whole cube when needed.
https://docs.dask.org/en/latest/array-api.html#dask.array.map_blocks
'''
chunk_size = (-1, 1, 2)
cube = DaskSpectralCube.read(data_adv).rechunk(chunks=chunk_size)
sum_spectral_plane = cube.sum(axis=0).unitless_filled_data[:]
# Each value should be different. This is important to check that the right positions
# are being used for the check in sum_blocks_spectral
assert np.unique(sum_spectral_plane).size == sum_spectral_plane.size
def sum_blocks_spectral(data_chunk, block_info=None, comparison_array=None):
chunk_sum = data_chunk.sum(0)
# When the block_info kwarg is defined, it should not be None
assert block_info is not None
# Check the block location compared to `comparison_array`
# Get the lower corner location in the whole cube.
loc = [block_range[0] for block_range in block_info[0]['array-location']]
# Should have 3 dimensions for the corner.
assert len(loc) == 3
# Slice comparison array to compare with this data chunk
thisslice = (slice(loc[1], loc[1] + chunk_sum.shape[0]),
slice(loc[2], loc[2] + chunk_sum.shape[1]),)
return chunk_sum == comparison_array[thisslice]
# Tell dask.map_blocks that we expect the zeroth axis to be (1,)
output_chunk_size = (1, 2)
test = cube.apply_function_parallel_spectral(sum_blocks_spectral,
return_new_cube=False,
accepts_chunks=True,
drop_axis=[0], # The output will no longer contain the spectral axis
chunks=output_chunk_size,
comparison_array=sum_spectral_plane) # Passed to `sum_blocks_spectral`
# The total shape of test should be the (1,) + cube.shape[1:]
assert test.shape == cube.shape[1:]
# Test all True
assert np.all(test.compute()) | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def test_apply_function_parallel_shape(accepts_chunks):
# regression test for #772
def func(x, add=None):
if add is not None:
y = x + add
else:
raise ValueError("This test is supposed to have add=1")
return y
fn = data.get_pkg_data_filename('tests/data/example_cube.fits', 'spectral_cube')
cube = SpectralCube.read(fn, use_dask=True)
cube2 = SpectralCube.read(fn, use_dask=False)
# Check dask w/both threaded and unthreaded
rslt3 = cube.apply_function_parallel_spectral(func, add=1,
accepts_chunks=accepts_chunks)
with cube.use_dask_scheduler('threads', num_workers=4):
rslt = cube.apply_function_parallel_spectral(func, add=1,
accepts_chunks=accepts_chunks)
rslt2 = cube2.apply_function_parallel_spectral(func, add=1)
np.testing.assert_almost_equal(cube.filled_data[:].value,
cube2.filled_data[:].value)
np.testing.assert_almost_equal(rslt.filled_data[:].value,
rslt2.filled_data[:].value)
np.testing.assert_almost_equal(rslt.filled_data[:].value,
rslt3.filled_data[:].value) | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def test_cube_on_cube(filename, request):
if 'image' in filename and not CASA_INSTALLED:
pytest.skip('Requires CASA to be installed')
dataname = request.getfixturevalue(filename)
# regression test for #782
# the regression applies only to VaryingResolutionSpectralCubes
# since they are not SpectralCube subclasses
cube = DaskSpectralCube.read(dataname)
assert isinstance(cube, (DaskSpectralCube, DaskVaryingResolutionSpectralCube))
cube2 = SpectralCube.read(dataname, use_dask=False)
if 'image' not in filename:
# 'image' would be CASA and must be dask
assert not isinstance(cube2, (DaskSpectralCube, DaskVaryingResolutionSpectralCube))
with patch.object(cube, '_cube_on_cube_operation') as mock:
cube * cube
mock.assert_called_once()
with patch.object(cube, '_cube_on_cube_operation') as mock:
cube * cube2
mock.assert_called_once()
with patch.object(cube2, '_cube_on_cube_operation') as mock:
cube2 * cube
mock.assert_called_once() | radio-astro-tools/spectral-cube | [
84,
57,
84,
183,
1397489630
] |
def setUp(self):
self.main_app = MainApp() | soar-telescope/goodman | [
13,
11,
13,
54,
1468010085
] |
def __init__(self, env_var_base):
self.ENV_VAR_BASE = env_var_base | femtotrader/ig-markets-stream-api-python-library | [
255,
177,
255,
13,
1420221470
] |
def get(self, key, default_value=None):
env_var = self._env_var(key)
return os.environ.get(env_var, default_value) | femtotrader/ig-markets-stream-api-python-library | [
255,
177,
255,
13,
1420221470
] |
def __unicode__(self):
return u"%s: %s" % (self.team.name, self.name) | F483/bikesurf.org | [
8,
5,
8,
60,
1413752765
] |
def main(dataset, blockl, local_dir=None, clobber=True):
"""
Split dataset into time chunks
Parameters
----------
dataset : str
Name of MS file to split
blockl : int
Number of time slots per chunk
local_dir : str, optional
Path to local directory for output of t1.copy(). The file is then
copied to the original output directory
clobber : bool, optional
If True, existing files are overwritten
"""
if type(clobber) is str:
if clobber.lower() == 'true':
clobber = True
else:
clobber = False
blockl = int(blockl)
if blockl < 1:
blockl = 1
# Get time per sample and number of samples
t = pt.table(dataset, readonly=True, ack=False)
for t2 in t.iter(["ANTENNA1","ANTENNA2"]):
if (t2.getcell('ANTENNA1',0)) < (t2.getcell('ANTENNA2',0)):
timepersample = t2[1]['TIME']-t2[0]['TIME'] # sec
nsamples = t2.nrows()
break
t.close()
nchunks = int(np.ceil((np.float(nsamples) / np.float(blockl))))
# Don't allow more than 15 chunks for performance reasons
while nchunks > 15:
blockl *= 2
nchunks = int(np.ceil((np.float(nsamples) / np.float(blockl))))
tlen = timepersample * np.float(blockl) / 3600.0 # length of block in hours
tobs = timepersample * nsamples / 3600.0 # length of obs in hours
# Copy to local directory if needed
dataset_original = dataset
if local_dir is not None:
dataset = os.path.join(local_dir, os.path.basename(dataset_original))
os.system('/usr/bin/rsync -a {0} {1}'.format(dataset_original, local_dir))
files = []
for c in range(nchunks):
chunk_file = '{0}_chunk{1}.ms'.format(os.path.splitext(dataset_original)[0], c)
files.append(chunk_file)
t0 = tlen * np.float(c) # hours
t1 = t0 + tlen # hours
if c == 0:
t0 = -0.1 # make sure first chunk gets first slot
if c == nchunks-1 and t1 < tobs:
t1 = tobs + 0.1 # make sure last chunk gets all that remains
split_ms(dataset, chunk_file, t0, t1, local_dir, clobber=clobber)
if local_dir is not None and not os.path.samefile(dataset, dataset_original):
shutil.rmtree(dataset)
return {'files': '[{0}]'.format(','.join(files))} | revoltek/factor | [
20,
12,
20,
24,
1409837156
] |
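The chunk-boundary arithmetic in `main` can be exercised on its own. A small sketch, assuming only the sample count, block length and time per sample are known (hypothetical helper, not part of factor):

```python
import numpy as np

def chunk_bounds(nsamples, blockl, timepersample, max_chunks=15):
    """Return (t0, t1) pairs in hours, mirroring the chunking loop in main()."""
    nchunks = int(np.ceil(float(nsamples) / float(blockl)))
    while nchunks > max_chunks:          # same cap as above, for performance reasons
        blockl *= 2
        nchunks = int(np.ceil(float(nsamples) / float(blockl)))
    tlen = timepersample * float(blockl) / 3600.0   # block length in hours
    tobs = timepersample * nsamples / 3600.0        # observation length in hours
    bounds = []
    for c in range(nchunks):
        t0 = tlen * float(c)
        t1 = t0 + tlen
        if c == 0:
            t0 = -0.1                    # make sure the first chunk gets the first slot
        if c == nchunks - 1 and t1 < tobs:
            t1 = tobs + 0.1              # make sure the last chunk gets all that remains
        bounds.append((t0, t1))
    return bounds
```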
def test_0_new_account(self):
'''create new account * 5000'''
account_table_file = '/home/%s/.metaverse/mainnet/account_table' % common.get_username()
origin_payload_size = database.get_payload_size(account_table_file)
batch_amount = 5000
lastwords = []
for i in range(batch_amount):
ec, message = mvs_rpc.new_account("Account_%s" % i, "123456")
self.assertEqual(ec, 0, message)
lastwords.append( message[-1] )
try:
current_payload_size = database.get_payload_size(account_table_file)
# each simple account record size is < 300, but when getting a new address the account record will be created twice, so 600 is a reasonable record size.
self.assertGreater(600 * batch_amount, current_payload_size - origin_payload_size, "each account record size shall be less than 600.")
finally:
for i in range(batch_amount):
ec, message = mvs_rpc.delete_account("Account_%s" % i, "123456", lastwords[i])
self.assertEqual(ec, 0, message) | mvs-live/metaverse | [
307,
120,
307,
61,
1477813012
] |
def __init__(self, _id, dp_id, conf):
self.rules = []
self.exact_match = None
self.dot1x_assigned = None
self.meter = False
self.matches = {}
self.set_fields = set()
self._ports_resolved = False
# Tunnel info maintains the tunnel output information for each tunnel rule
self.tunnel_dests = {}
# Tunnel sources is a list of the sources in the network for this ACL
self.tunnel_sources = {}
# Tunnel rules is the rules for each tunnel in the ACL for each source
self.dyn_tunnel_rules = {}
self.dyn_reverse_tunnel_rules = {}
for match_fields in (MATCH_FIELDS, OLD_MATCH_FIELDS):
self.rule_types.update({match: (str, int) for match in match_fields})
conf = copy.deepcopy(conf)
if isinstance(conf, dict):
rules = conf.get('rules', [])
elif isinstance(conf, list):
rules = conf
conf = {}
else:
raise InvalidConfigError(
'ACL conf is an invalid type %s' % _id)
conf['rules'] = []
for rule in rules:
normalized_rule = rule
if isinstance(rule, dict):
normalized_rule = rule.get('rule', rule)
if normalized_rule is None:
normalized_rule = {k: v for k, v in rule.items() if v is not None}
test_config_condition(not isinstance(normalized_rule, dict), (
'ACL rule is %s not %s (%s)' % (type(normalized_rule), dict, rules)))
conf['rules'].append(normalized_rule)
super().__init__(_id, dp_id, conf) | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def check_config(self):
test_config_condition(
not self.rules, 'no rules found for ACL %s' % self._id)
for rule in self.rules:
self._check_conf_types(rule, self.rule_types)
for rule_field, rule_conf in rule.items():
if rule_field == 'cookie':
test_config_condition(
rule_conf < 0 or rule_conf > 2**16,
'rule cookie value must be 0-2**16')
elif rule_field == 'actions':
test_config_condition(
not rule_conf,
'Missing rule actions in ACL %s' % self._id)
self._check_conf_types(rule_conf, self.actions_types)
for action_name, action_conf in rule_conf.items():
if action_name == 'output':
if isinstance(action_conf, (list, tuple)):
# New ordered format
for subconf in action_conf:
# Make sure only one specified action per list element
test_config_condition(
len(subconf) > 1,
'ACL ordered output must have only one action per element')
# Ensure correct action format
self._check_conf_types(subconf, self.output_actions_types)
else:
# Old format
self._check_conf_types(
action_conf, self.output_actions_types)
elif action_name == 'ct':
self._check_conf_types(action_conf, self.ct_action_types)
# if clear set, make sure nothing else is
if 'clear' in action_conf and action_conf['clear']:
test_config_condition(
len(action_conf) != 1,
"no other parameters can be set when 'clear' set on "
"conntrack ACL")
else:
test_config_condition(
'table' not in action_conf,
"required parameter 'table' not set for conntrack ACL")
test_config_condition(
'zone' not in action_conf,
"required parameter 'zone' not set for conntrack ACL")
if 'nat' in action_conf:
self._check_conf_types(action_conf['nat'], self.ct_action_nat_types) | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def get_meters(self):
"""Yield meters for each rule in ACL"""
for rule in self.rules:
if 'actions' not in rule or 'meter' not in rule['actions']:
continue
yield rule['actions']['meter'] | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def _resolve_ordered_output_ports(self, output_list, resolve_port_cb, resolve_tunnel_objects):
"""Resolve output actions in the ordered list format"""
result = []
for action in output_list:
for key, value in action.items():
if key == 'tunnel':
tunnel = value
# Fetch tunnel items from the tunnel output dict
test_config_condition(
'dp' not in tunnel,
'ACL (%s) tunnel DP not defined' % self._id)
tunnel_dp = tunnel['dp']
tunnel_port = tunnel.get('port', None)
tunnel_id = tunnel.get('tunnel_id', None)
tunnel_type = tunnel.get('type', 'vlan')
tunnel_exit_instructions = tunnel.get('exit_instructions', [])
tunnel_direction = tunnel.get('bi_directional', False)
tunnel_maintain = tunnel.get('maintain_encapsulation', False)
tunnel_reverse = tunnel.get('reverse', False)
test_config_condition(
tunnel_reverse and tunnel_direction,
('Tunnel ACL %s cannot contain values for the fields'
'`bi_directional` and `reverse` at the same time' % self._id))
# Resolve the tunnel items
dst_dp, dst_port, tunnel_id = resolve_tunnel_objects(
tunnel_dp, tunnel_port, tunnel_id)
# Compile the tunnel into an easy-access dictionary
tunnel_dict = {
'dst_dp': dst_dp,
'dst_port': dst_port,
'tunnel_id': tunnel_id,
'type': tunnel_type,
'exit_instructions': tunnel_exit_instructions,
'bi_directional': tunnel_direction,
'maintain_encapsulation': tunnel_maintain,
'reverse': tunnel_reverse,
}
self.tunnel_dests[tunnel_id] = tunnel_dict
result.append({key: tunnel_id})
elif key == 'port':
port_name = value
port = resolve_port_cb(port_name)
test_config_condition(
not port,
'ACL (%s) output port undefined in DP: %s' % (self._id, self.dp_id))
result.append({key: port})
elif key == 'ports':
resolved_ports = [
resolve_port_cb(p) for p in value]
test_config_condition(
None in resolved_ports,
'ACL (%s) output port(s) not defined in DP: %s' % (self._id, self.dp_id))
result.append({key: resolved_ports})
elif key == 'failover':
failover = value
test_config_condition(not isinstance(failover, dict), (
'failover is not a dictionary'))
failover_dict = {}
for failover_name, failover_values in failover.items():
if failover_name == 'ports':
resolved_ports = [
resolve_port_cb(p) for p in failover_values]
test_config_condition(
None in resolved_ports,
'ACL (%s) failover port(s) not defined in DP: %s' % (
self._id, self.dp_id))
failover_dict[failover_name] = resolved_ports
else:
failover_dict[failover_name] = failover_values
result.append({key: failover_dict})
else:
result.append(action)
return result | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def resolve_ports(self, resolve_port_cb, resolve_tunnel_objects):
"""Resolve the values for the actions of an ACL"""
if self._ports_resolved:
return
for rule_conf in self.rules:
if 'actions' in rule_conf:
actions_conf = rule_conf['actions']
resolved_actions = {}
test_config_condition(not isinstance(actions_conf, dict), (
'actions value is not a dictionary'))
for action_name, action_conf in actions_conf.items():
if action_name == 'mirror':
resolved_port = resolve_port_cb(action_conf)
test_config_condition(
resolved_port is None,
('ACL (%s) mirror port is not defined in DP: %s'
% (self._id, self.dp_id))
)
resolved_actions[action_name] = resolved_port
elif action_name == 'output':
resolved_action = self._resolve_output_ports(
action_conf, resolve_port_cb, resolve_tunnel_objects)
resolved_actions[action_name] = resolved_action
else:
resolved_actions[action_name] = action_conf
rule_conf['actions'] = resolved_actions
self._ports_resolved = True | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def get_num_tunnels(self):
"""Returns the number of tunnels specified in the ACL"""
num_tunnels = 0
for rule_conf in self.rules:
if self.does_rule_contain_tunnel(rule_conf):
output_conf = rule_conf['actions']['output']
if isinstance(output_conf, list):
for action in output_conf:
for key in action:
if key == 'tunnel':
num_tunnels += 1
else:
if 'tunnel' in output_conf:
num_tunnels += 1
return num_tunnels | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def does_rule_contain_tunnel(rule_conf):
"""Return true if the ACL rule contains a tunnel"""
if 'actions' in rule_conf:
if 'output' in rule_conf['actions']:
output_conf = rule_conf['actions']['output']
if isinstance(output_conf, (list, tuple)):
for action in output_conf:
for key in action:
if key == 'tunnel':
return True
else:
if 'tunnel' in output_conf:
return True
return False | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def _tunnel_source_id(source):
"""Return ID for a tunnel source."""
return tuple(sorted(source.items())) | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def verify_tunnel_rules(self):
"""Make sure that matches & set fields are configured correctly to handle tunnels"""
if 'eth_type' not in self.matches:
self.matches['eth_type'] = False
if 'in_port' not in self.matches:
self.matches['in_port'] = False
if 'vlan_vid' not in self.matches:
self.matches['vlan_vid'] = False
if 'vlan_vid' not in self.set_fields:
self.set_fields.add('vlan_vid')
if 'vlan_pcp' not in self.matches:
self.matches['vlan_pcp'] = False
if 'vlan_pcp' not in self.set_fields:
self.set_fields.add('vlan_pcp') | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def update_source_tunnel_rules(self, curr_dp, source_id, tunnel_id, out_port, output_table):
"""Update the tunnel rulelist for when the output port has changed"""
src_dp = self.tunnel_sources[source_id]['dp']
dst_dp = self.tunnel_dests[tunnel_id]['dst_dp']
prev_list = self.dyn_tunnel_rules[tunnel_id].get(source_id, [])
new_list = []
pcp_flag = valve_of.PCP_TUNNEL_FLAG
if self.tunnel_dests[tunnel_id]['reverse']:
pcp_flag = valve_of.PCP_TUNNEL_REVERSE_DIRECTION_FLAG
if curr_dp == src_dp and curr_dp != dst_dp:
# SRC DP: in_port, actions=[push_vlan, output, pop_vlans]
# Ideally, we would be able to detect if the tunnel has an `allow` action clause.
# However, this is difficult as a single ACL can have multiple rules using the same
# tunnel, but with one instance requiring the `allow` clause and another, not.
# This means it is easier to always append the `pop_vlans` on the assumption that the
# `allow` action does exist, and then optimize/reduce the redundant rules before
# outputting the flowrule.
# We also set the tunnel VLAN header with a PCP value indicating that we are in
# the tunnel, which will save the VLANs from being reserved.
new_list = [
{'vlan_vids': [{'vid': tunnel_id, 'eth_type': ether.ETH_TYPE_8021Q}]},
{'set_fields': [{'vlan_pcp': pcp_flag}]},
{'port': out_port},
{'pop_vlans': 1}]
elif curr_dp == dst_dp and curr_dp != src_dp:
# DST DP: in_port, vlan_vid, actions=[pop_vlan, additional_instructions, output]
# If exit_instructions are applied, then we want to pop off the tunnel
# VLAN header, then apply the additional instructions, then output
if self.tunnel_dests[tunnel_id]['maintain_encapsulation']:
# We wish to maintain tunnel encapsulation before outputting
# So do not add the pop_vlans rule
new_list = []
else:
new_list = [{'pop_vlans': 1}]
exit_instructions = self.tunnel_dests[tunnel_id].get('exit_instructions', [])
new_list.extend(copy.copy(list(exit_instructions)))
if out_port is None:
# DP dest tunnel, so we fall through into the eth_dst output table
new_list.append({'goto': output_table.table_id})
else:
# Tunnel has port specified, so output to destination
new_list.append({'port': out_port})
elif curr_dp == src_dp and curr_dp == dst_dp:
# SINGLE DP: in_port, actions=[additional_instructions, out_port]
exit_instructions = self.tunnel_dests[tunnel_id].get('exit_instructions', [])
new_list.extend(copy.copy(list(exit_instructions)))
if self.tunnel_dests[tunnel_id].get('maintain_encapsulation', False):
# Maintain encapsulation implies we want the tunnel VID on the packet,
# so ensure it is purposefully put onto the packet, even when
# there would originally be no need to push on a tunnel VID
new_list.extend([
{'vlan_vids': [{'vid': tunnel_id, 'eth_type': ether.ETH_TYPE_8021Q}]},
{'set_fields': [{'vlan_pcp': pcp_flag}]}])
if out_port is None:
# DP dest tunnel, so we fall through into the eth_dst output table
new_list.extend([{'goto': output_table.table_id}])
else:
# Tunnel has port specified, so output to destination
new_list.extend([{'port': out_port}])
else:
# TRANSIT DP: in_port, vlan_vid, actions=[output]
new_list = [{'port': out_port}]
if new_list != prev_list:
self.dyn_tunnel_rules[tunnel_id][source_id] = new_list
return True
return True | REANNZ/faucet | [
480,
166,
480,
33,
1443148776
] |
def HTML_WRAP(app):
"""
Wraps the Application object's results in HTML
"""
def gen(environ, start_response):
"""The standard WSGI interface"""
iterator = app(environ, start_response)
first_yield = iterator.next()
yield "<html>\n"
yield "<body>\n"
yield first_yield
for i in iterator:
yield i
yield "</body>\n"
yield "</html>\n"
return gen | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
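A hedged usage sketch for `HTML_WRAP`: wrapping a trivial WSGI generator app (the app below is hypothetical; like the Python 2 code above, the body fragments are plain strings):

```python
def hello_app(environ, start_response):
    # Minimal WSGI app yielding one body fragment for HTML_WRAP to enclose.
    start_response('200 OK', [('Content-Type', 'text/html')])
    yield "<p>Hello from the wrapped app</p>\n"

wrapped_app = HTML_WRAP(hello_app)
# The server iterates wrapped_app(environ, start_response) and receives the
# fragment framed by <html>/<body> tags.
```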
def normalizeEnviron(environ):
"""
Converts environ variables to strings for wsgi compliance and deletes extraneous
fields.
"""
header_list = []
header_dict = environ['headers']
for key in header_dict:
line = "%s: %s\n" % (key, header_dict[key])
header_list.append(line)
environ['headers'] = ''.join(header_list)
environ['peerport'] = str(environ['peerport'])
environ['localport'] = str(environ['localport'])
del environ['bad'] | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def __init__(self, app_name, app, request, log_writable, WsgiConfig):
super(_WsgiHandler, self).__init__()
self.app_name = app_name
self.request = request
self.environ = request
self.app = app
#self.log_writable = log_writable
self.log_writable = LogWritable.GetLogWritable('wsgi.log', self, '_signal-lw')
self.status = self.response_headers = False
self.wsgi_config = WsgiConfig | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def start_response(self, status, response_headers, exc_info=None):
"""
Method to be passed to WSGI application object
"""
#TODO: Add more exc_info support
if exc_info:
raise exc_info[0], exc_info[1], exc_info[2]
self.status = status
self.response_headers = response_headers
return self.write | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def munge_headers(self):
for header in self.environ["headers"]:
cgi_varname = "HTTP_"+header.replace("-","_").upper()
self.environ[cgi_varname] = self.environ["headers"][header]
pprint.pprint(self.environ)
pprint.pprint(self.environ["headers"]) | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def initRequiredVars(self, wsgi_config):
"""
This method initializes all variables that are required to be present
(including ones that could possibly be empty).
"""
self.environ["REQUEST_METHOD"] = self.request["method"]
# Portion of URL that relates to the application object.
self.environ["SCRIPT_NAME"] = self.app_name
# Remainder of request path after "SCRIPT_NAME"
self.environ["PATH_INFO"] = self.environ["uri-suffix"]
# Server name published to the outside world
self.environ["SERVER_NAME"] = self.server_name
# Server port published to the outside world
self.environ["SERVER_PORT"] = self.server_port
#Protocol to respond to
self.environ["SERVER_PROTOCOL"] = self.request["protocol"]
#==================================
#WSGI variables
#==================================
self.environ["wsgi.version"] = wsgi_config['WSGI_VER']
self.environ["wsgi.url_scheme"] = self.request["protocol"].lower()
self.environ["wsgi.errors"] = self.log_writable
self.environ["wsgi.multithread"] = False
self.environ["wsgi.multiprocess"] = False
self.environ["wsgi.run_once"] = True
self.environ["wsgi.input"] = self.generateRequestMemFile() | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def unsupportedVars(self):
"""
Probably won't be used. This is just a list of environment variables that
aren't implemented as of yet.
"""
consider = " **CONSIDER ADDING THIS -- eg: "
self.environ["HTTP_REFERER"] = consider + "-"
self.environ["SERVER_SIGNATURE"] = consider + "...."
self.environ["SCRIPT_FILENAME"] = consider + \
"/usr/local/httpd/sites/com.thwackety/cgi/test.pl"
self.environ["REQUEST_URI"] = consider + "/cgi-bin/test.pl"
self.environ["SCRIPT_URL"] = consider + "/cgi-bin/test.pl"
self.environ["SCRIPT_URI"] = consider + "http://thwackety.com/cgi-bin/test.pl"
self.environ["REMOTE_ADDR"] = consider + "192.168.2.5"
self.environ["REMOTE_PORT"] = consider + "56669"
self.environ["GATEWAY_INTERFACE"] = consider + "CGI/1.1" | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def _getWsgiHandler(request):
requested_uri = sanitizePath(request['raw-uri'], substituted_path)
print requested_uri
for url_item in urls.UrlList:
print 'trying ' + url_item[0]
if re.search(url_item[0], requested_uri):
print url_item[0] + ' successful!'
u, mod, app_attr, app_name = url_item
break
module = _importWsgiModule(mod)
app = getattr(module, app_attr)
return _WsgiHandler(app_name, app, request, log_writable, WsgiConfig) | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def HTTPProtocol():
def foo(self,**argd):
print self.routing
return HTTPServer(requestHandlers(self.routing),**argd)
return foo | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def __init__(self):
super(WsgiError, self).__init__() | sparkslabs/kamaelia_ | [
13,
3,
13,
2,
1348148442
] |
def gc(test_result=True):
"""Site-wide garbage collections."""
def days_ago(days):
return datetime.today() - timedelta(days=days)
log.info('Collecting data to delete')
logs = (
ActivityLog.objects.filter(created__lt=days_ago(90))
.exclude(action__in=amo.LOG_KEEP)
.values_list('id', flat=True)
)
for chunk in chunked(logs, 100):
tasks.delete_logs.delay(chunk)
two_weeks_ago = days_ago(15)
# Hard-delete stale add-ons with no versions. No email should be sent.
versionless_addons = Addon.unfiltered.filter(
versions__pk=None, created__lte=two_weeks_ago
).values_list('pk', flat=True)
for chunk in chunked(versionless_addons, 100):
delete_addons.delay(chunk, with_deleted=True)
# Delete stale FileUploads.
stale_uploads = FileUpload.objects.filter(created__lte=two_weeks_ago).order_by('id')
for file_upload in stale_uploads:
log.info(
'[FileUpload:{uuid}] Removing file: {path}'.format(
uuid=file_upload.uuid, path=file_upload.path
)
)
if file_upload.path:
try:
storage.delete(file_upload.path)
except OSError:
pass
file_upload.delete()
# Delete stale ScannerResults.
ScannerResult.objects.filter(upload=None, version=None).delete()
# Delete fake emails older than 90 days
FakeEmail.objects.filter(created__lte=days_ago(90)).delete() | mozilla/olympia | [
810,
542,
810,
201,
1391193855
] |
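The batched deletes in `gc` rely on the project's `chunked` helper; a simplified, hypothetical equivalent is sketched below (olympia's real helper lives in its own utils module and may differ):

```python
def chunked(iterable, size):
    """Yield successive lists of at most `size` items from `iterable`."""
    buf = []
    for item in iterable:
        buf.append(item)
        if len(buf) == size:
            yield buf
            buf = []
    if buf:
        yield buf

# e.g. dispatch deletions in batches of 100 ids:
# for chunk in chunked(ids, 100):
#     tasks.delete_logs.delay(chunk)
```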
def __init__(self, layer):
self.layer = layer
self.template = loader.get_template(
'regulations/layers/definition_citation.html')
self.sectional = False
self.version = None
self.rev_urls = SectionUrl()
self.rendered = {}
# precomputation
for def_struct in self.layer['referenced'].values():
def_struct['reference_split'] = def_struct['reference'].split('-') | 18F/regulations-site | [
18,
45,
18,
34,
1417818160
] |
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True):
AWSAuthConnection.__init__(self, host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
validate_certs=validate_certs) | nanocell/lsync | [
5,
2,
5,
3,
1359852579
] |
def make_request(self, action, path, headers=None, data='', params=None):
if params:
pairs = []
for key, val in params.iteritems():
if val is None:
continue
pairs.append(key + '=' + urllib.quote(str(val)))
path += '?' + '&'.join(pairs)
return AWSAuthConnection.make_request(self, action, path,
headers, data) | nanocell/lsync | [
5,
2,
5,
3,
1359852579
] |
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
"""
Returns a Python data structure with information about all
Hosted Zones defined for the AWS account.
:param int start_marker: start marker to pass when fetching additional
results after a truncated list
:param list zone_list: a HostedZones list to prepend to results
"""
params = {}
if start_marker:
params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
return e | nanocell/lsync | [
5,
2,
5,
3,
1359852579
] |
def get_hosted_zone_by_name(self, hosted_zone_name):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_name: str
:param hosted_zone_name: The fully qualified domain name for the Hosted
Zone
"""
if hosted_zone_name[-1] != '.':
hosted_zone_name += '.'
all_hosted_zones = self.get_all_hosted_zones()
for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
#check that they gave us the FQDN for their zone
if zone['Name'] == hosted_zone_name:
return self.get_hosted_zone(zone['Id'].split('/')[-1]) | nanocell/lsync | [
5,
2,
5,
3,
1359852579
] |
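A hedged usage sketch for the Route53 connection methods above, assuming boto 2's `connect_route53` shortcut and valid AWS credentials; the zone name is a placeholder:

```python
import boto

conn = boto.connect_route53()
zones = conn.get_all_hosted_zones()                  # follows NextMarker pagination internally
zone = conn.get_hosted_zone_by_name('example.com')   # a trailing dot is appended if missing
```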
def delete_hosted_zone(self, hosted_zone_id):
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('DELETE', uri)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e | nanocell/lsync | [
5,
2,
5,
3,
1359852579
] |
def get_all_rrsets(self, hosted_zone_id, type=None,
name=None, identifier=None, maxitems=None):
"""
Retrieve the Resource Record Sets defined for this Hosted Zone.
Returns the raw XML data returned by the Route53 call.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type type: str
:param type: The type of resource record set to begin the record
listing from. Valid choices are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
Valid values for weighted resource record sets:
* A
* AAAA
* CNAME
* TXT
Valid values for Zone Apex Aliases:
* A
* AAAA
:type name: str
:param name: The first name in the lexicographic ordering of domain
names to be retrieved
:type identifier: str
:param identifier: In a hosted zone that includes weighted resource
record sets (multiple resource record sets with the same DNS
name and type that are differentiated only by SetIdentifier),
if results were truncated for a given DNS name and type,
the value of SetIdentifier for the next resource record
set that has the current DNS name and type
:type maxitems: int
:param maxitems: The maximum number of records
"""
from boto.route53.record import ResourceRecordSets
params = {'type': type, 'name': name,
'Identifier': identifier, 'maxitems': maxitems}
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri, params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs | nanocell/lsync | [
5,
2,
5,
3,
1359852579
] |
def __init__(self):
super(Serializer, self).__init__({
'find_isolate':
evaluators.SequenceEvaluator(
[find_isolate.Serializer(), TaskTransformer]),
'run_test':
evaluators.SequenceEvaluator(
[run_test.Serializer(), TaskTransformer]),
'read_value':
evaluators.SequenceEvaluator(
[read_value.Serializer(), TaskTransformer]),
'find_culprit':
evaluators.SequenceEvaluator(
[performance_bisection.Serializer(), AnalysisTransformer]),
}) | endlessm/chromium-browser | [
21,
16,
21,
3,
1435959644
] |
def TaskTransformer(task, _, context):
"""Takes the form:
{
<task id> : {
...
}
}
And turns it into:
{
'state': {
'change': {...}
'quest': <string>
'index': <int>
'add_execution': {
...
}
}
}
"""
if not context:
return None
input_data = context.get(task.id)
if not input_data:
return None
result = {
'state': {
'change': task.payload.get('change'),
'quest': TASK_TYPE_QUEST_MAPPING.get(task.task_type),
'index': task.payload.get('index', 0),
'add_execution': input_data,
}
}
context.clear()
context.update(result) | endlessm/chromium-browser | [
21,
16,
21,
3,
1435959644
] |
def __init__(self,
row_splits,
row_lengths=None,
value_rowids=None,
nrows=None,
uniform_row_length=None,
nvals=None,
internal=False):
"""Creates a `RowPartition` from the specified encoding tensor(s).
This constructor is private -- please use one of the following ops to
build `RowPartition`s:
* `RowPartition.from_row_lengths`
* `RowPartition.from_value_rowids`
* `RowPartition.from_row_splits`
* `RowPartition.from_row_starts`
* `RowPartition.from_row_limits`
* `RowPartition.from_uniform_row_length`
If row_splits has a constant value, then all other arguments should
have a constant value.
Args:
row_splits: A 1-D integer tensor with shape `[nrows+1]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`
value_rowids: A 1-D integer tensor with shape `[nvals]`.
nrows: A 1-D integer scalar tensor.
uniform_row_length: A scalar tensor.
nvals: A scalar tensor.
internal: Private key value, required to ensure that this private
constructor is *only* called from the factory methods.
Raises:
TypeError: If a row partitioning tensor has an inappropriate dtype.
TypeError: If exactly one row partitioning argument was not specified.
ValueError: If a row partitioning tensor has an inappropriate shape.
ValueError: If multiple partitioning arguments are specified.
ValueError: If nrows is specified but value_rowids is not None.
"""
if internal is not _row_partition_factory_key:
raise ValueError("RowPartition constructor is private; please use one "
"of the factory methods instead (e.g., "
"RowPartition.from_row_lengths())")
# Validate the arguments.
if not isinstance(row_splits, ops.Tensor):
raise TypeError("Row-partitioning argument must be a Tensor, got %r" %
row_splits)
if row_splits.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("Row-partitioning argument must be int32 or int64")
# Validate shapes & dtypes.
row_splits.shape.assert_has_rank(1)
row_splits.set_shape([None])
self._row_splits = row_splits
# Store any cached tensors. These are used to avoid unnecessary
# round-trip conversions when a RowPartition is constructed from
# lengths or rowids, and we later want those lengths/rowids back.
for tensor in [row_lengths, value_rowids, nrows, uniform_row_length, nvals]:
if tensor is not None:
if not isinstance(tensor, ops.Tensor):
raise TypeError("Cached value must be a Tensor or None.")
elif tensor.dtype != row_splits.dtype:
raise ValueError(f"Inconsistent dtype for encoding tensors: "
f"{tensor} vs {row_splits}")
self._row_lengths = row_lengths
self._value_rowids = value_rowids
self._nrows = nrows
self._uniform_row_length = uniform_row_length
self._nvals = nvals | tensorflow/tensorflow | [
171949,
87931,
171949,
2300,
1446859160
] |
def from_value_rowids(cls,
value_rowids,
nrows=None,
validate=True,
preferred_dtype=None,
dtype=None,
dtype_hint=None):
"""Creates a `RowPartition` with rows partitioned by `value_rowids`.
This `RowPartition` divides a sequence `values` into rows by specifying
which row each value should be added to:
```python
partitioned_rows = [[] for _ in nrows]
for (value, rowid) in zip(values, value_rowids):
partitioned_rows[rowid].append(value)
```
Args:
value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An integer scalar specifying the number of rows. This should be
specified if the `RowPartition` may contain empty training rows. Must
be greater than `value_rowids[-1]` (or greater than or equal to zero if
`value_rowids` is empty). Defaults to `value_rowids[-1] + 1` (or zero if
`value_rowids` is empty).
validate: If true, then use assertions to check that the arguments form a
valid `RowPartition`.
preferred_dtype: Deprecated synonym of dtype_hint.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `value_rowids`, dtype_hint, or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A `RowPartition`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
>>> print(RowPartition.from_value_rowids(
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=4))
tf.RowPartition(row_splits=[0 4 4 7 8])
"""
dtype_hint = _get_dtype_hint(preferred_dtype, dtype_hint)
# Local import bincount_ops to avoid import-cycle since bincount_ops
# imports ragged_tensor.
from tensorflow.python.ops import bincount_ops # pylint: disable=g-import-not-at-top
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(None, "RowPartitionFromValueRowIds",
[value_rowids, nrows]):
value_rowids = cls._convert_row_partition(
value_rowids, "value_rowids", dtype_hint=dtype_hint, dtype=dtype)
if nrows is None:
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is None:
nrows = array_ops.concat([value_rowids[-1:], [-1]], axis=0)[0] + 1
const_nrows = None
else:
const_nrows = const_rowids[-1] + 1 if const_rowids.size > 0 else 0
nrows = ops.convert_to_tensor(
const_nrows, value_rowids.dtype, name="nrows")
else:
nrows = ops.convert_to_tensor(nrows, value_rowids.dtype, "nrows")
const_nrows = tensor_util.constant_value(nrows)
if const_nrows is not None:
if const_nrows < 0:
raise ValueError("Expected nrows >= 0; got %d" % const_nrows)
const_rowids = tensor_util.constant_value(value_rowids)
if const_rowids is not None and const_rowids.size > 0:
if not const_nrows >= const_rowids[-1] + 1:
raise ValueError(
"Expected nrows >= value_rowids[-1] + 1; got nrows=%d, "
"value_rowids[-1]=%d" % (const_nrows, const_rowids[-1]))
value_rowids.shape.assert_has_rank(1)
nrows.shape.assert_has_rank(0)
if validate:
msg = ("Arguments to from_value_rowids do not form a valid "
"RowPartition")
checks = [
check_ops.assert_rank(value_rowids, 1, message=msg),
check_ops.assert_rank(nrows, 0, message=msg),
check_ops.assert_non_negative(value_rowids[:1], message=msg),
_assert_monotonic_increasing(value_rowids, message=msg),
check_ops.assert_less(value_rowids[-1:], nrows, message=msg),
]
value_rowids = control_flow_ops.with_dependencies(checks, value_rowids)
# Convert value_rowids & nrows to row_splits.
# Note: we don't use segment_ids_to_row_splits() here because we want
# to save the intermediate value `row_lengths`, so we can cache it.
# TODO(b/116708836) Upgrade bincount to accept int64 so we can skip the
# cast.
value_rowids_int32 = math_ops.cast(value_rowids, dtypes.int32)
nrows_int32 = math_ops.cast(nrows, dtypes.int32)
row_lengths = bincount_ops.bincount(
value_rowids_int32,
minlength=nrows_int32,
maxlength=nrows_int32,
dtype=value_rowids.dtype)
row_splits = array_ops.concat([[0], math_ops.cumsum(row_lengths)], axis=0)
if const_nrows is not None:
row_lengths.set_shape([const_nrows])
row_splits.set_shape([const_nrows + 1])
return cls(
row_splits=row_splits,
row_lengths=row_lengths,
value_rowids=value_rowids,
nrows=nrows,
internal=_row_partition_factory_key) | tensorflow/tensorflow | [
171949,
87931,
171949,
2300,
1446859160
] |
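The rowids-to-splits conversion at the end of `from_value_rowids` (a bincount followed by a cumulative sum) can be mirrored in plain NumPy. A minimal sketch, independent of TensorFlow:

```python
import numpy as np

def rowids_to_row_splits(value_rowids, nrows):
    # Mirror of the bincount + cumsum conversion used by from_value_rowids.
    row_lengths = np.bincount(value_rowids, minlength=nrows)
    return np.concatenate([[0], np.cumsum(row_lengths)])

# rowids_to_row_splits([0, 0, 0, 0, 2, 2, 2, 3], nrows=4) -> [0 4 4 7 8]
```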
def from_row_splits(cls,
row_splits,
validate=True,
preferred_dtype=None,
dtype=None,
dtype_hint=None):
"""Creates a `RowPartition` with rows partitioned by `row_splits`.
This `RowPartition` divides a sequence `values` into rows by indicating
where each row begins and ends:
```python
partitioned_rows = []
for i in range(len(row_splits) - 1):
row_start = row_splits[i]
row_end = row_splits[i + 1]
partitioned_rows.append(values[row_start:row_end])
```
Args:
row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
empty, and must be sorted in ascending order. `row_splits[0]` must be
zero.
validate: If true, then use assertions to check that the arguments form a
valid `RowPartition`.
preferred_dtype: Deprecated synonym of dtype_hint.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `row_splits`, dtype_hint, or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A `RowPartition`.
Raises:
ValueError: If `row_splits` is an empty list.
"""
dtype_hint = _get_dtype_hint(preferred_dtype, dtype_hint)
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if isinstance(row_splits, (list, tuple)) and not row_splits:
raise ValueError("row_splits tensor may not be empty.")
if isinstance(row_splits, tensor_spec.TensorSpec):
return cls(row_splits=row_splits, internal=_row_partition_factory_key)
with ops.name_scope(None, "RowPartitionFromRowSplits", [row_splits]):
row_splits = cls._convert_row_partition(
row_splits, "row_splits", dtype_hint=dtype_hint, dtype=dtype)
row_splits.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_splits do not form a valid RaggedTensor:"
checks = [
check_ops.assert_rank(row_splits, 1, message=(msg + "rank")),
_assert_zero(row_splits[0], message=(msg + "zero")),
_assert_monotonic_increasing(
row_splits, message=(msg + "monotonic")),
]
row_splits = control_flow_ops.with_dependencies(checks, row_splits)
return cls(row_splits=row_splits, internal=_row_partition_factory_key) | tensorflow/tensorflow | [
171949,
87931,
171949,
2300,
1446859160
] |
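The partitioning loop shown in the `from_row_splits` docstring can also be exercised directly. A small NumPy sketch, independent of TensorFlow:

```python
import numpy as np

def partition_by_row_splits(values, row_splits):
    # Slice `values` into rows delimited by consecutive row_splits entries.
    values = np.asarray(values)
    return [values[row_splits[i]:row_splits[i + 1]]
            for i in range(len(row_splits) - 1)]

# partition_by_row_splits([3, 1, 4, 1, 5, 9, 2, 6], [0, 4, 4, 7, 8])
# -> rows of lengths [4, 0, 3, 1]
```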
def from_row_lengths(cls,
row_lengths,
validate=True,
preferred_dtype=None,
dtype=None,
dtype_hint=None):
"""Creates a `RowPartition` with rows partitioned by `row_lengths`.
This `RowPartition` divides a sequence `values` into rows by indicating
the length of each row:
```python
partitioned_rows = [[values.pop(0) for _ in range(length)]
for length in row_lengths]
```
Args:
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative.
validate: If true, then use assertions to check that the arguments form a
valid `RowPartition`.
preferred_dtype: Deprecated synonym of dtype_hint.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `row_lengths`, dtype_hint, or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A `RowPartition`.
"""
dtype_hint = _get_dtype_hint(
preferred_dtype=preferred_dtype, dtype_hint=dtype_hint)
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(None, "RowPartitionFromRowLengths", [row_lengths]):
row_lengths = cls._convert_row_partition(
row_lengths, "row_lengths", dtype_hint=dtype_hint, dtype=dtype)
row_lengths.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_lengths do not form a valid RowPartition"
checks = [
check_ops.assert_rank(row_lengths, 1, message=msg),
check_ops.assert_non_negative(row_lengths, message=msg),
]
row_lengths = control_flow_ops.with_dependencies(checks, row_lengths)
row_limits = math_ops.cumsum(row_lengths)
row_splits = array_ops.concat([[0], row_limits], axis=0)
return cls(
row_splits=row_splits,
row_lengths=row_lengths,
internal=_row_partition_factory_key) | tensorflow/tensorflow | [
171949,
87931,
171949,
2300,
1446859160
] |
def from_row_starts(cls,
row_starts,
nvals,
validate=True,
preferred_dtype=None,
dtype=None,
dtype_hint=None):
"""Creates a `RowPartition` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(concat([row_starts, nvals], axis=0))`.
Args:
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
nvals: A scalar tensor indicating the number of values.
validate: If true, then use assertions to check that the arguments form a
valid `RowPartition`.
preferred_dtype: Deprecated synonym of dtype_hint.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `row_starts`, dtype_hint, or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A `RowPartition`.
"""
dtype_hint = _get_dtype_hint(
preferred_dtype=preferred_dtype, dtype_hint=dtype_hint)
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(None, "RowPartitionFromRowStarts", [row_starts]):
row_starts = cls._convert_row_partition(
row_starts, "row_starts", dtype_hint=dtype_hint, dtype=dtype)
row_starts.shape.assert_has_rank(1)
# TODO(martinz): nvals and row_starts could be inconsistent at call time,
# even though they eventually end up the same type.
nvals = math_ops.cast(nvals, row_starts.dtype)
if validate:
msg = "Arguments to from_row_starts do not form a valid RaggedTensor"
checks = [
check_ops.assert_rank(row_starts, 1, message=msg),
_assert_zero(row_starts[:1], message=msg),
_assert_monotonic_increasing(row_starts, message=msg),
check_ops.assert_less_equal(row_starts[-1:], nvals, message=msg),
]
row_starts = control_flow_ops.with_dependencies(checks, row_starts)
row_splits = array_ops.concat([row_starts, [nvals]], axis=0)
return cls(row_splits=row_splits, nvals=nvals,
internal=_row_partition_factory_key) | tensorflow/tensorflow | [
171949,
87931,
171949,
2300,
1446859160
] |
def from_row_limits(cls,
row_limits,
validate=True,
preferred_dtype=None,
dtype=None,
dtype_hint=None):
"""Creates a `RowPartition` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits], axis=0))`.
Args:
row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
ascending order.
validate: If true, then use assertions to check that the arguments form a
valid `RowPartition`.
preferred_dtype: Deprecated synonym of dtype_hint.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `row_limits`, dtype_hint, or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A `RowPartition`.
"""
dtype_hint = _get_dtype_hint(
preferred_dtype=preferred_dtype, dtype_hint=dtype_hint)
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
with ops.name_scope(None, "RowPartitionFromRowLimits", [row_limits]):
row_limits = cls._convert_row_partition(
row_limits, "row_limits", dtype_hint=dtype_hint, dtype=dtype)
row_limits.shape.assert_has_rank(1)
if validate:
msg = "Arguments to from_row_limits do not form a valid RaggedTensor"
checks = [
check_ops.assert_rank(row_limits, 1, message=msg),
check_ops.assert_non_negative(row_limits[:1], message=msg),
_assert_monotonic_increasing(row_limits, message=msg),
]
row_limits = control_flow_ops.with_dependencies(checks, row_limits)
zero = array_ops.zeros([1], row_limits.dtype)
row_splits = array_ops.concat([zero, row_limits], axis=0)
return cls(row_splits=row_splits, internal=_row_partition_factory_key)
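# Illustrative usage (added sketch; same import and eager-mode assumptions as the
# from_row_starts note above):
#   rp = RowPartition.from_row_limits([4, 4, 7, 8, 8])
#   rp.row_splits()   # [0, 4, 4, 7, 8, 8] -- a zero is prepended to the limits
#   rp.row_lengths()  # [4, 0, 3, 1, 0]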
def from_uniform_row_length(cls,
uniform_row_length,
nvals=None,
nrows=None,
validate=True,
preferred_dtype=None,
dtype=None,
dtype_hint=None):
"""Creates a `RowPartition` with rows partitioned by `uniform_row_length`.
This `RowPartition` divides a sequence `values` into rows that all have
the same length:
```python
partitioned_rows = [[values.pop(0) for _ in range(uniform_row_length)]
for _ in range(nrows)]
```
Note that either or both of nvals and nrows must be specified.
Args:
uniform_row_length: A scalar integer tensor. Must be nonnegative. The
size of the outer axis of `values` must be evenly divisible by
`uniform_row_length`.
nvals: a non-negative scalar integer tensor for the number of values.
Must be specified if nrows is not specified. If not specified,
defaults to uniform_row_length*nrows
nrows: The number of rows in the constructed RowPartition. If not
specified, then it defaults to `nvals/uniform_row_length` (or `0` if
`uniform_row_length==0`). `nrows` only needs to be specified if
`uniform_row_length` might be zero. `uniform_row_length*nrows` must be
`nvals`.
validate: If true, then use assertions to check that the arguments form a
valid `RowPartition`.
preferred_dtype: Deprecated synonym of dtype_hint.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `uniform_row_length`, dtype_hint,
or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A `RowPartition`.
"""
dtype_hint = _get_dtype_hint(
preferred_dtype=preferred_dtype, dtype_hint=dtype_hint)
if not isinstance(validate, bool):
raise TypeError("validate must have type bool")
if nrows is None and nvals is None:
raise ValueError("Either (or both) of nvals and nrows must be specified")
with ops.name_scope(None, "RowPartitionFromUniformRowLength",
[uniform_row_length, nrows]):
[uniform_row_length, nvals, nrows
] = _convert_all_to_tensors([(uniform_row_length, "uniform_row_length"),
(nvals, "nvals"), (nrows, "nrows")],
dtype=dtype,
dtype_hint=dtype_hint)
uniform_row_length.shape.assert_has_rank(0)
# Find nrows.
const_row_length = tensor_util.constant_value(uniform_row_length)
if nrows is None:
if const_row_length is None:
# Avoid division by zero if uniform_row_length==0 (and nvals==0).
rowlen_or_1 = math_ops.maximum(
uniform_row_length,
constant_op.constant(1, uniform_row_length.dtype))
nrows = nvals // rowlen_or_1
elif const_row_length == 0:
nrows = constant_op.constant(0, dtype=uniform_row_length.dtype)
else:
nrows = nvals // const_row_length
const_nrows = None if nrows is None else tensor_util.constant_value(nrows)
const_nvals = None if nvals is None else tensor_util.constant_value(nvals)
const_uniform_row_length = tensor_util.constant_value(uniform_row_length)
checks = []
if const_nvals is None and const_nrows is not None and const_uniform_row_length is not None:
const_nvals = const_nrows * const_uniform_row_length
if nvals is not None and validate:
checks.append(check_ops.assert_equal(nvals, const_nvals))
nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)
if nvals is None:
nvals = nrows * uniform_row_length
# Find row_splits.
if const_nrows is not None and const_row_length is not None:
row_splits = [v * const_row_length for v in range(const_nrows + 1)]
row_splits = constant_op.constant(row_splits, uniform_row_length.dtype)
else:
row_splits = math_ops.range(
nrows + 1, dtype=uniform_row_length.dtype) * uniform_row_length
if validate:
if (const_nrows is None or const_row_length is None or
const_nvals is None):
checks.append(
check_ops.assert_equal(
nrows * uniform_row_length, nvals,
("uniform_row_length", uniform_row_length, "times nrows",
nrows, "must equal nvals", nvals)))
else:
if const_nrows * const_row_length != const_nvals:
raise ValueError(
"uniform_row_length=%d times nrows=%d must equal nvals=%d" %
(const_row_length, const_nrows, const_nvals))
if uniform_row_length.shape.rank is None:
checks.append(
check_ops.assert_rank(
uniform_row_length,
0,
message="uniform_row_length must be a scalar."))
const_row_length = tensor_util.constant_value(uniform_row_length)
if const_row_length is None:
checks.append(
check_ops.assert_greater_equal(
uniform_row_length,
constant_op.constant(0, uniform_row_length.dtype),
message="uniform_row_length must be >= 0."))
else:
if const_row_length < 0:
raise ValueError("uniform_row_length must be >= 0.")
row_splits = control_flow_ops.with_dependencies(checks, row_splits)
return cls(
row_splits=row_splits,
uniform_row_length=uniform_row_length,
nrows=nrows,
nvals=nvals,
internal=_row_partition_factory_key)
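# Illustrative usage (added sketch; same assumptions as the notes above):
#   rp = RowPartition.from_uniform_row_length(2, nvals=8)
#   rp.row_splits()   # [0, 2, 4, 6, 8] -- nrows is inferred as nvals // 2 == 4
#   rp.is_uniform()   # True
# When uniform_row_length may be zero, nrows must be given explicitly:
#   RowPartition.from_uniform_row_length(0, nrows=3).nvals()   # 0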
def _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None):
"""Converts `partition` to Tensors.
Args:
partition: A row-partitioning tensor for the `RowPartition` being
constructed. I.e., one of: row_splits, row_lengths, row_starts,
row_limits, value_rowids, uniform_row_length.
name: The name of the row-partitioning tensor.
dtype: Optional dtype for the RowPartition. If missing, the type
is inferred from the type of `uniform_row_length`, dtype_hint,
or tf.int64.
dtype_hint: Optional dtype for the RowPartition, used when dtype
is None. In some cases, a caller may not have a dtype in mind when
converting to a tensor, so dtype_hint can be used as a soft preference.
If the conversion to `dtype_hint` is not possible, this argument has no
effect.
Returns:
A tensor equivalent to partition.
Raises:
ValueError: if dtype is not int32 or int64.
"""
if dtype_hint is None:
dtype_hint = dtypes.int64
if (isinstance(partition, np.ndarray) and
partition.dtype == np.int32 and dtype is None):
partition = ops.convert_to_tensor(partition, name=name)
else:
partition = ops.convert_to_tensor_v2(
partition, dtype_hint=dtype_hint, dtype=dtype, name=name)
if partition.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("%s must have dtype int32 or int64" % name)
return partition
def dtype(self):
"""The `DType` used to encode the row partition (either int32 or int64)."""
return self._row_splits.dtype
def value_rowids(self):
"""Returns the row indices for this row partition.
`value_rowids` specifies the row index for each value. In particular,
`value_rowids[i]` is the row index for `values[i]`.
Returns:
A 1-D integer `Tensor` with shape `[self.nvals()]`.
The returned tensor is nonnegative, and is sorted in ascending order.
"""
if self._value_rowids is not None:
return self._value_rowids
return segment_id_ops.row_splits_to_segment_ids(self._row_splits)
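# Illustrative usage (added sketch; same assumptions as the notes above):
#   rp = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8])
#   rp.value_rowids()   # [0, 0, 0, 0, 2, 2, 2, 3] -- one row id per value;
#                       # empty rows (1 and 4) contribute no entries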
def nrows(self):
"""Returns the number of rows created by this `RowPartition`.
Returns:
scalar integer Tensor
"""
if self._nrows is not None:
return self._nrows
nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0)
if nsplits.value is None:
return array_ops.shape(self._row_splits, out_type=self.dtype)[0] - 1
else:
return constant_op.constant(nsplits.value - 1, dtype=self.dtype)
def row_starts(self):
"""Returns the start indices for rows in this row partition.
These indices specify where the values for each row begin.
`partition.row_starts()` is equal to `partition.row_splits()[:-1]`.
Returns:
A 1-D integer Tensor with shape `[self.nrows()]`.
The returned tensor is nonnegative, and is sorted in ascending order.
`self.row_starts()[0] == 0`.
`self.row_starts()[-1] <= self.nvals()`.
"""
return self._row_splits[:-1]
def row_lengths(self):
"""Returns the lengths of rows in this `RowPartition`.
Returns:
A 1-D integer Tensor with shape `[self.nrows()]`.
The returned tensor is nonnegative.
`tf.reduce_sum(self.row_lengths()) == self.nvals()`.
"""
if self._row_lengths is not None:
return self._row_lengths
splits = self._row_splits
return splits[1:] - splits[:-1]
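# Illustrative usage (added sketch; same assumptions as the notes above):
#   rp = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8])
#   rp.row_lengths()   # [4, 0, 3, 1, 0] -- adjacent differences of the splits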
def static_nrows(self):
"""The number of rows in this partition, if statically known.
```python
self.row_lengths().shape == [self.static_nrows]
self.row_starts().shape == [self.static_nrows]
self.row_limits().shape == [self.static_nrows]
self.row_splits().shape == [self.static_nrows + 1]
```
Returns:
The number of rows in this partition as an `int` (if statically known);
or `None` (otherwise).
"""
if self._row_splits is not None:
nrows_plus_one = tensor_shape.dimension_value(self._row_splits.shape[0])
if nrows_plus_one is not None:
return nrows_plus_one - 1
if self._row_lengths is not None:
nrows = tensor_shape.dimension_value(self._row_lengths.shape[0])
if nrows is not None:
return nrows
if self._nrows is not None:
return tensor_util.constant_value(self._nrows)
return None
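# Illustrative usage (added sketch; same assumptions as the notes above; note
# that static_nrows is exposed as a @property in the TensorFlow source -- the
# decorator is simply not visible in this dump):
#   rp = RowPartition.from_row_splits([0, 2, 5, 7])
#   rp.static_nrows   # 3 (a Python int, because the splits shape is statically known)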
def static_nvals(self):
"""The number of values in this partition, if statically known.
```python
self.value_rowids().shape == [self.static_nvals]
```
Returns:
The number of values in this partition as an `int` (if statically known);
or `None` (otherwise).
"""
if self._nvals is not None:
nvals = tensor_util.constant_value(self._nvals)
if nvals is not None:
return nvals
if self._value_rowids is not None:
nvals = tensor_shape.dimension_at_index(self._value_rowids.shape, 0)
if nvals.value is not None:
return nvals.value
return None
def static_uniform_row_length(self):
"""The number of values in each row of this partition, if statically known.
Returns:
The number of values in each row of this partition as an `int` (if
statically known); or `None` (otherwise).
"""
if self._uniform_row_length is not None:
return tensor_util.constant_value(self._uniform_row_length)
return None
def is_uniform(self):
"""Returns true if the partition is known to be uniform statically.
This is based upon the existence of self._uniform_row_length. For example:
RowPartition.from_row_lengths([3,3,3]).is_uniform() == False
RowPartition.from_uniform_row_length(5, nvals=20).is_uniform() == True
RowPartition.from_row_lengths([2,0,2]).is_uniform() == False
Returns:
Whether a RowPartition is known to be uniform statically.
"""
return self._uniform_row_length is not None
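# Illustrative usage (added sketch; same assumptions as the notes above):
#   RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()   # True
#   RowPartition.from_row_lengths([5, 5, 5, 5]).is_uniform()         # False --
#       the rows happen to be equal-length, but no uniform_row_length was stored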
def with_row_splits_dtype(self, dtype):
"""Returns a copy of this RowPartition with the given `row_splits` dtype.
Args:
dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RowPartition, with the `row_splits` cast to the given type.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("dtype must be int32 or int64")
if self.dtype == dtype:
return self
return RowPartition(
row_splits=_cast_if_not_none(self._row_splits, dtype),
row_lengths=_cast_if_not_none(self._row_lengths, dtype),
value_rowids=_cast_if_not_none(self._value_rowids, dtype),
nrows=_cast_if_not_none(self._nrows, dtype),
uniform_row_length=_cast_if_not_none(self._uniform_row_length, dtype),
internal=_row_partition_factory_key)
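# Illustrative usage (added sketch; same assumptions as the notes above, using the
# `dtypes` module already referenced by this file):
#   rp = RowPartition.from_row_splits([0, 2, 5])    # encoding dtype defaults to int64
#   rp32 = rp.with_row_splits_dtype(dtypes.int32)
#   rp32.dtype   # tf.int32 (dtype is a @property in the TensorFlow source)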
def __repr__(self):
if self._uniform_row_length is not None:
return (f"tf.RowPartition(nrows={self._nrows}, "
f"uniform_row_length={self._uniform_row_length})")
else:
return f"tf.RowPartition(row_splits={self._row_splits})" | tensorflow/tensorflow | [
171949,
87931,
171949,
2300,
1446859160
] |
def has_precomputed_row_splits(self):
"""Returns true if `row_splits` has already been computed.
If true, then `self.row_splits()` will return its value without calling
any TensorFlow ops.
"""
return self._row_splits is not None
def has_precomputed_value_rowids(self):
"""Returns true if `value_rowids` has already been computed.
If true, then `self.value_rowids()` will return its value without calling
any TensorFlow ops.
"""
return self._value_rowids is not None
def has_precomputed_nvals(self):
"""Returns true if `nvals` has already been computed.
If true, then `self.nvals()` will return its value without calling
any TensorFlow ops.
"""
return self._nvals is not None
def with_precomputed_row_lengths(self):
"""Returns a copy of `self` with `row_lengths` precomputed."""
return RowPartition(
row_splits=self._row_splits,
row_lengths=self.row_lengths(),
value_rowids=self._value_rowids,
nrows=self._nrows,
nvals=self._nvals,
uniform_row_length=self._uniform_row_length,
internal=_row_partition_factory_key)
def with_precomputed_nrows(self):
"""Returns a copy of `self` with `nrows` precomputed."""
return RowPartition(
row_splits=self._row_splits,
row_lengths=self._row_lengths,
value_rowids=self._value_rowids,
nrows=self.nrows(),
nvals=self._nvals,
uniform_row_length=self._uniform_row_length,
internal=_row_partition_factory_key)
def merge_precomputed_encodings(self, other, validate=True):
"""Returns a RowPartition that merges encodings from `self` and `other`.
Requires that `self` and `other` describe the same partition.
Args:
other: A `RowPartition` that encodes the same partition as `self`.
validate: If true, then add runtime checks to verify that `self` and
`other` encode the same row partition.
Returns:
A `RowPartition`.
"""
# pylint: disable=protected-access
if (self is other or # Fast path if row partitions are equal.
(self._row_splits is other._row_splits and
self._row_lengths is other._row_lengths and
self._value_rowids is other._value_rowids and
self._nrows is other._nrows and
self._nvals is other._nvals and
self._uniform_row_length is other._uniform_row_length)):
return self
# Merge the component tensors. We only need to validate one encoding.
# We merge less-expensive encodings first (to avoid expensive validation).
nrows, nrows_validated = _merge_tensors(self._nrows, other._nrows, "nrows",
validate)
nvals, _ = _merge_tensors(self._nvals, other._nvals, "nvals", validate)
uniform_row_length, uniform_row_length_validated = _merge_tensors(
self._uniform_row_length, other._uniform_row_length,
"uniform_row_length", validate)
if uniform_row_length_validated and nrows_validated:
validate = False # Validation complete.
row_splits, row_splits_validated = _merge_tensors(self._row_splits,
other._row_splits,
"row_splits", validate)
if row_splits_validated:
validate = False # Validation complete.
row_lengths, row_lengths_validated = _merge_tensors(self._row_lengths,
other._row_lengths,
"row_lengths", validate)
if row_lengths_validated:
validate = False # Validation complete.
value_rowids, value_rowids_validated = _merge_tensors(
self._value_rowids, other._value_rowids, "value_rowids", validate)
if value_rowids_validated and nrows_validated:
validate = False # Validation complete.
# TODO(edloper): If we make the row_splits encoding optional, then there
# will be cases where we need to do validation at this point -- e.g. if
# self has only row_splits and other has only value_rowids. But for
# now, we are guaranteed to have done validation by this point.
# Avoid creating new RowPartition objects if we don't need to.
if (row_splits is self._row_splits and row_lengths is self._row_lengths and
value_rowids is self._value_rowids and nrows is self._nrows and
uniform_row_length is self._uniform_row_length):
return self
if (row_splits is other._row_splits and
row_lengths is other._row_lengths and
value_rowids is other._value_rowids and nrows is other._nrows and
uniform_row_length is other._uniform_row_length):
return other
return RowPartition(
row_splits=row_splits,
row_lengths=row_lengths,
value_rowids=value_rowids,
nrows=nrows,
uniform_row_length=uniform_row_length,
nvals=nvals,
internal=_row_partition_factory_key)
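# Illustrative usage (added sketch; same assumptions as the notes above):
#   a = RowPartition.from_row_splits([0, 2, 5])    # row_splits precomputed
#   b = RowPartition.from_row_lengths([2, 3])      # row_lengths (and splits) precomputed
#   m = a.merge_precomputed_encodings(b)           # same partition, merged encodings
#   m.has_precomputed_row_splits()    # True
#   m.has_precomputed_value_rowids()  # False -- neither input had value_rowids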
def _type_spec(self):
return RowPartitionSpec.from_value(self)
def __init__(self,
nrows=None,
nvals=None,
uniform_row_length=None,
dtype=dtypes.int64):
"""Constructs a new RowPartitionSpec.
Args:
nrows: The number of rows in the RowPartition, or `None` if unspecified.
nvals: The number of values partitioned by the RowPartition, or `None` if
unspecified.
uniform_row_length: The number of values in each row for this
RowPartition, or `None` if rows are ragged or row length is unspecified.
dtype: The data type used to encode the partition. One of `tf.int64` or
`tf.int32`.
"""
# Wrap dimension sizes in 1D TensorShapes so the default implementations
of TypeSpec methods such as `is_compatible_with` will work.
nrows = tensor_shape.TensorShape([nrows])
nvals = tensor_shape.TensorShape([nvals])
if not isinstance(uniform_row_length, tensor_shape.TensorShape):
uniform_row_length = tensor_shape.TensorShape([uniform_row_length])
else:
uniform_row_length = uniform_row_length.with_rank(1)
self._nrows = nrows
self._nvals = nvals
self._uniform_row_length = uniform_row_length
self._dtype = dtypes.as_dtype(dtype)
if self._dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("dtype must be tf.int32 or tf.int64")
# Check dimension consistency, & infer dimensions when possible.
nrows = tensor_shape.dimension_value(nrows[0])
nvals = tensor_shape.dimension_value(nvals[0])
ncols = tensor_shape.dimension_value(uniform_row_length[0])
if nrows == 0: # no rows -> no values.
if nvals is None:
self._nvals = tensor_shape.TensorShape([0])
elif nvals != 0:
raise ValueError("nvals=%s is not compatible with nrows=%s" %
(nvals, nrows))
if ncols == 0: # there are no values in each row -> no values.
if nvals is None:
self._nvals = tensor_shape.TensorShape([0])
elif nvals != 0:
raise ValueError("nvals=%s is not compatible with uniform_row_length"
"=%s" % (nvals, uniform_row_length))
if ncols is not None and nvals is not None:
if ncols != 0 and nvals % ncols != 0:
raise ValueError("nvals=%s is not compatible with uniform_row_length"
"=%s (doesn't divide evenly)" % (nvals, ncols))
if nrows is not None and nvals != ncols * nrows:
raise ValueError("nvals=%s is not compatible with nrows=%s and "
"uniform_row_length=%s" % (nvals, nrows, ncols))
if nrows is None and ncols != 0:
self._nrows = tensor_shape.TensorShape([nvals // ncols])
if ncols is not None and nrows is not None and nvals is None:
self._nvals = tensor_shape.TensorShape([ncols * nrows])
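# Illustrative usage (added sketch; RowPartitionSpec is the TypeSpec that describes
# a RowPartition and lives in the same internal module assumed in the notes above):
#   spec = RowPartitionSpec(nrows=4, uniform_row_length=2)
#   spec.nvals    # 8 -- inferred as nrows * uniform_row_length (a @property in the source)
#   spec.dtype    # tf.int64 (the default encoding dtype)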
def _serialize(self):
return (self._nrows, self._nvals, self._uniform_row_length, self._dtype)
def _deserialize(cls, serialization):
# Remove TensorShape wrappers from serialization.
(nrows, nvals, uniform_row_length, dtype) = serialization
nrows = tensor_shape.dimension_value(nrows[0])
nvals = tensor_shape.dimension_value(nvals[0])
return cls(nrows, nvals, uniform_row_length, dtype)
def nrows(self):
return tensor_shape.dimension_value(self._nrows[0])
def nvals(self):
return tensor_shape.dimension_value(self._nvals[0])
def uniform_row_length(self):
return tensor_shape.dimension_value(self._uniform_row_length[0])
def dtype(self):
return self._dtype
def _component_specs(self):
row_splits_shape = tensor_shape.TensorShape(
[tensor_shape.dimension_at_index(self._nrows, 0) + 1])
return tensor_spec.TensorSpec(row_splits_shape, self._dtype)
def _from_components(self, tensor):
return RowPartition.from_row_splits(tensor, validate=False)
def from_value(cls, value):
if not isinstance(value, RowPartition):
raise TypeError("Expected `value` to be a `RowPartition`")
return cls(value.static_nrows, value.static_nvals,
value.static_uniform_row_length, value.dtype)