language | func_code_string |
---|---|
python | def load_config_json(conf_file):
    """Load a JSON configuration file, returning an empty dict if the file does not exist."""
    try:
        with open(conf_file) as _:
            try:
                json_conf = json.load(_)
            except ValueError as ze_error:
                error('invalid-config',
                      'The provided configuration file %s is not valid json.\n'
                      'The exact error was %s.\n'
                      'This often happens because of missing or extra commas, '
                      'but it may be something else, please fix it!\n' %
                      (conf_file, str(ze_error)))
    except FileNotFoundError:
        json_conf = {}
    except IOError as _err:
        error('setup-issue',
              'Passed config file %s could not be opened (%s)' %
              (conf_file, _err))
    return json_conf |
java | public final void shiftOp() throws RecognitionException {
try {
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:465:5: ( ( LESS LESS | GREATER GREATER GREATER | GREATER GREATER ) )
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:465:7: ( LESS LESS | GREATER GREATER GREATER | GREATER GREATER )
{
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:465:7: ( LESS LESS | GREATER GREATER GREATER | GREATER GREATER )
int alt46=3;
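// ANTLR-generated lookahead: alt46 selects between "<<" (alt 1), ">>>" (alt 2) and ">>" (alt 3).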
int LA46_0 = input.LA(1);
if ( (LA46_0==LESS) ) {
alt46=1;
}
else if ( (LA46_0==GREATER) ) {
int LA46_2 = input.LA(2);
if ( (LA46_2==GREATER) ) {
int LA46_3 = input.LA(3);
if ( (LA46_3==GREATER) ) {
alt46=2;
}
else if ( (LA46_3==EOF||LA46_3==BOOL||(LA46_3 >= DECIMAL && LA46_3 <= DIV)||LA46_3==DOT||LA46_3==FLOAT||LA46_3==HEX||(LA46_3 >= ID && LA46_3 <= INCR)||(LA46_3 >= LEFT_PAREN && LA46_3 <= LESS)||LA46_3==MINUS||LA46_3==NEGATION||LA46_3==NULL||LA46_3==PLUS||LA46_3==QUESTION_DIV||(LA46_3 >= STAR && LA46_3 <= TIME_INTERVAL)) ) {
alt46=3;
}
else {
if (state.backtracking>0) {state.failed=true; return;}
int nvaeMark = input.mark();
try {
for (int nvaeConsume = 0; nvaeConsume < 3 - 1; nvaeConsume++) {
input.consume();
}
NoViableAltException nvae =
new NoViableAltException("", 46, 3, input);
throw nvae;
} finally {
input.rewind(nvaeMark);
}
}
}
else {
if (state.backtracking>0) {state.failed=true; return;}
int nvaeMark = input.mark();
try {
input.consume();
NoViableAltException nvae =
new NoViableAltException("", 46, 2, input);
throw nvae;
} finally {
input.rewind(nvaeMark);
}
}
}
else {
if (state.backtracking>0) {state.failed=true; return;}
NoViableAltException nvae =
new NoViableAltException("", 46, 0, input);
throw nvae;
}
switch (alt46) {
case 1 :
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:465:9: LESS LESS
{
match(input,LESS,FOLLOW_LESS_in_shiftOp2182); if (state.failed) return;
match(input,LESS,FOLLOW_LESS_in_shiftOp2184); if (state.failed) return;
}
break;
case 2 :
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:466:11: GREATER GREATER GREATER
{
match(input,GREATER,FOLLOW_GREATER_in_shiftOp2196); if (state.failed) return;
match(input,GREATER,FOLLOW_GREATER_in_shiftOp2198); if (state.failed) return;
match(input,GREATER,FOLLOW_GREATER_in_shiftOp2200); if (state.failed) return;
}
break;
case 3 :
// src/main/resources/org/drools/compiler/lang/DRL6Expressions.g:467:11: GREATER GREATER
{
match(input,GREATER,FOLLOW_GREATER_in_shiftOp2212); if (state.failed) return;
match(input,GREATER,FOLLOW_GREATER_in_shiftOp2214); if (state.failed) return;
}
break;
}
}
}
catch (RecognitionException re) {
throw re;
}
finally {
// do for sure before leaving
}
} |
python | def generate_id(self):
    """Generate a fresh id"""
    if self.use_repeatable_ids:
        self.repeatable_id_counter += 1
        return 'autobaked-{}'.format(self.repeatable_id_counter)
    else:
        return str(uuid4()) |
python | def run(vcf, conf_fns, lua_fns, data, basepath=None, decomposed=False):
    """Annotate a VCF file using vcfanno (https://github.com/brentp/vcfanno)

    decomposed -- if set to true we'll convert allele based output into single values
        to match alleles and make compatible with vcf2db
        (https://github.com/quinlan-lab/vcf2db/issues/14)
    """
    conf_fns.sort(key=lambda x: os.path.basename(x) if x else "")
    lua_fns.sort(key=lambda x: os.path.basename(x) if x else "")
    ext = "-annotated-%s" % utils.splitext_plus(os.path.basename(conf_fns[0]))[0]
    if vcf.find(ext) > 0:
        out_file = vcf
    else:
        out_file = "%s%s.vcf.gz" % (utils.splitext_plus(vcf)[0], ext)
    if not utils.file_exists(out_file):
        vcfanno = config_utils.get_program("vcfanno", data)
        with file_transaction(out_file) as tx_out_file:
            conffn = _combine_files(conf_fns, out_file, data, basepath is None)
            luafn = _combine_files(lua_fns, out_file, data, False)
            luaflag = "-lua {0}".format(luafn) if luafn and utils.file_exists(luafn) else ""
            basepathflag = "-base-path {0}".format(basepath) if basepath else ""
            cores = dd.get_num_cores(data)
            post_ann = "sed -e 's/Number=A/Number=1/g' |" if decomposed else ""
            cmd = ("{vcfanno} -p {cores} {luaflag} {basepathflag} {conffn} {vcf} "
                   "| {post_ann} bgzip -c > {tx_out_file}")
            message = "Annotating {vcf} with vcfanno, using {conffn}".format(**locals())
            do.run(cmd.format(**locals()), message)
    return vcfutils.bgzip_and_index(out_file, data["config"]) |
python | def find_elb(name='', env='', region=''):
    """Get an application's AWS elb dns name.

    Args:
        name (str): ELB name
        env (str): Environment/account of ELB
        region (str): AWS Region

    Returns:
        str: elb DNS record
    """
    LOG.info('Find %s ELB in %s [%s].', name, env, region)
    url = '{0}/applications/{1}/loadBalancers'.format(API_URL, name)
    response = requests.get(url, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
    assert response.ok
    elb_dns = None
    accounts = response.json()
    for account in accounts:
        if account['account'] == env and account['region'] == region:
            elb_dns = account['dnsname']
            break
    else:
        raise SpinnakerElbNotFound('Elb for "{0}" in region {1} not found'.format(name, region))
    LOG.info('Found: %s', elb_dns)
    return elb_dns |
python | def _is_inside(self, span1, span2, covered_spans):
    """Returns True if both `span1` and `span2` fall within
    `covered_spans`.

    :param span1: start and end indices of a span
    :type span1: 2-`tuple` of `int`
    :param span2: start and end indices of a span
    :type span2: 2-`tuple` of `int`
    :param covered_spans: lists of start and end indices for parts
        of the texts already covered by a sequence
    :type covered_spans: `list` of two `list`s of 2-`tuple` of `int`
    :rtype: `bool`
    """
    if self._is_span_inside(span1, covered_spans[0]) and \
            self._is_span_inside(span2, covered_spans[1]):
        return True
    return False |
java | public static void swap(int[] intArray1, int array1Index, int[] intArray2, int array2Index) {
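// In-place XOR swap; the equality guard also prevents zeroing the slot when both indices refer to the same element.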
if(intArray1[array1Index] != intArray2[array2Index]) {
intArray1[array1Index] = intArray1[array1Index] ^ intArray2[array2Index];
intArray2[array2Index] = intArray1[array1Index] ^ intArray2[array2Index];
intArray1[array1Index] = intArray1[array1Index] ^ intArray2[array2Index];
}
} |
python | def days(self):
    """Return the 7 days of the week as a list (of datetime.date objects)"""
    monday = self.day(0)
    return [monday + timedelta(days=i) for i in range(7)] |
java | public JSONObject match(List<MatchRequest> input) {
AipRequest request = new AipRequest();
preOperation(request);
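// Serialize each MatchRequest into a JSON array and send it as the raw request body.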
JSONArray arr = new JSONArray();
for (MatchRequest req : input) {
arr.put(req.toJsonObject());
}
request.addBody("body", arr.toString());
request.setBodyFormat(EBodyFormat.RAW_JSON_ARRAY);
request.setUri(FaceConsts.MATCH);
postOperation(request);
return requestServer(request);
} |
python | def CheckRegistryKey(javaKey):
    """ Method checks for the java in the registry entries. """
    from _winreg import ConnectRegistry, HKEY_LOCAL_MACHINE, OpenKey, QueryValueEx
    path = None
    try:
        aReg = ConnectRegistry(None, HKEY_LOCAL_MACHINE)
        rk = OpenKey(aReg, javaKey)
        for i in range(1024):
            currentVersion = QueryValueEx(rk, "CurrentVersion")
            if currentVersion != None:
                key = OpenKey(rk, currentVersion[0])
                if key != None:
                    path = QueryValueEx(key, "JavaHome")
                    return path[0]
    except Exception, err:
        # TODO: Add Warning/Error messages in Logger.
        WriteUcsWarning("Not able to access registry.")
    return None |
java | private void obtainFragment(@NonNull final TypedArray typedArray) {
setFragment(typedArray.getString(R.styleable.NavigationPreference_android_fragment));
} |
python | def compute_node_colors(self):
    """Compute the node colors. Also computes the colorbar."""
    data = [self.graph.node[n][self.node_color] for n in self.nodes]
    if self.group_order == "alphabetically":
        data_reduced = sorted(list(set(data)))
    elif self.group_order == "default":
        data_reduced = list(unique_everseen(data))
    dtype = infer_data_type(data)
    n_grps = num_discrete_groups(data)
    if dtype == "categorical" or dtype == "ordinal":
        if n_grps <= 8:
            cmap = get_cmap(
                cmaps["Accent_{0}".format(n_grps)].mpl_colormap
            )
        else:
            cmap = n_group_colorpallet(n_grps)
    elif dtype == "continuous" and not is_data_diverging(data):
        cmap = get_cmap(cmaps["continuous"].mpl_colormap)
    elif dtype == "continuous" and is_data_diverging(data):
        cmap = get_cmap(cmaps["diverging"].mpl_colormap)
    for d in data:
        idx = data_reduced.index(d) / n_grps
        self.node_colors.append(cmap(idx))
    # Add colorbar if required.
    logging.debug("length of data_reduced: {0}".format(len(data_reduced)))
    logging.debug("dtype: {0}".format(dtype))
    if len(data_reduced) > 1 and dtype == "continuous":
        self.sm = plt.cm.ScalarMappable(
            cmap=cmap,
            norm=plt.Normalize(
                vmin=min(data_reduced),
                vmax=max(data_reduced),  # noqa
            ),
        )
        self.sm._A = [] |
java | public static <T extends Enum<T> & SaneEnum> Set<T> enumSet(Class<T> enumType, int wireValue) {
T[] enumConstants = enumType.getEnumConstants();
List<T> values = Lists.newArrayListWithCapacity(enumConstants.length);
for (T value : enumConstants) {
if ((wireValue & value.getWireValue()) != 0) {
values.add(value);
}
}
return Sets.immutableEnumSet(values);
} |
python | def satisfy_custom_matcher(self, args, kwargs):
    """Return a boolean indicating if the args satisfy the stub

    :return: Whether or not the stub accepts the provided arguments.
    :rtype: bool
    """
    if not self._custom_matcher:
        return False
    try:
        return self._custom_matcher(*args, **kwargs)
    except Exception:
        return False |
java | public void dereferenceControllable()
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(tc, "dereferenceControllable");
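// Release the references held by this controllable so it no longer pins the message processor or destination structures.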
messageProcessor = null;
destinationIndex = null;
destinationManager = null;
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "dereferenceControllable");
} |
java | @Nonnull
public static DAType from(@Nonnull Class<?> clazz, @Nonnull List<DAType> typeArgs) {
return instance(clazz.getSimpleName(), clazz.getCanonicalName(), typeArgs);
} |
python | async def on_raw_730(self, message):
    """ Someone we are monitoring just came online. """
    for nick in message.params[1].split(','):
        self._create_user(nick)
        await self.on_user_online(nick) |
python | def register(email):
    '''
    Register a new user account

    CLI Example:

    .. code-block:: bash

        salt-run venafi.register [email protected]
    '''
    data = __utils__['http.query'](
        '{0}/useraccounts'.format(_base_url()),
        method='POST',
        data=salt.utils.json.dumps({
            'username': email,
            'userAccountType': 'API',
        }),
        status=True,
        decode=True,
        decode_type='json',
        header_dict={
            'Content-Type': 'application/json',
        },
    )
    status = data['status']
    if six.text_type(status).startswith('4') or six.text_type(status).startswith('5'):
        raise CommandExecutionError(
            'There was an API error: {0}'.format(data['error'])
        )
    return data.get('dict', {}) |
python | def get(
self,
name=None,
group=None,
index=None,
raster=None,
samples_only=False,
data=None,
raw=False,
ignore_invalidation_bits=False,
source=None,
record_offset=0,
record_count=None,
copy_master=True,
):
"""Gets channel samples. The raw data group samples are not loaded to
memory so it is advised to use ``filter`` or ``select`` instead of
performing several ``get`` calls.
Channel can be specified in two ways:
* using the first positional argument *name*
* if *source* is given this will be first used to validate the
channel selection
* if there are multiple occurrences for this channel then the
*group* and *index* arguments can be used to select a specific
group.
* if there are multiple occurrences for this channel and either the
*group* or *index* arguments is None then a warning is issued
* using the group number (keyword argument *group*) and the channel
number (keyword argument *index*). Use *info* method for group and
channel numbers
If the *raster* keyword argument is not *None* the output is
interpolated accordingly
Parameters
----------
name : string
name of channel
group : int
0-based group index
index : int
0-based channel index
raster : float
time raster in seconds
samples_only : bool
if *True* return only the channel samples as numpy array; if
*False* return a *Signal* object
data : bytes
prevent redundant data read by providing the raw data group samples
raw : bool
return channel samples without applying the conversion rule; default
`False`
ignore_invalidation_bits : bool
option to ignore invalidation bits
source : str
source name used to select the channel
record_offset : int
if *data=None* use this to select the record offset from which the
group data should be loaded
record_count : int
number of records to read; default *None* and in this case all
available records are used
copy_master : bool
make a copy of the timebase for this channel
Returns
-------
res : (numpy.array, numpy.array) | Signal
returns *Signal* if *samples_only*=*False* (default option),
otherwise returns a (numpy.array, numpy.array) tuple of samples and
invalidation bits. If invalidation bits are not used or if
*ignore_invalidation_bits* is False, then the second item will be
None.
The *Signal* samples are:
* numpy recarray for channels that have composition/channel
array address or for channel of type
CANOPENDATE, CANOPENTIME
* numpy array for all the rest
Raises
------
MdfException :
* if the channel name is not found
* if the group index is out of range
* if the channel index is out of range
Examples
--------
>>> from asammdf import MDF, Signal
>>> import numpy as np
>>> t = np.arange(5)
>>> s = np.ones(5)
>>> mdf = MDF(version='4.10')
>>> for i in range(4):
... sigs = [Signal(s*(i*10+j), t, name='Sig') for j in range(1, 4)]
... mdf.append(sigs)
...
>>> # first group and channel index of the specified channel name
...
>>> mdf.get('Sig')
UserWarning: Multiple occurances for channel "Sig". Using first occurance from data group 4. Provide both "group" and "index" arguments to select another data group
<Signal Sig:
samples=[ 1. 1. 1. 1. 1.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # first channel index in the specified group
...
>>> mdf.get('Sig', 1)
<Signal Sig:
samples=[ 11. 11. 11. 11. 11.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel named Sig from group 1 channel index 2
...
>>> mdf.get('Sig', 1, 2)
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # channel index 1 or group 2
...
>>> mdf.get(None, 2, 1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> mdf.get(group=2, index=1)
<Signal Sig:
samples=[ 21. 21. 21. 21. 21.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
>>> # validation using source name
...
>>> mdf.get('Sig', source='VN7060')
<Signal Sig:
samples=[ 12. 12. 12. 12. 12.]
timestamps=[0 1 2 3 4]
unit=""
info=None
comment="">
"""
gp_nr, ch_nr = self._validate_channel_selection(
name, group, index, source=source
)
grp = self.groups[gp_nr]
interp_mode = self._integer_interpolation
original_data = data
if ch_nr >= 0:
# get the channel object
channel = grp.channels[ch_nr]
dependency_list = grp.channel_dependencies[ch_nr]
# get data group record
parents, dtypes = grp.parents, grp.types
if parents is None:
parents, dtypes = self._prepare_record(grp)
# get group data
if data is None:
data = self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
else:
data = (data,)
channel_invalidation_present = (
channel.flags
& (v4c.FLAG_INVALIDATION_BIT_VALID | v4c.FLAG_ALL_SAMPLES_VALID)
== v4c.FLAG_INVALIDATION_BIT_VALID
)
bit_count = channel.bit_count
else:
# get data group record
parents, dtypes = self._prepare_record(grp)
parent, bit_offset = parents[ch_nr]
channel_invalidation_present = False
dependency_list = None
channel = grp.logging_channels[-ch_nr - 1]
# get group data
if data is None:
data = self._load_data(
grp, record_offset=record_offset, record_count=record_count
)
else:
data = (data,)
bit_count = channel.bit_count
data_type = channel.data_type
channel_type = channel.channel_type
stream_sync = channel_type == v4c.CHANNEL_TYPE_SYNC
encoding = None
master_is_required = not samples_only or raster
# check if this is a channel array
if dependency_list:
if not isinstance(dependency_list[0], ChannelArrayBlock):
# structure channel composition
_dtype = dtype(channel.dtype_fmt)
if _dtype.itemsize == bit_count // 8:
fast_path = True
channel_values = []
timestamps = []
invalidation_bits = []
byte_offset = channel.byte_offset
record_size = (
grp.channel_group.samples_byte_nr
+ grp.channel_group.invalidation_bytes_nr
)
count = 0
for fragment in data:
bts = fragment[0]
types = [
("", f"V{byte_offset}"),
("vals", _dtype),
("", f"V{record_size - _dtype.itemsize - byte_offset}"),
]
channel_values.append(fromstring(bts, types)["vals"].copy())
if master_is_required:
timestamps.append(
self.get_master(
gp_nr, fragment, copy_master=copy_master
)
)
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
count += 1
else:
fast_path = False
names = [grp.channels[ch_nr].name for _, ch_nr in dependency_list]
channel_values = [[] for _ in dependency_list]
timestamps = []
invalidation_bits = []
count = 0
for fragment in data:
for i, (dg_nr, ch_nr) in enumerate(dependency_list):
vals = self.get(
group=dg_nr,
index=ch_nr,
samples_only=True,
data=fragment,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=record_count,
)[0]
channel_values[i].append(vals)
if master_is_required:
timestamps.append(
self.get_master(
gp_nr, fragment, copy_master=copy_master
)
)
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
count += 1
if fast_path:
if count > 1:
vals = concatenate(channel_values)
else:
vals = channel_values[0]
else:
if count > 1:
arrays = [concatenate(lst) for lst in channel_values]
else:
arrays = [lst[0] for lst in channel_values]
types = [
(name_, arr.dtype, arr.shape[1:])
for name_, arr in zip(names, arrays)
]
types = dtype(types)
vals = fromarrays(arrays, dtype=types)
if master_is_required:
if count > 1:
timestamps = concatenate(timestamps)
else:
timestamps = timestamps[0]
if channel_invalidation_present:
if count > 1:
invalidation_bits = concatenate(invalidation_bits)
else:
invalidation_bits = invalidation_bits[0]
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
if raster and len(timestamps) > 1:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=interp_mode)
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
else:
# channel arrays
channel_group = grp.channel_group
samples_size = (
channel_group.samples_byte_nr + channel_group.invalidation_bytes_nr
)
channel_values = []
timestamps = []
invalidation_bits = []
count = 0
for fragment in data:
data_bytes, offset, _count = fragment
cycles = len(data_bytes) // samples_size
arrays = []
types = []
try:
parent, bit_offset = parents[ch_nr]
except KeyError:
parent, bit_offset = None, None
if parent is not None:
if grp.record is None:
dtypes = grp.types
if dtypes.itemsize:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = None
else:
record = grp.record
vals = record[parent]
else:
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
vals = vals.copy()
dep = dependency_list[0]
if dep.flags & v4c.FLAG_CA_INVERSE_LAYOUT:
shape = vals.shape
shape = (shape[0],) + shape[1:][::-1]
vals = vals.reshape(shape)
axes = (0,) + tuple(range(len(shape) - 1, 0, -1))
vals = transpose(vals, axes=axes)
cycles_nr = len(vals)
for ca_block in dependency_list[:1]:
dims_nr = ca_block.dims
if ca_block.ca_type == v4c.CA_TYPE_SCALE_AXIS:
shape = (ca_block.dim_size_0,)
arrays.append(vals)
dtype_pair = channel.name, vals.dtype, shape
types.append(dtype_pair)
elif ca_block.ca_type == v4c.CA_TYPE_LOOKUP:
shape = vals.shape[1:]
arrays.append(vals)
dtype_pair = channel.name, vals.dtype, shape
types.append(dtype_pair)
if ca_block.flags & v4c.FLAG_CA_FIXED_AXIS:
for i in range(dims_nr):
shape = (ca_block[f"dim_size_{i}"],)
axis = []
for j in range(shape[0]):
key = f"axis_{i}_value_{j}"
axis.append(ca_block[key])
axis = array([axis for _ in range(cycles_nr)])
arrays.append(axis)
dtype_pair = (f"axis_{i}", axis.dtype, shape)
types.append(dtype_pair)
else:
for i in range(dims_nr):
try:
ref_dg_nr, ref_ch_nr = ca_block.referenced_channels[
i
]
except:
debug_channel(
self, grp, channel, dependency_list
)
raise
axisname = (
self.groups[ref_dg_nr].channels[ref_ch_nr].name
)
shape = (ca_block[f"dim_size_{i}"],)
if ref_dg_nr == gp_nr:
axis_values = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
data=fragment,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
else:
channel_group = grp.channel_group
record_size = channel_group.samples_byte_nr
record_size += (
channel_group.invalidation_bytes_nr
)
start = offset // record_size
end = start + len(data_bytes) // record_size + 1
ref = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
axis_values = ref[start:end].copy()
axis_values = axis_values[axisname]
arrays.append(axis_values)
dtype_pair = (axisname, axis_values.dtype, shape)
types.append(dtype_pair)
elif ca_block.ca_type == v4c.CA_TYPE_ARRAY:
shape = vals.shape[1:]
arrays.append(vals)
dtype_pair = channel.name, vals.dtype, shape
types.append(dtype_pair)
for ca_block in dependency_list[1:]:
dims_nr = ca_block.dims
if ca_block.flags & v4c.FLAG_CA_FIXED_AXIS:
for i in range(dims_nr):
shape = (ca_block[f"dim_size_{i}"],)
axis = []
for j in range(shape[0]):
key = f"axis_{i}_value_{j}"
axis.append(ca_block[key])
axis = array([axis for _ in range(cycles_nr)])
arrays.append(axis)
types.append((f"axis_{i}", axis.dtype, shape))
else:
for i in range(dims_nr):
ref_dg_nr, ref_ch_nr = ca_block.referenced_channels[i]
axisname = (
self.groups[ref_dg_nr].channels[ref_ch_nr].name
)
shape = (ca_block[f"dim_size_{i}"],)
if ref_dg_nr == gp_nr:
axis_values = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
data=fragment,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
else:
channel_group = grp.channel_group
record_size = channel_group.samples_byte_nr
record_size += channel_group.invalidation_bytes_nr
start = offset // record_size
end = start + len(data_bytes) // record_size + 1
ref = self.get(
group=ref_dg_nr,
index=ref_ch_nr,
samples_only=True,
ignore_invalidation_bits=ignore_invalidation_bits,
record_offset=record_offset,
record_count=cycles,
)[0]
axis_values = ref[start:end].copy()
axis_values = axis_values[axisname]
arrays.append(axis_values)
dtype_pair = axisname, axis_values.dtype, shape
types.append(dtype_pair)
vals = fromarrays(arrays, dtype(types))
if master_is_required:
timestamps.append(
self.get_master(gp_nr, fragment, copy_master=copy_master)
)
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
channel_values.append(vals)
count += 1
if count > 1:
vals = concatenate(channel_values)
elif count == 1:
vals = channel_values[0]
else:
vals = []
if master_is_required:
if count > 1:
timestamps = concatenate(timestamps)
else:
timestamps = timestamps[0]
if channel_invalidation_present:
if count > 1:
invalidation_bits = concatenate(invalidation_bits)
else:
invalidation_bits = invalidation_bits[0]
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
if raster and len(timestamps) > 1:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=interp_mode)
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
conversion = channel.conversion
else:
# get channel values
if channel_type in {
v4c.CHANNEL_TYPE_VIRTUAL,
v4c.CHANNEL_TYPE_VIRTUAL_MASTER,
}:
if not channel.dtype_fmt:
channel.dtype_fmt = get_fmt_v4(data_type, 64)
ch_dtype = dtype(channel.dtype_fmt)
channel_values = []
timestamps = []
invalidation_bits = []
channel_group = grp.channel_group
record_size = channel_group.samples_byte_nr
record_size += channel_group.invalidation_bytes_nr
count = 0
for fragment in data:
data_bytes, offset, _count = fragment
offset = offset // record_size
vals = arange(len(data_bytes) // record_size, dtype=ch_dtype)
vals += offset
if master_is_required:
timestamps.append(
self.get_master(gp_nr, fragment, copy_master=copy_master)
)
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
channel_values.append(vals)
count += 1
if count > 1:
vals = concatenate(channel_values)
elif count == 1:
vals = channel_values[0]
else:
vals = []
if master_is_required:
if count > 1:
timestamps = concatenate(timestamps)
else:
timestamps = timestamps[0]
if channel_invalidation_present:
if count > 1:
invalidation_bits = concatenate(invalidation_bits)
else:
invalidation_bits = invalidation_bits[0]
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=interp_mode)
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
else:
channel_values = []
timestamps = []
invalidation_bits = []
count = 0
for fragment in data:
data_bytes, offset, _count = fragment
try:
parent, bit_offset = parents[ch_nr]
except KeyError:
parent, bit_offset = None, None
if parent is not None:
if grp.record is None:
record = fromstring(data_bytes, dtype=dtypes)
else:
record = grp.record
vals = record[parent]
dtype_ = vals.dtype
shape_ = vals.shape
size = vals.dtype.itemsize
for dim in shape_[1:]:
size *= dim
kind_ = dtype_.kind
if kind_ == "b":
pass
elif len(shape_) > 1 and data_type != v4c.DATA_TYPE_BYTEARRAY:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
elif kind_ not in "ui":
if bit_offset:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if bit_count != size * 8:
if (
bit_count % 8 == 0
and size in (2, 4, 8)
and data_type <= 3
): # integer types
vals = vals.view(f"<u{size}")
if data_type in v4c.SIGNED_INT:
vals = as_non_byte_sized_signed_int(
vals, bit_count
)
else:
mask = (1 << bit_count) - 1
if vals.flags.writeable:
vals &= mask
else:
vals = vals & mask
else:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if data_type <= 3:
if not channel.dtype_fmt:
channel.dtype_fmt = get_fmt_v4(data_type, bit_count, channel_type)
channel_dtype = dtype(channel.dtype_fmt.split(')')[-1])
vals = vals.view(channel_dtype)
else:
if data_type <= 3:
if dtype_.byteorder == ">":
if bit_offset or bit_count != size << 3:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if bit_offset:
if kind_ == "i":
vals = vals.astype(
dtype(f"{dtype_.byteorder}u{size}")
)
vals >>= bit_offset
else:
vals = vals >> bit_offset
if bit_count != size << 3:
if data_type in v4c.SIGNED_INT:
vals = as_non_byte_sized_signed_int(
vals, bit_count
)
else:
mask = (1 << bit_count) - 1
if vals.flags.writeable:
vals &= mask
else:
vals = vals & mask
else:
if bit_count != size * 8:
vals = self._get_not_byte_aligned_data(
data_bytes, grp, ch_nr
)
else:
if not channel.dtype_fmt:
channel.dtype_fmt = get_fmt_v4(data_type, bit_count, channel_type)
channel_dtype = dtype(channel.dtype_fmt.split(')')[-1])
vals = vals.view(channel_dtype)
else:
vals = self._get_not_byte_aligned_data(data_bytes, grp, ch_nr)
if bit_count == 1 and self._single_bit_uint_as_bool:
vals = array(vals, dtype=bool)
else:
if not channel.dtype_fmt:
channel.dtype_fmt = get_fmt_v4(
data_type, bit_count, channel_type,
)
channel_dtype = dtype(channel.dtype_fmt.split(")")[-1])
if vals.dtype != channel_dtype:
vals = vals.astype(channel_dtype)
if master_is_required:
timestamps.append(
self.get_master(gp_nr, fragment, copy_master=copy_master)
)
if channel_invalidation_present:
invalidation_bits.append(
self.get_invalidation_bits(gp_nr, channel, fragment)
)
if vals.flags.writeable:
channel_values.append(vals)
else:
channel_values.append(vals.copy())
count += 1
if count > 1:
vals = concatenate(channel_values)
elif count == 1:
vals = channel_values[0]
else:
vals = []
if master_is_required:
if count > 1:
timestamps = concatenate(timestamps)
elif count == 1:
timestamps = timestamps[0]
else:
timestamps = []
if channel_invalidation_present:
if count > 1:
invalidation_bits = concatenate(invalidation_bits)
elif count == 1:
invalidation_bits = invalidation_bits[0]
else:
invalidation_bits = []
if not ignore_invalidation_bits:
vals = vals[nonzero(~invalidation_bits)[0]]
if master_is_required:
timestamps = timestamps[nonzero(~invalidation_bits)[0]]
if raster and len(timestamps) > 1:
num = float(float32((timestamps[-1] - timestamps[0]) / raster))
if num.is_integer():
t = linspace(timestamps[0], timestamps[-1], int(num))
else:
t = arange(timestamps[0], timestamps[-1], raster)
vals = (
Signal(vals, timestamps, name="_")
.interp(t, interpolation_mode=interp_mode)
)
vals, timestamps, invalidation_bits = (
vals.samples,
vals.timestamps,
vals.invalidation_bits,
)
# get the channel conversion
conversion = channel.conversion
if channel_type == v4c.CHANNEL_TYPE_VLSD:
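# VLSD channel: the samples read so far are byte offsets into the separate signal data block; resolve each offset to its variable-length value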
signal_data = self._load_signal_data(group=grp, index=ch_nr)
if signal_data:
values = []
vals = vals.tolist()
for offset in vals:
(str_size,) = UINT32_uf(signal_data, offset)
offset += 4
values.append(signal_data[offset : offset + str_size])
if data_type == v4c.DATA_TYPE_BYTEARRAY:
vals = array(values)
vals = vals.view(dtype=f"({vals.itemsize},)u1")
else:
vals = array(values)
if data_type == v4c.DATA_TYPE_STRING_UTF_16_BE:
encoding = "utf-16-be"
elif data_type == v4c.DATA_TYPE_STRING_UTF_16_LE:
encoding = "utf-16-le"
elif data_type == v4c.DATA_TYPE_STRING_UTF_8:
encoding = "utf-8"
elif data_type == v4c.DATA_TYPE_STRING_LATIN_1:
encoding = "latin-1"
else:
raise MdfException(
f'wrong data type "{data_type}" for vlsd channel'
)
else:
# no VLSD signal data samples
if data_type != v4c.DATA_TYPE_BYTEARRAY:
vals = array([], dtype="S")
if data_type == v4c.DATA_TYPE_STRING_UTF_16_BE:
encoding = "utf-16-be"
elif data_type == v4c.DATA_TYPE_STRING_UTF_16_LE:
encoding = "utf-16-le"
elif data_type == v4c.DATA_TYPE_STRING_UTF_8:
encoding = "utf-8"
elif data_type == v4c.DATA_TYPE_STRING_LATIN_1:
encoding = "latin-1"
else:
raise MdfException(
f'wrong data type "{data_type}" for vlsd channel'
)
else:
vals = array([], dtype=get_fmt_v4(data_type, bit_count, v4c.CHANNEL_TYPE_VALUE))
elif channel_type in {
v4c.CHANNEL_TYPE_VALUE,
v4c.CHANNEL_TYPE_MLSD,
} and (
v4c.DATA_TYPE_STRING_LATIN_1
<= data_type
<= v4c.DATA_TYPE_STRING_UTF_16_BE
):
if data_type == v4c.DATA_TYPE_STRING_UTF_16_BE:
encoding = "utf-16-be"
elif data_type == v4c.DATA_TYPE_STRING_UTF_16_LE:
encoding = "utf-16-le"
elif data_type == v4c.DATA_TYPE_STRING_UTF_8:
encoding = "utf-8"
elif data_type == v4c.DATA_TYPE_STRING_LATIN_1:
encoding = "latin-1"
else:
raise MdfException(
f'wrong data type "{data_type}" for string channel'
)
# CANopen date
if data_type == v4c.DATA_TYPE_CANOPEN_DATE:
vals = vals.tostring()
types = dtype(
[
("ms", "<u2"),
("min", "<u1"),
("hour", "<u1"),
("day", "<u1"),
("month", "<u1"),
("year", "<u1"),
]
)
vals = vals.view(types)
arrays = []
arrays.append(vals["ms"])
# bit 6 and 7 of minutes are reserved
arrays.append(vals["min"] & 0x3F)
# only the first 4 bits of hour are used
arrays.append(vals["hour"] & 0xF)
# the first 4 bits are the day number
arrays.append(vals["day"] & 0xF)
# bit 6 and 7 of month are reserved
arrays.append(vals["month"] & 0x3F)
# bit 7 of year is reserved
arrays.append(vals["year"] & 0x7F)
# add summer or standard time information for hour
arrays.append((vals["hour"] & 0x80) >> 7)
# add day of week information
arrays.append((vals["day"] & 0xF0) >> 4)
names = [
"ms",
"min",
"hour",
"day",
"month",
"year",
"summer_time",
"day_of_week",
]
vals = fromarrays(arrays, names=names)
del arrays
conversion = None
# CANopen time
elif data_type == v4c.DATA_TYPE_CANOPEN_TIME:
types = dtype([("ms", "<u4"), ("days", "<u2")])
vals = vals.view(types)
arrays = []
# bits 28 to 31 are reserved for ms
arrays.append(vals["ms"] & 0xFFFFFFF)
arrays.append(vals["days"] & 0x3F)
names = ["ms", "days"]
vals = fromarrays(arrays, names=names)
del arrays
if not raw:
if conversion:
vals = conversion.convert(vals)
conversion = None
if samples_only:
if not channel_invalidation_present or not ignore_invalidation_bits:
invalidation_bits = None
res = vals, invalidation_bits
else:
# search for unit in conversion texts
if name is None:
name = channel.name
unit = conversion and conversion.unit or channel.unit
comment = channel.comment
source = channel.source
cg_source = grp.channel_group.acq_source
if source:
source = SignalSource(
source.name,
source.path,
source.comment,
source.source_type,
source.bus_type,
)
elif cg_source:
source = SignalSource(
cg_source.name,
cg_source.path,
cg_source.comment,
cg_source.source_type,
cg_source.bus_type,
)
else:
source = None
if hasattr(channel, "attachment_addr"):
index = self._attachments_map[channel.attachment_addr]
attachment = self.extract_attachment(index=index)
elif channel_type == v4c.CHANNEL_TYPE_SYNC:
index = self._attachments_map[channel.data_block_addr]
attachment = self.extract_attachment(index=index)
else:
attachment = ()
master_metadata = self._master_channel_metadata.get(gp_nr, None)
if not channel_invalidation_present or not ignore_invalidation_bits:
invalidation_bits = None
try:
res = Signal(
samples=vals,
timestamps=timestamps,
unit=unit,
name=name,
comment=comment,
conversion=conversion,
raw=raw,
master_metadata=master_metadata,
attachment=attachment,
source=source,
display_name=channel.display_name,
bit_count=bit_count,
stream_sync=stream_sync,
invalidation_bits=invalidation_bits,
encoding=encoding,
)
except:
debug_channel(self, grp, channel, dependency_list)
raise
return res |
java | boolean recoverLease(String src, String holder, String clientMachine,
boolean discardLastBlock) throws IOException {
// convert names to array of bytes w/o holding lock
byte[][] components = INodeDirectory.getPathComponents(src);
writeLock();
try {
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot recover the lease of " + src, safeMode);
}
INode inode = dir.getFileINode(components);
if (inode == null) {
throw new FileNotFoundException("File not found " + src);
}
if (!inode.isUnderConstruction()) {
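// A file that is not under construction holds no active lease, so there is nothing to recover.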
return true;
}
if (isPermissionEnabled) {
INode[] inodes = dir.getExistingPathINodes(src);
if (isPermissionCheckingEnabled(inodes)) {
checkPathAccess(src, inodes, FsAction.WRITE);
}
}
return recoverLeaseInternal(inode, src, holder, clientMachine,
true, discardLastBlock);
} finally {
writeUnlock();
}
} |
java | private void loadAliases(ApplicationContext applicationContext, String propertyFile) {
if (propertyFile.isEmpty()) {
return;
}
Resource[] resources;
try {
resources = applicationContext.getResources(propertyFile);
} catch (IOException e) {
log.error("Failed to locate alias property file: " + propertyFile, e);
return;
}
for (Resource resource : resources) {
if (!resource.exists()) {
log.info("Did not find alias property file: " + resource.getFilename());
continue;
}
try (InputStream is = resource.getInputStream();) {
Properties props = new Properties();
props.load(is);
for (Entry<Object, Object> entry : props.entrySet()) {
try {
register((String) entry.getKey(), (String) entry.getValue());
entryCount++;
} catch (Exception e) {
log.error("Error registering alias for '" + entry.getKey() + "'.", e);
}
}
fileCount++;
} catch (IOException e) {
log.error("Failed to load alias property file: " + resource.getFilename(), e);
}
}
} |
python | def set_file_path(self, path):
    """Update the file_path Entry widget"""
    self.file_path.delete(0, END)
    self.file_path.insert(0, path) |
python | def new(cls, arg):
    """
    Creates a new Parameter object from the given ParameterArgument.
    """
    content = None
    if arg.kind == 'file':
        if os.path.exists(arg.value):
            with open(arg.value, 'r') as f:
                content = f.read()
        else:
            raise Exception('File does not exist: {}'.format(arg.value))
    elif arg.kind == 'cli':
        content = arg.value
    for source_cls in cls.sources:
        if source_cls.supports_source(arg):
            return source_cls(content)
    msg = 'Unsupported Parameter Source "{}"'
    raise Exception(msg.format(arg.value)) |
java | public static <T> Collection<T> unique(Collection<T> self) {
return unique(self, true);
} |
java | private Function findField(String name) {
Selection slkt;
Function f;
if (name.indexOf('.') == -1) {
name = type.getName() + "." + name;
}
slkt = Method.forName(name);
if (slkt.size() == 0) {
f = Field.forName(name);
if (f != null) {
slkt = slkt.add(new Selection(f));
}
}
slkt = slkt.restrictArgumentCount(1);
slkt = slkt.restrictArgumentType(0, type);
switch (slkt.size()) {
case 0:
throw new RuntimeException("no such field: " + name);
case 1:
return slkt.getFunction();
default:
throw new RuntimeException("ambiguous field: " + name);
}
} |
python | def reference(self, reference):
    """
    Sets the reference of this CreateCertificateIssuerConfig.
    The certificate name, as created in the factory, to which the certificate issuer configuration applies. The following names are reserved and cannot be configured: LwM2M, BOOTSTRAP.

    :param reference: The reference of this CreateCertificateIssuerConfig.
    :type: str
    """
    if reference is None:
        raise ValueError("Invalid value for `reference`, must not be `None`")
    if reference is not None and len(reference) > 50:
        raise ValueError("Invalid value for `reference`, length must be less than or equal to `50`")
    if reference is not None and not re.search('(?!mbed\\.)[\\w-_.]{1,50}', reference):
        raise ValueError("Invalid value for `reference`, must be a follow pattern or equal to `/(?!mbed\\.)[\\w-_.]{1,50}/`")

    self._reference = reference |
python | def extract(args):
    """
    %prog extract idsfile sizesfile

    Extract the lines containing only the given IDs.
    """
    p = OptionParser(extract.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    idsfile, sizesfile = args
    sizes = Sizes(sizesfile).mapping
    fp = open(idsfile)
    for row in fp:
        name = row.strip()
        size = sizes[name]
        print("\t".join(str(x) for x in (name, size))) |
python | def getNumberOfRegularSamples(self):
    """
    Returns the number of regular samples.

    :returns: number of regular samples
    :rtype: integer
    """
    analyses = self.getRegularAnalyses()
    samples = [a.getRequestUID() for a in analyses]
    # discarding any duplicate values
    return len(set(samples)) |
python | def return_collection(collection_type):
    """Change method return value from raw API output to collection of models
    """
    def outer_func(func):
        @functools.wraps(func)
        def inner_func(self, *pargs, **kwargs):
            result = func(self, *pargs, **kwargs)
            return list(map(collection_type, result))
        return inner_func
    return outer_func |
python | def basek_string_to_base10_integer(k, x):
    """Convert a base k string into an integer."""
    assert 1 < k <= max_k_labeled
    return sum(numeral_index[c]*(k**i)
               for i, c in enumerate(reversed(x))) |
java | public static String getHBCIHostForBLZ(String blz) {
BankInfo info = getBankInfo(blz);
if (info == null)
return "";
return info.getRdhAddress() != null ? info.getRdhAddress() : "";
} |
java | private CompletableFuture<WriterFlushResult> flushPendingAppends(Duration timeout) {
// Gather an InputStream made up of all the operations we can flush.
FlushArgs flushArgs;
try {
flushArgs = getFlushArgs();
} catch (DataCorruptionException ex) {
return Futures.failedFuture(ex);
}
long traceId = LoggerHelpers.traceEnterWithContext(log, this.traceObjectId, "flushPendingAppends");
if (flushArgs.getLength() == 0 && flushArgs.getAttributes().isEmpty()) {
// Nothing to flush.
WriterFlushResult result = new WriterFlushResult();
LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
return CompletableFuture.completedFuture(result);
}
// Flush them.
TimeoutTimer timer = new TimeoutTimer(timeout);
CompletableFuture<Void> flush;
if (flushArgs.getLength() == 0) {
flush = CompletableFuture.completedFuture(null);
} else {
flush = createSegmentIfNecessary(
() -> this.storage.write(this.handle.get(), this.metadata.getStorageLength(), flushArgs.getStream(), flushArgs.getLength(), timer.getRemaining()),
timer.getRemaining());
}
if (!flushArgs.getAttributes().isEmpty()) {
flush = flush.thenComposeAsync(v -> handleAttributeException(
this.dataSource.persistAttributes(this.metadata.getId(), flushArgs.attributes, timer.getRemaining())));
}
return flush
.thenApplyAsync(v -> {
WriterFlushResult result = updateStatePostFlush(flushArgs);
LoggerHelpers.traceLeave(log, this.traceObjectId, "flushPendingAppends", traceId, result);
return result;
}, this.executor)
.exceptionally(ex -> {
if (Exceptions.unwrap(ex) instanceof BadOffsetException) {
// We attempted to write at an offset that already contained other data. This can happen for a number of
// reasons, but we do not have enough information here to determine why. We need to enter reconciliation
// mode, which will determine the actual state of the segment in storage and take appropriate actions.
setState(AggregatorState.ReconciliationNeeded);
}
// Rethrow all exceptions.
throw new CompletionException(ex);
});
} |
python | def _try_acquire_lease(self, shard_state, tstate):
"""Validate datastore and the task payload are consistent.
If so, attempt to get a lease on this slice's execution.
See model.ShardState doc on slice_start_time.
Args:
shard_state: model.ShardState from datastore.
tstate: model.TransientShardState from taskqueue paylod.
Returns:
A _TASK_DIRECTIVE enum. PROCEED_TASK if lock is acquired.
RETRY_TASK if task should be retried, DROP_TASK if task should
be dropped. Only old tasks (comparing to datastore state)
will be dropped. Future tasks are retried until they naturally
become old so that we don't ever stuck MR.
"""
# Controller will tally shard_states and properly handle the situation.
if not shard_state:
logging.warning("State not found for shard %s; Possible spurious task "
"execution. Dropping this task.",
tstate.shard_id)
return self._TASK_DIRECTIVE.DROP_TASK
if not shard_state.active:
logging.warning("Shard %s is not active. Possible spurious task "
"execution. Dropping this task.", tstate.shard_id)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
# Validate shard retry count.
if shard_state.retries > tstate.retries:
logging.warning(
"Got shard %s from previous shard retry %s. Possible spurious "
"task execution. Dropping this task.",
tstate.shard_id,
tstate.retries)
logging.warning(str(shard_state))
return self._TASK_DIRECTIVE.DROP_TASK
elif shard_state.retries < tstate.retries:
# By the end of last slice, task enqueue succeeded but datastore commit
# failed. That transaction will be retried and adding the same task
# will pass.
logging.warning(
"ShardState for %s is behind slice. Waiting for it to catch up",
shard_state.shard_id)
return self._TASK_DIRECTIVE.RETRY_TASK
# Validate slice id.
# Taskqueue executes old successful tasks.
if shard_state.slice_id > tstate.slice_id:
logging.warning(
"Task %s-%s is behind ShardState %s. Dropping task.""",
tstate.shard_id, tstate.slice_id, shard_state.slice_id)
return self._TASK_DIRECTIVE.DROP_TASK
# By the end of last slice, task enqueue succeeded but datastore commit
# failed. That transaction will be retried and adding the same task
# will pass. User data is duplicated in this case.
elif shard_state.slice_id < tstate.slice_id:
logging.warning(
"Task %s-%s is ahead of ShardState %s. Waiting for it to catch up.",
tstate.shard_id, tstate.slice_id, shard_state.slice_id)
return self._TASK_DIRECTIVE.RETRY_TASK
# Check potential duplicated tasks for the same slice.
# See model.ShardState doc.
if shard_state.slice_start_time:
countdown = self._wait_time(shard_state,
parameters._LEASE_DURATION_SEC)
if countdown > 0:
logging.warning(
"Last retry of slice %s-%s may be still running."
"Will try again in %s seconds", tstate.shard_id, tstate.slice_id,
countdown)
# TODO(user): There might be a better way. Taskqueue's countdown
# only applies to add new tasks, not retry of tasks.
# Reduce contention.
time.sleep(countdown)
return self._TASK_DIRECTIVE.RETRY_TASK
# lease could have expired. Verify with logs API.
else:
if self._wait_time(shard_state,
parameters._MAX_LEASE_DURATION_SEC):
if not self._has_old_request_ended(shard_state):
logging.warning(
"Last retry of slice %s-%s is still in flight with request_id "
"%s. Will try again later.", tstate.shard_id, tstate.slice_id,
shard_state.slice_request_id)
return self._TASK_DIRECTIVE.RETRY_TASK
else:
logging.warning(
"Last retry of slice %s-%s has no log entry and has"
"timed out after %s seconds",
tstate.shard_id, tstate.slice_id,
parameters._MAX_LEASE_DURATION_SEC)
# Lease expired or slice_start_time not set.
config = util.create_datastore_write_config(tstate.mapreduce_spec)
@db.transactional(retries=5)
def _tx():
"""Use datastore to set slice_start_time to now.
If failed for any reason, raise error to retry the task (hence all
the previous validation code). The task would die naturally eventually.
Raises:
Rollback: If the shard state is missing.
Returns:
A _TASK_DIRECTIVE enum.
"""
fresh_state = model.ShardState.get_by_shard_id(tstate.shard_id)
if not fresh_state:
logging.warning("ShardState missing.")
raise db.Rollback()
if (fresh_state.active and
fresh_state.slice_id == shard_state.slice_id and
fresh_state.slice_start_time == shard_state.slice_start_time):
shard_state.slice_start_time = datetime.datetime.now()
shard_state.slice_request_id = os.environ.get("REQUEST_LOG_ID")
shard_state.acquired_once = True
shard_state.put(config=config)
return self._TASK_DIRECTIVE.PROCEED_TASK
else:
logging.warning(
"Contention on slice %s-%s execution. Will retry again.",
tstate.shard_id, tstate.slice_id)
# One proposer should win. In case all lost, back off arbitrarily.
time.sleep(random.randrange(1, 5))
return self._TASK_DIRECTIVE.RETRY_TASK
return _tx() |
python | def delete(self):
    """
    Delete the instance from redis storage.
    """
    # Delete each field
    for field_name in self._fields:
        field = self.get_field(field_name)
        if not isinstance(field, PKField):
            # pk has no stored key
            field.delete()
    # Remove the pk from the model collection
    self.connection.srem(self.get_field('pk').collection_key, self._pk)
    # Deactivate the instance
    delattr(self, "_pk") |
java | @Override
public ICmdLineArg<E> setEnumCriteria(final String _enumClassName)
throws ParseException, IOException
{
this.enumClassName = _enumClassName;
Class<?> enumClass;
try
{
enumClass = CmdLine.ClassLoader.loadClass(_enumClassName);
} catch (final ClassNotFoundException e)
{
throw new ParseException("Enum class not found: " + e.getMessage(), 0);
}
final List<E> list = new ArrayList<>();
if (!enumClass.isEnum())
throw new ParseException("Enum class expected, found " + enumClass.getName(), 0);
final Object[] constants = enumClass.getEnumConstants();
for (final Object constant : constants)
{
final String econst = constant.toString();
list.add(convert(econst, true, null));
}
setCriteria(new EnumCriteria<>(list));
return this;
} |
python | def toStr(self, separator=':'):
    """
    Returns the address as string consisting of 12 hex chars separated
    by separator.
    """
    return separator.join(('{:02x}'.format(x) for x in self.__value)) |
python | def restart(self, all=False):
    """Restarts the given process."""
    if all:
        data = {'type': self.type}
    else:
        data = {'ps': self.process}
    r = self._h._http_resource(
        method='POST',
        resource=('apps', self.app.name, 'ps', 'restart'),
        data=data
    )
    r.raise_for_status() |
python | def _link_variables_on_block(self, block, kb):
    """
    Link atoms (AIL expressions) in the given block to corresponding variables identified previously.

    :param ailment.Block block: The AIL block to work on.
    :return: None
    """
    variable_manager = kb.variables[self.function.addr]

    for stmt_idx, stmt in enumerate(block.statements):
        # I wish I could do functional programming in this method...
        stmt_type = type(stmt)
        if stmt_type is ailment.Stmt.Store:
            # find a memory variable
            mem_vars = variable_manager.find_variables_by_atom(block.addr, stmt_idx, stmt)
            if len(mem_vars) == 1:
                stmt.variable, stmt.offset = next(iter(mem_vars))
            self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, stmt.data)
        elif stmt_type is ailment.Stmt.Assignment:
            self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, stmt.dst)
            self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, stmt.src)
        elif stmt_type is ailment.Stmt.ConditionalJump:
            self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, stmt.condition)
        elif stmt_type is ailment.Stmt.Call:
            if stmt.ret_expr:
                self._link_variables_on_expr(variable_manager, block, stmt_idx, stmt, stmt.ret_expr) |
python | def _validate(self, schema):
    """ Validates a schema that defines rules against supported rules.

    :param schema: The schema to be validated as a legal cerberus schema
                   according to the rules of this Validator object.
    """
    if isinstance(schema, _str_type):
        schema = self.validator.schema_registry.get(schema, schema)
    if schema is None:
        raise SchemaError(errors.SCHEMA_ERROR_MISSING)
    schema = copy(schema)
    for field in schema:
        if isinstance(schema[field], _str_type):
            schema[field] = rules_set_registry.get(schema[field],
                                                   schema[field])
    if not self.schema_validator(schema, normalize=False):
        raise SchemaError(self.schema_validator.errors) |
java | public static <T> T notNullNotEquals (final T aValue,
@Nonnull final Supplier <? extends String> aName,
@Nonnull final T aUnexpectedValue)
{
notNull (aValue, aName);
notNull (aUnexpectedValue, "UnexpectedValue");
if (isEnabled ())
if (aValue.equals (aUnexpectedValue))
throw new IllegalArgumentException ("The value of '" +
aName.get () +
"' may not be equal to " +
aUnexpectedValue +
"!");
return aValue;
} |
python | def get_binaries():
    """Download and return paths of all platform-specific binaries"""
    paths = []
    for arp in [False, True]:
        paths.append(get_binary(arp=arp))
    return paths |
java | public ServiceFuture<SubscriptionQuotasListResultInner> listQuotasAsync(String location, final ServiceCallback<SubscriptionQuotasListResultInner> serviceCallback) {
return ServiceFuture.fromResponse(listQuotasWithServiceResponseAsync(location), serviceCallback);
} |
java | public static void notNull(Object obj, Supplier<String> message) {
if (isNull(obj)) {
throw new IllegalArgumentException(message.get());
}
} |
python | def filter_duplicate(self, url):
    """
    URL deduplication
    """
    if self.filterDuplicate:
        if url in self.historys:
            raise Exception('duplicate exception: %s is duplicate' % url)
        else:
            self.historys.add(url)
    else:
        pass |
java | public Stylesheet parse() throws ParseException {
while (tokenizer.more()) {
if (tokenizer.current().isKeyword(KEYWORD_IMPORT)) {
// Handle @import
parseImport();
} else if (tokenizer.current().isKeyword(KEYWORD_MIXIN)) {
// Handle @mixin
Mixin mixin = parseMixin();
if (mixin.getName() != null) {
result.addMixin(mixin);
}
} else if (tokenizer.current().isKeyword(KEYWORD_MEDIA)) {
// Handle @media
result.addSection(parseSection(true));
} else if (tokenizer.current().isSpecialIdentifier("$") && tokenizer.next().isSymbol(":")) {
// Handle variable definition
parseVariableDeclaration();
} else {
// Everything else is a "normal" section with selectors and attributes
result.addSection(parseSection(false));
}
}
// Something went wrong? Throw an exception
if (!tokenizer.getProblemCollector().isEmpty()) {
throw ParseException.create(tokenizer.getProblemCollector());
}
return result;
} |
python | def set_config(self, config):
    """Set (replace) the configuration for the session.

    Args:
        config: Configuration object
    """
    with self._conn:
        self._conn.execute("DELETE FROM config")
        self._conn.execute('INSERT INTO config VALUES(?)',
                           (serialize_config(config),)) |
java | protected CmsBasicFormField createUrlNameField() {
if (m_urlNameField != null) {
m_urlNameField.unbind();
}
String description = message(Messages.GUI_URLNAME_PROPERTY_DESC_0);
String label = message(Messages.GUI_URLNAME_PROPERTY_0);
final CmsTextBox textbox = new CmsTextBox();
textbox.setTriggerChangeOnKeyPress(true);
textbox.setInhibitValidationForKeypresses(true);
CmsBasicFormField result = new CmsBasicFormField(FIELD_URLNAME, description, label, null, textbox);
result.getLayoutData().put("property", A_CmsPropertyEditor.FIELD_URLNAME);
String urlName = m_handler.getName();
if (urlName == null) {
urlName = "";
}
String parent = CmsResource.getParentFolder(m_handler.getPath());
CmsUUID id = m_handler.getId();
result.setValidator(new CmsUrlNameValidator(parent, id));
I_CmsStringModel model = getUrlNameModel(urlName);
result.getWidget().setFormValueAsString(model.getValue());
result.bind(model);
//result.getWidget().setFormValueAsString(getUrlNameModel().getValue());
m_urlNameField = result;
return result;
} |
java | public static void childConnectionObserver(ServerBootstrap b,
ConnectionObserver connectionObserver) {
Objects.requireNonNull(b, "bootstrap");
Objects.requireNonNull(connectionObserver, "connectionObserver");
b.childOption(OBSERVER_OPTION, connectionObserver);
} |
java | public void initialize(Iterable<ModuleMetadataFileFinder> moduleMetadataFiles, ClassLoader cl) {
// First init core module metadata
FileLookup fileLookup = FileLookupFactory.newInstance();
try {
readMetadata(fileLookup.lookupFileLocation("infinispan-core-component-metadata.dat", cl));
} catch (Exception e) {
throw new CacheException("Unable to load component metadata!", e);
}
// Now the modules
for (ModuleMetadataFileFinder finder: moduleMetadataFiles) {
try {
readMetadata(fileLookup.lookupFileLocation(finder.getMetadataFilename(), cl));
} catch (Exception e) {
throw new CacheException("Unable to load component metadata in file " + finder.getMetadataFilename(), e);
}
}
} |
java | public List<FeatureTileLink> queryForTileTableName(String tileTableName) {
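// Query the feature/tile link table for all rows whose tile table name matches.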
List<FeatureTileLink> results = null;
try {
results = queryForEq(FeatureTileLink.COLUMN_TILE_TABLE_NAME,
tileTableName);
} catch (SQLException e) {
throw new GeoPackageException(
"Failed to query for Feature Tile Link objects by Tile Table Name: "
+ tileTableName);
}
return results;
} |
python | def _load_relationship_info(self):
    """Maps parent and child entries in the MFT.

    Because the library expects the MFT file to be provided, it doesn't
    have access to anything non-resident. Because of that, if we want all
    the information related to the entry, it is necessary to visit all the
    entries and map the relationship between each of them.

    Note:
        Because the data necessary to do this should always happen before
        the first fixup entry, we don't need to apply it.
    """
    mft_entry_size = self.mft_entry_size
    fp = self.file_pointer
    record_n = 0

    #define the minimum amount that needs to be read
    base_struct = struct.Struct("<Q")
    base_struct_offset = 32
    seq_struct = struct.Struct("<H")
    seq_struct_offset = 16
    buffer_base = bytearray(base_struct.size)
    buffer_seq = bytearray(seq_struct.size)

    #go over the file using the entry size as step
    for i in range(0, _get_file_size(self.file_pointer), mft_entry_size):
        record_n = int(i/mft_entry_size)
        fp.seek(i + base_struct_offset)
        fp.readinto(buffer_base)
        base_ref, base_seq = get_file_reference(base_struct.unpack(buffer_base)[0])
        if base_ref:
            #calculate and prepare to read the sequence number
            fp.seek((base_ref * mft_entry_size) + seq_struct_offset)
            fp.readinto(buffer_seq)
            seq, = seq_struct.unpack(buffer_seq)
            if seq == base_seq: #entries are related
                self._entries_parent_child[base_ref].append(record_n)
                self._entries_child_parent[record_n] = base_ref
            else:
                self._number_valid_entries += 1
        else:
            self._number_valid_entries += 1 |
python | def convert_random_normal(node, **kwargs):
    """Map MXNet's random_normal operator attributes to onnx's RandomNormal
    operator and return the created node.
    """
    name, input_nodes, attrs = get_inputs(node, kwargs)

    # Converting to float32
    mean = float(attrs.get("loc", 0))
    scale = float(attrs.get("scale", 1.0))
    shape = convert_string_to_list(attrs.get('shape', '[]'))
    dtype = onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[np.dtype(attrs.get('dtype', 'float32'))]

    node = onnx.helper.make_node(
        'RandomNormal',
        input_nodes,
        [name],
        mean=mean,
        scale=scale,
        dtype=dtype,
        shape=shape,
        name=name
    )
    return [node] |
java | @Override
public int prolepticYear(Era era, int yearOfEra) {
if (era instanceof EthiopicEra == false) {
throw new ClassCastException("Era must be EthiopicEra");
}
return (era == EthiopicEra.INCARNATION ? yearOfEra : 1 - yearOfEra);
} |
java | public static InterestingSomething getInterestingSomething(Throwable cause, File sourceDir) {
InterestingSomething something = null;
for (StackTraceElement stackTraceElement : cause.getStackTrace()) {
if (stackTraceElement.getLineNumber() > 0) {
String path = stackTraceElement.getClassName().replace(".", "/");
path = path.substring(0, path.lastIndexOf("/"));
File source = new File(sourceDir, path);
if (source.exists() && source.isDirectory()) {
String fN = stackTraceElement.getFileName();
int index = fN.indexOf("$");
if (index < 0) {
fN = fN.substring(0, fN.indexOf("."));
} else {
fN = fN.substring(0, index);
}
source = new File(source, fN + ".java");
if (something == null) {
something = new InterestingSomething(stackTraceElement, source);
something.setUsefulFiles(Lists.newArrayList());
something.setUsefulStackTraceElement(Lists.newArrayList());
} else {
if (source.exists()) {
something.getUsefulStackTraceElements().add(stackTraceElement);
something.getUsefulFiles().add(source);
}
}
}
}
}
return something;
} |
python | def genl_ctrl_probe_by_name(sk, name):
"""Look up generic Netlink family by family name querying the kernel directly.
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L237
Directly queries the kernel for a given family name.
Note: This API call differs from genl_ctrl_search_by_name in that it queries the kernel directly, allowing for
module autoload to take place to resolve the family request. Using an nl_cache prevents that operation.
Positional arguments:
sk -- Generic Netlink socket (nl_sock class instance).
name -- family name (bytes).
Returns:
Generic Netlink family `genl_family` class instance or None if no match was found.
"""
ret = genl_family_alloc()
if not ret:
return None
genl_family_set_name(ret, name)
msg = nlmsg_alloc()
orig = nl_socket_get_cb(sk)
cb = nl_cb_clone(orig)
genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, GENL_ID_CTRL, 0, 0, CTRL_CMD_GETFAMILY, 1)
nla_put_string(msg, CTRL_ATTR_FAMILY_NAME, name)
nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, probe_response, ret)
if nl_send_auto(sk, msg) < 0:
return None
if nl_recvmsgs(sk, cb) < 0:
return None
if wait_for_ack(sk) < 0: # If search was successful, request may be ACKed after data.
return None
if genl_family_get_id(ret) != 0:
return ret |
python | def GetEventTaggingRules(self):
"""Retrieves the event tagging rules from the tagging file.
Returns:
dict[str, FilterObject]: tagging rules, that consists of one or more
filter objects per label.
Raises:
TaggingFileError: if a filter expression cannot be compiled.
"""
tagging_rules = {}
label_name = None
with io.open(self._path, 'r', encoding='utf-8') as tagging_file:
for line in tagging_file.readlines():
line = line.rstrip()
stripped_line = line.lstrip()
if not stripped_line or stripped_line[0] == '#':
continue
if not line[0].isspace():
label_name = line
tagging_rules[label_name] = []
continue
if not label_name:
continue
filter_object = event_filter.EventObjectFilter()
try:
filter_object.CompileFilter(stripped_line)
except errors.ParseError as exception:
raise errors.TaggingFileError((
'Unable to compile filter for label: {0:s} with error: '
'{1!s}').format(label_name, exception))
if filter_object not in tagging_rules[label_name]:
tagging_rules[label_name].append(filter_object)
return tagging_rules |
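For illustration, a tagging file in the shape this parser expects is an unindented label line followed by indented filter expressions. The label and expressions below are made up for the example and are not taken from any shipped tagging file:

EXAMPLE_TAGGING_FILE = """\
application_execution
  data_type is 'windows:prefetch:execution'
"""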
python | def _predict_with_options(self, dataset, with_ground_truth,
postprocess=True, confidence_threshold=0.001,
iou_threshold=None,
verbose=True):
"""
Predict with options for what kind of SFrame should be returned.
If postprocess is False, a single numpy array with raw unprocessed
results will be returned.
"""
if iou_threshold is None: iou_threshold = self.non_maximum_suppression_threshold
_raise_error_if_not_detection_sframe(dataset, self.feature, self.annotations,
require_annotations=with_ground_truth)
from ._sframe_loader import SFrameDetectionIter as _SFrameDetectionIter
from ._detection import (yolo_map_to_bounding_boxes as _yolo_map_to_bounding_boxes,
non_maximum_suppression as _non_maximum_suppression,
bbox_to_ybox as _bbox_to_ybox)
from .._mxnet import _mxnet_utils
import mxnet as _mx
loader = _SFrameDetectionIter(dataset,
batch_size=self.batch_size,
input_shape=self.input_image_shape[1:],
output_shape=self._grid_shape,
anchors=self.anchors,
class_to_index=self._class_to_index,
loader_type='stretched',
load_labels=with_ground_truth,
shuffle=False,
epochs=1,
feature_column=self.feature,
annotations_column=self.annotations)
num_anchors = len(self.anchors)
preds_per_box = 5 + len(self.classes)
output_size = preds_per_box * num_anchors
# If prediction is done with ground truth, two sframes of the same
# structure are returned, the second one containing ground truth labels
num_returns = 2 if with_ground_truth else 1
sf_builders = [
_tc.SFrameBuilder([int, str, float, float, float, float, float],
column_names=['row_id', 'label', 'confidence',
'x', 'y', 'width', 'height'])
for _ in range(num_returns)
]
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=self.batch_size)
use_mps = _use_mps() and num_mxnet_gpus == 0
if use_mps:
if not hasattr(self, '_mps_inference_net') or self._mps_inference_net is None:
mxnet_params = self._model.collect_params()
mps_net_params = { k : mxnet_params[k].data().asnumpy()
for k in mxnet_params }
mps_config = {
'mode': _MpsGraphMode.Inference,
'od_include_network': True,
'od_include_loss': False,
}
mps_net = _get_mps_od_net(input_image_shape=self.input_image_shape,
batch_size=self.batch_size,
output_size=output_size,
anchors=self.anchors,
config=mps_config,
weights=mps_net_params)
self._mps_inference_net = mps_net
dataset_size = len(dataset)
ctx = _mxnet_utils.get_mxnet_context()
done = False
last_time = 0
raw_results = []
for batch in loader:
if batch.pad is not None:
size = self.batch_size - batch.pad
b_data = _mx.nd.slice_axis(batch.data[0], axis=0, begin=0, end=size)
b_indices = _mx.nd.slice_axis(batch.label[1], axis=0, begin=0, end=size)
b_oshapes = _mx.nd.slice_axis(batch.label[2], axis=0, begin=0, end=size)
else:
b_data = batch.data[0]
b_indices = batch.label[1]
b_oshapes = batch.label[2]
size = self.batch_size
if b_data.shape[0] < len(ctx):
ctx0 = ctx[:b_data.shape[0]]
else:
ctx0 = ctx
split_data = _mx.gluon.utils.split_and_load(b_data, ctx_list=ctx0, even_split=False)
split_indices = _mx.gluon.utils.split_data(b_indices, num_slice=len(ctx0), even_split=False)
split_oshapes = _mx.gluon.utils.split_data(b_oshapes, num_slice=len(ctx0), even_split=False)
for data, indices, oshapes in zip(split_data, split_indices, split_oshapes):
if use_mps:
mps_data = _mxnet_to_mps(data.asnumpy())
n_samples = mps_data.shape[0]
if mps_data.shape[0] != self.batch_size:
mps_data_padded = _np.zeros((self.batch_size,) + mps_data.shape[1:],
dtype=mps_data.dtype)
mps_data_padded[:mps_data.shape[0]] = mps_data
mps_data = mps_data_padded
mps_float_array = self._mps_inference_net.predict(mps_data)
mps_z = mps_float_array.asnumpy()[:n_samples]
z = _mps_to_mxnet(mps_z)
else:
z = self._model(data).asnumpy()
if not postprocess:
raw_results.append(z)
continue
ypred = z.transpose(0, 2, 3, 1)
ypred = ypred.reshape(ypred.shape[:-1] + (num_anchors, -1))
zipped = zip(indices.asnumpy(), ypred, oshapes.asnumpy())
for index0, output0, oshape0 in zipped:
index0 = int(index0)
x_boxes, x_classes, x_scores = _yolo_map_to_bounding_boxes(
output0[_np.newaxis], anchors=self.anchors,
confidence_threshold=confidence_threshold,
nms_thresh=None)
x_boxes0 = _np.array(x_boxes).reshape(-1, 4)
# Normalize
x_boxes0[:, 0::2] /= self.input_image_shape[1]
x_boxes0[:, 1::2] /= self.input_image_shape[2]
# Re-shape to original input size
x_boxes0[:, 0::2] *= oshape0[0]
x_boxes0[:, 1::2] *= oshape0[1]
# Clip the boxes to the original sizes
x_boxes0[:, 0::2] = _np.clip(x_boxes0[:, 0::2], 0, oshape0[0])
x_boxes0[:, 1::2] = _np.clip(x_boxes0[:, 1::2], 0, oshape0[1])
# Non-maximum suppression (also limit to 100 detections per
# image, inspired by the evaluation in COCO)
x_boxes0, x_classes, x_scores = _non_maximum_suppression(
x_boxes0, x_classes, x_scores,
num_classes=self.num_classes, threshold=iou_threshold,
limit=100)
for bbox, cls, s in zip(x_boxes0, x_classes, x_scores):
cls = int(cls)
values = [index0, self.classes[cls], s] + list(_bbox_to_ybox(bbox))
sf_builders[0].append(values)
if index0 == len(dataset) - 1:
done = True
cur_time = _time.time()
# Do not print progress if only a few samples are predicted
if verbose and (dataset_size >= 5 and cur_time > last_time + 10 or done):
print('Predicting {cur_n:{width}d}/{max_n:{width}d}'.format(
cur_n=index0 + 1, max_n=dataset_size, width=len(str(dataset_size))))
last_time = cur_time
if done:
break
# Ground truth
if with_ground_truth:
zipped = _itertools.islice(zip(batch.label[1].asnumpy(), batch.raw_bboxes, batch.raw_classes), size)
for index0, bbox0, cls0 in zipped:
index0 = int(index0)
for bbox, cls in zip(bbox0, cls0):
cls = int(cls)
if cls == -1:
break
values = [index0, self.classes[cls], 1.0] + list(bbox)
sf_builders[1].append(values)
if index0 == len(dataset) - 1:
break
if postprocess:
ret = tuple([sb.close() for sb in sf_builders])
if len(ret) == 1:
return ret[0]
else:
return ret
else:
return _np.concatenate(raw_results, axis=0) |
java | public CollectionDescriptor getCollectionDescriptorByName(String name)
{
if (name == null)
{
return null;
}
CollectionDescriptor cod = (CollectionDescriptor) getCollectionDescriptorNameMap().get(name);
//
// BRJ: if the CollectionDescriptor is not found
// look in the ClassDescriptor referenced by 'super' for it
//
if (cod == null)
{
ClassDescriptor superCld = getSuperClassDescriptor();
if (superCld != null)
{
cod = superCld.getCollectionDescriptorByName(name);
}
}
return cod;
} |
python | def _to_http_hosts(hosts: Union[Iterable[str], str]) -> List[str]:
"""Convert a string of whitespace or comma separated hosts into a list of hosts.
Hosts may also already be a list or other iterable.
Each host will be prefixed with 'http://' if it is not already there.
>>> _to_http_hosts('n1:4200,n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('n1:4200 n2:4200')
['http://n1:4200', 'http://n2:4200']
>>> _to_http_hosts('https://n1:4200')
['https://n1:4200']
>>> _to_http_hosts(['http://n1:4200', 'n2:4200'])
['http://n1:4200', 'http://n2:4200']
"""
if isinstance(hosts, str):
hosts = hosts.replace(',', ' ').split()
return [_to_http_uri(i) for i in hosts] |
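_to_http_hosts delegates the per-host scheme handling to _to_http_uri, which is not included in this row. A minimal sketch of what such a helper could look like (hypothetical; the real implementation may differ):

def _to_http_uri(host: str) -> str:
    # Prefix with http:// unless a scheme is already present.
    if host.startswith(('http://', 'https://')):
        return host
    return 'http://' + host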
python | def clean(self):
"""
Make sure the lookup makes sense
"""
if self.lookup == '?': # Randomly sort
return
else:
lookups = self.lookup.split(LOOKUP_SEP)
opts = self.model_def.model_class()._meta
valid = True
while len(lookups):
lookup = lookups.pop(0)
try:
field = opts.get_field(lookup)
except FieldDoesNotExist:
valid = False
else:
if isinstance(field, models.ForeignKey):
opts = get_remote_field_model(field)._meta
elif len(lookups): # Cannot go any deeper
valid = False
finally:
if not valid:
msg = _("This field doesn't exist")
raise ValidationError({'lookup': [msg]}) |
python | def gap_index_map(sequence, gap_chars='-'):
"""
Opposite of ungap_index_map: returns mapping from gapped index to ungapped
index.
>>> gap_index_map('AC-TG-')
{0: 0, 1: 1, 3: 2, 4: 3}
"""
return dict(
(v, k) for k, v in list(ungap_index_map(sequence, gap_chars).items())) |
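The function above simply inverts ungap_index_map, which is not shown in this row. A sketch of a compatible counterpart (an assumption consistent with the doctest, not necessarily the library's version):

def ungap_index_map(sequence, gap_chars='-'):
    """Map ungapped indices to their positions in the gapped sequence."""
    mapping = {}
    ungapped = 0
    for gapped, char in enumerate(sequence):
        if char not in gap_chars:
            mapping[ungapped] = gapped
            ungapped += 1
    return mapping

# ungap_index_map('AC-TG-') -> {0: 0, 1: 1, 2: 3, 3: 4}
# Inverting it reproduces the doctest above: {0: 0, 1: 1, 3: 2, 4: 3}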
python | def show(self, title=LABEL_DEFAULT, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT):
"""
Visualize the SArray.
Notes
-----
- The plot will render either inline in a Jupyter Notebook, or in a
native GUI window, depending on the value provided in
`turicreate.visualization.set_target` (defaults to 'auto').
Parameters
----------
title : str
The plot title to show for the resulting visualization.
If the title is None, the title will be omitted.
xlabel : str
The X axis label to show for the resulting visualization.
If the xlabel is None, the X axis label will be omitted.
ylabel : str
The Y axis label to show for the resulting visualization.
If the ylabel is None, the Y axis label will be omitted.
Returns
-------
None
Examples
--------
Suppose 'sa' is an SArray, we can view it using:
>>> sa.show()
To override the default plot title and axis labels:
>>> sa.show(title="My Plot Title", xlabel="My X Axis", ylabel="My Y Axis")
"""
returned_plot = self.plot(title, xlabel, ylabel)
returned_plot.show() |
python | def paginate(self, page=None, per_page=None, error_out=True, max_per_page=None, count=True):
"""Returns ``per_page`` items from page ``page``.
If ``page`` or ``per_page`` are ``None``, they will be retrieved from
the request query. If ``max_per_page`` is specified, ``per_page`` will
be limited to that value. If there is no request or they aren't in the
query, they default to 1 and 20 respectively. If ``count`` is ``False``,
no query to help determine total page count will be run.
When ``error_out`` is ``True`` (default), the following rules will
cause a 404 response:
* No items are found and ``page`` is not 1.
* ``page`` is less than 1, or ``per_page`` is negative.
* ``page`` or ``per_page`` are not ints.
When ``error_out`` is ``False``, ``page`` and ``per_page`` default to
1 and 20 respectively.
Returns a :class:`Pagination` object.
"""
if request:
if page is None:
try:
page = int(request.args.get('page', 1))
except (TypeError, ValueError):
if error_out:
abort(404)
page = 1
if per_page is None:
try:
per_page = int(request.args.get('per_page', 20))
except (TypeError, ValueError):
if error_out:
abort(404)
per_page = 20
else:
if page is None:
page = 1
if per_page is None:
per_page = 20
if max_per_page is not None:
per_page = min(per_page, max_per_page)
if page < 1:
if error_out:
abort(404)
else:
page = 1
if per_page < 0:
if error_out:
abort(404)
else:
per_page = 20
items = self.limit(per_page).offset((page - 1) * per_page).all()
if not items and page != 1 and error_out:
abort(404)
# No need to count if we're on the first page and there are fewer
# items than we expected or if count is disabled.
if not count:
total = None
elif page == 1 and len(items) < per_page:
total = len(items)
else:
total = self.order_by(None).count()
return Pagination(self, page, per_page, total, items) |
java | private static boolean isTransformPossible(byte[] bytes) {
if (bytes.length < 8) {
return false;
}
// The transform method will be called for all classes, but ASM is only
// capable of processing some class file format versions. That's ok
// because the transformer only modifies classes that have been
// preprocessed by our build anyway.
//
// ASM doesn't provide a way to determine its max supported version, so
// we hard code the supported version number.
int classFileVersion = ((bytes[6] & 0xff) << 8) | (bytes[7] & 0xff);
//Limit bytecode that we transform based on JDK retransform compatibility
//If we have issues here, 1.8 classes will instead be handled by a separate
//transformer that only does those classes.
if (isJDK8WithHotReplaceBug)
return classFileVersion <= Opcodes.V1_7;
else
return classFileVersion <= Opcodes.V11;
} |
java | boolean isStarvedForFairShare(JobInfo info, TaskType type) {
int desiredFairShare = (int) Math.floor(Math.min(
(fairTasks(info, type) + 1) / 2, runnableTasks(info, type)));
return (runningTasks(info, type) < desiredFairShare);
} |
java | public void setClientInfo(final String name, final String value) throws SQLClientInfoException {
checkClientClose(name);
checkClientReconnect(name);
checkClientValidProperty(name);
try {
Statement statement = createStatement();
statement.execute(buildClientQuery(name, value));
} catch (SQLException sqle) {
Map<String, ClientInfoStatus> failures = new HashMap<>();
failures.put(name, ClientInfoStatus.REASON_UNKNOWN);
throw new SQLClientInfoException("unexpected error during setClientInfo", failures, sqle);
}
} |
python | def _build_id_tuple(params, spec):
"""
Builds a 2-element tuple used to identify fields by grabbing the class_
and tag from an Asn1Value class and the params dict being passed to it
:param params:
A dict of params to pass to spec
:param spec:
An Asn1Value class
:return:
A 2-element integer tuple in the form (class_, tag)
"""
# Handle situations where the spec is not known at setup time
if spec is None:
return (None, None)
required_class = spec.class_
required_tag = spec.tag
_tag_type_to_explicit_implicit(params)
if 'explicit' in params:
if isinstance(params['explicit'], tuple):
required_class, required_tag = params['explicit']
else:
required_class = 2
required_tag = params['explicit']
elif 'implicit' in params:
if isinstance(params['implicit'], tuple):
required_class, required_tag = params['implicit']
else:
required_class = 2
required_tag = params['implicit']
if required_class is not None and not isinstance(required_class, int_types):
required_class = CLASS_NAME_TO_NUM_MAP[required_class]
required_class = params.get('class_', required_class)
required_tag = params.get('tag', required_tag)
return (required_class, required_tag) |
java | @Override
public ResourceFactory createResourceFactory(Map<String, Object> props) throws Exception {
final boolean trace = TraceComponent.isAnyTracingEnabled();
if (trace && tc.isEntryEnabled())
Tr.entry(tc, "createResourceFactory", props);
//Placeholder for admin object properties like queue/topic
Hashtable<String, Object> adminObjectSvcProps = new Hashtable<String, Object>();
//Store all the props from annotation as well as from deployment descriptor
Map<String, Object> annotationDDProps = new HashMap<String, Object>();
//Just move all the properties from props to annotationDDProps after resolving strings.
VariableRegistry variableRegistry = variableRegistryRef.getServiceWithException();
for (Map.Entry<String, Object> prop : props.entrySet()) {
Object value = prop.getValue();
if (value instanceof String)
value = variableRegistry.resolveString((String) value);
annotationDDProps.put(prop.getKey(), value);
}
String application = (String) annotationDDProps.remove(AppDefinedResource.APPLICATION);
String declaringApplication = (String) annotationDDProps.remove(DECLARING_APPLICATION);
String module = (String) annotationDDProps.remove(AppDefinedResource.MODULE);
String component = (String) annotationDDProps.remove(AppDefinedResource.COMPONENT);
String jndiName = (String) annotationDDProps.remove(AdminObjectService.JNDI_NAME);
annotationDDProps.remove(DESCRIPTION);
annotationDDProps.remove(NAME);
String adminObjectID = getadminObjectID(application, module, component, jndiName);
StringBuilder filter = new StringBuilder(FilterUtils.createPropertyFilter(ID, adminObjectID));
filter.insert(filter.length() - 1, '*');
// Fail if server.xml is already using the id
if (!removeExistingConfigurations(filter.toString()))
throw new IllegalArgumentException(adminObjectID); // internal error, shouldn't ever have been permitted in server.xml
adminObjectSvcProps.put(ID, adminObjectID);
adminObjectSvcProps.put(CONFIG_DISPLAY_ID, adminObjectID);
// Use the unique identifier because jndiName is not always unique for app-defined data sources
adminObjectSvcProps.put(UNIQUE_JNDI_NAME, adminObjectID);
adminObjectSvcProps.put(ResourceFactory.JNDI_NAME, jndiName);
if (application != null) {
adminObjectSvcProps.put(AppDefinedResource.APPLICATION, application);
if (module != null) {
adminObjectSvcProps.put(AppDefinedResource.MODULE, module);
if (component != null)
adminObjectSvcProps.put(AppDefinedResource.COMPONENT, component);
}
}
String resourceAdapter = ((String) annotationDDProps.remove(RESOURCE_ADAPTER));
String destinationName = (String) annotationDDProps.get(JMSResourceDefinitionConstants.DESTINATION_NAME);
String interfaceName = (String) annotationDDProps.remove(INTERFACE_NAME);
//If the resource adapter type is wasJms, then the destination name has to be mapped to the respective name in the resource adapter.
if (JMSResourceDefinitionConstants.RESOURCE_ADAPTER_WASJMS.equals(resourceAdapter)) {
// The destinationName specified via annotation/dd has to be mapped to the respective adapter property.
if (JMSResourceDefinitionConstants.JMS_QUEUE_INTERFACE.equals(interfaceName)) {
annotationDDProps.put(JMSResourceDefinitionConstants.JMS_QUEUE_NAME, destinationName);
} else if (JMSResourceDefinitionConstants.JMS_TOPIC_INTERFACE.equals(interfaceName)) {
annotationDDProps.put(JMSResourceDefinitionConstants.JMS_TOPIC_NAME, destinationName);
}
//If the resource adapter type is wmqJms, then the destination name has to be mapped to the respective name in the resource adapter.
} else if (JMSResourceDefinitionConstants.RESOURCE_ADAPTER_WMQJMS.equals(resourceAdapter)) {
// The destinationName specified via annotation/dd has to be mapped to the respective adapter property.
if (JMSResourceDefinitionConstants.JMS_QUEUE_INTERFACE.equals(interfaceName)) {
annotationDDProps.put(JMSResourceDefinitionConstants.WMQ_QUEUE_NAME, destinationName);
} else if (JMSResourceDefinitionConstants.JMS_TOPIC_INTERFACE.equals(interfaceName)) {
annotationDDProps.put(JMSResourceDefinitionConstants.WMQ_TOPIC_NAME, destinationName);
}
}
//Get props with default values only and see if the same is specified by the user in the annotation/dd; if so use that value, otherwise set the default value.
//Note: It's not necessary for the user to specify the props which have default values, so we set them here.
Dictionary<String, Object> adminObjectDefaultProps = getDefaultProperties(resourceAdapter, interfaceName);
for (Enumeration<String> keys = adminObjectDefaultProps.keys(); keys.hasMoreElements();)
{
String key = keys.nextElement();
Object value = adminObjectDefaultProps.get(key);
//Override the administered object default property values with values provided annotation
if (annotationDDProps.containsKey(key))
value = annotationDDProps.remove(key);
if (value instanceof String)
value = variableRegistry.resolveString((String) value);
adminObjectSvcProps.put(JMSResourceDefinitionConstants.PROPERTIES_REF_KEY + key, value);
}
//Get all the properties for a given resource (looked up by interfaceName) from the corresponding resource adapter,
//then see if the user specified any of these props in the annotation/dd; if so, use that value, otherwise ignore it.
//Note: The section above for handling default values could be eliminated by handling them here, since we get all the properties. This will be addressed in the future.
AttributeDefinition[] ads = getAttributeDefinitions(resourceAdapter, interfaceName);
for (AttributeDefinition attributeDefinition : ads) {
Object value = annotationDDProps.remove(attributeDefinition.getID());
if (value != null) {
if (value instanceof String)
value = variableRegistry.resolveString((String) value);
adminObjectSvcProps.put(JMSResourceDefinitionConstants.PROPERTIES_REF_KEY + attributeDefinition.getID(), value);
}
}
adminObjectSvcProps.put(BOOTSTRAP_CONTEXT, "(id=" + resourceAdapter + ")");
BundleContext bundleContext = FrameworkUtil.getBundle(AdminObjectService.class).getBundleContext();
StringBuilder adminObjectFilter = new StringBuilder(200);
adminObjectFilter.append("(&").append(FilterUtils.createPropertyFilter(ID, adminObjectID));
adminObjectFilter.append(FilterUtils.createPropertyFilter(Constants.OBJECTCLASS, AdminObjectService.class.getName())).append(")");
ResourceFactory factory = new AppDefinedResourceFactory(this, bundleContext, adminObjectID, adminObjectFilter.toString(), declaringApplication);
try {
String bundleLocation = bundleContext.getBundle().getLocation();
ConfigurationAdmin configAdmin = configAdminRef.getService();
Configuration adminObjectSvcConfig = configAdmin.createFactoryConfiguration(AdminObjectService.ADMIN_OBJECT_PID, bundleLocation);
adminObjectSvcConfig.update(adminObjectSvcProps);
} catch (Exception x) {
factory.destroy();
throw x;
} catch (Error x) {
factory.destroy();
throw x;
}
if (trace && tc.isEntryEnabled())
Tr.exit(tc, "createResourceFactory", factory);
return factory;
} |
python | def content_type(self, request=None, response=None):
"""Returns the content type that should be used by default for this endpoint"""
if callable(self.outputs.content_type):
return self.outputs.content_type(request=request, response=response)
else:
return self.outputs.content_type |
python | def param_value_send(self, param_id, param_value, param_type, param_count, param_index, force_mavlink1=False):
'''
Emit the value of a onboard parameter. The inclusion of param_count
and param_index in the message allows the recipient to
keep track of received parameters and allows him to
re-request missing parameters after a loss or timeout.
param_id : Onboard parameter id, terminated by NULL if the length is less than 16 human-readable chars and WITHOUT null termination (NULL) byte if the length is exactly 16 chars - applications have to provide 16+1 bytes storage if the ID is stored as string (char)
param_value : Onboard parameter value (float)
param_type : Onboard parameter type: see the MAV_PARAM_TYPE enum for supported data types. (uint8_t)
param_count : Total number of onboard parameters (uint16_t)
param_index : Index of this onboard parameter (uint16_t)
'''
return self.send(self.param_value_encode(param_id, param_value, param_type, param_count, param_index), force_mavlink1=force_mavlink1) |
java | @Override
public List<Component> listComponents( String applicationName ) {
this.logger.fine( "Request: list components for the application " + applicationName + "." );
List<Component> result = new ArrayList<> ();
Application app = this.manager.applicationMngr().findApplicationByName( applicationName );
if( app != null )
result.addAll( ComponentHelpers.findAllComponents( app ));
return result;
} |
java | public static Dater iso(String date) {
//Converting ISO8601-compliant String to java.util.Date
return of(DatatypeConverter.parseDateTime(
Strs.WHITESPACE.removeFrom(checkNotNull(date))));
} |
java | public Thread newThread(final Runnable runnable) {
final Thread thread = new Thread(runnable);
thread.setName("HSQLDB Timer @" + Integer.toHexString(hashCode()));
thread.setDaemon(true);
return thread;
} |
java | public boolean isEUI64(boolean partial) {
int segmentCount = getSegmentCount();
int endIndex = addressSegmentIndex + segmentCount;
if(addressSegmentIndex <= 5) {
if(endIndex > 6) {
int index3 = 5 - addressSegmentIndex;
IPv6AddressSegment seg3 = getSegment(index3);
IPv6AddressSegment seg4 = getSegment(index3 + 1);
return seg4.matchesWithMask(0xfe00, 0xff00) && seg3.matchesWithMask(0xff, 0xff);
} else if(partial && endIndex == 6) {
IPv6AddressSegment seg3 = getSegment(5 - addressSegmentIndex);
return seg3.matchesWithMask(0xff, 0xff);
}
} else if(partial && addressSegmentIndex == 6 && endIndex > 6) {
IPv6AddressSegment seg4 = getSegment(6 - addressSegmentIndex);
return seg4.matchesWithMask(0xfe00, 0xff00);
}
return partial;
} |
python | def calc_piece_size(size, min_piece_size=20, max_piece_size=29, max_piece_count=1000):
"""
Calculates a good piece size for the given total size
"""
logger.debug('Calculating piece size for %i' % size)
for i in range(min_piece_size, max_piece_size): # 20 = 1MB
if size / (2**i) < max_piece_count:
break
return 2**i |
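A quick usage sketch, assuming calc_piece_size (and its module-level logger) is importable; the numbers below just follow the loop above:

size = int(1.5 * 2**30)          # ~1.5 GiB payload
piece_size = calc_piece_size(size)
print(piece_size)                # 2097152 (2 MiB): first power of two giving < 1000 pieces
print(size // piece_size)        # 768 pieces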
java | public static InsnList addLabel(LabelNode labelNode) {
Validate.notNull(labelNode);
InsnList ret = new InsnList();
ret.add(labelNode);
return ret;
} |
java | @Reqiured
public void setInterceptors(Interceptor[] interceptors) {
Listener last = null;
for (int i = interceptors.length - 1; i >= 0; i--) {
final Interceptor current = interceptors[i];
final Listener next = last;
last = new Listener() {
public void render(Context context) throws IOException, ParseException {
if (next == null) {
Listener listener = (Listener) context.get(LISTENER_KEY);
if (listener != null) {
current.render(context, listener);
}
} else {
current.render(context, next);
}
}
};
}
this.chain = last;
} |
java | public ResourceadapterType<ConnectorDescriptor> getOrCreateResourceadapter()
{
Node node = model.getOrCreate("resourceadapter");
ResourceadapterType<ConnectorDescriptor> resourceadapter = new ResourceadapterTypeImpl<ConnectorDescriptor>(this, "resourceadapter", model, node);
return resourceadapter;
} |
python | def log_batch(self, log_data):
"""Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of:
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment
"""
url = uri_join(self.base_url, "log")
attachments = []
for log_item in log_data:
log_item["item_id"] = self.stack[-1]
attachment = log_item.get("attachment", None)
if "attachment" in log_item:
del log_item["attachment"]
if attachment:
if not isinstance(attachment, collections.Mapping):
attachment = {"data": attachment}
name = attachment.get("name", str(uuid.uuid4()))
log_item["file"] = {"name": name}
attachments.append(("file", (
name,
attachment["data"],
attachment.get("mime", "application/octet-stream")
)))
files = [(
"json_request_part", (
None,
json.dumps(log_data),
"application/json"
)
)]
files.extend(attachments)
from reportportal_client import POST_LOGBATCH_RETRY_COUNT
for i in range(POST_LOGBATCH_RETRY_COUNT):
try:
r = self.session.post(
url=url,
files=files,
verify=self.verify_ssl
)
except KeyError:
if i < POST_LOGBATCH_RETRY_COUNT - 1:
continue
else:
raise
break
logger.debug("log_batch - Stack: %s", self.stack)
logger.debug("log_batch response: %s", r.text)
return _get_data(r) |
java | @Override
public boolean eIsSet(int featureID) {
switch (featureID) {
case AfplibPackage.MCF__RG:
return rg != null && !rg.isEmpty();
}
return super.eIsSet(featureID);
} |
java | @Override
public synchronized TopicSession createTopicSession(boolean transacted, int acknowledgeMode) throws JMSException
{
checkNotClosed();
RemoteTopicSession session = new RemoteTopicSession(idProvider.createID(),
this,
transportHub.createEndpoint(),
transacted,
acknowledgeMode);
registerSession(session);
session.remoteInit();
return session;
} |
python | def parse_codons(ref, start, end, strand):
"""
parse codon nucleotide positions in range start -> end, wrt strand
"""
codon = []
c = cycle([1, 2, 3])
ref = ref[start - 1:end]
if strand == -1:
ref = rc_stats(ref)
for pos in ref:
n = next(c)
codon.append(pos)
if n == 3:
yield codon
codon = [] |
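The rc_stats helper and the per-position stats input are library-specific, but the codon grouping itself is just itertools.cycle. A standalone illustration of that pattern (simplified, plain bases instead of stats):

from itertools import cycle

def group_codons(bases):
    codon, counter = [], cycle([1, 2, 3])
    for base in bases:
        codon.append(base)
        if next(counter) == 3:
            yield codon
            codon = []

print(list(group_codons('ATGGCTTAA')))
# [['A', 'T', 'G'], ['G', 'C', 'T'], ['T', 'A', 'A']]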
python | def create_data_iters_and_vocabs(args: argparse.Namespace,
max_seq_len_source: int,
max_seq_len_target: int,
shared_vocab: bool,
resume_training: bool,
output_folder: str) -> Tuple['data_io.BaseParallelSampleIter',
'data_io.BaseParallelSampleIter',
'data_io.DataConfig',
List[vocab.Vocab], vocab.Vocab]:
"""
Create the data iterators and the vocabularies.
:param args: Arguments as returned by argparse.
:param max_seq_len_source: Source maximum sequence length.
:param max_seq_len_target: Target maximum sequence length.
:param shared_vocab: Whether to create a shared vocabulary.
:param resume_training: Whether to resume training.
:param output_folder: Output folder.
:return: The data iterators (train, validation, config_data) as well as the source and target vocabularies.
"""
num_words_source, num_words_target = args.num_words
num_words_source = num_words_source if num_words_source > 0 else None
num_words_target = num_words_target if num_words_target > 0 else None
word_min_count_source, word_min_count_target = args.word_min_count
batch_num_devices = 1 if args.use_cpu else sum(-di if di < 0 else 1 for di in args.device_ids)
batch_by_words = args.batch_type == C.BATCH_TYPE_WORD
validation_sources = [args.validation_source] + args.validation_source_factors
validation_sources = [str(os.path.abspath(source)) for source in validation_sources]
validation_target = str(os.path.abspath(args.validation_target))
either_raw_or_prepared_error_msg = "Either specify a raw training corpus with %s and %s or a preprocessed corpus " \
"with %s." % (C.TRAINING_ARG_SOURCE,
C.TRAINING_ARG_TARGET,
C.TRAINING_ARG_PREPARED_DATA)
if args.prepared_data is not None:
utils.check_condition(args.source is None and args.target is None, either_raw_or_prepared_error_msg)
if not resume_training:
utils.check_condition(args.source_vocab is None and args.target_vocab is None,
"You are using a prepared data folder, which is tied to a vocabulary. "
"To change it you need to rerun data preparation with a different vocabulary.")
train_iter, validation_iter, data_config, source_vocabs, target_vocab = data_io.get_prepared_data_iters(
prepared_data_dir=args.prepared_data,
validation_sources=validation_sources,
validation_target=validation_target,
shared_vocab=shared_vocab,
batch_size=args.batch_size,
batch_by_words=batch_by_words,
batch_num_devices=batch_num_devices)
check_condition(args.source_factors_combine == C.SOURCE_FACTORS_COMBINE_SUM \
or len(source_vocabs) == len(args.source_factors_num_embed) + 1,
"Data was prepared with %d source factors, but only provided %d source factor dimensions." % (
len(source_vocabs), len(args.source_factors_num_embed) + 1))
if resume_training:
# resuming training. Making sure the vocabs in the model and in the prepared data match up
model_source_vocabs = vocab.load_source_vocabs(output_folder)
for i, (v, mv) in enumerate(zip(source_vocabs, model_source_vocabs)):
utils.check_condition(vocab.are_identical(v, mv),
"Prepared data and resumed model source vocab %d do not match." % i)
model_target_vocab = vocab.load_target_vocab(output_folder)
utils.check_condition(vocab.are_identical(target_vocab, model_target_vocab),
"Prepared data and resumed model target vocabs do not match.")
check_condition(data_config.num_source_factors == len(validation_sources),
'Training and validation data must have the same number of factors, but found %d and %d.' % (
data_config.num_source_factors, len(validation_sources)))
return train_iter, validation_iter, data_config, source_vocabs, target_vocab
else:
utils.check_condition(args.prepared_data is None and args.source is not None and args.target is not None,
either_raw_or_prepared_error_msg)
if resume_training:
# Load the existing vocabs created when starting the training run.
source_vocabs = vocab.load_source_vocabs(output_folder)
target_vocab = vocab.load_target_vocab(output_folder)
# Recover the vocabulary path from the data info file:
data_info = cast(data_io.DataInfo, Config.load(os.path.join(output_folder, C.DATA_INFO)))
source_vocab_paths = data_info.source_vocabs
target_vocab_path = data_info.target_vocab
else:
# Load or create vocabs
source_factor_vocab_paths = [args.source_factor_vocabs[i] if i < len(args.source_factor_vocabs)
else None for i in range(len(args.source_factors))]
source_vocab_paths = [args.source_vocab] + source_factor_vocab_paths
target_vocab_path = args.target_vocab
source_vocabs, target_vocab = vocab.load_or_create_vocabs(
source_paths=[args.source] + args.source_factors,
target_path=args.target,
source_vocab_paths=source_vocab_paths,
target_vocab_path=target_vocab_path,
shared_vocab=shared_vocab,
num_words_source=num_words_source,
num_words_target=num_words_target,
word_min_count_source=word_min_count_source,
word_min_count_target=word_min_count_target,
pad_to_multiple_of=args.pad_vocab_to_multiple_of)
check_condition(args.source_factors_combine == C.SOURCE_FACTORS_COMBINE_SUM \
or len(args.source_factors) == len(args.source_factors_num_embed),
"Number of source factor data (%d) differs from provided source factor dimensions (%d)" % (
len(args.source_factors), len(args.source_factors_num_embed)))
sources = [args.source] + args.source_factors
sources = [str(os.path.abspath(source)) for source in sources]
check_condition(len(sources) == len(validation_sources),
'Training and validation data must have the same number of factors, but found %d and %d.' % (
len(source_vocabs), len(validation_sources)))
train_iter, validation_iter, config_data, data_info = data_io.get_training_data_iters(
sources=sources,
target=os.path.abspath(args.target),
validation_sources=validation_sources,
validation_target=validation_target,
source_vocabs=source_vocabs,
target_vocab=target_vocab,
source_vocab_paths=source_vocab_paths,
target_vocab_path=target_vocab_path,
shared_vocab=shared_vocab,
batch_size=args.batch_size,
batch_by_words=batch_by_words,
batch_num_devices=batch_num_devices,
max_seq_len_source=max_seq_len_source,
max_seq_len_target=max_seq_len_target,
bucketing=not args.no_bucketing,
bucket_width=args.bucket_width)
data_info_fname = os.path.join(output_folder, C.DATA_INFO)
logger.info("Writing data config to '%s'", data_info_fname)
data_info.save(data_info_fname)
return train_iter, validation_iter, config_data, source_vocabs, target_vocab |
java | public static byte[] getContent(HttpResponse response) {
try {
if (response != null && response.getStatusLine().getStatusCode() == HttpStatus.SC_OK) {
return ResourceUtils.getBytes(response.getEntity().getContent());
}
}
catch (IOException e) {
log.error("Failed to read response: {}", e.getMessage());
}
return null;
} |
java | public void setPointerList(FSArray v) {
if (MMAXAnnotation_Type.featOkTst && ((MMAXAnnotation_Type)jcasType).casFeat_pointerList == null)
jcasType.jcas.throwFeatMissing("pointerList", "de.julielab.jules.types.mmax.MMAXAnnotation");
jcasType.ll_cas.ll_setRefValue(addr, ((MMAXAnnotation_Type)jcasType).casFeatCode_pointerList, jcasType.ll_cas.ll_getFSRef(v));} |
java | private void initFromSeriesDefinition(String seriesDefinition) {
String seriesDefinitionString = seriesDefinition;
m_seriesDefinition = new CmsSerialDateValue(seriesDefinitionString);
if (m_seriesDefinition.isValid()) {
I_CmsSerialDateBean bean = CmsSerialDateBeanFactory.createSerialDateBean(m_seriesDefinition);
m_dates = bean.getDates();
m_duration = bean.getEventDuration();
} else {
try {
throw new Exception("Could not read series definition: " + seriesDefinitionString);
} catch (Exception e) {
LOG.debug(e.getMessage(), e);
}
m_dates = new TreeSet<>();
}
} |
java | public Content commentTagsToOutput(Element holder, List<? extends DocTree> tags) {
return commentTagsToOutput(null, holder, tags, false);
} |
java | public FieldDefinition getInverseLinkDef() {
assert isLinkType();
TableDefinition inverseTableDef = getInverseTableDef();
if (inverseTableDef == null) {
return null;
}
return inverseTableDef.getFieldDef(m_linkInverse);
} |
java | public CommandAuthorization createOrUpdate(String chargingStationId, String userIdentity, Class commandClass) {
EntityManager em = getEntityManager();
EntityTransaction tx = null;
try {
tx = em.getTransaction();
tx.begin();
CommandAuthorization storedCommandAuthorization = em.merge(new CommandAuthorization(new CommandAuthorizationId(chargingStationId, userIdentity, commandClass)));
tx.commit();
return storedCommandAuthorization;
} catch (Exception e) {
if(tx != null && tx.isActive()) {
tx.rollback();
}
throw e;
} finally {
em.close();
}
} |
java | private static String getFinalResponseUrl(
final HttpClientContext context,
final String candidateUrl) {
List<URI> redirectLocations = context.getRedirectLocations();
if (redirectLocations != null) {
return redirectLocations.get(redirectLocations.size() - 1).toString();
}
return candidateUrl;
} |
java | public int read(byte[] buff, int offset, int length) {
if (buffer == null)
throw new AssertionError("Attempted to read from closed RAR");
if (length == 0)
return 0;
if (isEOF())
return -1;
if (current >= bufferOffset + buffer.length || validBufferBytes == -1)
reBuffer();
assert current >= bufferOffset && current < bufferOffset + validBufferBytes
: String.format("File (%s), current offset %d, buffer offset %d, buffer limit %d",
getPath(),
current,
bufferOffset,
validBufferBytes);
int toCopy = Math.min(length, validBufferBytes - bufferCursor());
System.arraycopy(buffer, bufferCursor(), buff, offset, toCopy);
current += toCopy;
return toCopy;
} |
java | public void sendTimed(Long chatId, Object messageRequest)
{
MessageQueue queue = mMessagesMap.get(chatId);
if (queue == null)
{
queue = new MessageQueue(chatId);
queue.putMessage(messageRequest);
mMessagesMap.put(chatId, queue);
}
else
{
queue.putMessage(messageRequest);
mMessagesMap.putIfAbsent(chatId, queue); //Double check, because the queue can be removed from hashmap on state DELETE
}
mSendRequested.set(true);
} |
java | private <E> boolean onCheckListAttribute(PluralAttribute<? super X, ?, ?> pluralAttribute, Class<E> paramClass)
{
if (pluralAttribute != null)
{
if (isListAttribute(pluralAttribute) && isBindable(pluralAttribute, paramClass))
{
return true;
}
}
return false;
} |
java | public CMAEditorInterface addControl(Control control) {
if (controls == null) {
controls = new ArrayList<Control>();
}
controls.add(control);
return this;
} |
python | def parse_dtype_info(flags):
"""Convert dtype string to tf dtype, and set loss_scale default as needed.
Args:
flags: namespace object returned by arg parser.
Raises:
ValueError: If an invalid dtype is provided.
"""
if flags.dtype in (i[0] for i in DTYPE_MAP.values()):
return # Make function idempotent
try:
flags.dtype, default_loss_scale = DTYPE_MAP[flags.dtype]
except KeyError:
raise ValueError("Invalid dtype: {}".format(flags.dtype))
flags.loss_scale = flags.loss_scale or default_loss_scale |
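The function relies on a module-level DTYPE_MAP from dtype string to (tf dtype, default loss scale). A minimal sketch of such a map (the keys and default loss scales here are assumptions, not necessarily the original repository's values):

import tensorflow as tf

DTYPE_MAP = {
    "fp16": (tf.float16, 128),  # fp16 training typically needs loss scaling
    "fp32": (tf.float32, 1),
}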
java | protected void setURL(URL u, String protocol, String host, int port,
String authority, String userInfo, String path,
String query, String ref) {
try {
if (this != u.getHandler()) {
throw new SecurityException("handler for url different from " +
"this handler");
}
} catch (MalformedURLException e) {
// Ignore.
}
// ensure that no one can reset the protocol on a given URL.
u.set(u.getProtocol(), host, port, authority, userInfo, path, query, ref);
} |
python | def compose_capability(base, *classes):
"""Create a new class starting with the base and adding capabilities."""
if _debug: compose_capability._debug("compose_capability %r %r", base, classes)
# make sure the base is a Collector
if not issubclass(base, Collector):
raise TypeError("base must be a subclass of Collector")
# make sure you only add capabilities
for cls in classes:
if not issubclass(cls, Capability):
raise TypeError("%s is not a Capability subclass" % (cls,))
# start with everything the base has and add the new ones
bases = (base,) + classes
# build a new name
name = base.__name__
for cls in classes:
name += '+' + cls.__name__
# return a new type
return type(name, bases, {}) |
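A usage sketch, assuming this function and the base classes come from bacpypes.capability (the import path is an assumption):

from bacpypes.capability import Collector, Capability, compose_capability

class ReadCapability(Capability):
    pass

class WriteCapability(Capability):
    pass

ComposedCollector = compose_capability(Collector, ReadCapability, WriteCapability)
print(ComposedCollector.__name__)   # Collector+ReadCapability+WriteCapability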
java | void encode(ChannelBuffer buf) {
buf.writeByte('*');
writeInt(buf, 1 + (args != null ? args.count() : 0));
buf.writeBytes(CRLF);
buf.writeByte('$');
writeInt(buf, type.bytes.length);
buf.writeBytes(CRLF);
buf.writeBytes(type.bytes);
buf.writeBytes(CRLF);
if (args != null) {
buf.writeBytes(args.buffer());
}
} |
java | public LaSchedulingNow start() {
final ClassLoader originalLoader = startHotdeploy();
final Cron4jScheduler cron4jScheduler;
final Cron4jNow cron4jNow;
try {
final LaJobScheduler appScheduler = findAppScheduler();
inject(appScheduler);
final LaJobRunner jobRunner = appScheduler.createRunner();
inject(jobRunner);
cron4jScheduler = createCron4jScheduler(jobRunner);
cron4jNow = createCron4jNow(cron4jScheduler, jobRunner);
final Cron4jCron cron4jCron = createCron4jCron(cron4jScheduler, jobRunner, cron4jNow);
appScheduler.schedule(cron4jCron);
showBoot(appScheduler, jobRunner, cron4jScheduler, cron4jNow);
} finally {
stopHotdeploy(originalLoader);
}
// thread start is out of hot-deploy scope
// because launcher thread should not inherit hot-deploy class loader
startCron(cron4jScheduler);
return cron4jNow;
} |
java | private void init() {
cacheable = null;
status = SC_OK;
streamDelegate = new ServletResponseStreamDelegate(this) {
protected OutputStream createOutputStream() throws IOException {
// Test if this request is really cacheable, otherwise,
// just write through to underlying response, and don't cache
if (isCacheable()) {
return cacheResponse.getOutputStream();
}
else {
// TODO: We need to tell the cache about this, somehow...
writeHeaders(cacheResponse, (HttpServletResponse) getResponse());
return super.getOutputStream();
}
}
};
} |