language | func_code_string
---|---|
python | def deregister_instances(self, load_balancer_name, instances):
"""
Remove Instances from an existing Load Balancer.
:type load_balancer_name: string
:param load_balancer_name: The name of the Load Balancer
:type instances: List of strings
:param instances: The instance IDs of the EC2 instances to remove.
:rtype: List of strings
:return: An updated list of instances for this Load Balancer.
"""
params = {'LoadBalancerName' : load_balancer_name}
self.build_list_params(params, instances,
'Instances.member.%d.InstanceId')
return self.get_list('DeregisterInstancesFromLoadBalancer',
params, [('member', InstanceInfo)]) |
java | @Override
protected boolean isValidFragment(String fragmentName) {
boolean knownFrag = false;
for (Class<?> cls : INNER_CLASSES) {
if (cls.getName().equals(fragmentName)) {
knownFrag = true;
break;
}
}
return knownFrag;
} |
python | def _get_value(self, entity):
"""Override _get_value() to *not* raise UnprojectedPropertyError."""
value = self._get_user_value(entity)
if value is None and entity._projection:
# Invoke super _get_value() to raise the proper exception.
return super(StructuredProperty, self)._get_value(entity)
return value |
python | def generate_unique_key(master_key_path, url):
"""
Input1: Path to the BD2K Master Key (for S3 Encryption)
Input2: S3 URL (e.g. https://s3-us-west-2.amazonaws.com/cgl-driver-projects-encrypted/wcdt/exome_bams/DTB-111-N.bam)
Returns: 32-byte unique key generated for that URL
"""
with open(master_key_path, 'r') as f:
master_key = f.read()
assert len(master_key) == 32, 'Invalid Key! Must be 32 characters. ' \
'Key: {}, Length: {}'.format(master_key, len(master_key))
new_key = hashlib.sha256(master_key + url).digest()
assert len(new_key) == 32, 'New key is invalid and is not 32 characters: {}'.format(new_key)
return new_key |
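A standalone sketch of the same derivation on in-memory bytes (the key and URL below are hypothetical; in Python 3 both must be bytes before hashing):

import hashlib

# Hypothetical 32-byte master key; the real code reads it from master_key_path.
master_key = b'0123456789abcdef0123456789abcdef'
url = 'https://s3-us-west-2.amazonaws.com/bucket/sample.bam'
# Per-URL 32-byte key: SHA-256 over the master key concatenated with the URL.
new_key = hashlib.sha256(master_key + url.encode('utf-8')).digest()
assert len(new_key) == 32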
java | @Override
public CommerceOrderItem findByC_I_Last(long commerceOrderId,
long CPInstanceId,
OrderByComparator<CommerceOrderItem> orderByComparator)
throws NoSuchOrderItemException {
CommerceOrderItem commerceOrderItem = fetchByC_I_Last(commerceOrderId,
CPInstanceId, orderByComparator);
if (commerceOrderItem != null) {
return commerceOrderItem;
}
StringBundler msg = new StringBundler(6);
msg.append(_NO_SUCH_ENTITY_WITH_KEY);
msg.append("commerceOrderId=");
msg.append(commerceOrderId);
msg.append(", CPInstanceId=");
msg.append(CPInstanceId);
msg.append("}");
throw new NoSuchOrderItemException(msg.toString());
} |
python | def _numpy_index_by_percentile(self, data, percentile):
""" Calculate percentile of numpy stack and return the index of the chosen pixel.
numpy percentile function is used with one of the following interpolations {'linear', 'lower', 'higher',
'midpoint', 'nearest'}
"""
data_perc_low = np.nanpercentile(data, percentile, axis=0, interpolation=self.interpolation)
# abs_diff is inf wherever the percentile is NaN (all-NaN pixel stack), so nanargmin never sees an all-NaN slice
abs_diff = np.where(np.isnan(data_perc_low), np.inf, abs(data - data_perc_low))
indices = np.where(np.isnan(data_perc_low), self.max_index, np.nanargmin(abs_diff, axis=0))
return indices |
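A minimal standalone run of the same index-by-percentile idea, assuming a hypothetical 3-image stack of 2x2 pixels (self.interpolation and self.max_index are omitted here):

import numpy as np

data = np.array([[[1.0, 9.0], [3.0, np.nan]],
                 [[5.0, 2.0], [7.0, 4.0]],
                 [[8.0, 6.0], [2.0, 5.0]]])
perc = np.nanpercentile(data, 50, axis=0)              # per-pixel median over the stack
abs_diff = np.where(np.isnan(perc), np.inf, np.abs(data - perc))
indices = np.nanargmin(abs_diff, axis=0)               # stack index whose value is closest to the median
print(indices)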
python | def create_farm(farm_name):
"""
Create a farm. Creates a farm named FARM_NAME on the currently selected
cloud server. You can use the `openag cloud select_farm` command to start
mirroring data into it.
"""
utils.check_for_cloud_server()
utils.check_for_cloud_user()
server = Server(config["cloud_server"]["url"])
username = config["cloud_server"]["username"]
password = config["cloud_server"]["password"]
server.log_in(username, password)
url = urljoin(server.resource.url, "_openag", "v0.1", "register_farm")
status, _, content = server.resource.session.request(
"POST", url, headers=server.resource.headers.copy(), body={
"name": username, "farm_name": farm_name
}, credentials=(username, password)
)
if status != 200:
raise click.ClickException(
"Failed to register farm with cloud server ({}): {}".format(
status, content
)
) |
python | async def create_category(self, name, *, overwrites=None, reason=None):
"""|coro|
Same as :meth:`create_text_channel` except makes a :class:`CategoryChannel` instead.
.. note::
The ``category`` parameter is not supported in this function since categories
cannot have categories.
"""
data = await self._create_channel(name, overwrites, ChannelType.category, reason=reason)
channel = CategoryChannel(state=self._state, guild=self, data=data)
# temporarily add to the cache
self._channels[channel.id] = channel
return channel |
java | @SuppressWarnings({"rawtypes", "unchecked"})
public static Kryo getKryo(Map conf) {
IKryoFactory kryoFactory =
(IKryoFactory) Utils.newInstance((String) conf.get(Config.TOPOLOGY_KRYO_FACTORY));
Kryo k = kryoFactory.getKryo(conf);
k.register(byte[].class);
k.register(ListDelegate.class);
k.register(ArrayList.class, new ArrayListSerializer());
k.register(HashMap.class, new HashMapSerializer());
k.register(HashSet.class, new HashSetSerializer());
k.register(BigInteger.class, new BigIntegerSerializer());
// k.register(TransactionAttempt.class);
k.register(Values.class);
// k.register(backtype.storm.metric.api.IMetricsConsumer.DataPoint.class);
// k.register(backtype.storm.metric.api.IMetricsConsumer.TaskInfo.class);
/*
try {
JavaBridge.registerPrimitives(k);
JavaBridge.registerCollections(k);
} catch(Exception e) {
throw new RuntimeException(e);
}
*/
Map<String, String> registrations = normalizeKryoRegister(conf);
kryoFactory.preRegister(k, conf);
boolean skipMissing = (Boolean) conf.get(Config.TOPOLOGY_SKIP_MISSING_KRYO_REGISTRATIONS);
for (String klassName : registrations.keySet()) {
String serializerClassName = registrations.get(klassName);
try {
Class klass = Class.forName(klassName);
Class serializerClass = null;
if (serializerClassName != null) {
serializerClass = Class.forName(serializerClassName);
}
LOG.info("Doing kryo.register for class " + klass);
if (serializerClass == null) {
k.register(klass);
} else {
k.register(klass, resolveSerializerInstance(k, klass, serializerClass));
}
} catch (ClassNotFoundException e) {
if (skipMissing) {
LOG.info("Could not find serialization or class for "
+ serializerClassName + ". Skipping registration...");
} else {
throw new RuntimeException(e);
}
}
}
kryoFactory.postRegister(k, conf);
if (conf.get(Config.TOPOLOGY_KRYO_DECORATORS) != null) {
for (String klassName : (List<String>) conf.get(Config.TOPOLOGY_KRYO_DECORATORS)) {
try {
Class klass = Class.forName(klassName);
IKryoDecorator decorator = (IKryoDecorator) klass.newInstance();
decorator.decorate(k);
} catch (ClassNotFoundException e) {
if (skipMissing) {
LOG.info("Could not find kryo decorator named "
+ klassName + ". Skipping registration...");
} else {
throw new RuntimeException(e);
}
} catch (InstantiationException e) {
throw new RuntimeException(e);
} catch (IllegalAccessException e) {
throw new RuntimeException(e);
}
}
}
kryoFactory.postDecorate(k, conf);
return k;
} |
java | private static void onGlInitialization() {
if (glUninitialized) {
glGetError(); // reset any previous error
int[] size = new int[] { -1 };
glGetIntegerv(GL_MAX_TEXTURE_SIZE, size, 0);
int errorCode = glGetError();
if (errorCode != GL_NO_ERROR) {
throw Exceptions.RuntimeAssertion(
"Error %d getting max texture size", errorCode);
}
int maxTextureSize = size[0];
if (maxTextureSize <= 0) {
throw Exceptions.RuntimeAssertion(
"Invalid max texture size %d", maxTextureSize);
}
glMaxTextureSize = maxTextureSize;
Log.d(TAG, "Actual GL_MAX_TEXTURE_SIZE = %d", glMaxTextureSize);
glUninitialized = false;
}
} |
java | public static boolean consistentAccessions(List<Location> subLocations) {
Set<AccessionID> set = new HashSet<AccessionID>();
for(Location sub: subLocations) {
set.add(sub.getAccession());
}
return set.size() == 1;
} |
python | def p_expr_end(p):
"end : END_EXPR"
p[0] = node.expr(
op="end", args=node.expr_list([node.number(0), node.number(0)])) |
python | def fetch_live(self, formatter=TableFormat):
"""
View logs in real-time. If previous filters were already set on
this query, they will be preserved on the original instance (this
method forces ``fetch_type='current'``).
:param formatter: Formatter type for data representation. Any type
in :py:mod:`smc_monitoring.models.formatters`.
:return: generator of formatted results
"""
clone = self.copy()
clone.update_query(type='current')
fmt = formatter(clone)
for result in clone.fetch_raw():
yield fmt.formatted(result) |
java | private static void getAllInterfaces(Class<?> cls, Collection<Class<?>> interfacesFound) {
while (cls != null) {
Class<?>[] interfaces = cls.getInterfaces();
for (int i = 0; i < interfaces.length; i++) {
interfacesFound.add(interfaces[i]);
getAllInterfaces(interfaces[i], interfacesFound);
}
cls = cls.getSuperclass();
}
} |
python | def is_preflight_request(self, request: web.Request) -> bool:
"""Is `request` is a CORS preflight request."""
route = self._request_route(request)
if _is_web_view(route, strict=False):
return request.method == 'OPTIONS'
return route in self._preflight_routes |
python | def __optimize(self):
"""
Merge overlapping or contacting subranges from ``self.__has`` attribute and update it. Called from all methods
that modify object contents.
Returns
-------
None
Method does not return. It does internal modifications on ``self.__has`` attribute.
"""
ret = []
for (begin, end) in sorted(self.__has):
if ret and begin <= ret[-1][1] < end: # when current range overlaps with the last one from ret
ret[-1] = (ret[-1][0], end)
elif not ret or begin > ret[-1][1]:
ret.append( (begin, end) )
self.__has = set(ret) |
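The merge loop above in isolation, run on a hypothetical set of ranges:

has = {(1, 3), (2, 5), (7, 8), (8, 10)}
ret = []
for begin, end in sorted(has):
    if ret and begin <= ret[-1][1] < end:  # overlaps/touches the last kept range and extends it
        ret[-1] = (ret[-1][0], end)
    elif not ret or begin > ret[-1][1]:
        ret.append((begin, end))
print(ret)  # [(1, 5), (7, 10)]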
python | def _resolve_child(self, path):
'Return a member generator by a dot-delimited path'
obj = self
for component in path.split('.'):
ptr = obj
if not isinstance(ptr, Permuter):
raise self.MessageNotFound("Bad element path [wrong type]")
# pylint: disable=protected-access
found_gen = (_ for _ in ptr._generators if _.name() == component)
obj = next(found_gen, None)
if not obj:
raise self.MessageNotFound("Path '{}' unresolved to member."
.format(path))
return ptr, obj |
python | def view_fields(self, *attributes, **options):
""" Returns an :class:`ordered dictionary <collections.OrderedDict>` which
contains the selected field *attributes* of the `Pointer` field itself
extended with a ``['data']`` key which contains the selected field *attribute*
or the dictionaries of the selected field *attributes* for each :class:`Field`
*nested* in the :attr:`data` object referenced by the `Pointer` field.
The *attributes* of each :class:`Field` for containers *nested* in the
:attr:`data` object referenced by the `Pointer` field are viewed as well
(chained method call).
:param str attributes: selected :class:`Field` attributes.
Fallback is the field :attr:`~Field.value`.
:keyword tuple fieldnames: sequence of dictionary keys for the selected
field *attributes*. Defaults to ``(*attributes)``.
:keyword bool nested: if ``True`` all :class:`Pointer` fields in the
:attr:`data` object referenced by the `Pointer` field views their
referenced :attr:`~Pointer.data` object field attributes as well
(chained method call).
"""
items = OrderedDict()
# Pointer field
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
if len(attributes) > 1:
for key, value in zip(attributes, field_getter(self)):
items[key] = value
else:
items['value'] = field_getter(self)
# Data object
if is_container(self._data):
# Container
items['data'] = self._data.view_fields(*attributes, **options)
elif is_pointer(self._data) and get_nested(options):
# Pointer
items['data'] = self._data.view_fields(*attributes, **options)
elif is_field(self._data):
# Field
if attributes:
field_getter = attrgetter(*attributes)
else:
field_getter = attrgetter('value')
if len(attributes) > 1:
fieldnames = options.get('fieldnames', attributes)
items['data'] = dict(zip(fieldnames, field_getter(self._data)))
else:
items['data'] = field_getter(self._data)
else:
# None
items['data'] = self._data
return items |
java | public @Nullable <T> T findUniqueOrNull(@NotNull RowMapper<T> rowMapper, @NotNull SqlQuery query) {
return findOptional(rowMapper, query).orElse(null);
} |
python | def hardware_port_group_mode_portgroup_speed(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
hardware = ET.SubElement(config, "hardware", xmlns="urn:brocade.com:mgmt:brocade-hardware")
port_group = ET.SubElement(hardware, "port-group")
name_key = ET.SubElement(port_group, "name")
name_key.text = kwargs.pop('name')
mode = ET.SubElement(port_group, "mode")
portgroup_speed = ET.SubElement(mode, "portgroup-speed")
portgroup_speed.text = kwargs.pop('portgroup_speed')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
java | @Override
public Request<DescribeNetworkInterfaceAttributeRequest> getDryRunRequest() {
Request<DescribeNetworkInterfaceAttributeRequest> request = new DescribeNetworkInterfaceAttributeRequestMarshaller().marshall(this);
request.addParameter("DryRun", Boolean.toString(true));
return request;
} |
java | public static TimestampLessThanCondition.Builder lt(String variable, Date expectedValue) {
return TimestampLessThanCondition.builder().variable(variable).expectedValue(expectedValue);
} |
python | def find_enriched(sample_entities=None,
background_entities=None,
object_category=None,
**kwargs):
"""
Given a sample set of sample_entities (e.g. overexpressed genes) and a background set (e.g. all genes assayed), and a category of descriptor (e.g. phenotype, function),
return enriched descriptors/classes
"""
if sample_entities is None:
sample_entities = []
(sample_counts, sample_results) = get_counts(entities=sample_entities,
object_category=object_category,
min_count=2,
**kwargs)
print(str(sample_counts))
sample_fcs = sample_results['facet_counts']
taxon_count_dict = sample_fcs[M.SUBJECT_TAXON]
taxon=None
for (t,tc) in taxon_count_dict.items():
# TODO - throw error if multiple taxa
taxon = t
if background_entities is None:
objects = list(sample_counts.keys())
print("OBJECTS="+str(objects))
background_entities = get_background(objects, taxon, object_category)
# TODO: consider caching
(bg_counts,_) = get_counts(entities=background_entities,
object_category=object_category,
**kwargs)
sample_n = len(sample_entities) # TODO - annotated only?
pop_n = len(background_entities)
# adapted from goatools
for (sample_termid,sample_count) in sample_counts.items():
pop_count = bg_counts[sample_termid]
# https://en.wikipedia.org/wiki/Fisher's_exact_test
# Cls NotCls
# study/sample [a, b]
# rest of ref [c, d]
#
a = sample_count
b = sample_n - sample_count
c = pop_count - sample_count
d = pop_n - pop_count - b
print("ABCD="+str((sample_termid,a,b,c,d,sample_n)))
_, p_uncorrected = sp.stats.fisher_exact( [[a, b], [c, d]])
print("P="+str(p_uncorrected)) |
python | def cftype_to_value(cftype):
"""Convert a CFType into an equivalent python type.
The convertible CFTypes are taken from the known_cftypes
dictionary, which may be added to if another library implements
its own conversion methods."""
if not cftype:
return None
typeID = cf.CFGetTypeID(cftype)
if typeID in known_cftypes:
convert_function = known_cftypes[typeID]
return convert_function(cftype)
else:
return cftype |
java | public Object getObject(int parameterIndex,
Map<String, Class<?>> map) throws SQLException {
throw Util.notSupported();
} |
java | private static void validateSetValueEntryForSet(Object entry, Set<?> set) {
if (entry instanceof Set<?> || entry instanceof SetValue) {
throw new IllegalArgumentException("Unsupported Value type [nested sets]");
}
if (!set.isEmpty()) {
Object existingEntry = set.iterator().next();
if (!existingEntry.getClass().isAssignableFrom(entry.getClass())) {
throw new IllegalArgumentException(
String.format(
"Unsupported Value type [SetValue with " + "mixed types %s and %s]",
existingEntry.getClass(), entry.getClass()));
}
}
} |
java | public long getCount(Date fromWhen, Date toWhen, boolean fromInclusive, boolean toInclusive) {
return getCount(fromWhen.getTime(), toWhen.getTime(), fromInclusive, toInclusive);
} |
java | public String evaluate(Map symbolTable)
throws RslEvaluationException {
String var = null;
if (symbolTable != null) {
var = (String)symbolTable.get(value);
}
if (var == null && defValue != null) {
var = defValue.evaluate(symbolTable);
}
if (var == null) {
/* NOTE: according to the RSL specs the variables
* should be replaced with an empty string.
* However, in real code an error is returned.
*/
throw new RslEvaluationException("Variable '" + value + "' not defined.");
}
if (concatValue == null) {
return var;
} else {
return var + concatValue.evaluate(symbolTable);
}
} |
python | def get_basedir(path):
"""Returns the base directory of a path.
Examples:
get_basedir('foo/bar/baz') --> 'foo'
get_basedir('/foo/bar/baz') --> ''
get_basedir('foo') --> 'foo'
"""
return path[:path.index(os.sep)] if os.sep in path else path |
python | def parse_environ(name, parse_class=ParseResult, **defaults):
"""
same as parse() but you pass in an environment variable name that will be used
to fetch the dsn
:param name: string, the environment variable name that contains the dsn to parse
:param parse_class: ParseResult, the class that will be used to hold parsed values
:param **defaults: dict, any values you want to have defaults for if they aren't in the dsn
:returns: ParseResult() tuple
"""
return parse(os.environ[name], parse_class, **defaults) |
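A hedged sketch of the same pattern using only the standard library (urlparse stands in for the module's own parse() and ParseResult, and the variable name is hypothetical):

import os
from urllib.parse import urlparse

os.environ['DATABASE_DSN'] = 'postgres://user:pass@localhost:5432/mydb'
parsed = urlparse(os.environ['DATABASE_DSN'])
print(parsed.scheme, parsed.hostname, parsed.port, parsed.path.lstrip('/'))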
java | public List<GitlabProject> getOwnedProjects() throws IOException {
Query query = new Query().append("owned", "true");
query.mergeWith(new Pagination().withPerPage(Pagination.MAX_ITEMS_PER_PAGE).asQuery());
String tailUrl = GitlabProject.URL + query.toString();
return retrieve().getAll(tailUrl, GitlabProject[].class);
} |
python | def setparents(self):
"""Correct all parent relations for elements within the scop. There is sually no need to call this directly, invoked implicitly by :meth:`copy`"""
for c in self:
if isinstance(c, AbstractElement):
c.parent = self
c.setparents() |
java | Observable<ChatResult> handleConversationUpdated(ConversationUpdate request, ComapiResult<ConversationDetails> result) {
if (result.isSuccessful()) {
return persistenceController.upsertConversation(ChatConversation.builder().populate(result.getResult(), result.getETag()).build()).map(success -> adapter.adaptResult(result, success));
}
if (result.getCode() == ETAG_NOT_VALID) {
return checkState().flatMap(client -> client.service().messaging().getConversation(request.getId())
.flatMap(newResult -> {
if (newResult.isSuccessful()) {
return persistenceController.upsertConversation(ChatConversation.builder().populate(newResult.getResult(), newResult.getETag()).build())
.flatMap(success -> Observable.fromCallable(() -> new ChatResult(false, success ? new ChatResult.Error(ETAG_NOT_VALID, "Conversation updated, try delete again.", "Conversation "+request.getId()+" updated in response to wrong eTag error when updating."): new ChatResult.Error(1500, "Error updating custom store.", null))));
} else {
return Observable.fromCallable(() -> adapter.adaptResult(newResult));
}
}));
} else {
return Observable.fromCallable(() -> adapter.adaptResult(result));
}
} |
python | def _flag_handler(self, play, handler_name, host):
'''
if a task has any notify elements, flag handlers for run
at end of execution cycle for hosts that have indicated
changes have been made
'''
found = False
for x in play.handlers():
if handler_name == utils.template(play.basedir, x.name, x.module_vars):
found = True
self.callbacks.on_notify(host, x.name)
x.notified_by.append(host)
if not found:
raise errors.AnsibleError("change handler (%s) is not defined" % handler_name) |
python | def record_download_archive(track):
"""
Write the track_id in the download archive
"""
global arguments
if not arguments['--download-archive']:
return
archive_filename = arguments.get('--download-archive')
try:
with open(archive_filename, 'a', encoding='utf-8') as file:
file.write('{0}'.format(track['id'])+'\n')
except IOError as ioe:
logger.error('Error trying to write to download archive...')
logger.debug(ioe) |
java | private void reset() {
calledNextStartPosition = false;
noMorePositions = false;
noMorePositionsSpan2 = false;
lastSpans2StartPosition = -1;
lastSpans2EndPosition = -1;
previousSpans2StartPosition = -1;
previousSpans2EndPositions.clear();
} |
python | def GetName(obj):
"""A compatibility wrapper for getting object's name.
In Python 2 class names are returned as `bytes` (since class names can contain
only ASCII characters) whereas in Python 3 they are `unicode` (since class
names can contain arbitrary unicode characters).
This function makes this behaviour consistent and always returns the class name
as a unicode string.
Once support for Python 2 is dropped all invocations of this call can be
replaced with ordinary `__name__` access.
Args:
obj: A type or function object to get the name for.
Returns:
Name of the specified class as a unicode string.
"""
precondition.AssertType(obj, (type, types.FunctionType))
if PY2:
return obj.__name__.decode("ascii")
else:
return obj.__name__ |
java | @Override
public void setFromCorners(double x1, double y1, double z1, double x2, double y2, double z2) {
if (x1<x2) {
this.minx = x1;
this.maxx = x2;
}
else {
this.minx = x2;
this.maxx = x1;
}
if (y1<y2) {
this.miny = y1;
this.maxy = y2;
}
else {
this.miny = y2;
this.maxy = y1;
}
if (z1<z2) {
this.minz = z1;
this.maxz = z2;
}
else {
this.minz = z2;
this.maxz = z1;
}
} |
java | public static int numNewAxis(INDArrayIndex... axes) {
int ret = 0;
for (INDArrayIndex index : axes)
if (index instanceof NewAxis)
ret++;
return ret;
} |
python | def add(self, txt, indent=0):
"""Adds some text, no newline will be appended.
The text can be indented with the optional argument 'indent'.
"""
if isinstance(txt, unicode):
try:
txt = str(txt)
except UnicodeEncodeError:
s = []
for c in txt:
try:
s.append(str(c))
except UnicodeEncodeError:
s.append(repr(c))
txt = ''.join(s)
self.text.append( ' '*indent + txt ) |
python | def map_attribute_to_seq(self,
attribute: str,
key_attribute: str,
value_attribute: Optional[str] = None) -> None:
"""Converts a mapping attribute to a sequence.
This function takes an attribute of this Node whose value \
is a mapping or a mapping of mappings and turns it into a \
sequence of mappings. Each entry in the original mapping is \
converted to an entry in the list. If only a key attribute is \
given, then each entry in the original mapping must map to a \
(sub)mapping. This submapping becomes the corresponding list \
entry, with the key added to it as an additional attribute. If a \
value attribute is also given, then an entry in the original \
mapping may map to any object. If the mapped-to object is a \
mapping, the conversion is as before, otherwise a new \
submapping is created, and key and value are added using the \
given key and value attribute names.
An example probably helps. If you have a Node representing \
this piece of YAML::
items:
item1:
description: Basic widget
price: 100.0
item2:
description: Premium quality widget
price: 200.0
and call map_attribute_to_seq('items', 'item_id'), then the \
Node will be modified to represent this::
items:
- item_id: item1
description: Basic widget
price: 100.0
- item_id: item2
description: Premium quality widget
price: 200.0
which once converted to an object is often easier to deal with \
in code.
Slightly more complicated, this YAML::
items:
item1: Basic widget
item2:
description: Premium quality widget
price: 200.0
when passed through map_attribute_to_seq('items', 'item_id', \
'description'), will result in the equivalent of::
items:
- item_id: item1
description: Basic widget
- item_id: item2
description: Premium quality widget
price: 200.0
If the attribute does not exist, or is not a mapping, this \
function will silently do nothing.
With thanks to the makers of the Common Workflow Language for \
the idea.
Args:
attribute: Name of the attribute whose value to modify.
key_attribute: Name of the new attribute in each item to \
add with the value of the key.
value_attribute: Name of the new attribute in each item to \
add with the value mapped to by the key.
"""
if not self.has_attribute(attribute):
return
attr_node = self.get_attribute(attribute)
if not attr_node.is_mapping():
return
start_mark = attr_node.yaml_node.start_mark
end_mark = attr_node.yaml_node.end_mark
object_list = []
for item_key, item_value in attr_node.yaml_node.value:
item_value_node = Node(item_value)
if not item_value_node.is_mapping():
if value_attribute is None:
return
ynode = item_value_node.yaml_node
item_value_node.make_mapping()
item_value_node.set_attribute(value_attribute, ynode)
item_value_node.set_attribute(key_attribute, item_key.value)
object_list.append(item_value_node.yaml_node)
seq_node = yaml.SequenceNode('tag:yaml.org,2002:seq', object_list,
start_mark, end_mark)
self.set_attribute(attribute, seq_node) |
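The same mapping-to-sequence conversion sketched on plain dicts, independent of the YAML node machinery above (names mirror the docstring's example):

doc = {'items': {'item1': {'description': 'Basic widget', 'price': 100.0},
                 'item2': {'description': 'Premium quality widget', 'price': 200.0}}}

def map_to_seq(mapping, attribute, key_attribute):
    # Turn {key: submapping} into [submapping + {key_attribute: key}].
    seq = []
    for key, submap in mapping[attribute].items():
        entry = dict(submap)
        entry[key_attribute] = key
        seq.append(entry)
    mapping[attribute] = seq

map_to_seq(doc, 'items', 'item_id')
print(doc['items'])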
python | def consume_socket_output(frames, demux=False):
"""
Iterate through frames read from the socket and return the result.
Args:
demux (bool):
If False, stdout and stderr are multiplexed, and the result is the
concatenation of all the frames. If True, the streams are
demultiplexed, and the result is a 2-tuple where each item is the
concatenation of frames belonging to the same stream.
"""
if demux is False:
# If the streams are multiplexed, the generator returns strings, that
# we just need to concatenate.
return six.binary_type().join(frames)
# If the streams are demultiplexed, the generator yields tuples
# (stdout, stderr)
out = [None, None]
for frame in frames:
# It is guaranteed that for each frame, one and only one stream
# is not None.
assert frame != (None, None)
if frame[0] is not None:
if out[0] is None:
out[0] = frame[0]
else:
out[0] += frame[0]
else:
if out[1] is None:
out[1] = frame[1]
else:
out[1] += frame[1]
return tuple(out) |
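The demultiplexing branch in isolation, assuming frames arrive as (stdout, stderr) tuples with exactly one non-None stream each:

frames = [(b'out1 ', None), (None, b'err1'), (b'out2', None)]
out = [None, None]
for frame in frames:
    idx = 0 if frame[0] is not None else 1   # which stream this frame belongs to
    out[idx] = frame[idx] if out[idx] is None else out[idx] + frame[idx]
print(tuple(out))  # (b'out1 out2', b'err1')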
java | public static void minimum(Planar<GrayF64> input, GrayF64 output) {
output.reshape(input.width,input.height);
if (BoofConcurrency.USE_CONCURRENT) {
ImplImageBandMath_MT.minimum(input, output, 0, input.getNumBands() - 1);
} else {
ImplImageBandMath.minimum(input, output, 0, input.getNumBands() - 1);
}
} |
java | public java.util.List<String> getEnvironmentNames() {
if (environmentNames == null) {
environmentNames = new com.amazonaws.internal.SdkInternalList<String>();
}
return environmentNames;
} |
python | def get_k8s_model(model_type, model_dict):
"""
Returns an instance of type specified model_type from an model instance or
represantative dictionary.
"""
model_dict = copy.deepcopy(model_dict)
if isinstance(model_dict, model_type):
return model_dict
elif isinstance(model_dict, dict):
# convert the dictionaries camelCase keys to snake_case keys
model_dict = _map_dict_keys_to_model_attributes(model_type, model_dict)
# use the dictionary keys to initialize a model of given type
return model_type(**model_dict)
else:
raise AttributeError("Expected object of type 'dict' (or '{}') but got '{}'.".format(model_type.__name__, type(model_dict).__name__)) |
java | public static FileChannel createTempFile(String prefix, String suffix) throws IOException {
return createTempFile(TMPDIR, prefix, suffix);
} |
java | public java.util.List<SuccessfulInstanceCreditSpecificationItem> getSuccessfulInstanceCreditSpecifications() {
if (successfulInstanceCreditSpecifications == null) {
successfulInstanceCreditSpecifications = new com.amazonaws.internal.SdkInternalList<SuccessfulInstanceCreditSpecificationItem>();
}
return successfulInstanceCreditSpecifications;
} |
python | def _read_config(filename):
"""Read configuration from the given file.
Parsing is performed through the configparser library.
Returns:
dict: a flattened dict of (option_name, value), using defaults.
"""
parser = configparser.RawConfigParser()
if filename and not parser.read(filename):
sys.stderr.write("Unable to open configuration file %s. Use --config='' to disable this warning.\n" % filename)
config = {}
for section, defaults in BASE_CONFIG.items():
# Patterns are handled separately
if section == 'patterns':
continue
for name, descr in defaults.items():
kind, default = descr
if section in parser.sections() and name in parser.options(section):
if kind == 'int':
value = parser.getint(section, name)
elif kind == 'float':
value = parser.getfloat(section, name)
elif kind == 'bool':
value = parser.getboolean(section, name)
else:
value = parser.get(section, name)
else:
value = default
config[name] = value
if 'patterns' in parser.sections():
patterns = [parser.get('patterns', opt) for opt in parser.options('patterns')]
else:
patterns = DEFAULT_PATTERNS
config['patterns'] = patterns
return config |
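A minimal runnable sketch of the typed lookups used above, with an inline config instead of a file (section and option names are hypothetical):

import configparser

parser = configparser.RawConfigParser()
parser.read_string("""
[main]
retries = 3
ratio = 0.5
verbose = yes
""")
print(parser.getint('main', 'retries'))      # 3
print(parser.getfloat('main', 'ratio'))      # 0.5
print(parser.getboolean('main', 'verbose'))  # True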
java | public <R> Plan0<R> then(Func5<T1, T2, T3, T4, T5, R> selector) {
if (selector == null) {
throw new NullPointerException();
}
return new Plan5<T1, T2, T3, T4, T5, R>(this, selector);
} |
python | def _send_file(self, method, path, data, filename):
"""Make a multipart/form-encoded request.
Args:
`method`: The method of the request (POST or PUT).
`path`: The path to the resource.
`data`: The JSON-encoded data.
`filename`: The filename of the file to send.
Returns:
The content of the response.
Raises:
An exception depending on the HTTP status code of the response.
"""
with open(filename, 'r') as f:
return self._make_request(method, path, data=data, files=[f, ]) |
java | protected Template getTemplate(File file) throws ServletException {
String key = file.getAbsolutePath();
Template template = findCachedTemplate(key, file);
//
// Template not cached or the source file changed - compile new template!
//
if (template == null) {
try {
template = createAndStoreTemplate(key, new FileInputStream(file), file);
} catch (Exception e) {
throw new ServletException("Creation of template failed: " + e, e);
}
}
return template;
} |
java | public boolean write( RasterData rasterData ) throws RasterWritingFailureException {
try {
return writer.write(rasterData);
} catch (Exception e) {
e.printStackTrace();
throw new RasterWritingFailureException(e.getLocalizedMessage());
}
} |
python | def dl_database(db_dir, dl_dir, records='all', annotators='all',
keep_subdirs=True, overwrite=False):
"""
Download WFDB record (and optionally annotation) files from a
Physiobank database. The database must contain a 'RECORDS' file in
its base directory which lists its WFDB records.
Parameters
----------
db_dir : str
The Physiobank database directory to download. eg. For database:
'http://physionet.org/physiobank/database/mitdb', db_dir='mitdb'.
dl_dir : str
The full local directory path in which to download the files.
records : list, or 'all', optional
A list of strings specifying the WFDB records to download. Leave
as 'all' to download all records listed in the database's
RECORDS file.
eg. records=['test01_00s', 'test02_45s'] for database:
https://physionet.org/physiobank/database/macecgdb/
annotators : list, 'all', or None, optional
A list of strings specifying the WFDB annotation file types to
download along with the record files. Is either None to skip
downloading any annotations, 'all' to download all annotation
types as specified by the ANNOTATORS file, or a list of strings
which each specify an annotation extension.
eg. annotators = ['anI'] for database:
https://physionet.org/physiobank/database/prcp/
keep_subdirs : bool, optional
Whether to keep the relative subdirectories of downloaded files
as they are organized in Physiobank (True), or to download all
files into the same base directory (False).
overwrite : bool, optional
If True, all files will be redownloaded regardless. If False,
existing files with the same name and relative subdirectory will
be checked. If the local file is the same size as the online
file, the download is skipped. If the local file is larger, it
will be deleted and the file will be redownloaded. If the local
file is smaller, the file will be assumed to be partially
downloaded and the remaining bytes will be downloaded and
appended.
Examples
--------
>>> wfdb.dl_database('ahadb', os.getcwd())
"""
# Full url physiobank database
db_url = posixpath.join(download.config.db_index_url, db_dir)
# Check if the database is valid
r = requests.get(db_url)
r.raise_for_status()
# Get the list of records
recordlist = download.get_record_list(db_dir, records)
# Get the annotator extensions
annotators = download.get_annotators(db_dir, annotators)
# All files to download (relative to the database's home directory)
allfiles = []
for rec in recordlist:
# Check out whether each record is in MIT or EDF format
if rec.endswith('.edf'):
allfiles.append(rec)
else:
# May be pointing to directory
if rec.endswith('/'):
rec = rec + rec[:-1]
# If MIT format, have to figure out all associated files
allfiles.append(rec+'.hea')
dir_name, baserecname = os.path.split(rec)
record = rdheader(baserecname, pb_dir=posixpath.join(db_dir, dir_name))
# Single segment record
if isinstance(record, Record):
# Add all dat files of the segment
for file in (record.file_name if record.file_name else []):
allfiles.append(posixpath.join(dir_name, file))
# Multi segment record
else:
for seg in record.seg_name:
# Skip empty segments
if seg == '~':
continue
# Add the header
allfiles.append(posixpath.join(dir_name, seg+'.hea'))
# Layout specifier has no dat files
if seg.endswith('_layout'):
continue
# Add all dat files of the segment
recseg = rdheader(seg, pb_dir=posixpath.join(db_dir, dir_name))
for file in recseg.file_name:
allfiles.append(posixpath.join(dir_name, file))
# check whether the record has any requested annotation files
if annotators is not None:
for a in annotators:
annfile = rec+'.'+a
url = posixpath.join(download.config.db_index_url, db_dir, annfile)
rh = requests.head(url)
if rh.status_code != 404:
allfiles.append(annfile)
dlinputs = [(os.path.split(file)[1], os.path.split(file)[0], db_dir, dl_dir, keep_subdirs, overwrite) for file in allfiles]
# Make any required local directories
download.make_local_dirs(dl_dir, dlinputs, keep_subdirs)
print('Downloading files...')
# Create multiple processes to download files.
# Limit to 2 connections to avoid overloading the server
pool = multiprocessing.Pool(processes=2)
pool.map(download.dl_pb_file, dlinputs)
print('Finished downloading files')
return |
python | def new_binary_container(self, name):
"""Defines a new binary container to template.
Binary container can only contain binary fields defined with `Bin`
keyword.
Examples:
| New binary container | flags |
| bin | 2 | foo |
| bin | 6 | bar |
| End binary container |
"""
self._message_stack.append(BinaryContainerTemplate(name, self._current_container)) |
python | def _build_likelihood(self):
"""
This gives a variational bound on the model likelihood.
"""
# Get prior KL.
KL = self.build_prior_KL()
# Get conditionals
fmean, fvar = self._build_predict(self.X, full_cov=False, full_output_cov=False)
# Get variational expectations.
var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y)
# re-scale for minibatch size
scale = tf.cast(self.num_data, settings.float_type) / tf.cast(tf.shape(self.X)[0], settings.float_type)
return tf.reduce_sum(var_exp) * scale - KL |
java | public static boolean deletePublicKeyNode(PepManager pepManager, OpenPgpV4Fingerprint fingerprint)
throws XMPPException.XMPPErrorException, SmackException.NotConnectedException, InterruptedException,
SmackException.NoResponseException {
PubSubManager pm = pepManager.getPepPubSubManager();
return pm.deleteNode(PEP_NODE_PUBLIC_KEY(fingerprint));
} |
java | public static <T extends MethodDescription> ElementMatcher.Junction<T> isSetter(TypeDescription type) {
return isSetter(is(type));
} |
java | public void schedule(final String name, final Command command, final int delayMsec) {
BurstEvent e = m_memory.remove(name);
if (e != null) {
// disable the old event
e.getTimer().cancel();
}
// put the new event and schedule it
e = new BurstEvent(name, command);
m_memory.put(name, e);
e.getTimer().schedule(delayMsec);
} |
java | @Override
public T resolve(String name, MediaType mediaType) {
if (this.cache != null) {
if (!this.cache.containsKey(name)) {
T t = this.resolve(name);
if (t != null)
this.cache.putIfAbsent(name, t);
}
return this.cache.get(name);
} else {
return this.resolve(name);
}
} |
java | private static void getMethodsRecursive(Class<?> service, List<Method> methods) {
Collections.addAll(methods, service.getDeclaredMethods());
} |
python | def _update_rows(self):
""" Update the row and column numbers of child items. """
for row, item in enumerate(self._items):
item.row = row # Row is the Parent item
item.column = 0
for column, item in enumerate(self._columns):
item.row = self.row # Row is the Parent item
item.column = column |
python | def _load(self, path='config', filetype=None, relaxed=False, ignore=False):
""" load key value pairs from a file
Parameters:
path - path to configuration data (see Note 1)
filetype - type component of dot-delimited path
relaxed - if True, define keys on the fly (see Note 2)
ignore - if True, ignore undefined keys in path
Return:
self
Notes:
1. The path can be:
* an open file object with a readlines method
* a dot delimited path to a file (see normalize_path)
* an os-specific path to a file (relative to cwd)
* an iterable of key=value strings
2. Normally keys read from the file must conform to keys
previously defined for the Config. If the relaxed flag
is True, any keys found in the file will be accepted.
If the ignore flag is True, any keys found in the file
that are not previously defined are ignored.
"""
for num, line in enumerate(
un_comment(load_lines_from_path(path, filetype)),
start=1,
):
if not line:
continue
try:
key, val = line.split('=', 1)
key = key.strip()
val = val.strip()
if relaxed:
self._define(key)
try:
level, itemname = self.__lookup(key)
except KeyError:
if ignore:
continue
raise
item = level.get(itemname)
if item is None:
raise KeyError(itemname)
item.load(val)
except Exception as e:
args = e.args or ('',)
msg = 'line {} of config: {}'. format(num, args[0])
e.args = (msg,) + args[1:]
raise
return self |
java | private I_CmsContextMenuItem findDefaultAction(Collection<I_CmsContextMenuItem> items) {
I_CmsContextMenuItem result = null;
int resultRank = -1;
for (I_CmsContextMenuItem menuItem : items) {
if ((menuItem instanceof CmsContextMenuActionItem)
&& (((CmsContextMenuActionItem)menuItem).getWorkplaceAction() instanceof I_CmsDefaultAction)) {
I_CmsDefaultAction action = (I_CmsDefaultAction)((CmsContextMenuActionItem)menuItem).getWorkplaceAction();
if (getVisibility(menuItem).isActive()) {
if (result == null) {
result = menuItem;
resultRank = action.getDefaultActionRank(m_context);
} else {
int rank = action.getDefaultActionRank(m_context);
if (rank > resultRank) {
result = menuItem;
resultRank = rank;
}
}
}
}
}
return result;
} |
java | protected <T> List<T> deleteListResources(String path, Class<T> objectClass) throws SmartsheetException {
Util.throwIfNull(path, objectClass);
Util.throwIfEmpty(path);
Result<List<T>> obj = null;
HttpRequest request;
request = createHttpRequest(smartsheet.getBaseURI().resolve(path), HttpMethod.DELETE);
try {
HttpResponse response = this.smartsheet.getHttpClient().request(request);
switch (response.getStatusCode()) {
case 200:
obj = this.smartsheet.getJsonSerializer().deserializeListResult(objectClass,
response.getEntity().getContent());
break;
default:
handleError(response);
}
} finally {
smartsheet.getHttpClient().releaseConnection();
}
return obj.getResult();
} |
java | public void registerView(@Nullable RegisterViewStatusListener callback) {
if (Branch.getInstance() != null) {
Branch.getInstance().registerView(this, callback);
} else {
if (callback != null) {
callback.onRegisterViewFinished(false, new BranchError("Register view error", BranchError.ERR_BRANCH_NOT_INSTANTIATED));
}
}
} |
java | @Override
public void eSet(int featureID, Object newValue) {
switch (featureID) {
case BpsimPackage.GAMMA_DISTRIBUTION_TYPE__SCALE:
setScale((Double)newValue);
return;
case BpsimPackage.GAMMA_DISTRIBUTION_TYPE__SHAPE:
setShape((Double)newValue);
return;
}
super.eSet(featureID, newValue);
} |
python | def mavg(data, xseq, **params):
"""
Fit moving average
"""
window = params['method_args']['window']
# The first average comes after the full window size
# has been swept over
rolling = data['y'].rolling(**params['method_args'])
y = rolling.mean()[window:]
n = len(data)
stderr = rolling.std()[window:]
x = data['x'][window:]
data = pd.DataFrame({'x': x, 'y': y})
data.reset_index(inplace=True, drop=True)
if params['se']:
df = n - window # Original - Used
data['ymin'], data['ymax'] = tdist_ci(
y, df, stderr, params['level'])
data['se'] = stderr
return data |
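A minimal rolling-window sketch assuming window=3; like the code above it slices off the first `window` values (note the [window:] slice also discards the very first complete-window average at index window-1):

import pandas as pd

s = pd.Series([1.0, 2.0, 4.0, 7.0, 11.0])
window = 3
rolling = s.rolling(window=window)
y = rolling.mean()[window:]
stderr = rolling.std()[window:]
print(y.tolist())       # [4.333..., 7.333...]
print(stderr.tolist())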
python | def check(self, req):
"""Determine if ``req`` is in this instances cache.
Determine if there are cache hits for the request in this aggregator
instance.
Not in the cache
If req is not in the cache, it returns ``None`` to indicate that the
caller should send the request.
Cache Hit; response has errors
When a cached CheckResponse has errors, it's assumed that ``req`` would
fail as well, so the cached CheckResponse is returned. However, the
first CheckRequest after the flush interval has elapsed should be sent
to the server to refresh the CheckResponse, though until it's received,
subsequent CheckRequests should fail with the cached CheckResponse.
Cache behaviour - response passed
If the cached CheckResponse has no errors, it's assumed that ``req``
will succeed as well, so the CheckResponse is returned, with the quota
info updated to the same as requested. The requested tokens are
aggregated until flushed.
Args:
req (``ServicecontrolServicesCheckRequest``): to be sent to
the service control service
Raises:
ValueError: if the ``req`` service_name is not the same as
this instances
Returns:
``CheckResponse``: if an applicable response is cached by this
instance is available for use or None, if there is no applicable
response
"""
if self._cache is None:
return None # no cache, send request now
if not isinstance(req, sc_messages.ServicecontrolServicesCheckRequest):
raise ValueError(u'Invalid request')
if req.serviceName != self.service_name:
_logger.error(u'bad check(): service_name %s does not match ours %s',
req.serviceName, self.service_name)
raise ValueError(u'Service name mismatch')
check_request = req.checkRequest
if check_request is None:
_logger.error(u'bad check(): no check_request in %s', req)
raise ValueError(u'Expected operation not set')
op = check_request.operation
if op is None:
_logger.error(u'bad check(): no operation in %s', req)
raise ValueError(u'Expected operation not set')
if op.importance != sc_messages.Operation.ImportanceValueValuesEnum.LOW:
return None # op is important, send request now
signature = sign(check_request)
with self._cache as cache:
_logger.debug(u'checking the cache for %r\n%s', signature, cache)
item = cache.get(signature)
if item is None:
return None # signal to caller to send req
else:
return self._handle_cached_response(req, item) |
java | public String getRawDataAsString() throws IOException
{
byte[] buffer = getRawData();
if ( buffer == null ) {
return null;
}
String encoding = request.getCharacterEncoding();
if ( encoding == null ) {
encoding = "utf-8";
}
return new String(buffer, encoding);
} |
java | public void marshall(DescribeProductViewRequest describeProductViewRequest, ProtocolMarshaller protocolMarshaller) {
if (describeProductViewRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(describeProductViewRequest.getAcceptLanguage(), ACCEPTLANGUAGE_BINDING);
protocolMarshaller.marshall(describeProductViewRequest.getId(), ID_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public SubscriptionQos setExpiryDateMs(final long expiryDateMs) {
long now = System.currentTimeMillis();
if (expiryDateMs <= now && expiryDateMs != NO_EXPIRY_DATE) {
throw new IllegalArgumentException("Subscription ExpiryDate " + expiryDateMs + " in the past. Now: " + now);
}
this.expiryDateMs = expiryDateMs;
return this;
} |
python | def _get_indicators_page_generator(self, from_time=None, to_time=None, page_number=0, page_size=None,
enclave_ids=None, included_tag_ids=None, excluded_tag_ids=None):
"""
Creates a generator from the |get_indicators_page| method that returns each successive page.
:param int from_time: start of time window in milliseconds since epoch (defaults to 7 days ago)
:param int to_time: end of time window in milliseconds since epoch (defaults to current time)
:param int page_number: the page number
:param int page_size: the page size
:param list(string) enclave_ids: a list of enclave IDs to filter by
:param list(string) included_tag_ids: only indicators containing ALL of these tags will be returned
:param list(string) excluded_tag_ids: only indicators containing NONE of these tags will be returned
:return: a |Page| of |Indicator| objects
"""
get_page = functools.partial(
self.get_indicators_page,
from_time=from_time,
to_time=to_time,
page_number=page_number,
page_size=page_size,
enclave_ids=enclave_ids,
included_tag_ids=included_tag_ids,
excluded_tag_ids=excluded_tag_ids
)
return Page.get_page_generator(get_page, page_number, page_size) |
python | def obj_to_string(obj):
'''Render an object into a unicode string if possible'''
if not obj:
return None
elif isinstance(obj, bytes):
return obj.decode('utf-8')
elif isinstance(obj, basestring):
return obj
elif is_lazy_string(obj):
return obj.value
elif hasattr(obj, '__html__'):
return obj.__html__()
else:
return str(obj) |
python | def get(cls):
"""Subsystems used outside of any task."""
return {
SourceRootConfig,
Reporting,
Reproducer,
RunTracker,
Changed,
BinaryUtil.Factory,
Subprocess.Factory
} |
java | @FFDCIgnore(IOException.class)
public static String getLocationFromBundleFile(BundleFile bundleFile) {
BundleEntry be = bundleFile.getEntry(BUNDLE_PROPERTY_ENTRY_NAME);
if (be != null) {
Properties p = new Properties();
try {
p.load(be.getInputStream());
return p.getProperty(BUNDLE_LOCATION);
} catch (IOException e) {
return null;
}
} else {
return null;
}
} |
java | public Period plusDays(long daysToAdd) {
if (daysToAdd == 0) {
return this;
}
return create(years, months, Jdk8Methods.safeToInt(Jdk8Methods.safeAdd(days, daysToAdd)));
} |
python | def media_artist(self):
"""Artist of current playing media (Music track only)."""
try:
artists = self.session['NowPlayingItem']['Artists']
return artists[0] if artists else None
except KeyError:
return None |
python | def move(self, source_path, destination_path):
"""
Rename/move an object from one GCS location to another.
"""
self.copy(source_path, destination_path)
self.remove(source_path) |
python | def padded_accuracy_topk(logits, labels, k):
"""Percentage of times that top-k predictions matches labels on non-0s."""
with tf.variable_scope("padded_accuracy_topk", values=[logits, labels]):
logits, labels = _pad_tensors_to_same_length(logits, labels)
weights = tf.to_float(tf.not_equal(labels, 0))
effective_k = tf.minimum(k, tf.shape(logits)[-1])
_, outputs = tf.nn.top_k(logits, k=effective_k)
outputs = tf.to_int32(outputs)
padded_labels = tf.to_int32(labels)
padded_labels = tf.expand_dims(padded_labels, axis=-1)
padded_labels += tf.zeros_like(outputs) # Pad to same shape.
same = tf.to_float(tf.equal(outputs, padded_labels))
same_topk = tf.reduce_sum(same, axis=-1)
return same_topk, weights |
python | def chk_date_arg(s):
"""Checks if the string `s` is a valid date string.
Return True or False."""
if re_date.search(s) is None:
return False
comp = s.split('-')
try:
datetime.date(int(comp[0]), int(comp[1]), int(comp[2]))
return True
except Exception:
return False |
python | def _check_hyperedge_id_consistency(self):
"""Consistency Check 4: check for misplaced hyperedge ids
:raises: ValueError -- detected inconsistency among dictionaries
"""
# Get list of hyperedge_ids from the hyperedge attributes dict
hyperedge_ids_from_attributes = set(self._hyperedge_attributes.keys())
# get hyperedge ids in the forward star
forward_star_hyperedge_ids = set()
for hyperedge_id_set in self._forward_star.values():
forward_star_hyperedge_ids.update(hyperedge_id_set)
# get hyperedge ids in the backward star
backward_star_hyperedge_ids = set()
for hyperedge_id_set in self._backward_star.values():
backward_star_hyperedge_ids.update(hyperedge_id_set)
# Check 4.1: hyperedge ids in the forward star must be the
# same as the hyperedge ids from attributes
if forward_star_hyperedge_ids != hyperedge_ids_from_attributes:
raise ValueError(
'Consistency Check 4.1 Failed: hyperedge ids ' +
'are different in the forward star ' +
'values and the hyperedge ids from ' +
'attribute keys.')
# Check 4.2: hyperedge ids in the backward star must be the
# same as the hyperedge ids from attributes
if backward_star_hyperedge_ids != hyperedge_ids_from_attributes:
raise ValueError(
'Consistency Check 4.2 Failed: hyperedge ids ' +
'are different in the backward star ' +
'values and the hyperedge ids from ' +
'attribute keys.')
# Note that by Check 4.1 and 4.2, forward_star_hyperedge_ids =
# backward_star_hyperedge_ids
# get hyperedge ids in the predecessors dict
predecessor_hyperedge_ids = set()
for all_tails_from_predecessor in self._predecessors.values():
for hyperedge_id in all_tails_from_predecessor.values():
predecessor_hyperedge_ids.add(hyperedge_id)
# get hyperedge ids in the successors dict
successor_hyperedge_ids = set()
for all_heads_from_successor in self._successors.values():
for hyperedge_id in all_heads_from_successor.values():
successor_hyperedge_ids.add(hyperedge_id)
# Check 4.3: hyperedge ids in the predecessor dict must be the
# same as the hyperedge ids from attributes
if predecessor_hyperedge_ids != hyperedge_ids_from_attributes:
raise ValueError(
'Consistency Check 4.3 Failed: hyperedge ids are ' +
'different in the predecessor values and ' +
'hyperedge ids from attribute keys.')
# Check 4.4: hyperedge ids in the successor dict must be the
# same as the hyperedge ids from attributes
if successor_hyperedge_ids != hyperedge_ids_from_attributes:
raise ValueError(
'Consistency Check 4.4 Failed: hyperedge ids are ' +
'different in the successor values and ' +
'hyperedge ids from attribute keys.') |
java | private static Pair<Double, ClassicCounter<Integer>> readModel(File modelFile, boolean multiclass) {
int modelLineCount = 0;
try {
int numLinesToSkip = multiclass ? 13 : 10;
String stopToken = "#";
BufferedReader in = new BufferedReader(new FileReader(modelFile));
for (int i=0; i < numLinesToSkip; i++) {
in.readLine();
modelLineCount ++;
}
List<Pair<Double, ClassicCounter<Integer>>> supportVectors = new ArrayList<Pair<Double, ClassicCounter<Integer>>>();
// Read Threshold
String thresholdLine = in.readLine();
modelLineCount ++;
String[] pieces = thresholdLine.split("\\s+");
double threshold = Double.parseDouble(pieces[0]);
// Read Support Vectors
while (in.ready()) {
String svLine = in.readLine();
modelLineCount ++;
pieces = svLine.split("\\s+");
// First Element is the alpha_i * y_i
double alpha = Double.parseDouble(pieces[0]);
ClassicCounter<Integer> supportVector = new ClassicCounter<Integer>();
for (int i=1; i < pieces.length; ++i) {
String piece = pieces[i];
if (piece.equals(stopToken)) break;
// Each in featureIndex:num class
String[] indexNum = piece.split(":");
String featureIndex = indexNum[0];
// mihai: we may see "qid" as indexNum[0]. just skip this piece. this is the block id useful only for reranking, which we don't do here.
if(! featureIndex.equals("qid")){
double count = Double.parseDouble(indexNum[1]);
supportVector.incrementCount(Integer.valueOf(featureIndex), count);
}
}
supportVectors.add(new Pair<Double, ClassicCounter<Integer>>(alpha, supportVector));
}
in.close();
return new Pair<Double, ClassicCounter<Integer>>(threshold, getWeights(supportVectors));
}
catch (Exception e) {
e.printStackTrace();
throw new RuntimeException("Error reading SVM model (line " + modelLineCount + " in file " + modelFile.getAbsolutePath() + ")");
}
} |
python | def to_dict(self):
'''A representation of the publication data that matches the schema we use in our databases.'''
if not self.record_type == 'journal':
# todo: it may be worthwhile creating subclasses for each entry type (journal, conference, etc.) with a common
# API e.g. to_json which creates output appropriately
raise Exception('This function has only been tested on journal entries at present.')
author_list = []
authors = self.article.get('authors', [])
for x in range(len(authors)):
author = authors[x]
first_name = None
middle_names = None
if author.get('given_name'):
names = author['given_name'].split()
first_name = names[0]
middle_names = (' '.join(names[1:])) or None
author_list.append(
dict(
AuthorOrder = x + 1,
FirstName = first_name,
MiddleNames = middle_names,
Surname = author.get('surname')
)
)
return dict(
Title = self.article.get('title'),
PublicationName = self.issue.get('full_title'),
Volume = self.issue.get('volume'),
Issue = self.issue.get('issue'),
StartPage = self.article.get('first_page'),
EndPage = self.article.get('last_page'),
PublicationYear = self.get_year(),
PublicationDate = self.get_earliest_date(),
RIS = None,
DOI = self.doi,
PubMedID = self.get_pubmed_id(),
URL = 'http://dx.doi.org/%s' % self.doi,
ISSN = None, # eight-digit number
authors = author_list,
#
RecordType = DOI.record_types.get(self.record_type)
) |
python | def get_file_uuid(fpath, hasher=None, stride=1):
""" Creates a uuid from the hash of a file
"""
if hasher is None:
hasher = hashlib.sha1() # 20 bytes of output
#hasher = hashlib.sha256() # 32 bytes of output
# sha1 produces a 20 byte hash
hashbytes_20 = get_file_hash(fpath, hasher=hasher, stride=stride)
# sha1 produces 20 bytes, but UUID requires 16 bytes
hashbytes_16 = hashbytes_20[0:16]
uuid_ = uuid.UUID(bytes=hashbytes_16)
return uuid_ |
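The same 20-to-16-byte truncation on in-memory bytes rather than a file:

import hashlib
import uuid

digest20 = hashlib.sha1(b'example file contents').digest()  # SHA-1 yields 20 bytes
file_uuid = uuid.UUID(bytes=digest20[:16])                  # a UUID needs exactly 16
print(file_uuid)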
python | def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool:
"""
Tests whether the predicate in BIO tags1 overlap
with those of tags2.
"""
# Get predicate word indices from both predictions
pred_ind1 = get_predicate_indices(tags1)
pred_ind2 = get_predicate_indices(tags2)
# Return if pred_ind1 pred_ind2 overlap
return any(set.intersection(set(pred_ind1), set(pred_ind2))) |
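A standalone sketch assuming the usual SRL convention that predicate tokens are tagged 'B-V'/'I-V'; this inline get_predicate_indices is hypothetical, standing in for the module's own helper:

def get_predicate_indices(tags):
    return [i for i, tag in enumerate(tags) if tag.endswith('-V')]

tags1 = ['B-ARG0', 'B-V', 'O', 'O']
tags2 = ['O', 'B-V', 'I-V', 'O']
print(bool(set(get_predicate_indices(tags1)) & set(get_predicate_indices(tags2))))  # True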
python | def rename_afw_states(afw: dict, suffix: str):
""" Side effect on input! Renames all the states of the AFW
adding a **suffix**.
It is an utility function used during testing to avoid automata to have
states with names in common.
Avoid suffixes that can lead to special names like "as", "and", ...
:param dict afw: input AFW.
:param str suffix: string to be added at beginning of each state name.
"""
conversion_dict = {}
new_states = set()
new_accepting = set()
for state in afw['states']:
conversion_dict[state] = '' + suffix + state
new_states.add('' + suffix + state)
if state in afw['accepting_states']:
new_accepting.add('' + suffix + state)
afw['states'] = new_states
afw['initial_state'] = '' + suffix + afw['initial_state']
afw['accepting_states'] = new_accepting
new_transitions = {}
for transition in afw['transitions']:
new_transition = __replace_all(conversion_dict, transition[0])
new_transitions[new_transition, transition[1]] = \
__replace_all(conversion_dict, afw['transitions'][transition])
afw['transitions'] = new_transitions |
python | def touch_last_worklog(self, issue):
"""
Touch the last worklog for an issue (changes the updated date on the worklog). We use this date as the 'mark' for
determining the time elapsed for the next log entry.
"""
worklogs = self.get_worklog(issue)
if worklogs:
last_worklog = worklogs[-1]
last_worklog.update()
return True |
python | def FinalizeTransferUrl(self, url):
"""Modify the url for a given transfer, based on auth and version."""
url_builder = _UrlBuilder.FromUrl(url)
if self.global_params.key:
url_builder.query_params['key'] = self.global_params.key
return url_builder.url |
java | private static ZonedDateTime ofLenient(LocalDateTime localDateTime, ZoneOffset offset, ZoneId zone) {
Jdk8Methods.requireNonNull(localDateTime, "localDateTime");
Jdk8Methods.requireNonNull(offset, "offset");
Jdk8Methods.requireNonNull(zone, "zone");
if (zone instanceof ZoneOffset && offset.equals(zone) == false) {
throw new IllegalArgumentException("ZoneId must match ZoneOffset");
}
return new ZonedDateTime(localDateTime, offset, zone);
} |
java | @Override
public Pair<ByteSlice, ByteSlice> splitOnLastLineEnd() {
Pair<ByteSlice, ByteSlice> sliced = suffix.splitOnLastLineEnd();
return Pair.of(ByteSlice.join(this.prefix, sliced.first), sliced.second);
} |
python | def reactions_add(self, *, name: str, **kwargs) -> SlackResponse:
"""Adds a reaction to an item.
Args:
name (str): Reaction (emoji) name. e.g. 'thumbsup'
channel (str): Channel where the message to add reaction to was posted.
e.g. 'C1234567890'
timestamp (str): Timestamp of the message to add reaction to. e.g. '1234567890.123456'
"""
kwargs.update({"name": name})
return self.api_call("reactions.add", json=kwargs) |
java | protected String setEscapedParameter(HttpMessage message, String param, String value) {
return variant.setEscapedParameter(message, originalPair, param, value);
} |
python | def batch_step(self, batch_idx=None):
"""Updates the learning rate for the batch index: ``batch_idx``.
If ``batch_idx`` is None, ``CyclicLR`` will use an internal
batch index to keep track of the index.
"""
if batch_idx is None:
batch_idx = self.last_batch_idx + 1
self.last_batch_idx = batch_idx
for param_group, lr in zip(self.optimizer.param_groups, self.get_lr()):
param_group['lr'] = lr |
java | public AbstractExpression getSimpleFilterExpression()
{
if (m_whereExpr != null) {
if (m_joinExpr != null) {
return ExpressionUtil.combine(m_whereExpr, m_joinExpr);
}
return m_whereExpr;
}
return m_joinExpr;
} |
python | def html_document_fromstring(s):
"""Parse html tree from string. Return None if the string can't be parsed.
"""
if isinstance(s, six.text_type):
s = s.encode('utf8')
try:
if html_too_big(s):
return None
return html5parser.document_fromstring(s, parser=_html5lib_parser())
except Exception:
pass |
python | def ReadClientStartupInfoHistory(self, client_id, timerange=None):
"""Reads the full startup history for a particular client."""
from_time, to_time = self._ParseTimeRange(timerange)
history = self.startup_history.get(client_id)
if not history:
return []
res = []
for ts in sorted(history, reverse=True):
if ts < from_time or ts > to_time:
continue
client_data = rdf_client.StartupInfo.FromSerializedString(history[ts])
client_data.timestamp = ts
res.append(client_data)
return res |
java | public void setFontAssetDelegate(
@SuppressWarnings("NullableProblems") FontAssetDelegate assetDelegate) {
this.fontAssetDelegate = assetDelegate;
if (fontAssetManager != null) {
fontAssetManager.setDelegate(assetDelegate);
}
} |
python | def get_path():
'''
Returns a list of items in the SYSTEM path
CLI Example:
.. code-block:: bash
salt '*' win_path.get_path
'''
ret = salt.utils.stringutils.to_unicode(
__utils__['reg.read_value'](
'HKEY_LOCAL_MACHINE',
'SYSTEM\\CurrentControlSet\\Control\\Session Manager\\Environment',
'PATH')['vdata']
).split(';')
# Trim ending backslash
return list(map(_normalize_dir, ret)) |
java | @Override
protected void _fit(Dataframe trainingData) {
TrainingParameters trainingParameters = knowledgeBase.getTrainingParameters();
Configuration configuration = knowledgeBase.getConfiguration();
//reset previous entries on the bundle
resetBundle();
//initialize the parts of the pipeline
AbstractScaler.AbstractTrainingParameters nsParams = trainingParameters.getNumericalScalerTrainingParameters();
AbstractScaler numericalScaler = null;
if(nsParams != null) {
numericalScaler = MLBuilder.create(nsParams, configuration);
}
bundle.put(NS_KEY, numericalScaler);
AbstractEncoder.AbstractTrainingParameters ceParams = trainingParameters.getCategoricalEncoderTrainingParameters();
AbstractEncoder categoricalEncoder = null;
if(ceParams != null) {
categoricalEncoder = MLBuilder.create(ceParams, configuration);
}
bundle.put(CE_KEY, categoricalEncoder);
List<AbstractFeatureSelector.AbstractTrainingParameters> fsParamsList = trainingParameters.getFeatureSelectorTrainingParametersList();
int numOfFS = fsParamsList.size();
for(int i=0;i<numOfFS;i++) {
AbstractFeatureSelector.AbstractTrainingParameters fsParams = fsParamsList.get(i);
AbstractFeatureSelector featureSelector = MLBuilder.create(fsParams, configuration);
bundle.put(FS_KEY+i, featureSelector);
}
AbstractModeler.AbstractTrainingParameters mlParams = trainingParameters.getModelerTrainingParameters();
AbstractModeler modeler = MLBuilder.create(mlParams, configuration);
bundle.put(ML_KEY, modeler);
//set the parallized flag to all algorithms
bundle.setParallelized(isParallelized());
//run the pipeline
for(String step : pipeline) {
switch (step) {
case NS_KEY:
if(numericalScaler != null) {
numericalScaler.fit_transform(trainingData);
}
break;
case CE_KEY:
if(categoricalEncoder != null) {
categoricalEncoder.fit_transform(trainingData);
}
break;
case FS_KEY:
for(int i=0;i<numOfFS;i++) {
AbstractFeatureSelector featureSelector = (AbstractFeatureSelector) bundle.get(FS_KEY+i);
featureSelector.fit_transform(trainingData);
}
break;
case ML_KEY:
modeler.fit(trainingData);
break;
default:
throw new RuntimeException("Invalid Pipeline Step");
}
}
} |
python | def get_event_transfers(self, id, **data):
"""
GET /events/:id/transfers/
Returns a list of :format:`transfers` for the event.
"""
return self.get("/events/{0}/transfers/".format(id), data=data) |