language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
@Override public <T> T execute(final String query, Object connection) { Session session = factory.getConnection(); try { Statement queryStmt = new SimpleStatement(query); KunderaCoreUtils.printQuery(query, showQuery); queryStmt.setConsistencyLevel(ConsistencyLevel.valueOf(this.consistencyLevel.name())); return (T) session.execute(queryStmt); } catch (Exception e) { log.error("Error while executing query {}.", query); throw new KunderaException(e); } finally { // factory.releaseConnection(session); } }
java
/**
 * Factory mapping a {@code Type} to the concrete {@code StoreException}
 * subclass carrying the given message and cause.
 *
 * @param type         kind of store failure
 * @param cause        underlying cause; may be null if errorMessage is set
 * @param errorMessage message; may be empty if cause is set
 * @return the matching StoreException subclass instance
 * @throws IllegalArgumentException if the type is unknown, or if both the
 *         cause and the message are empty
 */
public static StoreException create(final Type type, final Throwable cause, final String errorMessage) {
    // At least one of cause / errorMessage must carry information.
    Preconditions.checkArgument(cause != null || (errorMessage != null && !errorMessage.isEmpty()),
            "Either cause or errorMessage should be non-empty");
    switch (type) {
        case DATA_EXISTS:
            return new DataExistsException(errorMessage, cause);
        case DATA_NOT_FOUND:
            return new DataNotFoundException(errorMessage, cause);
        case DATA_CONTAINS_ELEMENTS:
            return new DataNotEmptyException(errorMessage, cause);
        case WRITE_CONFLICT:
            return new WriteConflictException(errorMessage, cause);
        case ILLEGAL_STATE:
            return new IllegalStateException(errorMessage, cause);
        case OPERATION_NOT_ALLOWED:
            return new OperationNotAllowedException(errorMessage, cause);
        case CONNECTION_ERROR:
            return new StoreConnectionException(errorMessage, cause);
        case UNKNOWN:
            return new UnknownException(errorMessage, cause);
        default:
            throw new IllegalArgumentException("Invalid exception type");
    }
}
python
def build_command(self, parameter_values, command=None):
    """
    Build the list of shell commands for this step.

    Even when the original configuration declared only a single
    `command`, a list is returned; the caller is responsible for
    joining the entries (e.g. with ';' or '&&').

    :param parameter_values: Parameter values augmenting the declared
                             parameter defaults.
    :type parameter_values: dict[str, object]
    :param command: Overriding command; leave falsy to use `self.command`.
    :type command: str|list[str]|None
    :return: list of commands
    :rtype: list[str]
    """
    effective_command = command if command else self.command
    # Merge declared defaults with the caller's values. Flag defaults are
    # deliberately excluded: an undefined flag stays undefined regardless
    # of its declared default.
    merged_values = dict(self.get_parameter_defaults(include_flags=False))
    merged_values.update(parameter_values)
    parameter_map = ParameterMap(parameters=self.parameters, values=merged_values)
    return build_command(effective_command, parameter_map)
python
def submit_and_verify(xml_str=None, xml_file=None, xml_root=None, config=None,
                      session=None, dry_run=None, **kwargs):
    """Submit data to the Polarion Importer and verify that it was imported.

    Returns None on submission failure, the raw submit response when
    verification is skipped (invalid response or ``no_verify``), or the
    verification result otherwise.
    """
    try:
        config = config or configuration.get_config()
        xml_root = _get_xml_root(xml_root, xml_str, xml_file)
        submit_config = SubmitConfig(xml_root, config, **kwargs)
        session = session or utils.get_session(submit_config.credentials, config)
        submit_response = submit(xml_root, submit_config, session, dry_run=dry_run, **kwargs)
    except Dump2PolarionException as err:
        logger.error(err)
        return None

    # Skip verification when the response is invalid or explicitly disabled.
    if not submit_response.validate_response() or kwargs.get("no_verify"):
        return submit_response.response

    return verify_submit(
        session,
        submit_config.queue_url,
        submit_config.log_url,
        submit_response.job_ids,
        timeout=kwargs.get("verify_timeout"),
        log_file=kwargs.get("log_file"),
    )
python
def segment(self, *args):
    """Apply subword segmentation in-place to one or more datasets.

    Arguments:
        Positional arguments: Dataset objects or other indexable mutable
        sequences. For a Dataset, every column bound to this field is
        segmented; other arguments are segmented directly.
    """
    columns = []
    for arg in args:
        if isinstance(arg, Dataset):
            # Collect every dataset column that is bound to this field.
            for name, field in arg.fields.items():
                if field is self:
                    columns.append(getattr(arg, name))
        else:
            columns.append(arg)
    for column in columns:
        for example in tqdm(column, 'segmenting'):
            # In-place slice assignment keeps the container object identity.
            example[:] = self.vocab.segment(example)
python
def move_complete_channel(self):
    """
    Channels and theirs subscribers are moved completely to new channel
    or existing channel.

    Reads from ``self.current.task_data``:
      - 'target_channel_key': key of the destination channel
      - 'chosen_channels': keys of the channels being merged
      - 'chosen_channels_names': display names used in the result message

    Subscribers of the chosen channels are re-pointed at the target
    channel; the merged channels and their messages are then deleted.
    """
    to_channel = Channel.objects.get(self.current.task_data['target_channel_key'])
    chosen_channels = self.current.task_data['chosen_channels']
    chosen_channels_names = self.current.task_data['chosen_channels_names']
    # Bulk-save guard while re-pointing subscribers at the target channel.
    with BlockSave(Subscriber, query_dict={'channel_id': to_channel.key}):
        # typ=15 selects a specific subscriber type -- presumably ordinary
        # channel subscriptions; TODO confirm the meaning of 15.
        for s in Subscriber.objects.filter(channel_id__in=chosen_channels, typ=15):
            s.channel = to_channel
            s.save()
    # Messages of the merged channels are dropped, not moved.
    with BlockDelete(Message):
        Message.objects.filter(channel_id__in=chosen_channels, typ=15).delete()
    with BlockDelete(Channel):
        Channel.objects.filter(key__in=chosen_channels).delete()
    self.current.task_data[
        'msg'] = _(u"Chosen channels(%s) have been merged to '%s' channel successfully.") % \
                 (', '.join(chosen_channels_names), to_channel.name)
python
def gossip_bind(self, format, *args):
    """
    Set-up gossip discovery of other nodes.

    At least one node in the cluster must bind to a well-known gossip
    endpoint, so other nodes can connect to it. Note that gossip
    endpoints are completely distinct from Zyre node endpoints, and
    should not overlap (they can use the same transport).

    :param format: printf-style endpoint format string, forwarded with
        ``*args`` straight to the C binding.
    :return: whatever ``zyre_gossip_bind`` returns in the C library.
    """
    # Thin ctypes wrapper: self._as_parameter_ is the underlying zyre handle.
    return lib.zyre_gossip_bind(self._as_parameter_, format, *args)
python
def _array2cstr(arr): """ Serializes a numpy array to a compressed base64 string """ out = StringIO() np.save(out, arr) return b64encode(out.getvalue())
java
/**
 * Resolves the root module for the given module, caching the result in the
 * module's context so repeated lookups are cheap.
 *
 * @param module the module whose root is required
 * @param reactor all modules of the current reactor
 * @param rulesDirectory directory used to detect the project root
 * @param useExecutionRootAsProjectRoot whether the execution root counts as root
 * @return the resolved root module
 * @throws MojoExecutionException if resolution fails
 */
static MavenProject getRootModule(MavenProject module, List<MavenProject> reactor, String rulesDirectory,
        boolean useExecutionRootAsProjectRoot) throws MojoExecutionException {
    final String contextKey = ProjectResolver.class.getName() + "#rootModule";
    MavenProject root = (MavenProject) module.getContextValue(contextKey);
    if (root != null) {
        return root;
    }
    // Cache miss: resolve via the execution root or the rules directory.
    root = useExecutionRootAsProjectRoot
            ? getRootModule(reactor)
            : getRootModule(module, rulesDirectory);
    module.setContextValue(contextKey, root);
    return root;
}
java
/**
 * Stops the inactivity timer for this transaction, if it is running.
 * The transaction is considered back on-server afterwards, so the current
 * thread is pushed onto the most-recent-thread stack.
 */
public synchronized void stopInactivityTimer() {
    final boolean traceOn = TraceComponent.isAnyTracingEnabled();

    if (traceOn && tc.isEntryEnabled())
        Tr.entry(tc, "stopInactivityTimer");

    if (_inactivityTimerActive) {
        _inactivityTimerActive = false;
        // A timeout of 0 cancels the pending inactivity alarm.
        EmbeddableTimeoutManager.setTimeout(this, EmbeddableTimeoutManager.INACTIVITY_TIMEOUT, 0);
    }

    // The inactivity timer's being stopped so the transaction is
    // back on-server. Push the thread that it's running on onto
    // the stack.
    _mostRecentThread.push(Thread.currentThread());

    if (traceOn && tc.isEntryEnabled())
        Tr.exit(tc, "stopInactivityTimer");
}
java
/**
 * Rolls back the transaction (SPI variant). Only invoked for superior
 * transactions. Valid only in ACTIVE state: STATE_NONE yields
 * IllegalStateException, any other state SystemException. Heuristic
 * outcomes during rollback are recorded via addHeuristic();
 * notifyCompletion() always runs after the internal rollback attempt.
 */
@Override
public void rollback() throws IllegalStateException, SystemException {
    if (tc.isEntryEnabled())
        Tr.entry(tc, "rollback (SPI)");

    final int state = _status.getState();

    //
    // We are only called in this method for superiors.
    //
    if (state == TransactionState.STATE_ACTIVE) {
        //
        // Cancel timeout prior to completion phase
        //
        cancelAlarms();

        try {
            _status.setState(TransactionState.STATE_ROLLING_BACK);
        } catch (SystemException se) {
            FFDCFilter.processException(se, "com.ibm.tx.jta.TransactionImpl.rollback", "1587", this);
            if (tc.isDebugEnabled())
                Tr.debug(tc, "Exception caught setting state to ROLLING_BACK!", se);
            if (tc.isEntryEnabled())
                Tr.exit(tc, "rollback (SPI)");
            throw se;
        }

        try {
            internalRollback();
        } catch (HeuristicMixedException hme) {
            if (tc.isDebugEnabled())
                Tr.debug(tc, "HeuristicMixedException caught rollback processing", hme);
            // state change handled by notifyCompletion
            // Add to list of heuristically completed transactions
            addHeuristic();
        } catch (HeuristicHazardException hhe) {
            if (tc.isDebugEnabled())
                Tr.debug(tc, "HeuristicHazardException caught rollback processing", hhe);
            // state change handled by notifyCompletion
            // Add to list of heuristically completed transactions
            addHeuristic();
        } catch (HeuristicCommitException hce) {
            // NOTE(review): the trace text below says "HeuristicHazardException"
            // although this branch catches HeuristicCommitException -- likely a
            // copy/paste slip in the message; confirm before changing.
            if (tc.isDebugEnabled())
                Tr.debug(tc, "HeuristicHazardException caught rollback processing", hce);
            // state change handled by notifyCompletion
            // Add to list of heuristically completed transactions
            addHeuristic();
        } catch (SystemException se) {
            FFDCFilter.processException(se, "com.ibm.tx.jta.TransactionImpl.rollback", "1626", this);
            if (tc.isEventEnabled())
                Tr.event(tc, "SystemException caught during rollback", se);
            if (tc.isEntryEnabled())
                Tr.exit(tc, "rollback (SPI)");
            throw se;
        } catch (Throwable ex) {
            FFDCFilter.processException(ex, "com.ibm.tx.jta.TransactionImpl.rollback", "1633", this);
            if (tc.isEventEnabled())
                Tr.event(tc, "Exception caught during rollback", ex);
            if (tc.isEntryEnabled())
                Tr.exit(tc, "rollback (SPI)");
            throw new SystemException(ex.getLocalizedMessage());
        } finally {
            // Completion notification runs regardless of the rollback outcome.
            notifyCompletion();
        }
    }
    //
    // Defect 1440
    //
    // We are not in ACTIVE state so we need to
    // throw the appropriate exception.
    //
    else if (state == TransactionState.STATE_NONE) {
        if (tc.isEventEnabled())
            Tr.event(tc, "No transaction available!");
        if (tc.isEntryEnabled())
            Tr.exit(tc, "rollback (SPI)");
        throw new IllegalStateException();
    } else {
        if (tc.isEventEnabled())
            Tr.event(tc, "Invalid transaction state:" + state);
        if (tc.isEntryEnabled())
            Tr.exit(tc, "rollback (SPI)");
        throw new SystemException();
    }

    if (tc.isEntryEnabled())
        Tr.exit(tc, "rollback (SPI)");
}
python
def retry_connect(self):
    """Called when new channels are detected in the token network.

    Tries to open additional channels until the minimum number is
    established. A no-op when the manager has no remaining funds or is
    currently leaving the network.
    """
    with self.lock:
        has_funds = self._funds_remaining > 0
        if has_funds and not self._leaving_state:
            self._open_channels()
java
/**
 * Formats the given date as an ISO-8601 calendar date ({@code yyyy-MM-dd})
 * in the JVM's default time zone.
 *
 * @param inputDate the date to format; must not be {@code null}
 * @return the formatted date string, e.g. {@code "2023-04-01"}
 */
public static String dateToIsoString(Date inputDate) {
    // Locale.ROOT guarantees ASCII digits regardless of the default locale
    // (some locales would otherwise render localized digit glyphs).
    // A fresh SimpleDateFormat per call keeps this method thread-safe.
    DateFormat dateStringFormat = new SimpleDateFormat("yyyy-MM-dd", Locale.ROOT);
    return dateStringFormat.format(inputDate);
}
python
def _set_key_table(self, v, load=False):
    """
    Setter method for key_table, mapped from YANG variable
    /interface/fortygigabitethernet/ip/interface_fo_ospf_conf/ospf_interface_config/md5_authentication/key_table (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_key_table is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_key_table() directly.

    NOTE: pyangbind-generated setter; logic should not be hand-edited.
    """
    # Unwrap values that pyangbind hands over already wrapped.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in a YANGDynClass carrying the full YANG metadata.
        t = YANGDynClass(v,base=key_table.key_table, is_container='container', presence=False, yang_name="key-table", rest_name="key-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' MD5 authentication key ID table ', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'key-id'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Incompatible value: report the expected generated type verbatim.
        raise ValueError({
            'error-string': """key_table must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=key_table.key_table, is_container='container', presence=False, yang_name="key-table", rest_name="key-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u' MD5 authentication key ID table ', u'cli-full-no': None, u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'alt-name': u'key-id'}}, namespace='urn:brocade.com:mgmt:brocade-ospf', defining_module='brocade-ospf', yang_type='container', is_config=True)""",
        })

    self.__key_table = t
    # Notify the parent object, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
python
def _vis_calibrate(data, chn, calib_type, pre_launch_coeffs=False, calib_coeffs=None, mask=False): """Visible channel calibration only. *calib_type* in count, reflectance, radiance """ # Calibration count to albedo, the calibration is performed separately for # two value ranges. if calib_type not in ['counts', 'radiance', 'reflectance']: raise ValueError('Calibration ' + calib_type + ' unknown!') arr = data["hrpt"][:, :, chn] mask |= arr == 0 channel = arr.astype(np.float) if calib_type == 'counts': return channel if calib_type == 'radiance': logger.info("Radiances are not yet supported for " + "the VIS/NIR channels!") if pre_launch_coeffs: coeff_idx = 2 else: # check that coeffs are valid if np.all(data["calvis"][:, chn, 0, 4] == 0): logger.info( "No valid operational coefficients, fall back to pre-launch") coeff_idx = 2 else: coeff_idx = 0 intersection = data["calvis"][:, chn, coeff_idx, 4] if calib_coeffs is not None: logger.info("Updating from external calibration coefficients.") # intersection = np.expand_dims slope1 = np.expand_dims(calib_coeffs[0], 1) intercept1 = np.expand_dims(calib_coeffs[1], 1) slope2 = np.expand_dims(calib_coeffs[2], 1) intercept2 = np.expand_dims(calib_coeffs[3], 1) else: slope1 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 0] * 1e-10, 1) intercept1 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 1] * 1e-7, 1) slope2 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 2] * 1e-10, 1) intercept2 = np.expand_dims(data["calvis"][:, chn, coeff_idx, 3] * 1e-7, 1) if chn == 2: slope2[slope2 < 0] += 0.4294967296 mask1 = channel <= np.expand_dims(intersection, 1) mask2 = channel > np.expand_dims(intersection, 1) channel[mask1] = (channel * slope1 + intercept1)[mask1] channel[mask2] = (channel * slope2 + intercept2)[mask2] channel = channel.clip(min=0) return np.where(mask, np.nan, channel)
python
def cost(self, logits, target):
    """Returns cost.

    Args:
        logits: model output.
        target: target.

    Returns:
        Cross-entropy loss for a sequence of logits, summed over all
        time steps and divided by the batch size.
    """
    flat_shape = [self._num_steps * self._batch_size, -1]
    flat_logits = tf.reshape(logits, flat_shape)
    flat_target = tf.reshape(target, flat_shape)
    xent = tf.nn.softmax_cross_entropy_with_logits(logits=flat_logits,
                                                   labels=flat_target)
    return tf.reduce_sum(xent) / self._batch_size
java
/**
 * Parses a {@link Properties} object from an in-memory string in
 * {@code .properties} format.
 *
 * @param properties the properties file content
 * @return the parsed properties, never null
 * @throws IOException if the content cannot be parsed
 */
@Nonnull
public static Properties loadProperties(@Nonnull String properties) throws IOException {
    final Properties parsed = new Properties();
    parsed.load(new StringReader(properties));
    return parsed;
}
java
/**
 * Converts an example value string to the Java type named by an OpenAPI-style
 * type identifier ("integer", "number", "boolean", "string").
 * Unknown types fall back to the raw string; a null value stays null.
 *
 * @param value the raw example value, possibly null
 * @param type  the declared type name (must not be null)
 * @return the converted value
 * @throws RuntimeException if a numeric conversion fails
 */
public static Object convertExample(String value, String type) {
    if (value == null) {
        return null;
    }
    try {
        if (type.equals("integer")) {
            return Integer.valueOf(value);
        }
        if (type.equals("number")) {
            return Float.valueOf(value);
        }
        if (type.equals("boolean")) {
            return Boolean.valueOf(value);
        }
        // "string" and any unrecognized type: return the value unchanged.
        return value;
    } catch (NumberFormatException e) {
        throw new RuntimeException(String.format("Value '%s' cannot be converted to '%s'", value, type), e);
    }
}
python
def getApplicationProcessId(self, pchAppKey):
    """Return the process ID for an application.

    Returns 0 if the application was not found or is not running.
    """
    lookup = self.function_table.getApplicationProcessId
    return lookup(pchAppKey)
java
/**
 * Removes every entry from the backing map and returns the removed
 * key/value pairs in encounter order. Entries whose value changed between
 * the snapshot read and the removal (two-arg remove failed) are skipped.
 * <p>
 * NOTE(review): removing from {@code map} while iterating its entry set is
 * only safe when {@code map} is a concurrent map (e.g. ConcurrentHashMap);
 * a plain HashMap would throw ConcurrentModificationException -- confirm
 * the field's actual type.
 */
@Override
public Map<K, V> clearAll() {
    Map<K, V> result = new LinkedHashMap<K, V>(map.size());
    for (Map.Entry<K, V> entry : map.entrySet()) {
        K key = entry.getKey();
        V value = entry.getValue();
        // Conditional removal: only succeeds if the mapping is unchanged.
        boolean removed = map.remove(key, value);
        if (removed) {
            result.put(key, value);
        }
    }
    return result;
}
python
def is_stable(self, species):
    '''
    Check whether an isotope given as e.g. 'He-3' occurs in the
    stable_el table; returns True if it does, otherwise False.

    Notes
    -----
    Designed to work with an se instance from nugridse.py; extra work
    is required to use it with ppn.py. FH, April 20, 2013.
    '''
    parts = species.split('-')
    element_name = parts[0]
    try:
        mass_number = int(parts[1])
    except ValueError:
        # Species names with extra letters after the mass number
        # (e.g. isomers) cannot be parsed into A; treat them as
        # unstable. Not strictly correct -- see the original note about
        # identifying species by A, Z and isomeric state.
        mass_number = 999
    element_idx = self.stable_names.index(element_name)
    return mass_number in self.stable_el[element_idx][1:]
python
def unregister_factory(self, factory_name):
    # type: (str) -> bool
    """
    Unregisters the given component factory

    Fires an UNREGISTERED event, kills every live component instance of
    the factory, drops any components still waiting on handlers, and
    clears the factory's bundle context.

    :param factory_name: Name of the factory to unregister
    :return: True the factory has been removed, False if the factory
             is unknown
    """
    if not factory_name or not is_string(factory_name):
        # Invalid name
        return False

    with self.__factories_lock:
        try:
            # Remove the factory from the registry
            factory_class = self.__factories.pop(factory_name)
        except KeyError:
            # Unknown factory
            return False

        # Trigger an event
        self._fire_ipopo_event(
            constants.IPopoEvent.UNREGISTERED, factory_name
        )

        # Invalidate and delete all components of this factory
        with self.__instances_lock:
            # Compute the list of __instances to remove
            to_remove = self.__get_stored_instances(factory_name)

            # Remove instances from the registry: avoids dependencies
            # update to link against a component from this factory again.
            for instance in to_remove:
                try:
                    # Kill the instance
                    self.kill(instance.name)
                except ValueError:
                    # Unknown instance: already killed by the invalidation
                    # callback of a component killed in this loop
                    # => ignore
                    pass

        # Remove waiting component
        names = [
            name
            for name, (context, _) in self.__waiting_handlers.items()
            if context.factory_context.name == factory_name
        ]
        for name in names:
            del self.__waiting_handlers[name]

        # Clear the bundle context of the factory
        _set_factory_context(factory_class, None)

    return True
python
def from_httplib(ResponseCls, r, **response_kw):
    """
    Build a :class:`urllib3.response.HTTPResponse` from an
    :class:`httplib.HTTPResponse` instance ``r``.

    Remaining keyword arguments are forwarded to the ``ResponseCls``
    constructor, along with ``original_response=r``.
    """
    headers = HTTPHeaderDict()
    for name, value in r.getheaders():
        headers.add(name, value)

    # HTTPResponse objects in Python 3 don't have a .strict attribute.
    strict = getattr(r, 'strict', 0)

    return ResponseCls(
        body=r,
        headers=headers,
        status=r.status,
        version=r.version,
        reason=r.reason,
        strict=strict,
        original_response=r,
        **response_kw
    )
python
def egg(qualifier: Union[str, Type] = '', profile: str = None):
    """
    A function that returns a decorator (or acts like a decorator) that
    marks class or function as a source of `base`.

    If a class is decorated, it should inherit after from `base` type.
    If a function is decorated, it declared return type should inherit
    after some `base` type, or it should be the `base` type.

    .. code-block:: python

        @egg
        class DepImpl(DepType):
            pass

        @egg(profile='test')
        class TestDepImpl(DepType):
            pass

        @egg(qualifier='special_dep')
        def dep_factory() -> DepType:
            return SomeDepImpl()

    :param qualifier: extra qualifier for dependency. Can be used to
        register more than one type for one base. If non-string argument
        is passed, it'll act like a decorator.
    :param profile: An optional profile within this dependency should be used
    :return: decorator
    """
    # When used bare (`@egg`), `qualifier` is actually the decorated object;
    # keep it so it can be decorated immediately below.
    first_arg = qualifier

    def egg_dec(obj: Union[FunctionType, type]) -> T:
        if isinstance(obj, FunctionType):
            # Factory function: the declared return annotation is the
            # provided type; a missing annotation is a configuration error.
            spec = inspect.signature(obj)
            return_annotation = spec.return_annotation
            if return_annotation is Signature.empty:
                raise ConfigurationError('No return type annotation')
            egg.factories.append(
                Egg(
                    type_=spec.return_annotation,
                    qualifier=qualifier,
                    egg_=obj,
                    base_=None,
                    profile=profile
                ))
            return obj
        elif isinstance(obj, type):
            # Decorated class: the class itself is the provided type.
            egg.factories.append(
                Egg(type_=obj, qualifier=qualifier, egg_=obj, base_=None,
                    profile=profile))
            return obj
        else:
            raise AttributeError('Wrong egg obj type')

    if isinstance(qualifier, str):
        # Called with arguments: behave as a decorator factory. An empty
        # qualifier string is normalized to None.
        qualifier = qualifier or None
        return egg_dec
    else:
        # Called bare: decorate the object that arrived as `qualifier`.
        qualifier = None
        return egg_dec(first_arg)
java
/**
 * Resolves the dataset matching the given content URI.
 * Context-aware datasets receive the provider context before being returned.
 *
 * @param uri the content URI to resolve
 * @return the matching dataset, never null
 * @throws IllegalArgumentException if no dataset matches the URI
 */
protected Dataset getDatasetOrThrowException(final Uri uri) {
    final Dataset matched = mMatcher.matchDataset(uri);
    if (matched == null) {
        throw new IllegalArgumentException("Unsupported URI: " + uri);
    }
    if (matched instanceof ContextDataset) {
        // Context-aware datasets need the provider context injected lazily.
        ((ContextDataset) matched).setContext(getContext());
    }
    return matched;
}
java
private void printKeySet() { Set<?> keys = keySet(); System.out.println("printing keyset:"); for (Object o: keys) { //System.out.println(Arrays.asList((Object[]) i.next())); System.out.println(o); } }
python
def annotate(text, gcube_token=None, lang=DEFAULT_LANG, api=DEFAULT_TAG_API,
             long_text=DEFAULT_LONG_TEXT):
    '''
    Annotate a text, linking it to Wikipedia entities.

    :param text: the text to annotate.
    :param gcube_token: the authentication token provided by the D4Science
        infrastructure.
    :param lang: the Wikipedia language.
    :param api: the API endpoint.
    :param long_text: long_text parameter (see TagMe documentation).
    :return: an AnnotateResponse, or None when the service returned nothing.
    '''
    payload = [
        ("text", text.encode("utf-8")),
        ("long_text", long_text),
        ("lang", lang),
    ]
    json_response = _issue_request(api, payload, gcube_token)
    if not json_response:
        return None
    return AnnotateResponse(json_response)
python
def _apply_shadow_vars(avg_grads):
    """
    Create shadow variables on PS, and replace variables in avg_grads
    by these shadow variables.

    Args:
        avg_grads: list of (grad, var) tuples; every var name must start
            with 'tower' (asserted below).

    Returns:
        list of (grad, shadow_var) tuples, where each shadow_var is a
        global (PS-side) variable initialized from the tower variable.
    """
    ps_var_grads = []
    for grad, var in avg_grads:
        assert var.name.startswith('tower'), var.name
        # Strip the leading 'tower*/' scope so the shadow variable gets a
        # tower-independent (global) name.
        my_name = '/'.join(var.name.split('/')[1:])
        my_name = get_op_tensor_name(my_name)[0]
        new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype,
                                initializer=var.initial_value,
                                trainable=True)
        # (g, v) to be applied, where v is global (ps vars)
        ps_var_grads.append((grad, new_v))
    return ps_var_grads
java
/**
 * Wakes this worker on the current thread if it is idle and work is
 * available.
 * <p>
 * Lock ordering matters: the priority queue monitor is acquired before
 * this object's monitor -- do not reorder the nested blocks.
 *
 * @throws SIConnectionDroppedException if the underlying connection is gone
 */
protected void proddle() throws SIConnectionDroppedException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "proddle");

    boolean useThisThread = false;
    synchronized (priorityQueue) {
        synchronized (this) {
            if (idle) {
                // Claim the work for this thread only if something is
                // queued; otherwise remain marked idle.
                useThisThread = isWorkAvailable();
                idle = !useThisThread;
            }
        }
    }

    // Dispatch outside both monitors so no lock is held during the work.
    if (useThisThread) {
        doWork(false);
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "proddle");
}
python
def geodist_task(newick_string_a, newick_string_b, normalise, min_overlap=4,
                 overlap_fail_value=0):
    """
    Distributed version of tree_distance.geodist

    Parameters: two valid newick strings and a boolean
    """
    first_tree = Tree(newick_string_a)
    second_tree = Tree(newick_string_b)
    return treedist.geodist(first_tree, second_tree, normalise,
                            min_overlap, overlap_fail_value)
java
/**
 * Builds the documentation entries for the {@code @serialField} tags of the
 * current member (one tag is expected per ObjectStreamField element).
 * No-op when the -nocomment option is set.
 *
 * @param serializableFieldsTree content tree receiving the field entries
 */
public void buildSerialFieldTagsInfo(Content serializableFieldsTree) {
    if(configuration.nocomment){
        return;
    }
    VariableElement field = (VariableElement)currentMember;
    // Process Serializable Fields specified as array of
    // ObjectStreamFields. Print a member for each serialField tag.
    // (There should be one serialField tag per ObjectStreamField
    // element.)
    SortedSet<SerialFieldTree> tags = new TreeSet<>(utils.makeSerialFieldTreeComparator());
    // sort the elements
    for (DocTree dt : utils.getSerialFieldTrees(field)) {
        SerialFieldTree st = (SerialFieldTree) dt;
        tags.add(st);
    }

    CommentHelper ch = utils.getCommentHelper(field);
    for (SerialFieldTree tag : tags) {
        if (tag.getName() == null || tag.getType() == null)  // ignore malformed @serialField tags
            continue;

        Content fieldsContentTree = fieldWriter.getFieldsContentHeader(tag.equals(tags.last()));
        TypeElement te = ch.getReferencedClass(configuration, tag);
        String fieldType = ch.getReferencedMemberName(tag);
        if (te != null && utils.isPrimitive(te.asType())) {
            // Primitive reference: use the primitive's name, drop the element.
            fieldType = utils.getTypeName(te.asType(), false);
            te = null;
        }
        String refSignature = ch.getReferencedSignature(tag);
        // TODO: Print the signature directly, if it is an array, the
        // current DocTree APIs makes it very hard to distinguish
        // an as these are returned back as "Array" a DeclaredType.
        // NOTE(review): refSignature could be null here, which would NPE on
        // endsWith -- confirm getReferencedSignature's contract.
        if (refSignature.endsWith("[]")) {
            te = null;
            fieldType = refSignature;
        }
        fieldWriter.addMemberHeader(te, fieldType, "",
                tag.getName().getName().toString(), fieldsContentTree);
        fieldWriter.addMemberDescription(field, tag, fieldsContentTree);
        serializableFieldsTree.addContent(fieldsContentTree);
    }
}
python
def make_crossroad_router(source, drain=False):
    '''
    legacy crossroad implementation. deprecated

    Returns a (sink, route) pair:
      - ``sink`` is an observable that remembers its single subscriber;
        items from routed request streams are pushed into it;
      - ``route`` maps a request observable to a response observable that
        relays ``source`` until a ``cyclotron.Drain`` item arrives.

    :param source: response-side observable to relay.
    :param drain: when True, completion of a request stream emits a
        ``Drain`` marker into the sink instead of completing it.
    '''
    # The sink's current observer; None until the sink is subscribed.
    sink_observer = None

    def on_sink_subscribe(observer):
        nonlocal sink_observer
        sink_observer = observer

        def dispose():
            nonlocal sink_observer
            sink_observer = None

        return dispose

    def route_crossroad(request):
        def on_response_subscribe(observer):
            def on_next_source(i):
                # A Drain marker from the source ends the response stream.
                if type(i) is cyclotron.Drain:
                    observer.on_completed()
                else:
                    observer.on_next(i)

            source_disposable = source.subscribe(
                on_next=on_next_source,
                on_error=lambda e: observer.on_error(e),
                on_completed=lambda: observer.on_completed()
            )

            def on_next_request(i):
                # Forward request items into the sink, if anyone listens.
                if sink_observer is not None:
                    sink_observer.on_next(i)

            def on_request_completed():
                if sink_observer is not None:
                    if drain is True:
                        # Signal end-of-stream without completing the sink.
                        sink_observer.on_next(cyclotron.Drain())
                    else:
                        sink_observer.on_completed()

            request_disposable = request.subscribe(
                on_next=on_next_request,
                on_error=observer.on_error,
                on_completed=on_request_completed
            )

            def dispose():
                source_disposable.dispose()
                request_disposable.dispose()

            return dispose

        return Observable.create(on_response_subscribe)

    return Observable.create(on_sink_subscribe), route_crossroad
python
def bearing(self, format='numeric'):
    """Calculate bearing between locations in segments.

    Args:
        format (str): Format of the bearing string to return

    Returns:
        list of list of float: Groups of bearings between points in
        segments; an empty group for segments with fewer than two points.
    """
    return [segment.bearing(format) if len(segment) >= 2 else []
            for segment in self]
java
/**
 * Dispatches a client command to the matching Command object of this class.
 * Lookup is case-insensitive. Before execution the device's
 * always_executed_hook() runs and the command's is_allowed() check is
 * enforced; unknown or disallowed commands raise a DevFailed exception.
 *
 * @param device  target device instance
 * @param command command name as sent by the client
 * @param in_any  CORBA Any holding the input argument
 * @return CORBA Any holding the command result
 * @throws DevFailed if the command is unknown or not allowed in the
 *         current device state
 */
public Any command_handler(final DeviceImpl device, final String command, final Any in_any) throws DevFailed {
    Any ret = Util.instance().get_orb().create_any();
    Util.out4.println("Entering DeviceClass::command_handler() method");

    int i;
    final String cmd_name = command.toLowerCase();
    for (i = 0; i < command_list.size(); i++) {
        final Command cmd = (Command) command_list.elementAt(i);
        if (cmd.get_name().toLowerCase().equals(cmd_name) == true) {
            //
            // Call the always executed method
            //
            device.always_executed_hook();

            //
            // Check if the command is allowed
            //
            if (cmd.is_allowed(device, in_any) == false) {
                final StringBuffer o = new StringBuffer("Command ");
                o.append(command);
                o.append(" not allowed when the device is in ");
                o.append(Tango_DevStateName[device.get_state().value()]);
                o.append(" state");
                Except.throw_exception("API_CommandNotAllowed", o.toString(), "DeviceClass.command_handler");
            }

            //
            // Execute the command
            //
            ret = cmd.execute(device, in_any);
            break;
        }
    }

    // Loop ran to completion without a match: the command does not exist.
    if (i == command_list.size()) {
        Util.out3.println("DeviceClass.command_handler(): command " + command + " not found");

        //
        // throw an exception to client
        //
        Except.throw_exception("API_CommandNotFound", "Command " + command + " not found", "DeviceClass.command_handler");
    }

    Util.out4.println("Leaving DeviceClass.command_handler() method");
    return ret;
}
python
def add_stderr_logger(level=logging.DEBUG):
    """
    Helper for quickly adding a StreamHandler to the logger. Useful for
    debugging.

    :param level: logging level applied to the logger, default DEBUG.
    :return: the handler after adding it.
    """
    # This method needs to be in this __init__.py to get the __name__ correct
    # even if urllib3 is vendored within another package.
    logger = logging.getLogger(__name__)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)
    # Lazy %-style argument: the message is only formatted when DEBUG is
    # actually enabled, instead of eager '%' interpolation.
    logger.debug('Added a stderr logging handler to logger: %s', __name__)
    return handler
python
def parseEC2Json2List(jsontext, region):
    """
    Takes a JSON and returns a list of InstanceType objects representing
    EC2 instance params.

    :param jsontext: raw JSON text from the AWS pricing endpoint.
    :param region: human-readable region name ("location") to filter on.
    :return: list of unique InstanceType objects for Linux, Shared-tenancy,
        plain RunInstances offerings in the region.
    :raises RuntimeError: if the same instance type appears twice, which
        would indicate the EC2 pricing JSON format changed.
    """
    currentList = json.loads(jsontext)
    ec2InstanceList = []
    # NOTE(review): iteritems is the py2/six compat helper -- presumably
    # imported at module level; confirm before modernizing.
    for k, v in iteritems(currentList["products"]):
        if "location" in v["attributes"] and v["attributes"]["location"] == region:
            # 3 tenant types: 'Host' (always $0.00; just a template?)
            #                 'Dedicated' (toil does not support; these are pricier)
            #                 'Shared' (AWS default and what toil uses)
            if "tenancy" in v["attributes"] and v["attributes"]["tenancy"] == "Shared":
                if v["attributes"]["operatingSystem"] == "Linux":
                    # The same instance can appear with multiple "operation"
                    # values; "RunInstances" is normal, and
                    # "RunInstances:<code>" is e.g. Linux with MS SQL Server
                    # installed.
                    if v["attributes"]["operation"] == "RunInstances":
                        disks, disk_capacity = parseStorage(v["attributes"]["storage"])
                        memory = parseMemory(v["attributes"]["memory"])
                        instance = InstanceType(name=v["attributes"]["instanceType"],
                                                cores=v["attributes"]["vcpu"],
                                                memory=memory,
                                                disks=disks,
                                                disk_capacity=disk_capacity)
                        if instance not in ec2InstanceList:
                            ec2InstanceList.append(instance)
                        else:
                            raise RuntimeError('EC2 JSON format has likely changed. '
                                               'Duplicate instance {} found.'.format(instance))
    return ec2InstanceList
python
def unicode2bytes(x, encoding='utf-8', errors='strict'):
    """
    Convert a unicode string to C{bytes}; non-text values pass through
    unchanged.

    @param x: a unicode string, of type C{unicode} on Python 2,
        or C{str} on Python 3.
    @param encoding: an optional codec, default: 'utf-8'
    @param errors: error handling scheme, default 'strict'
    @return: a string of type C{bytes}
    """
    if not isinstance(x, text_type):
        return x
    return x.encode(encoding, errors)
java
/**
 * Reads a value from etcd given a combined URI of the form
 * {@code etcd://host:port/key/path}: the first '/' after the scheme
 * separates the etcd endpoint from the key path.
 *
 * @param KeyURI combined etcd endpoint + key URI
 * @return the key/value read from etcd
 * @throws EmbeddedJmxTransException wrapping any failure while reading
 */
public KeyValue getKeyValue(String KeyURI) throws EmbeddedJmxTransException {
    // Position 7 skips the "etcd://" scheme prefix.
    final int pathStart = KeyURI.indexOf("/", 7);
    final String etcdEndpoint = KeyURI.substring(0, pathStart);
    final String etcdKey = KeyURI.substring(pathStart);
    try {
        return getFromEtcd(makeEtcdBaseUris(etcdEndpoint), etcdKey);
    } catch (Throwable t) {
        throw new EmbeddedJmxTransException(
                "Exception reading etcd key '" + KeyURI + "': " + t.getMessage(), t);
    }
}
java
/**
 * Parses: equalityExpression : instanceOfExpression ( ('==' | '!=') instanceOfExpression )* ;
 * ANTLR-generated from Java.g -- only comments added; do not hand-edit logic.
 */
public final void equalityExpression() throws RecognitionException {
    int equalityExpression_StartIndex = input.index();

    try {
        // Backtracking memoization: skip if rule 116 already parsed here.
        if ( state.backtracking>0 && alreadyParsedRule(input, 116) ) { return; }

        // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1162:5: ( instanceOfExpression ( ( '==' | '!=' ) instanceOfExpression )* )
        // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1162:9: instanceOfExpression ( ( '==' | '!=' ) instanceOfExpression )*
        {
            pushFollow(FOLLOW_instanceOfExpression_in_equalityExpression5214);
            instanceOfExpression();
            state._fsp--;
            if (state.failed) return;

            // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1162:30: ( ( '==' | '!=' ) instanceOfExpression )*
            loop148:
            while (true) {
                int alt148=2;
                // Token types 30 and 55 are '==' and '!='.
                int LA148_0 = input.LA(1);
                if ( (LA148_0==30||LA148_0==55) ) {
                    alt148=1;
                }

                switch (alt148) {
                case 1 :
                    // src/main/resources/org/drools/compiler/semantics/java/parser/Java.g:1162:32: ( '==' | '!=' ) instanceOfExpression
                    {
                        if ( input.LA(1)==30||input.LA(1)==55 ) {
                            input.consume();
                            state.errorRecovery=false;
                            state.failed=false;
                        }
                        else {
                            if (state.backtracking>0) {state.failed=true; return;}
                            MismatchedSetException mse = new MismatchedSetException(null,input);
                            throw mse;
                        }
                        pushFollow(FOLLOW_instanceOfExpression_in_equalityExpression5226);
                        instanceOfExpression();
                        state._fsp--;
                        if (state.failed) return;
                    }
                    break;

                default :
                    break loop148;
                }
            }
        }
    }
    catch (RecognitionException re) {
        reportError(re);
        recover(input,re);
    }
    finally {
        // do for sure before leaving
        if ( state.backtracking>0 ) { memoize(input, 116, equalityExpression_StartIndex); }
    }
}
python
def duplicate(self):
    """
    Create a copy of the script.

    A new instance of the same script class is constructed with the same
    name, instruments, sub-scripts, settings, log function and data path as
    this one (each is only passed to the constructor when it is set, so the
    constructor's own defaults still apply otherwise).  Runtime state (data,
    start/end time, running flag) is copied onto the new instance afterwards.

    Returns:
        A new script instance duplicating this one.
    """
    # Collect constructor arguments, only including values that are set.
    # This replaces the previous eval()-based construction, which built and
    # evaluated a source string -- a plain kwargs dict is equivalent, safer
    # and easier to debug.
    kwargs = {'name': self.name}
    if self.instruments != {}:
        kwargs['instruments'] = self.instruments
    if self.scripts != {}:
        kwargs['scripts'] = self.scripts
    if self.settings != {}:
        kwargs['settings'] = self.settings
    if self.log_function is not None:
        kwargs['log_function'] = self.log_function
    if self.data_path is not None:
        kwargs['data_path'] = self.data_path

    script_instance = self.__class__(**kwargs)

    # Copy runtime state that callers may inspect on the duplicate.  `data`
    # is deep-copied so the duplicate cannot mutate the original's results.
    script_instance.data = deepcopy(self.data)
    script_instance.start_time = self.start_time
    script_instance.end_time = self.end_time
    script_instance.is_running = self.is_running
    return script_instance
java
/**
 * Produces a darker shade of the given ARGB color.
 *
 * <p>The alpha channel is preserved; each RGB channel is multiplied by
 * {@code factor} and clamped at zero.
 *
 * @param color  the source ARGB color
 * @param factor scale applied to each RGB channel (expected in [0, 1])
 * @return the darkened ARGB color
 */
static int darker(int color, float factor) {
    final int red = Math.max(0, (int) (Color.red(color) * factor));
    final int green = Math.max(0, (int) (Color.green(color) * factor));
    final int blue = Math.max(0, (int) (Color.blue(color) * factor));
    return Color.argb(Color.alpha(color), red, green, blue);
}
java
// EMF-generated accessor (Xtext model): lazily creates the resolving list of
// grammars referenced by this grammar on first access, then caches it.
// Do not hand-edit; regenerated from the Ecore model.
public EList<Grammar> getUsedGrammars() { if (usedGrammars == null) { usedGrammars = new EObjectResolvingEList<Grammar>(Grammar.class, this, XtextPackage.GRAMMAR__USED_GRAMMARS); } return usedGrammars; }
python
def get_mapreduce_yaml(parse=parse_mapreduce_yaml):
    """Locates mapreduce.yaml, loads and parses its info.

    Args:
        parse: parsing function, injectable for testing.

    Returns:
        MapReduceYaml object.

    Raises:
        errors.MissingYamlError: if the mapreduce.yaml file could not be located.
        errors.BadYamlError: when contents is not a valid mapreduce.yaml file.
    """
    mr_yaml_path = find_mapreduce_yaml()
    if not mr_yaml_path:
        raise errors.MissingYamlError()
    # 'with' guarantees the file handle is closed even if parsing raises,
    # replacing the previous manual try/finally.
    with open(mr_yaml_path) as mr_yaml_file:
        return parse(mr_yaml_file.read())
python
# Builds the JSON-ready dict (net.viz) consumed by the clustergram.js front
# end.  For each row/column node it assembles a dict of name, initial order,
# cluster/rank order, optional rankvar, any "cat-*" category labels (plus
# their p-values and index columns when present), optional value/info fields,
# and -- when dendro=True -- the per-cutoff dendrogram group assignments.
# The matrix itself is emitted either as a list of source/target/value link
# dicts (links=True, including optional up/dn/orig/info/highlight layers) or
# as plain nested lists for each available mat_* layer.
# NOTE(review): intricate index bookkeeping (clust order uses .index(i));
# left byte-identical, comments only.
def viz_json(net, dendro=True, links=False): ''' make the dictionary for the clustergram.js visualization ''' from . import calc_clust import numpy as np all_dist = calc_clust.group_cutoffs() for inst_rc in net.dat['nodes']: inst_keys = net.dat['node_info'][inst_rc] all_cats = [x for x in inst_keys if 'cat-' in x] for i in range(len(net.dat['nodes'][inst_rc])): inst_dict = {} inst_dict['name'] = net.dat['nodes'][inst_rc][i] inst_dict['ini'] = net.dat['node_info'][inst_rc]['ini'][i] inst_dict['clust'] = net.dat['node_info'][inst_rc]['clust'].index(i) inst_dict['rank'] = net.dat['node_info'][inst_rc]['rank'][i] if 'rankvar' in inst_keys: inst_dict['rankvar'] = net.dat['node_info'][inst_rc]['rankvar'][i] # fix for similarity matrix if len(all_cats) > 0: for inst_name_cat in all_cats: actual_cat_name = net.dat['node_info'][inst_rc][inst_name_cat][i] inst_dict[inst_name_cat] = actual_cat_name check_pval = 'pval_'+inst_name_cat.replace('-','_') if check_pval in net.dat['node_info'][inst_rc]: tmp_pval_name = inst_name_cat.replace('-','_') + '_pval' inst_dict[tmp_pval_name] = net.dat['node_info'][inst_rc][check_pval][actual_cat_name] tmp_index_name = inst_name_cat.replace('-', '_') + '_index' inst_dict[tmp_index_name] = net.dat['node_info'][inst_rc] \ [tmp_index_name][i] if len(net.dat['node_info'][inst_rc]['value']) > 0: inst_dict['value'] = net.dat['node_info'][inst_rc]['value'][i] if len(net.dat['node_info'][inst_rc]['info']) > 0: inst_dict['info'] = net.dat['node_info'][inst_rc]['info'][i] if dendro is True: inst_dict['group'] = [] for tmp_dist in all_dist: tmp_dist = str(tmp_dist).replace('.', '') tmp_append = float( net.dat['node_info'][inst_rc]['group'][tmp_dist][i]) inst_dict['group'].append(tmp_append) net.viz[inst_rc + '_nodes'].append(inst_dict) mat_types = ['mat', 'mat_orig', 'mat_info', 'mat_hl', 'mat_up', 'mat_dn'] # save data as links or mat ########################### if links is True: for i in range(len(net.dat['nodes']['row'])): for j in 
range(len(net.dat['nodes']['col'])): inst_dict = {} inst_dict['source'] = i inst_dict['target'] = j inst_dict['value'] = float(net.dat['mat'][i, j]) if 'mat_up' in net.dat: inst_dict['value_up'] = net.dat['mat_up'][i, j] inst_dict['value_dn'] = net.dat['mat_dn'][i, j] if 'mat_orig' in net.dat: inst_dict['value_orig'] = net.dat['mat_orig'][i, j] if np.isnan(inst_dict['value_orig']): inst_dict['value_orig'] = 'NaN' if 'mat_info' in net.dat: inst_dict['info'] = net.dat['mat_info'][str((i, j))] if 'mat_hl' in net.dat: inst_dict['highlight'] = net.dat['mat_hl'][i, j] net.viz['links'].append(inst_dict) else: for inst_mat in mat_types: if inst_mat in net.dat: net.viz[inst_mat] = net.dat[inst_mat].tolist()
java
// Azure SDK generated async wrapper: delegates to the ServiceResponse variant
// and unwraps the body.  Do not hand-edit; regenerated by AutoRest.
public Observable<LogAnalyticsOperationResultInner> beginExportThrottledRequestsAsync(String location, ThrottledRequestsInput parameters) { return beginExportThrottledRequestsWithServiceResponseAsync(location, parameters).map(new Func1<ServiceResponse<LogAnalyticsOperationResultInner>, LogAnalyticsOperationResultInner>() { @Override public LogAnalyticsOperationResultInner call(ServiceResponse<LogAnalyticsOperationResultInner> response) { return response.body(); } }); }
python
def text_to_title(value):
    """Derive a short title from ``value`` when an explicit title is missing.

    Words are accumulated until one ending in '.' or ':' is reached; such a
    word is included and, unless it is a single character or contains an
    italic tag, terminates the title.  Trailing '.', ':' and spaces are
    stripped unless the title ends with the abbreviation "spp.".
    Returns None for empty input.
    """
    if not value:
        return None
    collected = []
    for word in value.split(" "):
        if word.endswith(".") or word.endswith(":"):
            collected.append(word)
            # Stop at a real sentence/clause boundary, but keep going past
            # one-character abbreviations and italicised tokens.
            if len(word) > 1 and "<italic>" not in word and "<i>" not in word:
                break
        else:
            collected.append(word)
    if not collected:
        return None
    title = " ".join(collected)
    if title.split(" ")[-1] != "spp.":
        title = title.rstrip(" .:")
    return title
python
def holidays(self, year=None):
    """Computes holidays (non-working days) for a given year.

    Results are cached per-year on ``self._holidays``.  Returns a sorted
    list of 2-item tuples, each composed of the date and a label.
    """
    year = year or date.today().year
    cached = self._holidays.get(year)
    if cached is not None:
        return cached
    # Compute, sort and memoise this year's holiday calendar.
    self._holidays[year] = sorted(self.get_calendar_holidays(year))
    return self._holidays[year]
java
// Pulls the next queued file descriptor and wraps it in a TemporalFileDocument,
// attaching the timestamp when one was recorded.  Returns null both when the
// queue is exhausted and when the file cannot be read (IOException is
// swallowed) -- callers cannot distinguish the two cases.
public TemporalDocument next() { //String fileName = filesToProcess.poll(); NameAndTime n = filesToProcess.poll(); if (n == null) return null; try { return (n.hasTimeStamp()) ? new TemporalFileDocument(n.fileName, n.timeStamp) : new TemporalFileDocument(n.fileName); // no timestamp } catch (IOException ioe) { return null; } }
python
# Black Box Variational Inference driver: pre-optimises a Gaussian surrogate
# to get starting values, builds the approximating distribution list, runs the
# CBBVI optimiser (optionally animating the stored draws), then writes the
# fitted q-parameters back into the latent-variable container and returns a
# BBVISSResults summary.  The first self.z_no entries of q_params/q_ses are
# latent-variable means/log-scales; the remainder are state estimates.
# NOTE(review): t/Skew-t models get a smaller default learning rate --
# presumably for stability; confirm before changing.  Left byte-identical,
# comments only: the q/ses slicing and results wiring are order-sensitive.
def _bbvi_fit(self, optimizer='RMSProp', iterations=1000, print_progress=True, start_diffuse=False, **kwargs): """ Performs Black Box Variational Inference Parameters ---------- posterior : method Hands bbvi_fit a posterior object optimizer : string Stochastic optimizer: either RMSProp or ADAM. iterations: int How many iterations to run print_progress : bool Whether tp print the ELBO progress or not start_diffuse : bool Whether to start from diffuse values (if not: use approx Gaussian) Returns ---------- BBVIResults object """ if self.model_name2 in ["t", "Skewt"]: default_learning_rate = 0.0001 else: default_learning_rate = 0.001 animate = kwargs.get('animate', False) batch_size = kwargs.get('batch_size', 24) learning_rate = kwargs.get('learning_rate', default_learning_rate) record_elbo = kwargs.get('record_elbo', False) # Starting values gaussian_latents = self._preoptimize_model() # find parameters for Gaussian model phi = self.latent_variables.get_z_starting_values() q_list = self.initialize_approx_dist(phi, start_diffuse, gaussian_latents) # PERFORM BBVI bbvi_obj = ifr.CBBVI(self.neg_logposterior, self.log_p_blanket, q_list, batch_size, optimizer, iterations, learning_rate, record_elbo) if print_progress is False: bbvi_obj.printer = False if animate is True: q, q_params, q_ses, stored_z, stored_predictive_likelihood = bbvi_obj.run_and_store() self._animate_bbvi(stored_z,stored_predictive_likelihood) else: q, q_params, q_ses, elbo_records = bbvi_obj.run() self.latent_variables.set_z_values(q_params[:self.z_no],'BBVI',np.exp(q_ses[:self.z_no]),None) # STORE RESULTS for k in range(len(self.latent_variables.z_list)): self.latent_variables.z_list[k].q = q[k] theta = q_params[self.z_no:] Y = self.data scores = None states = q_params[self.z_no:] X_names = None states_ses = np.exp(q_ses[self.z_no:]) self.states = states self.states_ses = states_ses return res.BBVISSResults(data_name=self.data_name,X_names=X_names,model_name=self.model_name, model_type=self.model_type, 
latent_variables=self.latent_variables,data=Y,index=self.index, multivariate_model=self.multivariate_model,objective=self.neg_logposterior(q_params), method='BBVI',ses=q_ses[:self.z_no],signal=theta,scores=scores,elbo_records=elbo_records, z_hide=self._z_hide,max_lag=self.max_lag,states=states,states_var=np.power(states_ses,2))
python
def serialize(self) -> dict:
    """
    Serialize the message for sending to slack API

    Returns:
        serialized message
    """
    payload = dict(self)
    if "attachments" in payload:
        # Slack expects the attachments field as a JSON-encoded string.
        payload["attachments"] = json.dumps(payload["attachments"])
    return payload
python
def _get_key_values(self, name): """Return a dict containing key / values items for a given key, used for items like filters, page, etc. :param str name: name of the querystring parameter :return dict: a dict of key / values items """ results = {} for key, value in self.qs.items(): try: if not key.startswith(name): continue key_start = key.index('[') + 1 key_end = key.index(']') item_key = key[key_start:key_end] if ',' in value: item_value = value.split(',') else: item_value = value results.update({item_key: item_value}) except Exception: raise BadRequest("Parse error", source={'parameter': key}) return results
java
// Publishes the current data to the middleware and notifies local observers.
// Holds the manage write-lock across validation and publish so a re-init
// cannot slip between validateInitialization() and informer.publish() (see
// comment in code).  Publish failures are logged, not rethrown; observer
// notification happens after the lock is released.
// NOTE(review): statement order (validate -> build event -> publish ->
// notifyDataUpdate -> dataObserver) is load-bearing; left byte-identical.
@Override public void notifyChange() throws CouldNotPerformException, InterruptedException { logger.debug("Notify data change of " + this); // synchronized by manageable lock to prevent reinit between validateInitialization and publish M newData; manageLock.lockWrite(this); try { try { validateInitialization(); } catch (final NotInitializedException ex) { // only forward if instance was not destroyed before otherwise skip notification. if (destroyed) { return; } throw ex; } // update the current data builder before updating to allow implementations to change data beforehand newData = updateDataToPublish(cloneDataBuilder()); Event event = new Event(informer.getScope(), newData.getClass(), newData); event.getMetaData().setUserTime(RPCHelper.USER_TIME_KEY, System.nanoTime()); if (isActive()) { try { waitForMiddleware(NOTIFICATILONG_TIMEOUT, TimeUnit.MILLISECONDS); informer.publish(event); } catch (TimeoutException ex) { ExceptionPrinter.printHistory(new CouldNotPerformException("Skip data update notification because middleware is not ready since "+TimeUnit.MILLISECONDS.toSeconds(NOTIFICATILONG_TIMEOUT)+" seconds of " + this + "!", ex), logger, LogLevel.WARN); } catch (CouldNotPerformException ex) { ExceptionPrinter.printHistory(new CouldNotPerformException("Could not inform about data change of " + this + "!", ex), logger); } } } finally { manageLock.unlockWrite(this); } // Notify data update try { notifyDataUpdate(newData); } catch (CouldNotPerformException ex) { ExceptionPrinter.printHistory(new CouldNotPerformException("Could not notify data update!", ex), logger); } dataObserver.notifyObservers(newData); }
python
# ONNX->MXNet operator translation for InstanceNormalization: renames the
# ONNX 'epsilon' attribute to MXNet's 'eps' and applies the ONNX default of
# 1e-5 when the attribute is absent.  (The explicit attrs.get after
# _fix_attribute_names is what supplies that default.)
def instance_norm(attrs, inputs, proto_obj): """Instance Normalization.""" new_attrs = translation_utils._fix_attribute_names(attrs, {'epsilon' : 'eps'}) new_attrs['eps'] = attrs.get('epsilon', 1e-5) return 'InstanceNorm', new_attrs, inputs
java
// Parses the character sequence into an AtomicInteger via the project's
// intValue() helper.  The @Inline template must stay textually in sync with
// the method body -- it is substituted at the SARL/Xtend call site.
@Pure @Inline(value = "new $2($3.intValue($1))", imported = {AtomicInteger.class, PrimitiveCastExtensions.class}) public static AtomicInteger toAtomicInteger(CharSequence value) { return new AtomicInteger(intValue(value)); }
java
// AWS SDK generated fluent varargs setter: appends the given names to the
// (lazily created) table-name list and returns this for chaining.
// Do not hand-edit; regenerated from the service model.
public ListTablesResult withTableNames(String... tableNames) { if (getTableNames() == null) setTableNames(new java.util.ArrayList<String>(tableNames.length)); for (String value : tableNames) { getTableNames().add(value); } return this; }
python
def ishermitian(A, fast_check=True, tol=1e-6, verbose=False):
    r"""Return True if A is Hermitian to within tol.

    Parameters
    ----------
    A : {dense or sparse matrix}
        e.g. array, matrix, csr_matrix, ...
    fast_check : {bool}
        If True, use the heuristic < Ax, y> = < x, Ay>
        for random vectors x and y to check for conjugate symmetry.
        If False, compute A - A.H.
    tol : {float}
        Symmetry tolerance
    verbose: {bool}
        prints
        max( \|A - A.H\| ) if nonhermitian and fast_check=False
        abs( <Ax, y> - <x, Ay> ) if nonhermitian and fast_check=True

    Returns
    -------
    True                if hermitian
    False               if nonhermitian

    Notes
    -----
    This function applies a simple test of conjugate symmetry

    Examples
    --------
    >>> import numpy as np
    >>> from pyamg.util.linalg import ishermitian
    >>> ishermitian(np.array([[1,2],[1,1]]))
    False

    >>> from pyamg.gallery import poisson
    >>> ishermitian(poisson((10,10)))
    True
    """
    # Convert to matrix type so '*' below means matrix multiplication.
    if not sparse.isspmatrix(A):
        A = np.asmatrix(A)

    if fast_check:
        # Heuristic: for random x, y, <Ax, y> == <x, Ay> holds (up to
        # round-off) iff A is Hermitian.  The difference is normalised so
        # 'tol' acts as a relative tolerance.
        # np.random.rand replaces the removed top-level scipy.rand alias.
        x = np.random.rand(A.shape[0], 1)
        y = np.random.rand(A.shape[0], 1)
        if A.dtype == complex:
            x = x + 1.0j * np.random.rand(A.shape[0], 1)
            y = y + 1.0j * np.random.rand(A.shape[0], 1)
        xAy = np.dot((A * x).conjugate().T, y)
        xAty = np.dot(x.conjugate().T, A * y)
        diff = float(np.abs(xAy - xAty) / np.sqrt(np.abs(xAy * xAty)))
    else:
        # Exact check: largest magnitude entry of A - A.H.
        if sparse.isspmatrix(A):
            diff = np.ravel((A - A.H).data)
        else:
            diff = np.ravel(A - A.H)
        if np.max(diff.shape) == 0:
            diff = 0
        else:
            diff = np.max(np.abs(diff))

    # (A previously unreachable 'return diff' after this if/else was removed.)
    if diff < tol:
        return True
    if verbose:
        print(diff)
    return False
java
/**
 * Builds the class path string for this loader: the URLs of all entries in
 * {@code entryList}, joined with the platform path separator.
 *
 * @return the joined class path; an empty string when there are no entries
 */
public String getClassPath() {
    // StringBuilder instead of StringBuffer: the buffer is method-local, so
    // StringBuffer's synchronization buys nothing.
    StringBuilder buf = new StringBuilder();
    for (Entry entry : entryList) {
        // Length check (rather than a first-iteration flag) matches the
        // original behaviour even if an entry renders as an empty string.
        if (buf.length() > 0) {
            buf.append(File.pathSeparator);
        }
        buf.append(entry.getURL());
    }
    return buf.toString();
}
java
// Typed property lookup: fetches the raw property and casts it to the
// requested class.  Throws ClassCastException if the stored value is of a
// different type; returns null when the key is absent (cast of null is null).
@Override public <T> T getProperty(Object key, Class<T> clazz) { return clazz.cast(getProperty(key)); }
python
# Loads every *.inkml recording in `folder` (natural sort order) via the
# sibling read() helper.  LaTeX ground-truth labels are stripped; recordings
# whose label is missing or not wrapped in '$...$' are logged and displayed
# (hw.show()) for manual inspection, but still included in the result.
def read_folder(folder): """ Parameters ---------- folder : string Path to a folde with *.inkml files. Returns ------- list : Objects of the type HandwrittenData """ import glob recordings = [] for filename in natsorted(glob.glob("%s/*.inkml" % folder)): hw = read(filename) if hw.formula_in_latex is not None: hw.formula_in_latex = hw.formula_in_latex.strip() if hw.formula_in_latex is None or \ not hw.formula_in_latex.startswith('$') or \ not hw.formula_in_latex.endswith('$'): if hw.formula_in_latex is not None: logging.info("Starts with: %s", str(hw.formula_in_latex.startswith('$'))) logging.info("ends with: %s", str(hw.formula_in_latex.endswith('$'))) logging.info(hw.formula_in_latex) logging.info(hw.segmentation) hw.show() recordings.append(hw) return recordings
python
# Python 2 helper (uses the py2-only `unicode` type): unicode input is
# UTF-8-encoded to a byte string; byte-string input is asserted and returned
# unchanged.  Not portable to Python 3 as written.
def to_utf8(value): """Returns a string encoded using UTF-8. This function comes from `Tornado`_. :param value: A unicode or string to be encoded. :returns: The encoded string. """ if isinstance(value, unicode): return value.encode('utf-8') assert isinstance(value, str) return value
python
def get_block(self, parent, config='running_config'):
    """ Scans the config and returns a block of code

    Args:
        parent (str): The parent string to search the config for
            and return the block
        config (str): A text config string to be searched. Default
            is to search the running-config of the Node.

    Returns:
        A string object that represents the block from the config.  If
        the parent string is not found, then this method will return
        None.
    """
    try:
        # Anchor the parent line exactly (whole-line match).
        regex = r'^%s$' % parent
        return self.node.section(regex, config=config)
    except TypeError:
        # Bad parent value or no matching section: report "not found".
        return None
python
def ok(self):
    """Return True if the current value is usable, else False.

    The value must parse as an int, lie within [self.imin, self.imax] and
    be an exact multiple of the chunk size reported by self.mfac.value().
    """
    try:
        v = int(self._value)
        chunk = self.mfac.value()
        # Kept inside the try: a zero chunk (ZeroDivisionError) also means
        # "not OK", matching the previous behaviour.
        return self.imin <= v <= self.imax and v % chunk == 0
    except Exception:
        # Narrowed from a bare 'except', which would also have swallowed
        # KeyboardInterrupt/SystemExit.
        return False
python
def _is_zero(x): """ Returns True if x is numerically 0 or an array with 0's. """ if x is None: return True if isinstance(x, numbers.Number): return x == 0.0 if isinstance(x, np.ndarray): return np.all(x == 0) return False
java
// Decodes `count` hex characters of `string` starting at `offset` into bytes
// (two chars per byte, high nibble first, via the project's digit() helper).
// count must be even; offset must be inside the string.  If fewer than
// `count` characters remain, only the available pairs are decoded -- note the
// result array is still sized from the clamped charCount, so it is exactly
// the number of decoded bytes.  Bit-manipulation left byte-identical.
public static byte[] fromHex(final String string, final int offset, final int count) { if(offset >= string.length()) throw new IllegalArgumentException("Offset is greater than the length (" + offset + " >= " + string.length() + ").")/*by contract*/; if( (count & 0x01) != 0) throw new IllegalArgumentException("Count is not divisible by two (" + count + ").")/*by contract*/; final int charCount = Math.min((string.length() - offset), count); final int upperBound = offset + charCount; final byte[] bytes = new byte[charCount >>> 1/*aka /2*/]; int byteIndex = 0/*beginning*/; for(int i=offset; i<upperBound; i+=2) { bytes[byteIndex++] = (byte)(( (digit(string.charAt(i)) << 4) | digit(string.charAt(i + 1))) & 0xFF); } return bytes; }
python
# Validates element annotations against the MIRIAM identifier regex for the
# chosen database: an element is "faulty" when it carries the `db` key but at
# least one of its values fails the pattern; elements lacking the key are
# ignored.  The pattern comes from the module-level *_ANNOTATIONS tables keyed
# by component kind.
def generate_component_annotation_miriam_match(elements, component, db): """ Tabulate which MIRIAM databases the element's annotation match. If the relevant MIRIAM identifier is not in an element's annotation it is ignored. Parameters ---------- elements : list Elements of a model, either metabolites or reactions. component : {"metabolites", "reactions"} A string denoting a type of ``cobra.Model`` component. db : str One of the MIRIAM database identifiers. Returns ------- list The components whose annotation does not match the pattern for the MIRIAM database. """ def is_faulty(annotation, key, pattern): # Ignore missing annotation for this database. if key not in annotation: return False test = annotation[key] if isinstance(test, native_str): return pattern.match(test) is None else: return any(pattern.match(elem) is None for elem in test) pattern = { "metabolites": METABOLITE_ANNOTATIONS, "reactions": REACTION_ANNOTATIONS, "genes": GENE_PRODUCT_ANNOTATIONS }[component][db] return [elem for elem in elements if is_faulty(elem.annotation, db, pattern)]
java
// Convenience overload: adds a documentation section with the fixed title
// "Data" for the given software system, delegating to the general addSection.
@Nonnull public Section addDataSection(@Nullable SoftwareSystem softwareSystem, @Nonnull Format format, @Nonnull String content) { return addSection(softwareSystem, "Data", format, content); }
python
def _cleandoc(doc): """Remove uniform indents from ``doc`` lines that are not empty :returns: Cleaned ``doc`` """ indent_length = lambda s: len(s) - len(s.lstrip(" ")) not_empty = lambda s: s != "" lines = doc.split("\n") indent = min(map(indent_length, filter(not_empty, lines))) return "\n".join(s[indent:] for s in lines)
python
def remap_index_fn(ref_file):
    """minimap2 can build indexes on the fly but will also store commons ones.
    """
    ref_dir = os.path.dirname(ref_file)
    index_dir = os.path.join(ref_dir, os.pardir, "minimap2")
    # Prefer a pre-built shared index directory next to the reference;
    # otherwise fall back to the reference's own directory.
    if os.path.isdir(index_dir):
        return index_dir
    return ref_dir
python
# Thin wrapper: delegates to compress_haplotype_array, passing cls=type(self)
# so the result is re-wrapped as a HaplotypeArray rather than a bare ndarray.
def compress(self, condition, axis=0, out=None): """Return selected slices of an array along given axis. Parameters ---------- condition : array_like, bool Array that selects which entries to return. N.B., if len(condition) is less than the size of the given axis, then output is truncated to the length of the condition array. axis : int, optional Axis along which to take slices. If None, work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- out : HaplotypeArray A copy of the array without the slices along axis for which `condition` is false. Examples -------- >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 1], ... [0, 1, 1, 1], ... [0, 2, -1, -1]], dtype='i1') >>> h.compress([True, False, True], axis=0) <HaplotypeArray shape=(2, 4) dtype=int8> 0 0 0 1 0 2 . . >>> h.compress([True, False, True, False], axis=1) <HaplotypeArray shape=(3, 2) dtype=int8> 0 0 0 1 0 . """ return compress_haplotype_array(self, condition, axis=axis, cls=type(self), compress=np.compress, out=out)
python
# Factory: turns an lmfit.Parameters object (with params['components'] giving
# the number of 2-D Gaussian components, each under prefix "c{i}_") into a
# callable f(x, y) that sums the elliptical_gaussian evaluations of all
# components.  Amplitudes are passed through np.nan_to_num so masked/NaN
# components contribute zero amplitude rather than poisoning the sum.
def ntwodgaussian_lmfit(params): """ Convert an lmfit.Parameters object into a function which calculates the model. Parameters ---------- params : lmfit.Parameters Model parameters, can have multiple components. Returns ------- model : func A function f(x,y) that will compute the model. """ def rfunc(x, y): """ Compute the model given by params, at pixel coordinates x,y Parameters ---------- x, y : numpy.ndarray The x/y pixel coordinates at which the model is being evaluated Returns ------- result : numpy.ndarray Model """ result = None for i in range(params['components'].value): prefix = "c{0}_".format(i) # I hope this doesn't kill our run time amp = np.nan_to_num(params[prefix + 'amp'].value) xo = params[prefix + 'xo'].value yo = params[prefix + 'yo'].value sx = params[prefix + 'sx'].value sy = params[prefix + 'sy'].value theta = params[prefix + 'theta'].value if result is not None: result += elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) else: result = elliptical_gaussian(x, y, amp, xo, yo, sx, sy, theta) return result return rfunc
python
def cost_loss(y_true, y_pred, cost_mat):
    """Cost classification loss.

    This function calculates the cost of using y_pred on y_true with
    cost-matrix cost-mat. It differs from traditional classification
    evaluation measures since measures such as accuracy assign the same cost
    to different errors, but that is not the real case in several real-world
    classification problems as they are example-dependent cost-sensitive in
    nature, where the costs due to misclassification vary between examples.

    Parameters
    ----------
    y_true : array-like or label indicator matrix
        Ground truth (correct) labels.

    y_pred : array-like or label indicator matrix
        Predicted labels, as returned by a classifier.

    cost_mat : array-like of shape = [n_samples, 4]
        Cost matrix of the classification problem
        Where the columns represents the costs of: false positives, false
        negatives, true positives and true negatives, for each example.

    Returns
    -------
    loss : float
        Cost of a using y_pred on y_true with cost-matrix cost-mat

    References
    ----------
    .. [1] C. Elkan, "The foundations of Cost-Sensitive Learning",
           in Seventeenth International Joint Conference on Artificial
           Intelligence, 973-978, 2001.

    .. [2] A. Correa Bahnsen, A. Stojanovic, D. Aouada, B. Ottersten,
           "Improving Credit Card Fraud Detection with Calibrated
           Probabilities", in Proceedings of the fourteenth SIAM International
           Conference on Data Mining, 677-685, 2014.

    See also
    --------
    savings_score

    Examples
    --------
    >>> import numpy as np
    >>> from costcla.metrics import cost_loss
    >>> y_pred = [0, 1, 0, 0]
    >>> y_true = [0, 1, 1, 0]
    >>> cost_mat = np.array([[4, 1, 0, 0], [1, 3, 0, 0], [2, 3, 0, 0], [2, 1, 0, 0]])
    >>> cost_loss(y_true, y_pred, cost_mat)
    3
    """
    # TODO: Check consistency of cost_mat
    # Normalise labels to 1-D 0/1 float arrays.  np.float was removed in
    # NumPy 1.24; the builtin float dtype is the drop-in replacement.
    y_true = column_or_1d(y_true)
    y_true = (y_true == 1).astype(float)
    y_pred = column_or_1d(y_pred)
    y_pred = (y_pred == 1).astype(float)
    # Positives: false negatives cost col 1, true positives cost col 2.
    cost = y_true * ((1 - y_pred) * cost_mat[:, 1] + y_pred * cost_mat[:, 2])
    # Negatives: false positives cost col 0, true negatives cost col 3.
    cost += (1 - y_true) * (y_pred * cost_mat[:, 0] + (1 - y_pred) * cost_mat[:, 3])
    return np.sum(cost)
java
public boolean overrides(Link other) { if (other.getStatus() == LinkStatus.ASSERTED && status != LinkStatus.ASSERTED) return false; else if (status == LinkStatus.ASSERTED && other.getStatus() != LinkStatus.ASSERTED) return true; // the two links are from equivalent sources of information, so we // believe the most recent return timestamp > other.getTimestamp(); }
java
// Application-scoped singleton accessor: looks the cache up in the LCCore
// application registry, creating and registering it on first use.  The class
// lock serialises the check-then-set so concurrent callers cannot register
// two instances.
public static ByteArrayCache getInstance() { Application app = LCCore.getApplication(); synchronized (ByteArrayCache.class) { ByteArrayCache instance = app.getInstance(ByteArrayCache.class); if (instance != null) return instance; app.setInstance(ByteArrayCache.class, instance = new ByteArrayCache()); return instance; } }
python
def ensure_chosen_alternatives_are_in_user_alt_ids(choice_col,
                                                   wide_data,
                                                   availability_vars):
    """
    Ensures that all chosen alternatives in `wide_df` are present in the
    `availability_vars` dict. Raises a helpful ValueError if not.

    Parameters
    ----------
    choice_col : str.
        Denotes the column in `wide_data` that contains a one if the
        alternative pertaining to the given row was the observed outcome
        for the observation pertaining to the given row and a zero otherwise.
    wide_data : pandas dataframe.
        Contains one row for each observation. Should contain the specified
        `choice_col` column.
    availability_vars : dict.
        There should be one key value pair for each alternative that is
        observed in the dataset. Each key should be the alternative id for
        the alternative, and the value should be the column heading in
        `wide_data` that denotes (using ones and zeros) whether an
        alternative is available/unavailable, respectively, for a given
        observation. Alternative id's, i.e. the keys, must be integers.

    Returns
    -------
    None.
    """
    all_chosen_known = wide_data[choice_col].isin(availability_vars.keys()).all()
    if not all_chosen_known:
        raise ValueError("One or more values in wide_data[choice_col] is not in the user "
                         "provided alternative ids in availability_vars.keys()")
    return None
java
// Varargs convenience: returns the smallest of the given comparable values by
// delegating to the project's ArrayUtil.min.  @SuppressWarnings covers the
// unavoidable generic-varargs array creation warning.
@SuppressWarnings("unchecked") public static <T extends Comparable<? super T>> T min(T... numberArray) { return ArrayUtil.min(numberArray); }
python
def confd_state_internal_cdb_client_subscription_priority(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
    # Build the nested container path down to the priority leaf.
    parent = confd_state
    for tag in ("internal", "cdb", "client", "subscription"):
        parent = ET.SubElement(parent, tag)
    priority = ET.SubElement(parent, "priority")
    priority.text = kwargs.pop('priority')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
    """
    Kind of like urlparse.parse_qs, except returns an ordered dict, and
    avoids replicating that function's bad habit of overriding the built-in
    'dict' type.

    Taken from below with modification:
    <https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
    """
    # Multi-valued keys accumulate into lists; ordering of first appearance
    # is preserved when keep_attr_order is set.
    if keep_attr_order:
        od = DefaultOrderedDict(list)
    else:
        od = defaultdict(list)
    for pair in parse_qsl(qs, keep_blank_values, strict_parsing):
        od[pair[0]].append(pair[1])
    return od
python
def prepare(self, setup_func):
    """This decorator wrap a function which setup a environment before running
    a command

    @manager.prepare(setup_func)
    def some_command():
        pass
    """
    assert inspect.isfunction(setup_func)
    # getfullargspec replaces inspect.getargspec, which was removed in
    # Python 3.11; .args has the same meaning for plain functions.
    argsspec = inspect.getfullargspec(setup_func)
    if argsspec.args:
        raise ValueError("prepare function shouldn't have any arguments")

    def decorator(command_func):
        @functools.wraps(command_func)
        def wrapper(*args, **kwgs):
            # Run setup_func before command_func
            setup_func()
            return command_func(*args, **kwgs)
        return wrapper
    return decorator
python
# Pendulum ODE right-hand side for an integrator: given state
# y = [theta, dtheta/dt], returns [dtheta/dt, d2theta/dt2] with
# d2theta/dt2 = -(g/l) * sin(theta).  `t` and `data` are accepted for the
# integrator's callback signature but unused.
# NOTE(review): gv.sin suggests gvar (error-propagating) sines -- presumably
# so uncertainties flow through the integration; confirm against the module
# imports before swapping in np.sin.
def deriv(self, t, y, data=None): " Calculate [dtheta/dt, d2theta/dt2] from [theta, dtheta/dt]." theta, dtheta_dt = y return np.array([dtheta_dt, - self.g_l * gv.sin(theta)])
python
def _extract_hidden_data(dom): """ Extracts hidden input data from DOM and returns the data as dictionary. """ input_tags = dom.find_all('input', attrs={'type': 'hidden'}) data = {} for input_tag in input_tags: data[input_tag['name']] = input_tag['value'] return data
python
# De-JSONification: rebuilds an object from its serialised dict.  Nested dict
# config values are dispatched through classes.get_dict_handler keyed on their
# "type" field; the u"type"/"type" double lookup tolerates both unicode and
# byte-string keys (Python 2 era JSON).  Finally instantiates the class named
# in d["class"] with the reconstructed config.  Left byte-identical; the
# key-coercion via str(k) is load-bearing on Python 2.
def from_dict(cls, d): """ Restores an object state from a dictionary, used in de-JSONification. :param d: the object dictionary :type d: dict :return: the object :rtype: object """ conf = {} for k in d["config"]: v = d["config"][k] if isinstance(v, dict): if u"type" in v: typestr = v[u"type"] else: typestr = v["type"] conf[str(k)] = classes.get_dict_handler(typestr)(v) else: conf[str(k)] = v return classes.get_class(d["class"])(name=d["name"], config=conf)
python
def to_query_parameters_dict(parameters):
    """Converts a dictionary of parameter values into query parameters.

    :type parameters: Mapping[str, Any]
    :param parameters: Dictionary of query parameter values.

    :rtype: List[google.cloud.bigquery.query._AbstractQueryParameter]
    :returns: A list of named query parameters.
    """
    # dict.items() behaves correctly on both Python 2 and 3 here; the
    # six.iteritems indirection added a third-party dependency for nothing.
    return [
        scalar_to_query_parameter(value, name=name)
        for name, value in parameters.items()
    ]
java
// Factory for a block-mean adaptive threshold filter.  Honours the BoofCV
// BOverride hook first (lets applications swap implementations globally),
// then picks the U8 or F32 block processor by image type and wraps it in the
// concurrent or serial ThresholdBlock runner per BoofConcurrency settings.
// NOTE(review): any inputType other than GrayU8 falls through to the F32
// processor -- presumably intentional for all float images; confirm.
public static <T extends ImageGray<T>> InputToBinary<T> blockMean(ConfigLength regionWidth, double scale , boolean down, boolean thresholdFromLocalBlocks, Class<T> inputType) { if( BOverrideFactoryThresholdBinary.blockMean != null ) return BOverrideFactoryThresholdBinary.blockMean.handle(regionWidth, scale, down, thresholdFromLocalBlocks, inputType); BlockProcessor processor; if( inputType == GrayU8.class ) processor = new ThresholdBlockMean_U8(scale,down); else processor = new ThresholdBlockMean_F32((float)scale,down); if( BoofConcurrency.USE_CONCURRENT ) { return new ThresholdBlock_MT(processor, regionWidth, thresholdFromLocalBlocks, inputType); } else { return new ThresholdBlock(processor, regionWidth, thresholdFromLocalBlocks, inputType); } }
python
def change_site(self, new_name, new_location=None, new_er_data=None,
                new_pmag_data=None, replace_data=False):
    """
    Update a site's name, location, er_data, and pmag_data.
    By default, new data will be added in to pre-existing data, overwriting
    existing values.  If replace_data is True, the new data dictionary will
    simply take the place of the existing dict.
    """
    self.name = new_name
    # Only overwrite the location when a truthy one was supplied.
    if new_location:
        self.location = new_location
    self.update_data(new_er_data, new_pmag_data, replace_data)
java
// EMF-generated metamodel accessor: lazily resolves and caches the EClass for
// IfcSolarDevice from the registered Ifc4 package (classifier index 607).
// Do not hand-edit; regenerated from the Ecore model.
@Override public EClass getIfcSolarDevice() { if (ifcSolarDeviceEClass == null) { ifcSolarDeviceEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc4Package.eNS_URI) .getEClassifiers().get(607); } return ifcSolarDeviceEClass; }
java
// Runs each validation query through Hive, writing its single-number result
// to a temp HDFS directory (INSERT OVERWRITE DIRECTORY), then reads the one
// output file back and parses it as a row count.  Blank output counts as 0;
// multiple/zero output files are logged and skipped.  The temp directory is
// always cleaned up; SQLExceptions abort the remaining queries but return
// whatever counts were collected so far.
// NOTE(review): compression is disabled and auto-join conversion off so the
// output is a single plain-text file -- presumably required for the read-back;
// confirm before changing those session settings.  Left byte-identical.
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "SQL_NONCONSTANT_STRING_PASSED_TO_EXECUTE", justification = "Temporary fix") private List<Long> getValidationOutputFromHive(List<String> queries) throws IOException { if (null == queries || queries.size() == 0) { log.warn("No queries specified to be executed"); return Collections.emptyList(); } List<Long> rowCounts = Lists.newArrayList(); Closer closer = Closer.create(); try { HiveJdbcConnector hiveJdbcConnector = closer.register(HiveJdbcConnector.newConnectorWithProps(props)); for (String query : queries) { String hiveOutput = "hiveConversionValidationOutput_" + UUID.randomUUID().toString(); Path hiveTempDir = new Path("/tmp" + Path.SEPARATOR + hiveOutput); query = "INSERT OVERWRITE DIRECTORY '" + hiveTempDir + "' " + query; log.info("Executing query: " + query); try { if (this.hiveSettings.size() > 0) { hiveJdbcConnector.executeStatements(this.hiveSettings.toArray(new String[this.hiveSettings.size()])); } hiveJdbcConnector.executeStatements("SET hive.exec.compress.output=false","SET hive.auto.convert.join=false", query); FileStatus[] fileStatusList = this.fs.listStatus(hiveTempDir); List<FileStatus> files = new ArrayList<>(); for (FileStatus fileStatus : fileStatusList) { if (fileStatus.isFile()) { files.add(fileStatus); } } if (files.size() > 1) { log.warn("Found more than one output file. Should have been one."); } else if (files.size() == 0) { log.warn("Found no output file. 
Should have been one."); } else { String theString = IOUtils.toString(new InputStreamReader(this.fs.open(files.get(0).getPath()), Charsets.UTF_8)); log.info("Found row count: " + theString.trim()); if (StringUtils.isBlank(theString.trim())) { rowCounts.add(0l); } else { try { rowCounts.add(Long.parseLong(theString.trim())); } catch (NumberFormatException e) { throw new RuntimeException("Could not parse Hive output: " + theString.trim(), e); } } } } finally { if (this.fs.exists(hiveTempDir)) { log.debug("Deleting temp dir: " + hiveTempDir); this.fs.delete(hiveTempDir, true); } } } } catch (SQLException e) { log.warn("Execution failed for query set " + queries.toString(), e); } finally { try { closer.close(); } catch (Exception e) { log.warn("Could not close HiveJdbcConnector", e); } } return rowCounts; }
java
/**
 * Creates a fixed-size thread pool ({@code numWorkerThreads} core == max threads)
 * backed by a bounded work queue of at most {@code maxQueue} tasks.
 *
 * @param numWorkerThreads number of worker threads (both core and maximum pool size)
 * @param waitingMillis    if positive, submitters block up to this many millis for
 *                         queue space; otherwise a plain size-limited queue is used
 * @param maxQueue         maximum number of queued tasks
 * @return the configured executor service
 */
public static ExecutorService newMaxQueueThreadPool(int numWorkerThreads, long waitingMillis, int maxQueue) {
    // Choose the queue flavor first, then build the pool around it.
    final BlockingQueue<Runnable> workQueue = waitingMillis > 0
            ? getWaitingLimitedBlockingQueue(waitingMillis, maxQueue)
            : getLimitedBlockingQueue(maxQueue);
    return new ThreadPoolExecutor(numWorkerThreads, numWorkerThreads, 0L, TimeUnit.MILLISECONDS, workQueue);
}
python
def get_deprecation_reason(
    node: Union[EnumValueDefinitionNode, FieldDefinitionNode]
) -> Optional[str]:
    """Given a field or enum value node, get deprecation reason as string."""
    from ..execution import get_directive_values

    # Absent @deprecated directive -> no reason at all.
    args = get_directive_values(GraphQLDeprecatedDirective, node)
    if not args:
        return None
    return args["reason"]
java
/**
 * Word-wraps {@code text} so that no line exceeds {@code maxLen} characters,
 * breaking only at single-space word boundaries. Words longer than
 * {@code maxLen} are kept intact on their own line (never split).
 *
 * <p>Fixes a bug in the previous version where a first word of length
 * {@code >= maxLen} produced a spurious leading newline, because the wrap
 * branch did not check that the current line was non-empty.
 *
 * @param text   the text to wrap; words are separated by single spaces
 * @param maxLen maximum desired line length in characters
 * @return the wrapped text with '\n' line separators
 */
public static String wrapLinesByWords(String text, int maxLen) {
    StringBuilder buffer = new StringBuilder();
    int lineLength = 0;
    for (String token : text.split(" ")) {
        if (lineLength > 0 && lineLength + token.length() + 1 > maxLen) {
            // Current (non-empty) line cannot fit " " + token: start a new line.
            buffer.append("\n");
            lineLength = 0;
        } else if (lineLength > 0) {
            // Token fits on the current line: separate it with a space.
            buffer.append(" ");
            lineLength++;
        }
        buffer.append(token);
        lineLength += token.length();
    }
    return buffer.toString();
}
python
def _commonprefix(m): "Given a list of pathnames, returns the longest common leading component" if not m: return '' prefix = m[0] for item in m: for i in range(len(prefix)): if prefix[:i+1] != item[:i+1]: prefix = prefix[:i] if i == 0: return '' break return prefix
python
def setval(self, varname, value):
    """
    Set the value of the variable with the given name.

    An existing entry has its ``'value'`` slot updated in place; an unknown
    name gets a fresh ``Variable`` of the container's default type.
    """
    if varname not in self:
        self[varname] = Variable(self.default_type, value=value)
    else:
        self[varname]['value'] = value
java
/**
 * Returns a new Signature identical to this one except that the parameter at
 * {@code index} is retyped to the given {@code type}; argument names are
 * carried over unchanged.
 *
 * @param index zero-based position of the argument to retype
 * @param type  the new type for that argument
 * @return a new Signature with the updated parameter type
 */
public Signature argType(int index, Class<?> type) {
    return new Signature(type().changeParameterType(index, type), argNames());
}
python
def _scaled_bqm(bqm, scalar, bias_range, quadratic_range,
                ignored_variables, ignored_interactions, ignore_offset):
    """Helper function of sample for scaling.

    Returns a scaled copy of ``bqm`` (the input is not modified). When
    ``scalar`` is None, a normalization coefficient is computed from the
    bias ranges; the scalar actually used is recorded in the copy's
    ``info['scalar']``.
    """
    scaled = bqm.copy()

    # Derive the scaling coefficient when the caller did not supply one.
    if scalar is None:
        scalar = _calc_norm_coeff(scaled.linear, scaled.quadratic,
                                  bias_range, quadratic_range,
                                  ignored_variables, ignored_interactions)

    scaled.scale(scalar,
                 ignored_variables=ignored_variables,
                 ignored_interactions=ignored_interactions,
                 ignore_offset=ignore_offset)
    # Record the applied scalar so callers can un-scale results later.
    scaled.info.update({'scalar': scalar})
    return scaled
java
/**
 * Publishes the given entry on Contentful.
 *
 * @param entry the entry to publish; must be non-null and carry a resource id,
 *              space id and environment id
 * @return the published entry as returned by the API
 */
public CMAEntry publish(CMAEntry entry) {
    assertNotNull(entry, "entry");

    // Resolve ids up front; the *OrThrow helpers fail fast on missing data.
    final String entryId = getResourceIdOrThrow(entry, "entry");
    final String spaceId = getSpaceIdOrThrow(entry, "entry");
    final String environmentId = entry.getEnvironmentId();

    return service
            .publish(entry.getSystem().getVersion(), spaceId, environmentId, entryId)
            .blockingFirst();
}
java
public ModuleDefinitionParameterComponent addParameter() { //3 ModuleDefinitionParameterComponent t = new ModuleDefinitionParameterComponent(); if (this.parameter == null) this.parameter = new ArrayList<ModuleDefinitionParameterComponent>(); this.parameter.add(t); return t; }
python
def _conv_general_permutations(self, dimension_numbers):
    """Utility for convolution dimension permutations relative to Conv HLO.

    Takes the (lhs_spec, rhs_spec, out_spec) string triple and returns, for
    each spec, the permutation that maps it onto the canonical layout
    (batch/output-channel first, feature/input-channel second, then the
    spatial dimensions). Spatial dims of lhs and out are ordered to follow
    the order they appear in rhs_spec.
    """
    lhs_spec, rhs_spec, out_spec = dimension_numbers
    # Role characters per spec: N=batch, C=feature for lhs/out;
    # O=output channels, I=input channels for the kernel (rhs).
    lhs_char, rhs_char, out_char = ('N', 'C'), ('O', 'I'), ('N', 'C')
    charpairs = (lhs_char, rhs_char, out_char)
    # Validate each spec: both role characters exactly once, no duplicates.
    for i, (a, b) in enumerate(charpairs):
        if not (dimension_numbers[i].count(a) == 1 and
                dimension_numbers[i].count(b) == 1):
            # NOTE(review): "exatly" typo is pre-existing runtime message text,
            # intentionally left unchanged here.
            msg = ('convolution dimension_numbers[{}] must contain the characters '
                   '"{}" and "{}" exatly once, got {}.')
            raise TypeError(msg.format(i, a, b, dimension_numbers[i]))
        if len(dimension_numbers[i]) != len(set(dimension_numbers[i])):
            msg = ('convolution dimension_numbers[{}] cannot have duplicate '
                   'characters, got {}.')
            raise TypeError(msg.format(i, dimension_numbers[i]))
    # All three specs must agree on the set of spatial characters.
    if not (set(lhs_spec) - set(lhs_char) ==
            set(rhs_spec) - set(rhs_char) ==
            set(out_spec) - set(out_char)):
        msg = ('convolution dimension_numbers elements must each have the same '
               'set of spatial characters, got {}.')
        raise TypeError(msg.format(dimension_numbers))

    def getperm(spec, charpair):
        # Indices of the spatial (non-role) characters in this spec.
        spatial = (i for i, c in enumerate(spec) if c not in charpair)
        if spec is not rhs_spec:
            # Order lhs/out spatial dims by where each character sits in rhs_spec,
            # so all three permutations agree on spatial ordering.
            spatial = sorted(spatial, key=lambda i: rhs_spec.index(spec[i]))
        # Permutation: role chars first (in charpair order), then spatial dims.
        return (spec.index(charpair[0]), spec.index(charpair[1])) + tuple(spatial)

    lhs_perm, rhs_perm, out_perm = map(getperm, dimension_numbers, charpairs)
    return lhs_perm, rhs_perm, out_perm
python
def create_for_placeholder(self, placeholder, sort_order=1, language_code=None, **kwargs):
    """
    Create a Content Item with the given parameters

    If the language_code is not provided, the language code of the parent
    will be used. This may perform an additional database query, unless the
    :class:`~fluent_contents.models.managers.PlaceholderManager` methods were used
    to construct the object, such as
    :func:`~fluent_contents.models.managers.PlaceholderManager.create_for_object`
    or :func:`~fluent_contents.models.managers.PlaceholderManager.get_by_slot`
    """
    if language_code is None:
        # Inherit the parent's language (mirrors BaseContentItemFormSet behavior);
        # may hit the database unless the parent was prefetched via the
        # PlaceholderManager helpers.
        language_code = get_parent_language_code(placeholder.parent)

    item = self.create(
        placeholder=placeholder,
        parent_type_id=placeholder.parent_type_id,
        parent_id=placeholder.parent_id,
        sort_order=sort_order,
        language_code=language_code,
        **kwargs
    )

    # Prime the reverse relation caches on the new item so accessing
    # item.placeholder / item.parent does not trigger extra queries.
    item.placeholder = placeholder
    cached_parent = getattr(placeholder, '_parent_cache', None)  # set by GenericForeignKey
    if cached_parent is not None:
        item.parent = cached_parent
    return item
java
/**
 * Parses a (possibly chained) equality-level expression, e.g. {@code a == b != c}.
 * The exact operator set is whatever {@code peekEqualityOperator()} accepts.
 *
 * @param expressionIn context flag threaded through to the operand parser
 * @return the parsed subtree; just the relational operand if no equality
 *         operator follows
 */
private ParseTree parseEquality(Expression expressionIn) {
    SourcePosition start = getTreeStartLocation();
    ParseTree left = parseRelational(expressionIn);
    while (peekEqualityOperator()) {
        Token operator = nextToken();
        ParseTree right = parseRelational(expressionIn);
        // Left-associative: fold each operator into the accumulated left subtree.
        left = new BinaryOperatorTree(getTreeLocation(start), left, operator, right);
    }
    return left;
}
python
def uninstall_handle_input(self):
    """Remove the hook.

    No-op when no hook is installed; otherwise unhooks the Windows hook
    handle and clears it so repeated calls are safe.
    """
    hook = self.hooked
    if hook is not None:
        ctypes.windll.user32.UnhookWindowsHookEx(hook)
        self.hooked = None
python
def write(self):
    """If data exists for the entity, writes it to disk as a .JSON file with
    the url-encoded URI as the filename and returns True. Else, returns False.

    Fixes: removed the redundant explicit ``dump_file.close()`` inside the
    ``with`` block — the context manager already closes the file, and closing
    it early inside the block is misleading.
    """
    if not self.data:
        logger.warning('No data to write for ' + self.uri)
        return False

    # Quote the URI so it is a safe, unique filename under the client's local dir.
    data_path = self.client.local_dir / (urllib.parse.quote_plus(self.uri) + '.json')
    with data_path.open(mode='w') as dump_file:
        json.dump(self.data, dump_file)
    logger.info('Wrote ' + self.uri + ' to file')
    return True