Columns: language (stringclasses, 2 values); func_code_string (stringlengths, 63 to 466k)
java
public static <T extends Comparable<?>> DateTimeTemplate<T> dateTimeTemplate(
        Class<? extends T> cl, String template, List<?> args) {
    return dateTimeTemplate(cl, createTemplate(template), args);
}
python
def _normalize_options(options):
    """Renames keys in the options dictionary to their internally-used names."""
    normalized_options = {}
    for key, value in iteritems(options):
        optname = str(key).lower()
        intname = INTERNAL_URI_OPTION_NAME_MAP.get(optname, key)
        normalized_options[intname] = options[key]
    return normalized_options
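A minimal usage sketch for the helper above. The INTERNAL_URI_OPTION_NAME_MAP contents here are made up for illustration (the real table lives next to the function), and iteritems is taken from six:

from six import iteritems

# Hypothetical mapping, for illustration only.
INTERNAL_URI_OPTION_NAME_MAP = {'ssl': 'tls', 'wtimeout': 'wtimeoutms'}

print(_normalize_options({'SSL': True, 'appname': 'demo'}))
# {'tls': True, 'appname': 'demo'} - keys are lowercased for the lookup,
# and unknown keys pass through unchanged.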
python
def int_gps_time_to_str(t):
    """Takes an integer GPS time, either given as int or lal.LIGOTimeGPS, and
    converts it to a string. If a LIGOTimeGPS with nonzero decimal part is
    given, raises a ValueError."""
    if isinstance(t, int):
        return str(t)
    elif isinstance(t, float):
        # Wouldn't this just work generically?
        int_t = int(t)
        if abs(t - int_t) > 0.:
            raise ValueError('Need an integer GPS time, got %s' % str(t))
        return str(int_t)
    elif isinstance(t, lal.LIGOTimeGPS):
        if t.gpsNanoSeconds == 0:
            return str(t.gpsSeconds)
        else:
            raise ValueError('Need an integer GPS time, got %s' % str(t))
    else:
        err_msg = "Didn't understand input type {}".format(type(t))
        raise ValueError(err_msg)
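A few illustrative calls (the lal.LIGOTimeGPS branch is omitted, since it needs the lal package):

print(int_gps_time_to_str(1126259462))    # '1126259462'
print(int_gps_time_to_str(1126259462.0))  # '1126259462' (float with no fractional part)
try:
    int_gps_time_to_str(1126259462.5)
except ValueError as err:
    print(err)  # Need an integer GPS time, got 1126259462.5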
python
def __patch_write_method(tango_device_klass, attribute):
    """
    Checks if the method given by its name for the given DeviceImpl class has
    the correct signature. If a read/write method doesn't have a parameter
    (the traditional Attribute), then the method is wrapped into another
    method with the correct parameter definition to make it work.

    :param tango_device_klass: a DeviceImpl class
    :type tango_device_klass: class
    :param attribute: the attribute data information
    :type attribute: AttrData
    """
    write_method = getattr(attribute, "fset", None)
    if write_method:
        method_name = "__write_{0}__".format(attribute.attr_name)
        attribute.write_method_name = method_name
    else:
        method_name = attribute.write_method_name
        write_method = getattr(tango_device_klass, method_name)

    write_attr = _get_wrapped_write_method(attribute, write_method)
    setattr(tango_device_klass, method_name, write_attr)
python
def _proc_ctype_header(self, request, result):
    """
    Process the Content-Type header rules for the request. Only the
    desired API version can be determined from those rules.

    :param request: The Request object provided by WebOb.
    :param result: The Result object to store the results in.
    """
    if result:
        # Result has already been fully determined
        return

    try:
        ctype = request.headers['content-type']
    except KeyError:
        # No content-type header to examine
        return

    # Parse the content type
    ctype, params = parse_ctype(ctype)

    # Is it a recognized content type?
    if ctype not in self.types:
        return

    # Get the mapped ctype and version
    mapped_ctype, mapped_version = self.types[ctype](params)

    # Update the content type header and set the version
    if mapped_ctype:
        request.environ['aversion.request_type'] = mapped_ctype
        request.environ['aversion.orig_request_type'] = ctype
        request.environ['aversion.content_type'] = \
            request.headers['content-type']
        if self.overwrite_headers:
            request.headers['content-type'] = mapped_ctype
    if mapped_version:
        result.set_version(mapped_version)
java
final void removePathEntry(final String pathName, boolean check) throws OperationFailedException {
    synchronized (pathEntries) {
        PathEntry pathEntry = pathEntries.get(pathName);
        if (pathEntry.isReadOnly()) {
            throw ControllerLogger.ROOT_LOGGER.pathEntryIsReadOnly(pathName);
        }
        Set<String> dependents = dependenctRelativePaths.get(pathName);
        if (check && dependents != null) {
            throw ControllerLogger.ROOT_LOGGER.cannotRemovePathWithDependencies(pathName, dependents);
        }
        pathEntries.remove(pathName);
        triggerCallbacksForEvent(pathEntry, Event.REMOVED);
        if (pathEntry.getRelativeTo() != null) {
            dependents = dependenctRelativePaths.get(pathEntry.getRelativeTo());
            if (dependents != null) {
                dependents.remove(pathEntry.getName());
                if (dependents.size() == 0) {
                    dependenctRelativePaths.remove(pathEntry.getRelativeTo());
                }
            }
        }
    }
}
java
public void registerJsonBeanProcessor(Class target, JsonBeanProcessor jsonBeanProcessor) {
    if (target != null && jsonBeanProcessor != null) {
        beanProcessorMap.put(target, jsonBeanProcessor);
    }
}
java
private void mkdirs(File directory, String message) throws IOException {
    try {
        FileUtils.mkdirs(directory);
    } catch (FileUtils.CreateDirectoryException cde) {
        mCacheErrorLogger.logError(
                CacheErrorLogger.CacheErrorCategory.WRITE_CREATE_DIR,
                TAG,
                message,
                cde);
        throw cde;
    }
}
java
public static dnstxtrec get(nitro_service service, String domain) throws Exception {
    dnstxtrec obj = new dnstxtrec();
    obj.set_domain(domain);
    dnstxtrec response = (dnstxtrec) obj.get_resource(service);
    return response;
}
java
protected void retain(BaseEvent event) {
    Info info = events.get(event);
    if (info != null) {
        info.refcount.incrementAndGet();
    } else {
        log.warn("Retain called on already released event.");
    }
}
python
def clean():
    """Remove build, dist, egg-info garbage."""
    d = ['build', 'dist', 'scikits.audiolab.egg-info', HTML_DESTDIR,
         PDF_DESTDIR]
    for i in d:
        paver.path.path(i).rmtree()

    (paver.path.path('docs') / options.sphinx.builddir).rmtree()
java
private Collection<AnnotationData> getAnnotationSet(final Collection<TagAndLength> pTags) {
    Collection<AnnotationData> ret = null;
    if (pTags != null) {
        Map<ITag, AnnotationData> data = AnnotationUtils.getInstance().getMap().get(getClass().getName());
        ret = new ArrayList<AnnotationData>(data.size());
        for (TagAndLength tal : pTags) {
            AnnotationData ann = data.get(tal.getTag());
            if (ann != null) {
                ann.setSize(tal.getLength() * BitUtils.BYTE_SIZE);
            } else {
                ann = new AnnotationData();
                ann.setSkip(true);
                ann.setSize(tal.getLength() * BitUtils.BYTE_SIZE);
            }
            ret.add(ann);
        }
    } else {
        ret = AnnotationUtils.getInstance().getMapSet().get(getClass().getName());
    }
    return ret;
}
java
@Override
protected void initializeDefaults() {
    String err = "A BEL Framework system configuration must be present";
    if (configurationFile != null) {
        err += " (using " + configurationFile + ")";
    }
    throw new BELRuntimeException(err, MISSING_SYSTEM_CONFIGURATION);
}
java
private boolean isDataDirectExp(SQLException e) {
    /*
     * example of exception message
     * msft = "[Microsoft][SQLServer JDBC Driver][SQLServer]Invalid column name 'something'.";
     */
    String message = e.getMessage();
    int ind = message.indexOf('[', 2); // find the position of the second [
    // look for "][" starting after the length of [SQLServer JDBC Driver]; 10 is a good offset.
    ind = message.indexOf("][", ind + 10);
    // if none is found ===> it is a DataDirect one
    if (ind != -1) {
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
            Tr.debug(this, tc, "The exception is NOT a DataDirect exception ");
        return false;
    }
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
        Tr.debug(this, tc, "the exception is a DataDirect exception ");
    return true;
}
python
def decoder(decoder_input,
            encoder_output,
            decoder_self_attention_bias,
            encoder_decoder_attention_bias,
            hparams,
            name="decoder",
            save_weights_to=None,
            make_image_summary=True):
  """A stack of transformer layers.

  Args:
    decoder_input: a Tensor
    encoder_output: a Tensor
    decoder_self_attention_bias: bias Tensor for self-attention
      (see common_attention.attention_bias())
    encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    name: a string
    save_weights_to: an optional dictionary to capture attention weights for
      visualization; the weights tensor will be appended there under a string
      key created from the variable scope (including name).
    make_image_summary: Whether to make an attention image summary.

  Returns:
    y: a Tensor
  """
  x = decoder_input
  with tf.variable_scope(name):
    for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers):
      layer_name = "layer_%d" % layer
      with tf.variable_scope(layer_name):
        with tf.variable_scope("self_attention"):
          y = common_attention.multihead_attention(
              common_layers.layer_preprocess(x, hparams),
              None,
              decoder_self_attention_bias,
              hparams.attention_key_channels or hparams.hidden_size,
              hparams.attention_value_channels or hparams.hidden_size,
              hparams.hidden_size,
              hparams.num_heads,
              hparams.attention_dropout,
              attention_type=hparams.self_attention_type,
              save_weights_to=save_weights_to,
              make_image_summary=make_image_summary)
          utils.collect_named_outputs("norms",
                                      "decoder_self_attention_%d" % layer,
                                      tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs("norms",
                                      "decoder_self_attention_post_%d" % layer,
                                      tf.norm(x, axis=-1))
        if encoder_output is not None:
          with tf.variable_scope("encdec_attention"):
            y = common_attention.multihead_attention(
                common_layers.layer_preprocess(x, hparams),
                encoder_output,
                encoder_decoder_attention_bias,
                hparams.attention_key_channels or hparams.hidden_size,
                hparams.attention_value_channels or hparams.hidden_size,
                hparams.hidden_size,
                hparams.num_heads,
                hparams.attention_dropout,
                save_weights_to=save_weights_to,
                make_image_summary=make_image_summary)
            utils.collect_named_outputs("norms",
                                        "decoder_encoder_attention_%d" % layer,
                                        tf.norm(y, axis=-1))
            x = common_layers.layer_postprocess(x, y, hparams)
            utils.collect_named_outputs("norms",
                                        "decoder_encoder_attention_post_%d" % layer,
                                        tf.norm(x, axis=-1))
        with tf.variable_scope("ffn"):
          y = common_layers.dense_relu_dense(
              common_layers.layer_preprocess(x, hparams),
              hparams.filter_size,
              hparams.hidden_size,
              dropout=hparams.relu_dropout)
          utils.collect_named_outputs("norms", "decoder_ffn_%d" % layer,
                                      tf.norm(y, axis=-1))
          x = common_layers.layer_postprocess(x, y, hparams)
          utils.collect_named_outputs("norms", "decoder_ffn_post_%d" % layer,
                                      tf.norm(x, axis=-1))
    # if normalization is done in layer_preprocess, then it should also be done
    # on the output, since the output can grow very large, being the sum of
    # a whole stack of unnormalized layer outputs.
    return common_layers.layer_preprocess(x, hparams)
java
public static INDArray min(INDArray x, INDArray y, INDArray z, int... dimensions) {
    if (dimensions == null || dimensions.length == 0) {
        validateShapesNoDimCase(x, y, z);
        return Nd4j.getExecutioner().exec(new OldMin(x, y, z));
    }
    return Nd4j.getExecutioner().exec(new BroadcastMin(x, y, z, dimensions));
}
java
@Override
public void channelConnected(ChannelHandlerContext ctx, ChannelStateEvent event) {
    // register the newly established channel
    Channel channel = event.getChannel();
    LOG.info("connection established to :{}, local port:{}",
            client.getRemoteAddr(), channel.getLocalAddress());
    client.connectChannel(ctx.getChannel());
    client.handleResponse(ctx.getChannel(), null);
}
java
static boolean isValidMonth(@Nullable String monthString) {
    if (monthString == null) {
        return false;
    }
    try {
        int monthInt = Integer.parseInt(monthString);
        return monthInt > 0 && monthInt <= 12;
    } catch (NumberFormatException numEx) {
        return false;
    }
}
python
def get_rmse(self, data_x=None, data_y=None):
    """
    Get Root Mean Square Error using self.bestfit_func

    args:
        data_x: 1D array, default=self.args["x"]
            x values at which to evaluate the best-fit function
        data_y: 1D array, default=self.args["y"]
            observed y values to compare against the fit
    """
    if data_x is None:
        data_x = np.array(self.args["x"])
    if data_y is None:
        data_y = np.array(self.args["y"])
    if len(data_x) != len(data_y):
        raise ValueError("Lengths of data_x and data_y are different")
    rmse_y = self.bestfit_func(data_x)
    return np.sqrt(np.mean((rmse_y - data_y) ** 2))
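The core computation is a plain root-mean-square error; a standalone sketch with a hypothetical best-fit function:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])
y = np.array([0.1, 0.9, 2.2, 2.9])
bestfit = lambda v: v  # hypothetical fitted line y = x
rmse = np.sqrt(np.mean((bestfit(x) - y) ** 2))
print(round(rmse, 3))  # 0.132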
java
public final static <T> Stream<Seq<T>> sliding(final Stream<T> stream, final int windowSize) {
    return sliding(stream, windowSize, 1);
}
java
public void generate(String templateName, Row row) {
    try {
        CompiledTemplate template = getTemplate(templateName);
        VariableResolverFactory factory = new MapVariableResolverFactory();
        Map<String, Object> vars = new HashMap<String, Object>();
        initializePriorCommaConstraints(vars);
        initializeHasPriorJunctionConstraint(vars);
        vars.put("row", row);
        for (Cell cell : row.getCells()) {
            cell.addValue(vars);
        }
        String drl = String.valueOf(TemplateRuntime.execute(template, vars, factory, registry));
        rules.add(drl);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
java
public void marshall(GetParametersByPathRequest getParametersByPathRequest, ProtocolMarshaller protocolMarshaller) {
    if (getParametersByPathRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(getParametersByPathRequest.getPath(), PATH_BINDING);
        protocolMarshaller.marshall(getParametersByPathRequest.getRecursive(), RECURSIVE_BINDING);
        protocolMarshaller.marshall(getParametersByPathRequest.getParameterFilters(), PARAMETERFILTERS_BINDING);
        protocolMarshaller.marshall(getParametersByPathRequest.getWithDecryption(), WITHDECRYPTION_BINDING);
        protocolMarshaller.marshall(getParametersByPathRequest.getMaxResults(), MAXRESULTS_BINDING);
        protocolMarshaller.marshall(getParametersByPathRequest.getNextToken(), NEXTTOKEN_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def add_var_condor_cmd(self, command):
    """
    Add a condor command to the submit file that allows variable (macro)
    arguments to be passed to the executable.
    """
    if command not in self.__var_cmds:
        self.__var_cmds.append(command)
        macro = self.__bad_macro_chars.sub(r'', command)
        self.add_condor_cmd(command, '$(macro' + macro + ')')
python
def cmd_attitude(self, args):
    '''attitude q0 q1 q2 q3 thrust'''
    if len(args) != 5:
        print("Usage: attitude q0 q1 q2 q3 thrust (0~1)")
        return

    q0 = float(args[0])
    q1 = float(args[1])
    q2 = float(args[2])
    q3 = float(args[3])
    thrust = float(args[4])
    att_target = [q0, q1, q2, q3]
    print("q0:%.3f, q1:%.3f, q2:%.3f q3:%.3f thrust:%.2f" % (q0, q1, q2, q3, thrust))
    self.master.mav.set_attitude_target_send(
        0,           # system time in milliseconds
        1,           # target system
        0,           # target component
        63,          # type mask (ignore all except attitude + thrust)
        att_target,  # quaternion attitude
        0,           # body roll rate
        0,           # body pitch rate
        0,           # body yaw rate
        thrust)      # thrust
python
def is_gesture(self):
    """Macro to check if this event is a
    :class:`~libinput.event.GestureEvent`.
    """
    return self in {type(self).GESTURE_SWIPE_BEGIN,
                    type(self).GESTURE_SWIPE_END,
                    type(self).GESTURE_SWIPE_UPDATE,
                    type(self).GESTURE_PINCH_BEGIN,
                    type(self).GESTURE_PINCH_UPDATE,
                    type(self).GESTURE_PINCH_END}
java
private void readMap(final Element map, final Map<String, KeyDef> keyDefs) {
    readKeyDefinition(map, keyDefs);
    for (final Element elem : getChildElements(map)) {
        if (!(SUBMAP.matches(elem) || elem.getAttributeNode(ATTRIBUTE_NAME_KEYSCOPE) != null)) {
            readMap(elem, keyDefs);
        }
    }
}
python
def tilequeue_rawr_seed_toi(cfg, peripherals):
    """command to read the toi and enqueue the corresponding rawr tiles"""
    tiles_of_interest = peripherals.toi.fetch_tiles_of_interest()
    coords = map(coord_unmarshall_int, tiles_of_interest)
    _tilequeue_rawr_seed(cfg, peripherals, coords)
python
def _read_csv(filepath, kwargs):
    """See documentation of mpu.io.read."""
    if 'delimiter' not in kwargs:
        kwargs['delimiter'] = ','
    if 'quotechar' not in kwargs:
        kwargs['quotechar'] = '"'
    if 'skiprows' not in kwargs:
        kwargs['skiprows'] = []
    if isinstance(kwargs['skiprows'], int):
        kwargs['skiprows'] = [i for i in range(kwargs['skiprows'])]
    if 'format' in kwargs:
        format_ = kwargs['format']
        kwargs.pop('format', None)
    else:
        format_ = 'default'
    skiprows = kwargs['skiprows']
    kwargs.pop('skiprows', None)

    kwargs_open = {'newline': ''}
    mode = 'r'
    if sys.version_info < (3, 0):
        kwargs_open.pop('newline', None)
        mode = 'rb'
    with open(filepath, mode, **kwargs_open) as fp:
        if format_ == 'default':
            reader = csv.reader(fp, **kwargs)
            data = EList([row for row in reader])
            data = data.remove_indices(skiprows)
        elif format_ == 'dicts':
            reader_list = csv.DictReader(fp, **kwargs)
            data = [row for row in reader_list]
        else:
            raise NotImplementedError('Format \'{}\' unknown'
                                      .format(format_))
    return data
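A usage sketch, assuming mpu (which provides the EList used above) is installed:

import csv

with open('demo.csv', 'w', newline='') as fp:
    csv.writer(fp).writerows([['a', 'b'], ['1', '2'], ['3', '4']])

print(_read_csv('demo.csv', {'skiprows': 1}))
# [['1', '2'], ['3', '4']] - the header row is dropped
print(_read_csv('demo.csv', {'format': 'dicts'}))
# [{'a': '1', 'b': '2'}, {'a': '3', 'b': '4'}] - the header becomes the keys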
python
def _read(self):
    """
    Reads backup file from json_file property and sets backup_dict property
    with data decompressed and deserialized from that file. If no usable
    data is found backup_dict is set to the empty dict.
    """
    self.json_file.seek(0)
    try:
        data = zlib.decompress(self.json_file.read())
        self.backup_dict = json.loads(data.decode('utf-8'))
    except (EOFError, zlib.error):
        self.backup_dict = {}
python
def present(name, save=False, **kwargs):
    '''
    Ensure beacon is configured with the included beacon data.

    Args:
        name (str): The name of the beacon to ensure is configured.
        save (bool): ``True`` updates the beacons.conf. Default is ``False``.

    Returns:
        dict: A dictionary of information about the results of the state

    Example:

    .. code-block:: yaml

        ps_beacon:
          beacon.present:
            - name: ps
            - save: True
            - enable: False
            - services:
                salt-master: running
                apache2: stopped
    '''
    ret = {'name': name,
           'result': True,
           'changes': {},
           'comment': []}

    current_beacons = __salt__['beacons.list'](return_yaml=False, **kwargs)
    beacon_data = [{k: v} for k, v in six.iteritems(kwargs)]

    if name in current_beacons:
        if beacon_data == current_beacons[name]:
            ret['comment'].append('Job {0} in correct state'.format(name))
        else:
            if 'test' in __opts__ and __opts__['test']:
                kwargs['test'] = True
                result = __salt__['beacons.modify'](name, beacon_data, **kwargs)
                ret['comment'].append(result['comment'])
                ret['changes'] = result['changes']
            else:
                result = __salt__['beacons.modify'](name, beacon_data, **kwargs)
                if not result['result']:
                    ret['result'] = result['result']
                    ret['comment'] = result['comment']
                    return ret
                else:
                    if 'changes' in result:
                        ret['comment'].append('Modifying {0} in beacons'.format(name))
                        ret['changes'] = result['changes']
                    else:
                        ret['comment'].append(result['comment'])
    else:
        if 'test' in __opts__ and __opts__['test']:
            kwargs['test'] = True
            result = __salt__['beacons.add'](name, beacon_data, **kwargs)
            ret['comment'].append(result['comment'])
        else:
            result = __salt__['beacons.add'](name, beacon_data, **kwargs)
            if not result['result']:
                ret['result'] = result['result']
                ret['comment'] = result['comment']
                return ret
            else:
                ret['comment'].append('Adding {0} to beacons'.format(name))

    if save:
        __salt__['beacons.save'](**kwargs)
        ret['comment'].append('Beacon {0} saved'.format(name))

    ret['comment'] = '\n'.join(ret['comment'])
    return ret
python
def interact(self, unique_id, case_id, question_prompt, answer, choices=None,
             randomize=True):
    """Reads student input for unlocking tests until the student answers
    correctly.

    PARAMETERS:
    unique_id       -- str; the ID that is recorded with this unlocking
                       attempt.
    case_id         -- str; the ID that is recorded with this unlocking
                       attempt.
    question_prompt -- str; the question prompt
    answer          -- list; a list of locked lines in a test case answer.
    choices         -- list or None; a list of choices. If None or an empty
                       list, signifies the question is not multiple choice.
    randomize       -- bool; if True, randomizes the choices on first
                       invocation.

    DESCRIPTION:
    Continually prompt the student for an answer to an unlocking question
    until one of the following happens:

        1. The student supplies the correct answer, in which case the
           supplied answer is returned
        2. The student aborts abnormally (either by typing 'exit()' or
           using Ctrl-C/D). In this case, return None

    Correctness is determined by the verify method.

    RETURNS:
    list; the correct solution (that the student supplied). Each element
    in the list is a line of the correct output.
    """
    if randomize and choices:
        choices = random.sample(choices, len(choices))
    correct = False
    while not correct:
        if choices:
            assert len(answer) == 1, 'Choices must have 1 line of output'
            choice_map = self._display_choices(choices)

        question_timestamp = datetime.now()
        input_lines = []

        for line_number, line in enumerate(answer):
            if len(answer) == 1:
                prompt = self.PROMPT
            else:
                prompt = '(line {}){}'.format(line_number + 1, self.PROMPT)

            student_input = format.normalize(self._input(prompt))
            self._add_history(student_input)
            if student_input in self.EXIT_INPUTS:
                raise EOFError

            if choices and student_input in choice_map:
                student_input = choice_map[student_input]

            correct_answer = self._verify_student_input(student_input, line)
            if correct_answer:
                input_lines.append(correct_answer)
            else:
                input_lines.append(student_input)
                break
        else:
            correct = True

        tg_id = -1
        misU_count_dict = {}
        rationale = "Unknown - Default Value"
        if not correct:
            guidance_data = self.guidance_util.show_guidance_msg(
                unique_id, input_lines, self.hash_key)
            misU_count_dict, tg_id, printed_msg, rationale = guidance_data
        else:
            rationale = self.guidance_util.prompt_with_prob()
            print("-- OK! --")
            printed_msg = ["-- OK! --"]

        self.analytics.append({
            'id': unique_id,
            'case_id': case_id,
            'question timestamp': self.unix_time(question_timestamp),
            'answer timestamp': self.unix_time(datetime.now()),
            'prompt': question_prompt,
            'answer': input_lines,
            'correct': correct,
            'treatment group id': tg_id,
            'rationale': rationale,
            'misU count': misU_count_dict,
            'printed msg': printed_msg,
        })
    print()
    return input_lines
java
public final void entryRuleGrammar() throws RecognitionException {
    try {
        // InternalXtext.g:54:1: ( ruleGrammar EOF )
        // InternalXtext.g:55:1: ruleGrammar EOF
        {
            before(grammarAccess.getGrammarRule());
            pushFollow(FollowSets000.FOLLOW_1);
            ruleGrammar();
            state._fsp--;
            after(grammarAccess.getGrammarRule());
            match(input, EOF, FollowSets000.FOLLOW_2);
        }
    } catch (RecognitionException re) {
        reportError(re);
        recover(input, re);
    } finally {
    }
    return;
}
java
private void configureTransform(int viewWidth, int viewHeight) {
    int cameraWidth, cameraHeight;
    try {
        open.mLock.lock();
        if (null == mTextureView || null == open.mCameraSize) {
            return;
        }
        cameraWidth = open.mCameraSize.getWidth();
        cameraHeight = open.mCameraSize.getHeight();
    } finally {
        open.mLock.unlock();
    }

    int rotation = getWindowManager().getDefaultDisplay().getRotation();
    Matrix matrix = new Matrix();
    RectF viewRect = new RectF(0, 0, viewWidth, viewHeight);
    RectF bufferRect = new RectF(0, 0, cameraHeight, cameraWidth); // TODO why w/h swapped?
    float centerX = viewRect.centerX();
    float centerY = viewRect.centerY();
    if (Surface.ROTATION_90 == rotation || Surface.ROTATION_270 == rotation) {
        bufferRect.offset(centerX - bufferRect.centerX(), centerY - bufferRect.centerY());
        matrix.setRectToRect(viewRect, bufferRect, Matrix.ScaleToFit.FILL);
        float scale = Math.max(
                (float) viewHeight / cameraHeight,
                (float) viewWidth / cameraWidth);
        matrix.postScale(scale, scale, centerX, centerY);
        matrix.postRotate(90 * (rotation - 2), centerX, centerY);
    }
    mTextureView.setTransform(matrix);
}
java
@Deprecated
public static <C extends Callable<T>, T> T call(Class<C> callableClass, IFactory factory, PrintStream out, String... args) {
    return call(callableClass, factory, out, System.err, Help.Ansi.AUTO, args);
}
python
def writeXML(self, n):
    """
    Writes a XML string to the data stream.

    @type n: L{ET<xml.ET>}
    @param n: The XML Document to be encoded to the AMF3 data stream.
    """
    self.stream.write(TYPE_XMLSTRING)
    ref = self.context.getObjectReference(n)

    if ref != -1:
        self._writeInteger(ref << 1)
        return

    self.context.addObject(n)
    self.serialiseString(xml.tostring(n).encode('utf-8'))
java
protected void mergeSameCost(LinkedList<TimephasedCost> list) {
    LinkedList<TimephasedCost> result = new LinkedList<TimephasedCost>();
    TimephasedCost previousAssignment = null;

    for (TimephasedCost assignment : list) {
        if (previousAssignment == null) {
            assignment.setAmountPerDay(assignment.getTotalAmount());
            result.add(assignment);
        } else {
            Number previousAssignmentCost = previousAssignment.getAmountPerDay();
            Number assignmentCost = assignment.getTotalAmount();

            if (NumberHelper.equals(previousAssignmentCost.doubleValue(), assignmentCost.doubleValue(), 0.01)) {
                Date assignmentStart = previousAssignment.getStart();
                Date assignmentFinish = assignment.getFinish();
                double total = previousAssignment.getTotalAmount().doubleValue();
                total += assignmentCost.doubleValue();

                TimephasedCost merged = new TimephasedCost();
                merged.setStart(assignmentStart);
                merged.setFinish(assignmentFinish);
                merged.setAmountPerDay(assignmentCost);
                merged.setTotalAmount(Double.valueOf(total));

                result.removeLast();
                assignment = merged;
            } else {
                assignment.setAmountPerDay(assignment.getTotalAmount());
            }
            result.add(assignment);
        }
        previousAssignment = assignment;
    }

    list.clear();
    list.addAll(result);
}
java
@Override
public void writeInt(int value) throws JMSException {
    try {
        getOutput().writeInt(value);
    } catch (IOException e) {
        throw new FFMQException("Cannot write message body", "IO_ERROR", e);
    }
}
java
public void deleteTemplate(DeleteTemplateRequest request) {
    checkNotNull(request, "object request should not be null.");
    assertStringNotNullOrEmpty(request.getTemplateId(), "object templateId should not be null or empty.");
    InternalRequest internalRequest = this.createRequest("template", request, HttpMethodName.DELETE,
            request.getTemplateId());
    this.invokeHttpClient(internalRequest, SmsResponse.class);
}
python
def execute(args):
    """
    Executes the *run* subprogram with parsed commandline *args*.
    """
    task_family = None
    error = None

    # try to infer the task module from the passed task family and import it
    parts = args.task_family.rsplit(".", 1)
    if len(parts) == 2:
        modid, cls_name = parts
        try:
            mod = __import__(modid, globals(), locals(), [cls_name])
            if hasattr(mod, cls_name):
                task_cls = getattr(mod, cls_name)
                if not issubclass(task_cls, Task):
                    abort("object '{}' is not a Task".format(args.task_family))
                task_family = task_cls.task_family
        except ImportError as e:
            logger.warning("import error in module {}: {}".format(modid, e))
            error = e

    # read task info from the index file and import it
    if task_family is None:
        index_file = Config.instance().get_expanded("core", "index_file")
        if os.path.exists(index_file):
            info = read_task_from_index(args.task_family, index_file)
            if not info:
                abort("task family '{}' not found in index".format(args.task_family))
            modid, task_family, _ = info
            __import__(modid, globals(), locals())

    # complain when no task could be found
    if task_family is None:
        if error:
            raise error
        else:
            abort("task '{}' not found".format(args.task_family))

    # import the module and run luigi
    luigi_run([task_family] + sys.argv[3:])
python
def object_deserializer(obj):
    """Helper to deserialize a raw result dict into a proper dict.

    :param obj: The dict.
    """
    for key, val in obj.items():
        if isinstance(val, six.string_types) and DATETIME_REGEX.search(val):
            try:
                obj[key] = dates.localize_datetime(parser.parse(val))
            except ValueError:
                obj[key] = val
    return obj
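A usage sketch wired into json.loads via object_hook. The DATETIME_REGEX and the dates shim below are hypothetical stand-ins for the module-level names the helper expects:

import json
import re
import six
from dateutil import parser

# Hypothetical stand-ins for the module-level helpers.
DATETIME_REGEX = re.compile(r'\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}')

class dates(object):  # minimal shim for dates.localize_datetime
    localize_datetime = staticmethod(lambda dt: dt)

raw = '{"created": "2021-06-01T12:00:00", "name": "demo"}'
print(json.loads(raw, object_hook=object_deserializer))
# {'created': datetime.datetime(2021, 6, 1, 12, 0), 'name': 'demo'}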
java
private String getCustomHandler(WaybackException e, WaybackRequest wbRequest) {
    String jspPath = null;
    if ((e instanceof ResourceNotInArchiveException) && wbRequest.isReplayRequest()) {
        String url = wbRequest.getRequestUrl();
        Date captureDate = wbRequest.getReplayDate();
        try {
            Rule rule = client.getRule(url, captureDate, new Date(), who);
            jspPath = ruleToJspPath(rule);
        } catch (RuleOracleUnavailableException e1) {
            e1.printStackTrace();
        }
    }
    return jspPath;
}
java
private void callAnnotatedMethod(final Class<? extends Annotation> annotationClass) {
    if (this.lifecycleMethod.get(annotationClass.getName()) != null) {
        for (final Method method : this.lifecycleMethod.get(annotationClass.getName())) {
            try {
                ClassUtility.callMethod(method, this);
            } catch (final CoreException e) {
                LOGGER.error(CALL_ANNOTATED_METHOD_ERROR, e);
            }
        }
    }
}
java
public EEnum getCDDXocBase() {
    if (cddXocBaseEEnum == null) {
        cddXocBaseEEnum = (EEnum) EPackage.Registry.INSTANCE.getEPackage(AfplibPackage.eNS_URI)
                .getEClassifiers().get(8);
    }
    return cddXocBaseEEnum;
}
java
static List<PatternLevel> readLevelResourceFile(InputStream stream) {
    List<PatternLevel> levels = null;
    if (stream != null) {
        try {
            levels = configureClassLevels(stream);
        } catch (IOException e) {
            System.err.println("IO exception reading the log properties file '"
                    + LOCAL_LOG_PROPERTIES_FILE + "': " + e);
        } finally {
            try {
                stream.close();
            } catch (IOException e) {
                // ignore close exception
            }
        }
    }
    return levels;
}
python
def get_precinctsreporting(self, obj):
    """Precincts reporting if vote is top level result else ``None``."""
    if obj.division.level == \
            obj.candidate_election.election.division.level:
        return obj.candidate_election.election.meta.precincts_reporting
    return None
python
def run_configurations(callback, sections_reader):
    """Parse configurations and execute callback for matching."""
    base = dict(OPTIONS)
    sections = sections_reader()
    if sections is None:
        logger.info("Configuration not found in .ini files. "
                    "Running with default settings")
        recompile()
    elif sections == []:
        logger.info("Configuration does not match current runtime. "
                    "Exiting")

    results = []
    for section, options in sections:
        OPTIONS.clear()
        OPTIONS.update(base)
        OPTIONS.update(options)
        logger.debug("Running configuration from section \"%s\". OPTIONS: %r",
                     section, OPTIONS)
        results.append(callback())
    return results
python
def label_storm_objects(data, method, min_intensity, max_intensity, min_area=1, max_area=100, max_range=1,
                        increment=1, gaussian_sd=0):
    """
    From a 2D grid or time series of 2D grids, this method labels storm objects with either the Enhanced
    Watershed or Hysteresis methods.

    Args:
        data: the gridded data to be labeled. Should be a 2D numpy array in (y, x) coordinate order or a
            3D numpy array in (time, y, x) coordinate order
        method: "ew" or "watershed" for Enhanced Watershed or "hyst" for hysteresis
        min_intensity: Minimum intensity threshold for gridpoints contained within any objects
        max_intensity: For watershed, any points above max_intensity are considered as the same value as
            max intensity. For hysteresis, all objects have to contain at least 1 pixel that equals or
            exceeds this value
        min_area: (default 1) The minimum area of any object in pixels.
        max_area: (default 100) The area threshold in pixels at which the enhanced watershed ends growth.
            Object area may exceed this threshold if the pixels at the last watershed level exceed the
            object area.
        max_range: Maximum difference between the maximum and minimum value in an enhanced watershed
            object before growth is stopped.
        increment: Discretization increment for the enhanced watershed
        gaussian_sd: Standard deviation of Gaussian filter applied to data

    Returns:
        label_grid: an ndarray with the same shape as data in which each pixel is labeled with a positive
            integer value.
    """
    if method.lower() in ["ew", "watershed"]:
        labeler = EnhancedWatershed(min_intensity, increment, max_intensity, max_area, max_range)
    else:
        labeler = Hysteresis(min_intensity, max_intensity)
    if len(data.shape) == 2:
        label_grid = labeler.label(gaussian_filter(data, gaussian_sd))
        label_grid[data < min_intensity] = 0
        if min_area > 1:
            label_grid = labeler.size_filter(label_grid, min_area)
    else:
        label_grid = np.zeros(data.shape, dtype=int)
        for t in range(data.shape[0]):
            label_grid[t] = labeler.label(gaussian_filter(data[t], gaussian_sd))
            label_grid[t][data[t] < min_intensity] = 0
            if min_area > 1:
                label_grid[t] = labeler.size_filter(label_grid[t], min_area)
    return label_grid
python
def rename_columns(self, col):
    """
    Rename columns of dataframe.

    Parameters
    ----------
    col : list(str)
        List of columns to rename.
    """
    try:
        self.cleaned_data.columns = col
    except Exception as e:
        raise e
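The method is a thin wrapper around pandas column assignment; the equivalent operation on a plain DataFrame:

import pandas as pd

df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
df.columns = ['x', 'y']   # the same assignment the method performs
print(list(df.columns))   # ['x', 'y']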
python
def debugPreview(self, title="Debug"):
    """ Displays the region in a preview window.

    If the region is a Match, circles the target area. If the region is
    larger than half the primary screen in either dimension, scales it
    down to half size.
    """
    region = self
    haystack = self.getBitmap()
    if isinstance(region, Match):
        cv2.circle(
            haystack,
            (region.getTarget().x - self.x, region.getTarget().y - self.y),
            5,
            255)
    if haystack.shape[0] > (Screen(0).getBounds()[2] / 2) or haystack.shape[1] > (Screen(0).getBounds()[3] / 2):
        # Image is bigger than half the screen; scale it down
        haystack = cv2.resize(haystack, (0, 0), fx=0.5, fy=0.5)
    Image.fromarray(haystack).show()
python
def record_staged(self):
    """Encode staged information on request and result to output"""
    if self.enabled:
        pwargs = OpArgs(
            self._pywbem_method,
            self._pywbem_args)
        pwresult = OpResult(
            self._pywbem_result_ret,
            self._pywbem_result_exc)
        httpreq = HttpRequest(
            self._http_request_version,
            self._http_request_url,
            self._http_request_target,
            self._http_request_method,
            self._http_request_headers,
            self._http_request_payload)
        httpresp = HttpResponse(
            self._http_response_version,
            self._http_response_status,
            self._http_response_reason,
            self._http_response_headers,
            self._http_response_payload)
        self.record(pwargs, pwresult, httpreq, httpresp)
java
@Parameters
public static final Collection<Object[]> getParameters() {
    return Arrays.asList(
            new Object[] {
                    (Function<ListenerStore, ? extends EventProvider>) SequentialEventProvider::new,
                    (Supplier<ListenerStore>) DefaultListenerStore::create },
            new Object[] {
                    (Function<ListenerStore, ? extends EventProvider>) SequentialEventProvider::new,
                    (Supplier<ListenerStore>) PriorityListenerStore::create }
    );
}
java
public static boolean isClassAvilableInClassPath(String string) {
    try {
        Class.forName(string);
        return true;
    } catch (ClassNotFoundException e) {
        return false;
    }
}
python
def uint32_gte(a: int, b: int) -> bool:
    """
    Return a >= b.
    """
    return (a == b) or uint32_gt(a, b)
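uint32_gt is not shown in this snippet; one common definition, offered here as an assumption, is RFC 1982-style serial-number comparison under modulo-2**32 wrap-around:

def uint32_gt(a: int, b: int) -> bool:
    """Assumed companion: serial-number style a > b with 32-bit wrap-around."""
    return a != b and ((a - b) & 0xFFFFFFFF) < 0x80000000

print(uint32_gte(5, 5))            # True (equal)
print(uint32_gte(1, 0xFFFFFFFF))   # True (1 is "ahead" across the wrap)
print(uint32_gte(0, 1))            # False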
java
public static String normalizeParameters(String requestUrl, Map<String, String> protocolParameters)
        throws AuthException {
    SortedSet<RequestParameter> parameters = new TreeSet<>();
    int index = requestUrl.indexOf('?');
    if (index > 0) {
        String query = requestUrl.substring(index + 1);
        Iterator<String> i = new StringIterator(query, "&");
        while (i.hasNext()) {
            String parameter = i.next();
            int equalsIndex = parameter.indexOf('=');
            if (equalsIndex > 0) {
                parameters.add(new RequestParameter(
                        AuthUtils.percentDecode(parameter.substring(0, equalsIndex)),
                        AuthUtils.percentDecode(parameter.substring(equalsIndex + 1))));
            } else if (equalsIndex == -1) {
                parameters.add(new RequestParameter(AuthUtils.percentDecode(parameter)));
            }
        }
    }
    for (Entry<String, String> entry : protocolParameters.entrySet()) {
        parameters.add(new RequestParameter(entry.getKey(), entry.getValue()));
    }
    StringBuilder normalized = new StringBuilder();
    Iterator<RequestParameter> parameterIterator = parameters.iterator();
    if (parameterIterator.hasNext()) {
        RequestParameter requestParameter = parameterIterator.next();
        normalized.append(requestParameter.getEncodedName())
                .append('=').append(requestParameter.getEncodedValue());
        while (parameterIterator.hasNext()) {
            requestParameter = parameterIterator.next();
            normalized.append('&').append(requestParameter.getEncodedName())
                    .append('=').append(requestParameter.getEncodedValue());
        }
    }
    return normalized.toString();
}
python
def rsync(hosts, source, destination, logger=None, sudo=False):
    """
    Grabs the hosts (or single host), creates the connection object for each
    and sets up the rsync execnet engine to push the files.

    It assumes that the destination is the same for all of the hosts. This
    deviates from what execnet does, because execnet has the flexibility to
    push to different locations.
    """
    logger = logger or basic_remote_logger()
    sync = _RSync(source, logger=logger)

    # setup_targets
    if not isinstance(hosts, list):
        hosts = [hosts]

    for host in hosts:
        conn = Connection(
            host,
            logger,
            sudo,
        )
        sync.add_target(conn.gateway, destination)

    return sync.send()
java
private <T> void addEntry(StreamElementQueueEntry<T> streamElementQueueEntry) {
    assert (lock.isHeldByCurrentThread());

    queue.addLast(streamElementQueueEntry);

    streamElementQueueEntry.onComplete(
        (StreamElementQueueEntry<T> value) -> {
            try {
                onCompleteHandler(value);
            } catch (InterruptedException e) {
                // we got interrupted. This indicates a shutdown of the executor
                LOG.debug("AsyncBufferEntry could not be properly completed because the " +
                    "executor thread has been interrupted.", e);
            } catch (Throwable t) {
                operatorActions.failOperator(new Exception("Could not complete the " +
                    "stream element queue entry: " + value + '.', t));
            }
        },
        executor);
}
python
def ismatch(a, b):
    """Method to allow smart comparisons between classes, instances, and
    string representations of units and give the right answer. For internal
    use only."""
    # Try the easy case
    if a == b:
        return True
    else:
        # Try isinstance in both orders
        try:
            if isinstance(a, b):
                return True
        except TypeError:
            try:
                if isinstance(b, a):
                    return True
            except TypeError:
                # Try isinstance(a, type(b)) in both orders
                try:
                    if isinstance(a, type(b)):
                        return True
                except TypeError:
                    try:
                        if isinstance(b, type(a)):
                            return True
                    except TypeError:
                        # Try the string representation
                        if str(a).lower() == str(b).lower():
                            return True
                        else:
                            return False
python
def from_both(cls, tags_file: str, tags_folder: str, folder: str) -> 'TrainData':
    """Load data from both a database and a structured folder"""
    return cls.from_tags(tags_file, tags_folder) + cls.from_folder(folder)
python
def _self_destruct(self):
    """Auto quit exec if parent process failed
    """
    # This will give parent process 15 seconds to reset.
    self._kill = threading.Timer(15, lambda: os._exit(0))
    self._kill.start()
python
def get_whoami(self):
    """
    A convenience function used in the event that you need to confirm that
    the broker thinks you are who you think you are.

    :returns dict whoami: Dict structure contains:

        * administrator: whether the user has admin privileges
        * name: user name
        * auth_backend: backend used to determine admin rights
    """
    path = Client.urls['whoami']
    whoami = self._call(path, 'GET')
    return whoami
python
def reportMatchCompletion(cfg, results, replayData):
    """send information back to the server about the match's winners/losers"""
    payload = json.dumps([cfg.flatten(), results, replayData])
    ladder = cfg.ladder
    return requests.post(
        url=c.URL_BASE % (ladder.ipAddress, ladder.serverPort, "matchfinished"),
        data=payload,
        # headers=headers,
    )
java
public void setup(PluginParameters pluginParameters) {
    this.indentCharacters = pluginParameters.indentCharacters;
    this.lineSeparatorUtil = pluginParameters.lineSeparatorUtil;
    this.encoding = pluginParameters.encoding;
    this.expandEmptyElements = pluginParameters.expandEmptyElements;
    this.indentBlankLines = pluginParameters.indentBlankLines;
}
python
def path(self):
    """Requested path as unicode. This works a bit like the regular path
    info in the WSGI environment, but will always include a leading slash,
    even if the URL root is accessed.
    """
    raw_path = wsgi_decoding_dance(
        self.environ.get("PATH_INFO") or "", self.charset, self.encoding_errors
    )
    return "/" + raw_path.lstrip("/")
python
def get(self, name):
    """Returns a Vxlan interface as a set of key/value pairs

    The Vxlan interface resource returns the following:

        * name (str): The name of the interface
        * type (str): Always returns 'vxlan'
        * source_interface (str): The vxlan source-interface value
        * multicast_group (str): The vxlan multicast-group value
        * udp_port (int): The vxlan udp-port value
        * vlans (dict): The vlan to vni mappings
        * flood_list (list): The list of global VTEP flood list
        * multicast_decap (bool): If the multicast decap feature is configured

    Args:
        name (str): The interface identifier to retrieve from the
            running-configuration

    Returns:
        A Python dictionary object of key/value pairs that represents
        the interface configuration. If the specified interface does not
        exist, then None is returned
    """
    config = self.get_block('^interface %s' % name)
    if not config:
        return None

    response = super(VxlanInterface, self).get(name)
    response.update(dict(name=name, type='vxlan'))

    response.update(self._parse_source_interface(config))
    response.update(self._parse_multicast_group(config))
    response.update(self._parse_udp_port(config))
    response.update(self._parse_vlans(config))
    response.update(self._parse_flood_list(config))
    response.update(self._parse_multicast_decap(config))

    return response
java
@Nullable
private AuthorizationInfo getAuthorizationInfoById(String id) {
    AuthorizationInfo authorizationInfo;

    // Search the cache first
    Cache<String, AuthorizationInfo> idAuthorizationCache = getAvailableIdAuthorizationCache();
    if (idAuthorizationCache != null) {
        authorizationInfo = idAuthorizationCache.get(id);
        if (authorizationInfo != null) {
            // Check whether it is the stand-in "null" cached value
            if (authorizationInfo != _nullAuthorizationInfo) {
                _log.debug("Authorization info found cached for id {}", id);
                return authorizationInfo;
            } else {
                _log.debug("Authorization info previously cached as not found for id {}", id);
                return null;
            }
        }
    }

    authorizationInfo = getUncachedAuthorizationInfoById(id);
    cacheAuthorizationInfoById(id, authorizationInfo);
    return authorizationInfo;
}
java
public EnvironmentInner get(String resourceGroupName, String labAccountName, String labName,
        String environmentSettingName, String environmentName, String expand) {
    return getWithServiceResponseAsync(resourceGroupName, labAccountName, labName, environmentSettingName,
            environmentName, expand).toBlocking().single().body();
}
java
public static GiftCard.Redemption createRedemption(String accountCode) {
    Redemption redemption = new Redemption();
    redemption.setAccountCode(accountCode);
    return redemption;
}
java
public List<U> getAll(final boolean readFromSession) {
    final LinkedHashMap<String, U> profiles = retrieveAll(readFromSession);
    return ProfileHelper.flatIntoAProfileList(profiles);
}
python
def collapse_equal_values(values, counts):
    """
    Take a tuple (values, counts), remove consecutive values and increment
    their count instead.
    """
    assert len(values) == len(counts)
    previousValue = values[0]
    previousCount = 0
    for value, count in zip(values, counts):
        if value != previousValue:
            yield (previousValue, previousCount)
            previousCount = 0
            previousValue = value
        previousCount += count
    yield (previousValue, previousCount)
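A quick check of the generator's collapsing behavior:

values = [1, 1, 2, 2, 2, 1]
counts = [3, 1, 2, 2, 2, 5]
print(list(collapse_equal_values(values, counts)))
# [(1, 4), (2, 6), (1, 5)] - runs of equal values merge, counts accumulate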
python
def calculate_rates(base_currency, counter_currency, forward_rate=None,
                    fwd_points=None, spot_reference=None):
    """Calculate rates for Fx Forward based on others."""
    if base_currency not in DIVISOR_TABLE:
        divisor = DIVISOR_TABLE.get(counter_currency, DEFAULT_DIVISOR)
        if forward_rate is None and fwd_points is not None and spot_reference is not None:
            forward_rate = spot_reference + fwd_points / divisor
        elif forward_rate is not None and fwd_points is None and spot_reference is not None:
            fwd_points = (forward_rate - spot_reference) * divisor
        elif forward_rate is not None and fwd_points is not None and spot_reference is None:
            spot_reference = forward_rate - fwd_points / divisor
    rates = {}
    if forward_rate is not None:
        rates['forward_rate'] = forward_rate
    if fwd_points is not None:
        rates['fwd_points'] = fwd_points
    if spot_reference is not None:
        rates['spot_reference'] = spot_reference
    return rates
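A worked example with hypothetical divisor constants (the real DIVISOR_TABLE and DEFAULT_DIVISOR encode market conventions and are defined elsewhere in the module):

DIVISOR_TABLE = {'JPY': 100}   # hypothetical values, for illustration only
DEFAULT_DIVISOR = 10000

print(calculate_rates('EUR', 'USD', fwd_points=25, spot_reference=1.10))
# {'forward_rate': 1.1025, 'fwd_points': 25, 'spot_reference': 1.1}
# forward_rate = 1.10 + 25 / 10000 = 1.1025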
python
def load_extension(self, path, name_filter=None, class_filter=None, unique=False, component=None):
    """Load a single python module extension.

    This function is similar to using the imp module directly to load a
    module and potentially inspecting the objects it declares to filter
    them by class.

    Args:
        path (str): The path to the python file to load
        name_filter (str): If passed, the basename of the module must match
            name or nothing is returned.
        class_filter (type): If passed, only instances of this class are returned.
        unique (bool): If True (default is False), there must be exactly one object
            found inside this extension that matches all of the other criteria.
        component (IOTile): The component that this extension comes from if it is
            loaded from an installed component. This is used to properly import
            the extension as a submodule of the component's support package.

    Returns:
        list of (name, type): A list of the objects found at the extension path.

        If unique is True, then the list only contains a single entry and that
        entry will be directly returned.
    """
    import_name = None
    if component is not None:
        import_name = _ensure_package_loaded(path, component)

    name, ext = _try_load_module(path, import_name=import_name)

    if name_filter is not None and name != name_filter:
        return []

    found = [(name, x) for x in self._filter_subclasses(ext, class_filter)]
    found = [(name, x) for name, x in found if self._filter_nonextensions(x)]

    if not unique:
        return found

    if len(found) > 1:
        raise ArgumentError("Extension %s should have had exactly one instance of class %s, found %d"
                            % (path, class_filter.__name__, len(found)), classes=found)
    elif len(found) == 0:
        raise ArgumentError("Extension %s had no instances of class %s"
                            % (path, class_filter.__name__))

    return found[0]
python
def commit_changeset(self, changeset_id: uuid.UUID) -> Dict[bytes, Union[bytes, DeletedEntry]]:
    """
    Collapses all changes for the given changeset into the previous
    changeset, if one exists.
    """
    does_clear = self.has_clear(changeset_id)
    changeset_data = self.pop_changeset(changeset_id)
    if not self.is_empty():
        # we only have to assign changeset data into the latest changeset if
        # there is one.
        if does_clear:
            # if there was a clear and more changesets underneath then clear the
            # latest changeset, and replace with a new clear changeset
            self.latest = {}
            self._clears_at.add(self.latest_id)
            self.record_changeset()
            self.latest = changeset_data
        else:
            # otherwise, merge in all the current data
            self.latest = merge(
                self.latest,
                changeset_data,
            )
    return changeset_data
python
def _guess_normalized(self):
    """Returns true if the collated counts in `self._results` appear to be
    normalized.

    Notes
    -----
    It's possible that the _results df has already been normalized, which
    can cause some methods to fail. This method lets us guess whether that's
    true and act accordingly.
    """
    return (
        getattr(self, "_normalized", False)
        or getattr(self, "_field", None) == "abundance"
        or bool((self._results.sum(axis=1).round(4) == 1.0).all())
    )
python
def to_vector(np_array):
    """Convert numpy array to MLlib Vector
    """
    if len(np_array.shape) == 1:
        return Vectors.dense(np_array)
    else:
        raise Exception("An MLLib Vector can only be created from a one-dimensional "
                        + "numpy array, got {}".format(len(np_array.shape)))
java
public final void deleteProfile(String name) {
    DeleteProfileRequest request = DeleteProfileRequest.newBuilder().setName(name).build();
    deleteProfile(request);
}
python
def rand_email():
    """Random email.

    Usage Example::

        >>> rand_email()
        [email protected]
    """
    name = random.choice(string.ascii_letters) + \
        rand_str(string.ascii_letters + string.digits, random.randint(4, 14))
    domain = rand_str(string.ascii_lowercase, random.randint(2, 10))
    kind = random.choice(_all_email_kinds)
    return "%s@%s%s" % (name, domain, kind)
java
@Override
public void initialize() {
    metricRegistry.register(createMetricName("name"), new Gauge<String>() {
        @Override
        public String getValue() {
            return key.name();
        }
    });

    // allow monitor to know exactly at what point in time these stats are for
    // so they can be plotted accurately
    metricRegistry.register(createMetricName("currentTime"), new Gauge<Long>() {
        @Override
        public Long getValue() {
            return System.currentTimeMillis();
        }
    });

    metricRegistry.register(createMetricName("threadActiveCount"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getCurrentActiveCount();
        }
    });

    metricRegistry.register(createMetricName("completedTaskCount"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getCurrentCompletedTaskCount();
        }
    });

    metricRegistry.register(createMetricName("largestPoolSize"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getCurrentLargestPoolSize();
        }
    });

    metricRegistry.register(createMetricName("totalTaskCount"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getCurrentTaskCount();
        }
    });

    metricRegistry.register(createMetricName("queueSize"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getCurrentQueueSize();
        }
    });

    metricRegistry.register(createMetricName("rollingMaxActiveThreads"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getRollingMaxActiveThreads();
        }
    });

    metricRegistry.register(createMetricName("countThreadsExecuted"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getCumulativeCountThreadsExecuted();
        }
    });

    metricRegistry.register(createMetricName("rollingCountCommandsRejected"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            try {
                return metrics.getRollingCount(HystrixRollingNumberEvent.THREAD_POOL_REJECTED);
            } catch (NoSuchFieldError error) {
                logger.error("While publishing CodaHale metrics, error looking up eventType for : "
                        + "rollingCountCommandsRejected. Please check that all Hystrix versions are the same!");
                return 0L;
            }
        }
    });

    metricRegistry.register(createMetricName("rollingCountThreadsExecuted"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return metrics.getRollingCountThreadsExecuted();
        }
    });

    // properties
    metricRegistry.register(createMetricName("propertyValue_corePoolSize"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return properties.coreSize().get();
        }
    });

    metricRegistry.register(createMetricName("propertyValue_maximumSize"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return properties.maximumSize().get();
        }
    });

    metricRegistry.register(createMetricName("propertyValue_actualMaximumSize"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return properties.actualMaximumSize();
        }
    });

    metricRegistry.register(createMetricName("propertyValue_keepAliveTimeInMinutes"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return properties.keepAliveTimeMinutes().get();
        }
    });

    metricRegistry.register(createMetricName("propertyValue_queueSizeRejectionThreshold"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return properties.queueSizeRejectionThreshold().get();
        }
    });

    metricRegistry.register(createMetricName("propertyValue_maxQueueSize"), new Gauge<Number>() {
        @Override
        public Number getValue() {
            return properties.maxQueueSize().get();
        }
    });
}
java
public static <T> Constructor<T> getAccessibleConstructor(final Constructor<T> ctor) {
    Objects.requireNonNull(ctor, "constructor cannot be null");
    return MemberUtils.isAccessible(ctor) && isAccessible(ctor.getDeclaringClass()) ? ctor : null;
}
python
def readSB(self, bits):
    """
    Read a signed int using the specified number of bits
    """
    shift = 32 - bits
    return int32(self.readbits(bits) << shift) >> shift
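The shift pair sign-extends the low bits; a standalone illustration with a plain-Python stand-in for the snippet's int32:

def int32(v: int) -> int:
    """Stand-in for int32: wrap a Python int to a signed 32-bit value."""
    v &= 0xFFFFFFFF
    return v - 0x100000000 if v & 0x80000000 else v

def sign_extend(value: int, bits: int) -> int:
    shift = 32 - bits
    return int32(value << shift) >> shift

print(sign_extend(0b101, 3))  # -3 (high bit set, so the value is negative)
print(sign_extend(0b011, 3))  # 3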
python
def generate_sample(self, start_state=None, size=1):
    """
    Generator version of self.sample

    Return Type:
    ------------
    List of State namedtuples, representing the assignment to all variables
    of the model.

    Examples:
    ---------
    >>> from pgmpy.models.MarkovChain import MarkovChain
    >>> from pgmpy.factors.discrete import State
    >>> model = MarkovChain()
    >>> model.add_variables_from(['intel', 'diff'], [3, 2])
    >>> intel_tm = {0: {0: 0.2, 1: 0.4, 2: 0.4}, 1: {0: 0, 1: 0.5, 2: 0.5}, 2: {0: 0.3, 1: 0.3, 2: 0.4}}
    >>> model.add_transition_model('intel', intel_tm)
    >>> diff_tm = {0: {0: 0.5, 1: 0.5}, 1: {0: 0.25, 1: 0.75}}
    >>> model.add_transition_model('diff', diff_tm)
    >>> gen = model.generate_sample([State('intel', 0), State('diff', 0)], 2)
    >>> [sample for sample in gen]
    [[State(var='intel', state=2), State(var='diff', state=1)],
     [State(var='intel', state=2), State(var='diff', state=0)]]
    """
    if start_state is None:
        if self.state is None:
            self.state = self.random_state()
        # else use previously-set state
    else:
        self.set_start_state(start_state)
    # sampled.loc[0] = [self.state[var] for var in self.variables]
    for i in range(size):
        for j, (var, st) in enumerate(self.state):
            next_st = sample_discrete(list(self.transition_models[var][st].keys()),
                                      list(self.transition_models[var][st].values()))[0]
            self.state[j] = State(var, next_st)
        yield self.state[:]
python
def start(self):
    """Start the sensor.
    """
    running = self._webcam.start()
    if not running:
        return running
    running &= self._phoxi.start()
    if not running:
        self._webcam.stop()
    return running
java
@SuppressWarnings("PMD.AvoidCatchingThrowable")
public static <T> Consumer<T> swallowExceptions(ThrowingConsumer<T> in) {
    return (item) -> {
        try {
            in.accept(item);
        } catch (Throwable t) {
            // deliberately swallowed; catch block reconstructed from the
            // method's contract, as the original snippet was truncated here
        }
    };
}
java
public String getFullName() {
    StringBuffer buf = new StringBuffer();
    if (getParentPrivlige() != null) {
        buf.append(parentPrivlige.getFullName().concat(" > "));
    }
    buf.append(getPriviligeName());
    return buf.toString();
}
java
private MemcachedBackupSession loadFromMemcached(final String sessionId) {
    if (_log.isDebugEnabled()) {
        _log.debug("Loading session from memcached: " + sessionId);
    }

    LockStatus lockStatus = null;
    try {
        if (!_sticky) {
            lockStatus = _lockingStrategy.onBeforeLoadFromMemcached(sessionId);
        }

        final long start = System.currentTimeMillis();

        /* In the previous version (<1.2) the session was completely serialized by
         * custom Transcoder implementations.
         * Such sessions have set the SERIALIZED flag (from SerializingTranscoder) so that
         * they get deserialized by BaseSerializingTranscoder.deserialize or the appropriate
         * specializations.
         */
        final byte[] object = _storage.get(_memcachedNodesManager.getStorageKeyFormat().format(sessionId));
        _memcachedNodesManager.onLoadFromMemcachedSuccess(sessionId);

        if (object != null) {
            final long startDeserialization = System.currentTimeMillis();
            final MemcachedBackupSession result = _transcoderService.deserialize(object, _manager);
            _statistics.registerSince(SESSION_DESERIALIZATION, startDeserialization);
            _statistics.registerSince(LOAD_FROM_MEMCACHED, start);

            result.setSticky(_sticky);
            if (!_sticky) {
                _lockingStrategy.onAfterLoadFromMemcached(result, lockStatus);
            }

            if (_log.isDebugEnabled()) {
                _log.debug("Found session with id " + sessionId);
            }
            return result;
        } else {
            releaseIfLocked(sessionId, lockStatus);
            _invalidSessionsCache.put(sessionId, Boolean.TRUE);
            if (_log.isDebugEnabled()) {
                _log.debug("Session " + sessionId + " not found in memcached.");
            }
            return null;
        }
    } catch (final TranscoderDeserializationException e) {
        _log.warn("Could not deserialize session with id " + sessionId
                + " from memcached, session will be purged from storage.", e);
        releaseIfLocked(sessionId, lockStatus);
        _storage.delete(_memcachedNodesManager.getStorageKeyFormat().format(sessionId));
        _invalidSessionsCache.put(sessionId, Boolean.TRUE);
    } catch (final Exception e) {
        _log.warn("Could not load session with id " + sessionId + " from memcached.", e);
        releaseIfLocked(sessionId, lockStatus);
    }
    return null;
}
java
public AwsSecurityFindingFilters withNetworkDestinationIpV4(IpFilter... networkDestinationIpV4) {
    if (this.networkDestinationIpV4 == null) {
        setNetworkDestinationIpV4(new java.util.ArrayList<IpFilter>(networkDestinationIpV4.length));
    }
    for (IpFilter ele : networkDestinationIpV4) {
        this.networkDestinationIpV4.add(ele);
    }
    return this;
}
python
def find_version(include_dev_version=True, version_file='version.txt',
                 version_module_paths=(),
                 git_args=('git', 'describe', '--tags', '--long'),
                 Popen=subprocess.Popen):
    """Find an appropriate version number from version control.

    It's much more convenient to be able to use your version control system's
    tagging mechanism to derive a version number than to have to duplicate
    that information all over the place. Currently, only git is supported.

    The default behavior is to write out a ``version.txt`` file which
    contains the git output, for systems where git isn't installed or there
    is no .git directory present. ``version.txt`` can (and probably should!)
    be packaged in release tarballs by way of the ``MANIFEST.in`` file.

    :param include_dev_version: By default, if there are any commits after
        the most recent tag (as reported by git), that number will be
        included in the version number as a ``.dev`` suffix. For example, if
        the most recent tag is ``1.0`` and there have been three commits
        after that tag, the version number will be ``1.0.dev3``. This
        behavior can be disabled by setting this parameter to ``False``.
    :param version_file: The name of the file where version information will
        be saved. Reading and writing version files can be disabled
        altogether by setting this parameter to ``None``.
    :param version_module_paths: A list of python modules which will be
        automatically generated containing ``__version__`` and ``__sha__``
        attributes. For example, with ``package/_version.py`` as a version
        module path, ``package/__init__.py`` could do
        ``from package._version import __version__, __sha__``.
    :param git_args: The git command to run to get a version. By default,
        this is ``git describe --tags --long``. Specify this as a list of
        string arguments including ``git``, e.g. ``['git', 'describe']``.
    :param Popen: Defaults to ``subprocess.Popen``. This is for testing.
    """
    # try to pull the version from git, or (perhaps) fall back on a
    # previously-saved version.
    try:
        proc = Popen(git_args, stdout=subprocess.PIPE)
    except OSError:
        raw_version = None
    else:
        raw_version = proc.communicate()[0].strip().decode()
        version_source = 'git'

    # git failed if the string is empty
    if not raw_version:
        if version_file is None:
            print('%r failed' % (git_args,))
            raise SystemExit(2)
        elif not os.path.exists(version_file):
            print("%r failed and %r isn't present." % (git_args, version_file))
            print("are you installing from a github tarball?")
            raise SystemExit(2)
        print("couldn't determine version from git; using %r" % version_file)
        with open(version_file, 'r') as infile:
            raw_version = infile.read()
        version_source = repr(version_file)

    # try to parse the version into something usable.
    try:
        tag_version, commits, sha = raw_version.rsplit('-', 2)
    except ValueError:
        print("%r (from %s) couldn't be parsed into a version" % (
            raw_version, version_source))
        raise SystemExit(2)

    if version_file is not None:
        with open(version_file, 'w') as outfile:
            outfile.write(raw_version)

    if commits == '0' or not include_dev_version:
        version = tag_version
    else:
        version = '%s.dev%s' % (tag_version, commits)

    for path in version_module_paths:
        with open(path, 'w') as outfile:
            outfile.write("""
# This file is automatically generated by setup.py.
__version__ = %s
__sha__ = %s
""" % (repr(version).lstrip('u'), repr(sha).lstrip('u')))

    return Version(version, commits, sha)
java
public static MozuUrl getApplicationUrl(String appId, String responseFields) { UrlFormatter formatter = new UrlFormatter("/api/platform/applications/{appId}?responseFields={responseFields}"); formatter.formatUrl("appId", appId); formatter.formatUrl("responseFields", responseFields); return new MozuUrl(formatter.getResourceUrl(), MozuUrl.UrlLocation.TENANT_POD) ; }
java
public List<Alternatives<Beans<T>>> getAllAlternatives() { List<Alternatives<Beans<T>>> list = new ArrayList<Alternatives<Beans<T>>>(); List<Node> nodeList = childNode.get("alternatives"); for(Node node: nodeList) { Alternatives<Beans<T>> type = new AlternativesImpl<Beans<T>>(this, "alternatives", childNode, node); list.add(type); } return list; }
python
def render_in_page(request, template): """return rendered template in standalone mode or ``False`` """ from leonardo.module.web.models import Page page = request.leonardo_page if hasattr( request, 'leonardo_page') else Page.objects.filter(parent=None).first() if page: try: slug = request.path_info.split("/")[-2:-1][0] except IndexError: # indexing an empty slice raises IndexError, not KeyError slug = None try: body = render_to_string(template, RequestContext(request, { 'request_path': request.path, 'feincms_page': page, 'slug': slug, 'standalone': True})) response = http.HttpResponseNotFound( body, content_type=CONTENT_TYPE) except TemplateDoesNotExist: response = False return response return False
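A hedged caller sketch: one way a 404 handler might use render_in_page, falling back to a plain response when it returns False. The template name and handler signature are assumptions, and `http` is the django.http module assumed to be imported in this file.
# Hypothetical caller; '404.html' is a placeholder template name.
def handler404(request, exception=None):
    response = render_in_page(request, '404.html')
    if response is False:
        response = http.HttpResponseNotFound('Not found')
    return response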
java
@Override public void execute(JobExecutionContext context) throws JobExecutionException { Map<String, String> originalContext = MDC.getCopyOfContextMap(); if (this.mdcContext != null) { MDC.setContextMap(this.mdcContext); } try { executeImpl(context); } finally { if (originalContext != null) { MDC.setContextMap(originalContext); } else { MDC.clear(); } } }
python
def calc_psd_variation(strain, psd_short_segment, psd_long_segment, short_psd_duration, short_psd_stride, psd_avg_method, low_freq, high_freq): """Calculates time series of PSD variability This function first splits the segment up into 512 second chunks. It then calculates the PSD over this 512 second period as well as in 4 second chunks throughout each 512 second period. Next the function estimates how different the 4 second PSD is to the 512 second PSD and produces a timeseries of this variability. Parameters ---------- strain : TimeSeries Input strain time series to estimate PSDs psd_short_segment : {float, 8} Duration of the short segments for PSD estimation in seconds. psd_long_segment : {float, 512} Duration of the long segments for PSD estimation in seconds. short_psd_duration : {float, 4} Duration of the segments for PSD estimation in seconds. short_psd_stride : {float, 2} Separation between PSD estimation segments in seconds. psd_avg_method : {string, 'median'} Method for averaging PSD estimation segments. low_freq : {float, 20} Minimum frequency to consider the comparison between PSDs. high_freq : {float, 480} Maximum frequency to consider the comparison between PSDs. Returns ------- psd_var : TimeSeries Time series of the variability in the PSD estimation """ # Determine the strain precision so derived arrays use a matching dtype if strain.precision == 'single': fs_dtype = numpy.float32 elif strain.precision == 'double': fs_dtype = numpy.float64 # Convert start and end times immediately to floats # (numpy.float is deprecated; the builtin float is equivalent here) start_time = float(strain.start_time) end_time = float(strain.end_time) # Find the times of the long segments times_long = numpy.arange(start_time, end_time, psd_long_segment) # Set up the empty time series for the PSD variation estimate psd_var = TimeSeries(zeros(int(numpy.ceil((end_time - start_time) / psd_short_segment))), delta_t=psd_short_segment, copy=False, epoch=start_time) ind = 0 for tlong in times_long: # Calculate PSD for long segment and separate the long segment into # overlapping shorter segments if tlong + psd_long_segment <= end_time: psd_long = pycbc.psd.welch( strain.time_slice(tlong, tlong + psd_long_segment), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) times_short = numpy.arange(tlong, tlong + psd_long_segment, psd_short_segment) else: psd_long = pycbc.psd.welch( strain.time_slice(end_time - psd_long_segment, end_time), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) times_short = numpy.arange(tlong, end_time, psd_short_segment) # Calculate the PSD of the shorter segments psd_short = [] for tshort in times_short: if tshort + psd_short_segment <= end_time: pshort = pycbc.psd.welch( strain.time_slice(tshort, tshort + psd_short_segment), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) else: pshort = pycbc.psd.welch( strain.time_slice(tshort - psd_short_segment, end_time), seg_len=int(short_psd_duration * strain.sample_rate), seg_stride=int(short_psd_stride * strain.sample_rate), avg_method=psd_avg_method) psd_short.append(pshort) # Estimate the range of the PSD to compare kmin = int(low_freq / psd_long.delta_f) kmax = int(high_freq / psd_long.delta_f) # Compare the PSD of the short segment to the long segment. # The weight factor gives the rough response of a cbc template across # the defined frequency range given the expected PSD (i.e. long PSD). # Then integrate the weighted ratio of the actual PSD (i.e. short PSD) # with the expected PSD (i.e. long PSD) over the specified frequency # range. freqs = FrequencySeries(psd_long.sample_frequencies, delta_f=psd_long.delta_f, epoch=psd_long.epoch, dtype=fs_dtype) weight = numpy.array( freqs[kmin:kmax]**(-7./3.) / psd_long[kmin:kmax]) weight /= weight.sum() diff = numpy.array([(weight * numpy.array(p_short[kmin:kmax] / psd_long[kmin:kmax])).sum() for p_short in psd_short]) # Store the variation values for i, val in enumerate(diff): psd_var[ind+i] = val ind = ind+len(diff) return psd_var
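A call sketch mirroring the docstring defaults; `strain` is assumed to be a pycbc TimeSeries of detector data prepared elsewhere.
# Hedged call sketch; `strain` is an assumed pycbc TimeSeries.
psd_var = calc_psd_variation(strain, psd_short_segment=8,
                             psd_long_segment=512, short_psd_duration=4,
                             short_psd_stride=2, psd_avg_method='median',
                             low_freq=20, high_freq=480)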
python
def set_format_options(fmt, format_options): """Apply the desired format options to the format description fmt""" if not format_options: return for opt in format_options: try: key, value = opt.split('=') except ValueError: raise ValueError("Format options are expected to be of the form key=value, not '{}'".format(opt)) if key not in _VALID_FORMAT_OPTIONS: raise ValueError("'{}' is not a valid format option. Expected one of '{}'" .format(key, "', '".join(_VALID_FORMAT_OPTIONS))) if key in _BINARY_FORMAT_OPTIONS: value = str2bool(value) fmt[key] = value
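A small illustration of the key=value parsing; the option name below is an assumption standing in for an entry of _VALID_FORMAT_OPTIONS and _BINARY_FORMAT_OPTIONS, which are defined elsewhere in the module.
# Illustrative only; 'comment_magics' is assumed to be a registered binary option.
fmt = {'extension': '.md'}
set_format_options(fmt, ['comment_magics=true'])
# fmt would then be {'extension': '.md', 'comment_magics': True}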
python
async def edit_caption(self, caption: base.String, parse_mode: typing.Union[base.String, None] = None, reply_markup=None): """ Use this method to edit captions of messages sent by the bot or via the bot (for inline bots). Source: https://core.telegram.org/bots/api#editmessagecaption :param caption: New caption of the message :type caption: :obj:`typing.Union[base.String, None]` :param parse_mode: Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message. :type parse_mode: :obj:`typing.Union[base.String, None]` :param reply_markup: A JSON-serialized object for an inline keyboard :type reply_markup: :obj:`typing.Union[types.InlineKeyboardMarkup, None]` :return: On success, if edited message is sent by the bot, the edited Message is returned, otherwise True is returned. :rtype: :obj:`typing.Union[types.Message, base.Boolean]` """ return await self.bot.edit_message_caption(chat_id=self.chat.id, message_id=self.message_id, caption=caption, parse_mode=parse_mode, reply_markup=reply_markup)
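A short handler sketch; `message` is assumed to be an aiogram Message previously sent by the bot, since only those can be edited this way.
# Hedged sketch: `message` must be a Message the bot itself sent.
async def refresh_caption(message):
    return await message.edit_caption('Updated caption', parse_mode='HTML')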
python
def listFileArray(self, **kwargs): """ API to list files in DBS. Non-wildcarded logical_file_name, non-wildcarded dataset, non-wildcarded block_name or a non-wildcarded lfn list is required. The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported. * For lumi_list the following two json formats are supported: - [a1, a2, a3,] - [[a,b], [c, d],] * lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed. * If lumi_list is provided, only run_num=single-run-number is allowed. * When run_num=1, one has to provide logical_file_name. * When an lfn list is present, no run or lumi list is allowed. :param logical_file_name: logical_file_name of the file, Max length 1000. :type logical_file_name: str, list :param dataset: dataset :type dataset: str :param block_name: block name :type block_name: str :param release_version: release version :type release_version: str :param pset_hash: parameter set hash :type pset_hash: str :param app_name: Name of the application :type app_name: str :param output_module_label: name of the used output module :type output_module_label: str :param run_num: run, run ranges, and run list, Max list length 1000. :type run_num: int, list, string :param origin_site_name: site where the file was created :type origin_site_name: str :param lumi_list: List containing luminosity sections, Max length 1000. :type lumi_list: list :param detail: Get detailed information about a file :type detail: bool :param validFileOnly: 0 or 1. default=0. Return only valid files if set to 1. :type validFileOnly: int :param sumOverLumi: 0 or 1. default=0. When sumOverLumi=1 and run_num is given, it will count the events by lumi; no list inputs are allowed when sumOverLumi=1. :type sumOverLumi: int :returns: List of dictionaries containing the following keys (logical_file_name). If the detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid) :rtype: list of dicts """ validParameters = ['dataset', 'block_name', 'logical_file_name', 'release_version', 'pset_hash', 'app_name', 'output_module_label', 'run_num', 'origin_site_name', 'lumi_list', 'detail', 'validFileOnly', 'sumOverLumi'] requiredParameters = {'multiple': ['dataset', 'block_name', 'logical_file_name']} # set defaults if 'detail' not in kwargs.keys(): kwargs['detail'] = False checkInputParameter(method="listFileArray", parameters=kwargs.keys(), validParameters=validParameters, requiredParameters=requiredParameters) # In order to protect the DB and make sure the query returns within 300 seconds, we limit the # length of logical file name, lumi and run num lists to 1000. These numbers may be adjusted # later if needed. YG May-20-2015. # CMS has all MC data with run_num=1. It is almost a full table scan if run_num=1 without an lfn, # so we require an lfn to be present when run_num=1. YG Jan 14, 2016 if 'logical_file_name' in kwargs.keys() and isinstance(kwargs['logical_file_name'], list)\ and len(kwargs['logical_file_name']) > 1: if 'run_num' in kwargs.keys() and isinstance(kwargs['run_num'], list) and len(kwargs['run_num']) > 1: raise dbsClientException('Invalid input', 'files API does not support two lists: run_num and lfn.') elif 'lumi_list' in kwargs.keys() and kwargs['lumi_list'] and len(kwargs['lumi_list']) > 1: raise dbsClientException('Invalid input', 'files API does not support two lists: lumi_list and lfn.') elif 'lumi_list' in kwargs.keys() and kwargs['lumi_list']: if 'run_num' not in kwargs.keys() or not kwargs['run_num'] or kwargs['run_num'] == -1: raise dbsClientException('Invalid input', 'When a lumi section is present, a single run is required.') else: if 'run_num' in kwargs.keys(): if isinstance(kwargs['run_num'], list): if 1 in kwargs['run_num'] or '1' in kwargs['run_num']: raise dbsClientException('Invalid input', 'files API does not support run_num=1 when no lumi.') else: if kwargs['run_num'] == 1 or kwargs['run_num'] == '1': raise dbsClientException('Invalid input', 'files API does not support run_num=1 when no lumi.') # check if no lfn is given, but run_num=1 is used for searching if ('logical_file_name' not in kwargs.keys() or not kwargs['logical_file_name']) and 'run_num' in kwargs.keys(): if isinstance(kwargs['run_num'], list): if 1 in kwargs['run_num'] or '1' in kwargs['run_num']: raise dbsClientException('Invalid input', 'files API does not support run_num=1 without logical_file_name.') else: if kwargs['run_num'] == 1 or kwargs['run_num'] == '1': raise dbsClientException('Invalid input', 'files API does not support run_num=1 without logical_file_name.') results = [] mykey = None total_lumi_len = 0 split_lumi_list = [] max_list_len = 1000 # this number is defined in the DBS server for key, value in kwargs.items(): if key == 'lumi_list' and isinstance(kwargs['lumi_list'], list)\ and kwargs['lumi_list'] and isinstance(kwargs['lumi_list'][0], list): lapp = 0 l = 0 sm = [] for i in kwargs['lumi_list']: while i[0]+max_list_len < i[1]: split_lumi_list.append([[i[0], i[0]+max_list_len-1]]) i[0] = i[0] + max_list_len else: l += (i[1]-i[0]+1) if l <= max_list_len: sm.append([i[0], i[1]]) lapp = l # number of lumis in sm else: split_lumi_list.append(sm) sm = [] sm.append([i[0], i[1]]) lapp = i[1]-i[0]+1 if sm: split_lumi_list.append(sm) elif key in ('logical_file_name', 'run_num', 'lumi_list') and isinstance(value, list) and len(value) > max_list_len: mykey = key # if mykey: sourcelist = kwargs[mykey][:] # copy the list so we can slice it safely for slice in slicedIterator(sourcelist, max_list_len): kwargs[mykey] = slice results.extend(self.__callServer("fileArray", data=kwargs, callmethod="POST")) elif split_lumi_list: for item in split_lumi_list: kwargs['lumi_list'] = item results.extend(self.__callServer("fileArray", data=kwargs, callmethod="POST")) else: return self.__callServer("fileArray", data=kwargs, callmethod="POST") # make sure there is only one dictionary per lfn. # Make sure this changes when we move to 2.7 or 3.0 # http://stackoverflow.com/questions/11092511/python-list-of-unique-dictionaries # YG May-26-2015 return dict((v['logical_file_name'], v) for v in results).values()
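A hedged usage sketch; the server URL and dataset name are placeholders, and DbsApi is assumed to be the client class exposing this method (as in the dbs-client package).
# Hypothetical call site; URL and dataset are placeholders.
from dbs.apis.dbsClient import DbsApi  # assumed entry point

api = DbsApi(url='https://cmsweb.example.invalid/dbs/prod/global/DBSReader')
files = api.listFileArray(dataset='/Primary/Processed-v1/TIER', detail=True)
for f in files:
    print(f['logical_file_name'], f['file_size'])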
java
@Override public void exceptionCaught(IoSession session, Throwable cause) { final IoSessionInputStream in = (IoSessionInputStream) session .getAttribute(KEY_IN); IOException e = null; if (cause instanceof StreamIoException) { e = (IOException) cause.getCause(); } else if (cause instanceof IOException) { e = (IOException) cause; } if (e != null && in != null) { in.throwException(e); } else { LOGGER.warn("Unexpected exception.", cause); session.close(true); } }
python
def parse_intervals(path, as_context=False): """ Parse path strings into a collection of Intervals. `path` is a string describing a region in a file. Its format is dotted.module.name:[line | start-stop | context] `dotted.module.name` is a python module `line` is a single line number in the module (1-offset) `start-stop` is a right-open interval of line numbers `context` is a '.' delimited, nested name of a class or function. For example FooClass.method_a.inner_method identifies the innermost function in code like class FooClass: def method_a(self): def inner_method(): pass Parameters ---------- path : str Region description (see above) as_context : bool (optional, default=False) If `True`, return `ContextInterval`s instead of `LineInterval`s. If `path` provides a line number or range, the result will include all contexts that intersect this line range. Returns ------- list of `Interval`s """ def _regions_from_range(): if as_context: ctxs = list(set(pf.lines[start - 1: stop - 1])) return [ ContextInterval(filename, ctx) for ctx in ctxs ] else: return [LineInterval(filename, start, stop)] if ':' in path: path, subpath = path.split(':') else: subpath = '' pf = PythonFile.from_modulename(path) filename = pf.filename rng = NUMBER_RE.match(subpath) if rng: # specified a line or line range start, stop = map(int, rng.groups(0)) stop = stop or start + 1 return _regions_from_range() elif not subpath: # asked for entire module if as_context: return [ContextInterval(filename, pf.prefix)] start, stop = 1, pf.line_count + 1 return _regions_from_range() else: # specified a context name context = pf.prefix + ':' + subpath if context not in pf.lines: raise ValueError("%s is not a valid context for %s" % (context, pf.prefix)) if as_context: return [ContextInterval(filename, context)] else: start, stop = pf.context_range(context) return [LineInterval(filename, start, stop)]
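Illustrative calls covering the three subpath forms from the docstring; the module and context names are placeholders.
# 'pkg.mod' and the context name below are placeholders.
parse_intervals('pkg.mod:10')        # single line 10
parse_intervals('pkg.mod:10-20')     # right-open range [10, 20)
parse_intervals('pkg.mod:FooClass.method_a', as_context=True)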
python
def get_redis_connection(): """ Get the redis connection if not using mock """ if config.MOCK_REDIS: # pragma: no cover import mockredis return mockredis.mock_strict_redis_client() # pragma: no cover elif config.DEFENDER_REDIS_NAME: # pragma: no cover try: cache = caches[config.DEFENDER_REDIS_NAME] except InvalidCacheBackendError: raise KeyError(INVALID_CACHE_ERROR_MSG.format( config.DEFENDER_REDIS_NAME)) # every redis backend implements its own way to get the low-level client try: # redis_cache.RedisCache case (django-redis-cache package) return cache.get_master_client() except AttributeError: # django_redis.cache.RedisCache case (django-redis package) return cache.client.get_client(True) else: # pragma: no cover redis_config = parse_redis_url(config.DEFENDER_REDIS_URL) return redis.StrictRedis( host=redis_config.get('HOST'), port=redis_config.get('PORT'), db=redis_config.get('DB'), password=redis_config.get('PASSWORD'), ssl=redis_config.get('SSL'))
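A hedged call-site sketch; the key name is a placeholder, and any StrictRedis-style command should work on the returned client.
# Placeholder key; increments a counter on whichever backend was configured.
redis_client = get_redis_connection()
redis_client.incr('defender:failed-logins:198.51.100.7')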
java
public List<Integer> get(String stream, List<Object> tuple, Collection<Tuple> anchors, Object rootId) { List<Integer> outTasks = new ArrayList<>(); // get the groupers for this stream, then determine which tasks the tuple should be sent to Map<String, MkGrouper> componentGrouping = streamComponentGrouper.get(stream); if (componentGrouping == null) { // if the target component's parallelism is 0, there is no one to // send to LOG.debug("Failed to get Grouper of " + stream + " when " + debugIdStr); return outTasks; } for (Entry<String, MkGrouper> ee : componentGrouping.entrySet()) { String targetComponent = ee.getKey(); MkGrouper g = ee.getValue(); if (GrouperType.direct.equals(g.gettype())) { throw new IllegalArgumentException("Cannot do regular emit to direct stream"); } outTasks.addAll(g.grouper(tuple)); } if (isDebug(anchors, rootId)) { LOG.info(debugIdStr + stream + " to " + outTasks + ":" + tuple.toString()); } int num_out_tasks = outTasks.size(); taskStats.send_tuple(stream, num_out_tasks); return outTasks; }
python
def loop(self, timeout=1.0, max_packets=1): """Process network events. This function must be called regularly to ensure communication with the broker is carried out. It calls select() on the network socket to wait for network events. If incoming data is present it will then be processed. Outgoing commands, from e.g. publish(), are normally sent immediately that their function is called, but this is not always possible. loop() will also attempt to send any remaining outgoing messages, which also includes commands that are part of the flow for messages with QoS>0. timeout: The time in seconds to wait for incoming/outgoing network traffic before timing out and returning. max_packets: Not currently used. Returns MQTT_ERR_SUCCESS on success. Returns >0 on error. A ValueError will be raised if timeout < 0""" if timeout < 0.0: raise ValueError('Invalid timeout.') self._current_out_packet_mutex.acquire() self._out_packet_mutex.acquire() if self._current_out_packet is None and len(self._out_packet) > 0: self._current_out_packet = self._out_packet.pop(0) if self._current_out_packet: wlist = [self.socket()] else: wlist = [] self._out_packet_mutex.release() self._current_out_packet_mutex.release() # sockpairR is used to break out of select() before the timeout, on a # call to publish() etc. rlist = [self.socket(), self._sockpairR] try: socklist = select.select(rlist, wlist, [], timeout) except TypeError: # Socket isn't the correct type; in all likelihood the connection # has been lost return MQTT_ERR_CONN_LOST except ValueError: # Can occur if we just reconnected but rlist/wlist contain a -1 for # some reason. return MQTT_ERR_CONN_LOST except Exception: return MQTT_ERR_UNKNOWN if self.socket() in socklist[0]: rc = self.loop_read(max_packets) if rc or (self._ssl is None and self._sock is None): return rc if self._sockpairR in socklist[0]: # Stimulate output write even though we didn't ask for it, because # at that point the publish or other command wasn't present. socklist[1].insert(0, self.socket()) # Clear sockpairR - only ever a single byte written. try: self._sockpairR.recv(1) except socket.error as err: if err.errno != EAGAIN: raise if self.socket() in socklist[1]: rc = self.loop_write(max_packets) if rc or (self._ssl is None and self._sock is None): return rc return self.loop_misc()
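The classic polling pattern around loop(); `client` is assumed to be a connected instance of the class defining this method, with MQTT_ERR_SUCCESS importable from the same module.
# Hedged sketch: keep servicing the network until loop() reports an error.
rc = MQTT_ERR_SUCCESS
while rc == MQTT_ERR_SUCCESS:
    rc = client.loop(timeout=1.0)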
java
public VirtualResourcePoolSpec getVRPSettings(String vrpId) throws InvalidState, NotFound, RuntimeFault, RemoteException { return getVimService().getVRPSettings(getMOR(), vrpId); }