language: stringclasses (2 values)
func_code_string: stringlengths (63 to 466k)
python
def compress(pdf_in, pdf_output):
    """
    On macOS, install Ghostscript first: brew install ghostscript
    :param pdf_in: path of the input PDF
    :param pdf_output: path of the compressed output PDF
    :return:
    """
    cmd = 'gs -dNOPAUSE -dBATCH -sDEVICE=pdfwrite -dCompatibilityLevel=1.4 -dPDFSETTINGS=/screen -sOutputFile=%s %s'
    cmd = cmd % (pdf_output, pdf_in)
    os.system(cmd)
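A minimal usage sketch, assuming the `gs` binary is on the PATH and `os` has been imported; both file names are hypothetical:

    compress('input.pdf', 'output_small.pdf')  # writes a /screen-quality copy of input.pdf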
java
public void addForwardedField(int sourceField, FieldSet destinationFields) {
    FieldSet fs;
    if ((fs = this.forwardedFields.get(sourceField)) != null) {
        fs.addAll(destinationFields);
    } else {
        fs = new FieldSet(destinationFields);
        this.forwardedFields.put(sourceField, fs);
    }
}
python
def event_detail(self, event_detail):
    """
    :param event_detail: the event detail, either a string or a dict
    :return:
    """
    if event_detail is not None:
        if isinstance(event_detail, str):
            self._event_detail = event_detail
        else:
            try:
                for key, val in event_detail.items():
                    if isinstance(val, date):
                        event_detail[key] = str(val)
                self._event_detail = json.dumps(event_detail)
            except TypeError:
                raise TypeError('The passed-in event detail object is not JSON serializable. Please check the format.')
java
protected void handleError(final AccountState.Error error,
                           final AuthenticationResponse response,
                           final PasswordPolicyConfiguration configuration,
                           final List<MessageDescriptor> messages) throws LoginException {
    LOGGER.debug("Handling LDAP account state error [{}]", error);
    if (errorMap.containsKey(error)) {
        throw errorMap.get(error);
    }
    LOGGER.debug("No LDAP error mapping defined for [{}]", error);
}
python
def get_related(self):
    """Check if any of the context interfaces have relation ids.
    Set self.related and return True if one of the interfaces
    has relation ids.
    """
    # Fresh start
    self.related = False
    try:
        for interface in self.interfaces:
            if relation_ids(interface):
                self.related = True
        return self.related
    except AttributeError as e:
        log("{} {}"
            "".format(self, e), 'INFO')
        return self.related
java
public static String dropWhile(GString self, @ClosureParams(value = SimpleType.class, options = "char") Closure condition) {
    return dropWhile(self.toString(), condition).toString();
}
java
public static lbvserver_stats get(nitro_service service, String name) throws Exception {
    lbvserver_stats obj = new lbvserver_stats();
    obj.set_name(name);
    lbvserver_stats response = (lbvserver_stats) obj.stat_resource(service);
    return response;
}
python
def compile(self, s):
    """
    Compile the given script. Returns a bytes object with the compiled script.
    """
    f = io.BytesIO()
    for t in s.split():
        t_up = t.upper()
        if t_up in self.opcode_to_int:
            # look up with the same (upper-cased) key used in the membership test
            f.write(int2byte(self.opcode_to_int[t_up]))
        elif ("OP_%s" % t_up) in self.opcode_to_int:
            f.write(int2byte(self.opcode_to_int["OP_%s" % t_up]))
        elif t_up.startswith("0X"):
            d = binascii.unhexlify(t[2:])
            f.write(d)
        else:
            v = self.compile_expression(t)
            self.write_push_data([v], f)
    return f.getvalue()
java
@Override
public VType put(KType key, VType value) {
    assert assigned < mask + 1;
    final int mask = this.mask;
    if (Intrinsics.<KType> isEmpty(key)) {
        hasEmptyKey = true;
        VType previousValue = Intrinsics.<VType> cast(values[mask + 1]);
        values[mask + 1] = value;
        return previousValue;
    } else {
        final KType[] keys = Intrinsics.<KType[]> cast(this.keys);
        int slot = hashKey(key) & mask;
        KType existing;
        while (!Intrinsics.<KType> isEmpty(existing = keys[slot])) {
            if (Intrinsics.<KType> equals(this, key, existing)) {
                final VType previousValue = Intrinsics.<VType> cast(values[slot]);
                values[slot] = value;
                return previousValue;
            }
            slot = (slot + 1) & mask;
        }
        if (assigned == resizeAt) {
            allocateThenInsertThenRehash(slot, key, value);
        } else {
            keys[slot] = key;
            values[slot] = value;
        }
        assigned++;
        return Intrinsics.<VType> empty();
    }
}
python
def _set_body(self, body):
    """Set the main body for this control flow structure."""
    assert isinstance(body, CodeStatement)
    if isinstance(body, CodeBlock):
        self.body = body
    else:
        self.body._add(body)
java
public UriTemplateBuilder template(String... template) {
    UriTemplateParser parser = new UriTemplateParser();
    for (String t : template) {
        addComponents(parser.scan(t));
    }
    return this;
}
python
def handle_starting_instance(self):
    """Starting up PostgreSQL may take a long time. If we hold the leader
    lock in the meantime, it may be better to fail over to a replica."""

    # Check if we are in startup, when paused defer to main loop for manual failovers.
    if not self.state_handler.check_for_startup() or self.is_paused():
        self.set_start_timeout(None)
        if self.is_paused():
            self.state_handler.set_state(self.state_handler.is_running() and 'running' or 'stopped')
        return None

    # state_handler.state == 'starting' here
    if self.has_lock():
        if not self.update_lock():
            logger.info("Lost lock while starting up. Demoting self.")
            self.demote('immediate-nolock')
            return 'stopped PostgreSQL while starting up because leader key was lost'

        timeout = self._start_timeout or self.patroni.config['master_start_timeout']
        time_left = timeout - self.state_handler.time_in_state()

        if time_left <= 0:
            if self.is_failover_possible(self.cluster.members):
                logger.info("Demoting self because master startup is taking too long")
                self.demote('immediate')
                return 'stopped PostgreSQL because of startup timeout'
            else:
                return 'master start has timed out, but continuing to wait because failover is not possible'
        else:
            msg = self.process_manual_failover_from_leader()
            if msg is not None:
                return msg
            return 'PostgreSQL is still starting up, {0:.0f} seconds until timeout'.format(time_left)
    else:
        # Use normal processing for standbys
        logger.info("Still starting up as a standby.")
        return None
python
def _batch_norm(name, x):
    """Batch normalization."""
    with tf.name_scope(name):
        return tf.contrib.layers.batch_norm(
            inputs=x,
            decay=.9,
            center=True,
            scale=True,
            activation_fn=None,
            updates_collections=None,
            is_training=False)
java
@Nullable
public static String getUntilLastIncl (@Nullable final String sStr, final char cSearch) {
    return _getUntilLast (sStr, cSearch, true);
}
java
public static br add(nitro_service client, br resource) throws Exception {
    resource.validate("add");
    return ((br[]) resource.perform_operation(client, "add"))[0];
}
python
def html_email(email, title=None):
    """
    >>> html_email('[email protected]')
    '<a href="mailto:[email protected]">[email protected]</a>'
    """
    if not title:
        title = email
    return '<a href="mailto:{email}">{title}</a>'.format(email=email, title=title)
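A hypothetical call with an explicit title, in the same doctest style (the address is a placeholder):

    >>> html_email('[email protected]', 'Contact us')
    '<a href="mailto:[email protected]">Contact us</a>'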
java
public String getEffectiveListName() throws NoResponseException, XMPPErrorException, NotConnectedException, InterruptedException {
    String activeListName = getActiveListName();
    if (activeListName != null) {
        return activeListName;
    }
    return getDefaultListName();
}
java
@Override
public void setValueAt(final Object value, final List<Integer> row, final int col) {
    getBacking().setValueAt(value, getRowIndex(row), col);
}
python
def service_create(image=str, name=str, command=str, hostname=str, replicas=int, target_port=int, published_port=int):
    '''
    Create Docker Swarm Service

    image
        The docker image name
    name
        Is the service name
    command
        The docker command to run in the container at launch
    hostname
        The hostname of the containers
    replicas
        How many replicas you want running in the swarm
    target_port
        The target port on the container
    published_port
        The port that's published on the host/OS

    CLI Example:

    .. code-block:: bash

        salt '*' swarm.service_create image=httpd name=Test_Service \
            command=None hostname=salthttpd replicas=6 target_port=80 published_port=80
    '''
    try:
        salt_return = {}
        replica_mode = docker.types.ServiceMode('replicated', replicas=replicas)
        ports = docker.types.EndpointSpec(ports={target_port: published_port})
        __context__['client'].services.create(name=name,
                                              image=image,
                                              command=command,
                                              mode=replica_mode,
                                              endpoint_spec=ports)
        echoback = __context__['server_name'] + ' has a Docker Swarm Service running named ' + name
        salt_return.update({'Info': echoback,
                            'Minion': __context__['server_name'],
                            'Name': name,
                            'Image': image,
                            'Command': command,
                            'Hostname': hostname,
                            'Replicas': replicas,
                            'Target_Port': target_port,
                            'Published_Port': published_port})
    except TypeError:
        salt_return = {}
        salt_return.update({'Error': 'Please make sure you are passing arguments correctly '
                                     '[image, name, command, hostname, replicas, target_port and published_port]'})
    return salt_return
java
public String getContent() {
    String content = "";
    NodeList list = dom.getChildNodes();
    for (int i = 0; i < list.getLength(); i++) {
        if (list.item(i) instanceof Text) {
            content += (list.item(i).getNodeValue());
        }
    }
    return content;
}
python
def json_api_call(req_function):
    """ Wrap a view-like function that returns an object that
        is convertible to json
    """
    @wraps(req_function)
    def newreq(request, *args, **kwargs):
        outp = req_function(request, *args, **kwargs)
        if issubclass(outp.__class__, HttpResponse):
            return outp
        else:
            return '%s' % json.dumps(outp, cls=LazyEncoder)
    return string_to_response("application/json")(newreq)
java
public synchronized OServerAdmin createDatabase(final String iDatabaseType, String iStorageMode) throws IOException {
    storage.checkConnection();
    try {
        if (storage.getName() == null || storage.getName().length() <= 0) {
            OLogManager.instance().error(this, "Cannot create unnamed remote storage. Check your syntax", OStorageException.class);
        } else {
            if (iStorageMode == null)
                iStorageMode = "csv";
            final OChannelBinaryClient network = storage.beginRequest(OChannelBinaryProtocol.REQUEST_DB_CREATE);
            try {
                network.writeString(storage.getName());
                if (network.getSrvProtocolVersion() >= 8)
                    network.writeString(iDatabaseType);
                network.writeString(iStorageMode);
            } finally {
                storage.endRequest(network);
            }
            storage.getResponse(network);
        }
    } catch (Exception e) {
        OLogManager.instance().error(this, "Cannot create the remote storage: " + storage.getName(), e, OStorageException.class);
        storage.close(true);
    }
    return this;
}
python
def generate_nodes(tpm, cm, network_state, indices, node_labels=None):
    """Generate |Node| objects for a subsystem.

    Args:
        tpm (np.ndarray): The system's TPM
        cm (np.ndarray): The corresponding CM.
        network_state (tuple): The state of the network.
        indices (tuple[int]): Indices to generate nodes for.

    Keyword Args:
        node_labels (|NodeLabels|): Textual labels for each node.

    Returns:
        tuple[Node]: The nodes of the system.
    """
    if node_labels is None:
        node_labels = NodeLabels(None, indices)
    node_state = utils.state_of(indices, network_state)
    return tuple(Node(tpm, cm, index, state, node_labels)
                 for index, state in zip(indices, node_state))
java
String getProcErrorOutput(Process proc) throws IOException {
    StringBuffer output = new StringBuffer();
    InputStream procIn = proc.getInputStream();
    int read;
    // Dump the data printed by the process
    do {
        byte[] buffer = new byte[BUFFER_SIZE];
        read = procIn.read(buffer);
        if (read > 0) {
            // Only convert the bytes actually read, not the whole buffer
            output.append(new String(buffer, 0, read));
        }
    } while (read == BUFFER_SIZE);
    return output.toString();
}
python
def getServiceNamesToTraceIds(self, time_stamp, service_name, rpc_name):
    """
    Given a time stamp, server service name, and rpc name, fetch all of the
    client services calling in, each paired with the list of trace ids
    (list<i64>) from the server to that client.

    The three arguments specify epoch time in microseconds, server side
    service name and rpc name. The returned map contains the key -
    client_service_name and value - list<trace_id>.

    Parameters:
     - time_stamp
     - service_name
     - rpc_name
    """
    self.send_getServiceNamesToTraceIds(time_stamp, service_name, rpc_name)
    return self.recv_getServiceNamesToTraceIds()
java
private void findNextExpireTime() {
    if (objects.size() == 0) {
        nextTimeSomeExpired = NO_OBJECTS;
    } else {
        nextTimeSomeExpired = NO_OBJECTS;
        Collection<Long> longs = null;
        synchronized (objects) {
            longs = new ArrayList(objectTimeStamps.values());
        }
        for (Iterator<Long> iterator = longs.iterator(); iterator.hasNext(); ) {
            Long next = iterator.next() + timeToLive;
            if (nextTimeSomeExpired == NO_OBJECTS || next < nextTimeSomeExpired) {
                nextTimeSomeExpired = next;
            }
        }
    }
}
java
public void finish() throws CmsException {
    m_finished = true;
    m_requiresCleanup = false;
    CmsProject project = getProject();
    CmsObject projectCms = OpenCms.initCmsObject(m_adminCms);
    projectCms.getRequestContext().setCurrentProject(project);
    if (m_configuration.isAutoPublish()) {
        // we don't necessarily publish with the user who has the locks on the resources,
        // so we need to steal the locks
        List<CmsResource> projectResources = projectCms.readProjectView(project.getUuid(), CmsResource.STATE_KEEP);
        for (CmsResource projectResource : projectResources) {
            CmsLock lock = projectCms.getLock(projectResource);
            if (!lock.isUnlocked() && !lock.isLockableBy(projectCms.getRequestContext().getCurrentUser())) {
                projectCms.changeLock(projectResource);
            }
        }
        OpenCms.getPublishManager().publishProject(
            projectCms,
            new CmsLogReport(Locale.ENGLISH, CmsUgcSession.class));
    } else {
        // try to unlock everything - we don't need this in case of auto-publish,
        // since publishing already unlocks the resources
        projectCms.unlockProject(project.getUuid());
    }
}
java
@Override
public MatchResult matches(List<String> lines) {
    MatchResult result = NO_MATCH;
    for (int i = 0; i < lines.size(); i++) {
        String line = lines.get(i);
        if (line.contains("<PC-Substance") && result == NO_MATCH)
            result = new MatchResult(true, this, i);
        if (line.contains("<PC-Substances"))
            return NO_MATCH;
    }
    return result;
}
python
def read_metadata(self, key):
    """ return the meta data array for this key """
    if getattr(getattr(self.group, 'meta', None), key, None) is not None:
        return self.parent.select(self._get_metadata_path(key))
    return None
java
public static CmsSelectWidgetOption getWidgetOptionForType(CmsObject cms, String typeName) {
    String niceTypeName = typeName;
    try {
        Locale locale = OpenCms.getWorkplaceManager().getWorkplaceLocale(cms);
        niceTypeName = CmsWorkplaceMessages.getResourceTypeName(locale, typeName);
    } catch (@SuppressWarnings("unused") Exception e) {
        // resource type name will be used as a fallback
    }
    CmsSelectWidgetOption option = new CmsSelectWidgetOption(
        CmsFormatterChangeSet.keyForType(typeName),
        false,
        getMessage(cms, Messages.GUI_SCHEMA_FORMATTER_OPTION_1, niceTypeName));
    return option;
}
python
def get_all_dataset_names(configuration=None, **kwargs):
    # type: (Optional[Configuration], Any) -> List[str]
    """Get all dataset names in HDX

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        **kwargs: See below
        limit (int): Number of rows to return. Defaults to all dataset names.
        offset (int): Offset in the complete result for where the set of returned dataset names should begin

    Returns:
        List[str]: list of all dataset names in HDX
    """
    dataset = Dataset(configuration=configuration)
    dataset['id'] = 'all dataset names'  # only for error message if produced
    return dataset._write_to_hdx('list', kwargs, 'id')
python
def get(self, filename):
    """
    Check if a distribution archive exists in the local cache.

    :param filename: The filename of the distribution archive (a string).
    :returns: The pathname of a distribution archive on the local file
              system or :data:`None`.
    """
    pathname = os.path.join(self.config.binary_cache, filename)
    if os.path.isfile(pathname):
        logger.debug("Distribution archive exists in local cache (%s).", pathname)
        return pathname
    else:
        logger.debug("Distribution archive doesn't exist in local cache (%s).", pathname)
        return None
java
protected void estimateCase1(double betas[]) {
    betas[0] = matchScale(nullPts[0], controlWorldPts);
    betas[0] = adjustBetaSign(betas[0], nullPts[0]);
    betas[1] = 0;
    betas[2] = 0;
    betas[3] = 0;
}
java
public static cacheobject[] get(nitro_service service, cacheobject_args args) throws Exception {
    cacheobject obj = new cacheobject();
    options option = new options();
    option.set_args(nitro_util.object_to_string_withoutquotes(args));
    cacheobject[] response = (cacheobject[]) obj.get_resources(service, option);
    return response;
}
java
protected void setupForbiddenEndElements() {
    forbiddenIdEndElements.put(THREAD_SUBJECT, THREAD_SUBJECT);
    forbiddenIdEndElements.put(THREAD_PARENT, THREAD_PARENT);
    forbiddenIdEndElements.put(THREAD_ANCESTOR, THREAD_ANCESTOR);
    forbiddenIdEndElements.put(THREAD_PAGE, THREAD_PAGE);
    forbiddenIdEndElements.put(THREAD_ID, THREAD_ID);
    forbiddenIdEndElements.put(THREAD_SUMMARY_PAGE, THREAD_SUMMARY_PAGE);
    forbiddenIdEndElements.put(THREAD_AUTHOR, THREAD_AUTHOR);
    forbiddenIdEndElements.put(THREAD_EDIT_STATUS, THREAD_EDIT_STATUS);
    forbiddenIdEndElements.put(THREAD_TYPE, THREAD_TYPE);
    forbiddenIdEndElements.put(COMMENT, COMMENT);
    forbiddenIdEndElements.put(CONTRIBUTOR, CONTRIBUTOR);
    forbiddenIdEndElements.put(ID, ID);
    forbiddenIdEndElements.put(IP, IP);
    forbiddenIdEndElements.put(MINOR, MINOR);
    forbiddenIdEndElements.put(PAGE, PAGE);
    forbiddenIdEndElements.put(RESTRICTIONS, RESTRICTIONS);
    forbiddenIdEndElements.put(REVISION, REVISION);
    forbiddenIdEndElements.put(TEXT, TEXT);
    forbiddenIdEndElements.put(TIMESTAMP, TIMESTAMP);
    forbiddenIdEndElements.put(TITLE, TITLE);
    forbiddenIdEndElements.put(USERNAME, USERNAME);
}
java
public static void move(final Object ar, final int pos, final int off, final int l) {
    System.arraycopy(ar, pos, ar, pos + off, l);
}
python
def set_backlight(self, backlight):
    """Enable or disable the backlight. If PWM is not enabled (default), a
    non-zero backlight value will turn on the backlight and a zero value
    will turn it off. If PWM is enabled, backlight can be any value from
    0.0 to 1.0, with 1.0 being full intensity backlight.
    """
    if self._backlight is not None:
        if self._pwm_enabled:
            self._backlight.pwm_start(self._pwm_duty_cycle(backlight))
        else:
            self._backlight.set(self._blpol if backlight else not self._blpol)
python
def _get_calculated_size(self, size, data):
    """
    Gets the final size of the field and runs the lambda functions
    recursively until a final size is derived. If size is None then it will
    just return the length of the data as it is assumed it is the final
    field (None should only be set on size for the final field).

    :param size: The size to calculate/expand
    :param data: The data that the size is being calculated for
    :return: The final size
    """
    # if the size is derived from a lambda function, run it now; otherwise
    # return the value we passed in or the length of the data if the size
    # is None (last field value)
    if size is None:
        return len(data)
    elif isinstance(size, types.LambdaType):
        expanded_size = size(self.structure)
        return self._get_calculated_size(expanded_size, data)
    else:
        return size
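A minimal standalone sketch of the same recursive size resolution, stripped of the class plumbing; `resolve_size` and its dummy `structure` argument are hypothetical stand-ins, not part of the original API:

    import types

    def resolve_size(size, data, structure=None):
        # None means "last field": its size is simply the data length
        if size is None:
            return len(data)
        # a lambda is evaluated (possibly yielding another lambda) and resolved again
        if isinstance(size, types.LambdaType):
            return resolve_size(size(structure), data, structure)
        return size

    assert resolve_size(None, b'abcd') == 4         # last field: size of the data
    assert resolve_size(lambda s: 2, b'abcd') == 2  # lambda resolved recursively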
java
public static <T> Predicate<T> getEquals(T target2) {
    return target -> target.equals(target2);
}
python
def hdel(self, hashkey, *keys):
    """Emulate hdel"""
    redis_hash = self._get_hash(hashkey, 'HDEL')
    count = 0
    for key in keys:
        attribute = self._encode(key)
        if attribute in redis_hash:
            count += 1
            del redis_hash[attribute]
    if not redis_hash:
        self.delete(hashkey)
    return count
java
public PrivateZoneInner createOrUpdate(String resourceGroupName, String privateZoneName, PrivateZoneInner parameters) {
    return createOrUpdateWithServiceResponseAsync(resourceGroupName, privateZoneName, parameters).toBlocking().last().body();
}
python
def run(path, timer=False, repeat=3, number=10000, precision=2):
    """
    Extracts and runs the '@cyther' code from the given file 'path' name
    """
    code = extractAtCyther(path)
    if not code:
        output = "There was no '@cyther' code collected from the " \
                 "file '{}'\n".format(path)
        # TODO This should use a result, right?
        return {'returncode': 0, 'output': output}

    module_directory = os.path.dirname(path)
    module_name = os.path.splitext(os.path.basename(path))[0]
    setup_string = SETUP_TEMPLATE.format(module_directory, module_name, '{}')

    if timer:
        string = TIMER_TEMPLATE.format(setup_string, code, repeat,
                                       number, precision, '{}')
    else:
        string = setup_string + code

    script = os.path.join(os.path.dirname(__file__), 'script.py')
    with open(script, 'w+') as file:
        file.write(string)

    response = call(['python', script])
    return response
python
def _build_url(self, shorten=True):
    """Build the url for a cable ratings page"""
    self.url = URL_FORMAT.format(*self._get_url_params(shorten=shorten))
java
public UOWControl getUOWControl(UserTransaction userTx) //LIDB4171-35.03 F84120
{
    final boolean entryEnabled = TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled();
    if (entryEnabled) {
        Tr.entry(tc, "getUOWControl");
    }

    UOWControl uowCtrl = new TransactionControlImpl(userTx); //LIDB4171-35.03 F84120

    if (entryEnabled) {
        Tr.exit(tc, "getUOWControl");
    }
    return uowCtrl;
}
python
def check_password(password, encoded, setter=None, preferred='default'):
    """
    Return a boolean of whether the raw password matches the three part
    encoded digest.

    If setter is specified, it'll be called when you need to regenerate the
    password.
    """
    if password is None:
        return False

    preferred = bCryptPasswordHasher
    hasher = bCryptPasswordHasher
    hasher_changed = hasher.algorithm != preferred.algorithm
    must_update = hasher_changed or preferred.must_update(encoded)
    is_correct = hasher.verify(password, encoded)

    # If the hasher didn't change (we don't protect against enumeration if it
    # does) and the password should get updated, try to close the timing gap
    # between the work factor of the current encoded password and the default
    # work factor.
    if not is_correct and not hasher_changed and must_update:
        hasher.harden_runtime(password, encoded)

    if setter and is_correct and must_update:
        setter(password)
    return is_correct
python
def cmd_wp_changealt(self, args):
    '''handle wp change target alt of multiple waypoints'''
    if len(args) < 2:
        print("usage: wp changealt WPNUM NEWALT <NUMWP>")
        return
    idx = int(args[0])
    if idx < 1 or idx > self.wploader.count():
        print("Invalid wp number %u" % idx)
        return
    newalt = float(args[1])
    if len(args) >= 3:
        count = int(args[2])
    else:
        count = 1

    for wpnum in range(idx, idx + count):
        wp = self.wploader.wp(wpnum)
        if not self.wploader.is_location_command(wp.command):
            continue
        wp.z = newalt
        wp.target_system = self.target_system
        wp.target_component = self.target_component
        self.wploader.set(wp, wpnum)

    self.loading_waypoints = True
    self.loading_waypoint_lasttime = time.time()
    self.master.mav.mission_write_partial_list_send(self.target_system,
                                                    self.target_component,
                                                    idx, idx + count)
    print("Changed alt for WPs %u:%u to %f" % (idx, idx + (count - 1), newalt))
python
def _generate_property_iv(entity_iv, pk, rk, property_name, isJavaV1):
    '''
    Uses the entity_iv, partition key, and row key to generate and return
    the iv for the specified property.
    '''
    digest = Hash(SHA256(), default_backend())
    if not isJavaV1:
        digest.update(entity_iv +
                      (rk + pk + property_name).encode('utf-8'))
    else:
        digest.update(entity_iv +
                      (pk + rk + property_name).encode('utf-8'))
    propertyIV = digest.finalize()
    return propertyIV[:16]
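The same derivation can be reproduced with the standard library's hashlib instead of the cryptography package; the IV and key values below are made up for illustration:

    import hashlib

    entity_iv = b'\x00' * 16                      # hypothetical 16-byte entity IV
    pk, rk, property_name = 'pk1', 'rk1', 'Prop'  # hypothetical keys and property
    digest = hashlib.sha256(entity_iv + (rk + pk + property_name).encode('utf-8')).digest()
    property_iv = digest[:16]                     # the function keeps the first 16 bytes
    assert len(property_iv) == 16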
java
private static void addSuggestion(List<LocationSuggestion> suggestions, String address) {
    suggestions.add(new LocationSuggestion(address));
}
python
def get_end_start_epochs(year, month, day, direction, unit, count):
    """ Gets epoch from a start date and epoch from a shifted date

    Args:
        year: Int between 1 and 9999.
        month: Int between 1 and 12.
        day: Int between 1 and 31.
        direction: String to shift time forwards or backwards.
            Valid values: 'last', 'next'.
        unit: String of time period unit for count argument.
            How far back to check historical market data.
            Valid values: 'hour', 'day', 'week', 'month', 'year'.
        count: Int of units.
            How far back to check historical market data?

    Returns:
        Dict of int epochs in UTC with keys 'initial' and 'shifted'
    """
    if year or month or day:  # Date is specified
        if not year:
            year = 2017
        if not month:
            month = 1
        if not day:
            day = 1
        initial_delorean = date_to_delorean(year, month, day)
    else:  # Date is not specified, get current date
        count += 1  # Get another date because market is still open
        initial_delorean = now_delorean()

    initial_epoch = int(initial_delorean.epoch)
    shifted_epoch = shift_epoch(initial_delorean, direction, unit, count)
    return {'initial': initial_epoch, 'shifted': shifted_epoch}
python
def ufo2glyphs(options):
    """Convert one designspace file or one or more UFOs to a Glyphs.app source file."""
    import fontTools.designspaceLib
    import defcon

    sources = options.designspace_file_or_UFOs
    designspace_file = None
    if (
        len(sources) == 1
        and sources[0].endswith(".designspace")
        and os.path.isfile(sources[0])
    ):
        designspace_file = sources[0]
        designspace = fontTools.designspaceLib.DesignSpaceDocument()
        designspace.read(designspace_file)
        object_to_read = designspace
    elif all(source.endswith(".ufo") and os.path.isdir(source) for source in sources):
        ufos = [defcon.Font(source) for source in sources]
        ufos.sort(
            key=lambda ufo: [  # Order the masters by weight and width
                ufo.info.openTypeOS2WeightClass or 400,
                ufo.info.openTypeOS2WidthClass or 5,
            ]
        )
        object_to_read = ufos
    else:
        print(
            "Please specify just one designspace file *or* one or more "
            "UFOs. They must end in '.designspace' or '.ufo', respectively.",
            file=sys.stderr,
        )
        return 1

    font = glyphsLib.to_glyphs(
        object_to_read, minimize_ufo_diffs=options.no_preserve_glyphsapp_metadata
    )

    # Make the Glyphs file more suitable for roundtrip:
    font.customParameters["Disable Last Change"] = options.enable_last_change
    font.disablesAutomaticAlignment = options.enable_automatic_alignment

    if options.output_path:
        font.save(options.output_path)
    else:
        if designspace_file:
            filename_to_write = os.path.splitext(designspace_file)[0] + ".glyphs"
        else:
            filename_to_write = os.path.join(
                os.path.dirname(sources[0]),
                font.familyName.replace(" ", "") + ".glyphs",
            )
        font.save(filename_to_write)
python
def create_oracle(username, password, host, port, database, **kwargs):  # pragma: no cover
    """
    create an engine connected to an Oracle database using cx_oracle.
    """
    return create_engine(
        _create_oracle(username, password, host, port, database),
        **kwargs
    )
java
public boolean isSortedFromTo(int from, int to) {
    if (size == 0) return true;
    checkRangeFromTo(from, to, size);

    Object[] theElements = elements;
    for (int i = from + 1; i <= to; i++) {
        if (((Comparable) theElements[i]).compareTo((Comparable) theElements[i - 1]) < 0)
            return false;
    }
    return true;
}
python
def get_series_vintage_dates(self, series_id):
    """
    Get a list of vintage dates for a series. Vintage dates are the dates
    in history when a series' data values were revised or new data values
    were released.

    Parameters
    ----------
    series_id : str
        Fred series id such as 'CPIAUCSL'

    Returns
    -------
    dates : list
        list of vintage dates
    """
    url = "%s/series/vintagedates?series_id=%s" % (self.root_url, series_id)
    root = self.__fetch_data(url)
    if root is None:
        raise ValueError('No vintage date exists for series id: ' + series_id)
    dates = []
    for child in root.getchildren():
        dates.append(self._parse(child.text))
    return dates
python
def page_erase(addr):
    """Erases a single page."""
    if __verbose:
        print("Erasing page: 0x%x..." % (addr))

    # Send DNLOAD with first byte=0x41 and page address
    buf = struct.pack("<BI", 0x41, addr)
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 0, __DFU_INTERFACE, buf, __TIMEOUT)

    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: erase failed")

    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: erase failed")
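For reference, this is what the five-byte DNLOAD payload built above looks like for a hypothetical page address (a sketch using only the standard library):

    import struct

    buf = struct.pack("<BI", 0x41, 0x08004000)  # 0x08004000 is an arbitrary example address
    assert buf == b'\x41\x00\x40\x00\x08'       # command byte, then the address little-endian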
java
public com.ibm.wsspi.cache.CacheEntry getEntry(Object cacheId) {
    CacheEntry cacheEntry = new CacheEntry(cacheInstance.getEntry(cacheId));
    return cacheEntry;
}
java
private IAsyncResultHandler<IApiConnectionResponse> createApiConnectionResponseHandler() {
    return (IAsyncResult<IApiConnectionResponse> result) -> {
        if (result.isSuccess()) {
            requestMetric.setApiEnd(new Date());
            // The result came back. NB: still need to put it through the response chain.
            apiConnectionResponse = result.getResult();
            ApiResponse apiResponse = apiConnectionResponse.getHead();
            context.setAttribute("apiman.engine.apiResponse", apiResponse); //$NON-NLS-1$

            // Execute the response chain to evaluate the response.
            responseChain = createResponseChain((ApiResponse response) -> {
                // Send the api response to the caller.
                final EngineResultImpl engineResult = new EngineResultImpl(response);
                engineResult.setConnectorResponseStream(apiConnectionResponse);

                resultHandler.handle(AsyncResultImpl.create(engineResult));

                // We've come all the way through the response chain successfully
                responseChain.bodyHandler(buffer -> {
                    requestMetric.setBytesDownloaded(requestMetric.getBytesDownloaded() + buffer.length());
                    engineResult.write(buffer);
                });

                responseChain.endHandler(isEnd -> {
                    engineResult.end();
                    finished = true;
                    metrics.record(requestMetric);
                });

                // Signal to the connector that it's safe to start transmitting data.
                apiConnectionResponse.transmit();
            });

            // Write data from the back-end response into the response chain.
            apiConnectionResponse.bodyHandler(buffer -> responseChain.write(buffer));

            // Indicate back-end response is finished to the response chain.
            apiConnectionResponse.endHandler(isEnd -> responseChain.end());

            responseChain.doApply(apiResponse);
        } else {
            resultHandler.handle(AsyncResultImpl.create(result.getError()));
        }
    };
}
python
def add_step_timing_signal(x, step, hparams):
    """Add n-dimensional embedding as the step (vertical) timing signal.

    Args:
        x: a tensor with shape [batch, length, depth]
        step: step
        hparams: model hyper parameters

    Returns:
        a Tensor with the same shape as x.
    """
    if hparams.recurrence_type == "act":
        num_steps = hparams.act_max_steps
    else:
        num_steps = hparams.num_rec_steps
    channels = common_layers.shape_list(x)[-1]

    if hparams.step_timing_signal_type == "learned":
        signal = common_attention.get_layer_timing_signal_learned_1d(
            channels, step, num_steps)
    elif hparams.step_timing_signal_type == "sinusoid":
        signal = common_attention.get_layer_timing_signal_sinusoid_1d(
            channels, step, num_steps)

    if hparams.add_or_concat_timing_signal == "add":
        x_with_timing = x + common_layers.cast_like(signal, x)
    elif hparams.add_or_concat_timing_signal == "concat":
        batch_size = common_layers.shape_list(x)[0]
        length = common_layers.shape_list(x)[1]
        signal_tiled = tf.tile(signal, [batch_size, length, 1])
        x_with_timing = tf.concat((x, signal_tiled), axis=-1)

    return x_with_timing
python
def _clear_context():
    '''
    Clear any lxc variables set in __context__
    '''
    for var in [x for x in __context__ if x.startswith('lxc.')]:
        log.trace('Clearing __context__[\'%s\']', var)
        __context__.pop(var, None)
java
@Nullable
@CheckReturnValue
public static String trimStartAndEnd (@Nullable final String sSrc,
                                      @Nullable final String sLead,
                                      @Nullable final String sTail) {
    final String sInbetween = trimStart (sSrc, sLead);
    return trimEnd (sInbetween, sTail);
}
python
def tagscleanupdicts(configuration=None, url=None, keycolumn=5, failchained=True):
    # type: (Optional[Configuration], Optional[str], int, bool) -> Tuple[Dict,List]
    """
    Get tags cleanup dictionaries

    Args:
        configuration (Optional[Configuration]): HDX configuration. Defaults to global configuration.
        url (Optional[str]): Url of tags cleanup spreadsheet. Defaults to None (internal configuration parameter).
        keycolumn (int): Column number of tag column in spreadsheet. Defaults to 5.
        failchained (bool): Fail if chained rules found. Defaults to True.

    Returns:
        Tuple[Dict,List]: Returns (Tags dictionary, Wildcard tags list)
    """
    if not Tags._tags_dict:
        if configuration is None:
            configuration = Configuration.read()
        with Download(full_agent=configuration.get_user_agent()) as downloader:
            if url is None:
                url = configuration['tags_cleanup_url']
            Tags._tags_dict = downloader.download_tabular_rows_as_dicts(url, keycolumn=keycolumn)
            keys = Tags._tags_dict.keys()
            chainerror = False
            for i, tag in enumerate(keys):
                whattodo = Tags._tags_dict[tag]
                action = whattodo[u'action']
                final_tags = whattodo[u'final tags (semicolon separated)']
                for final_tag in final_tags.split(';'):
                    if final_tag in keys:
                        index = list(keys).index(final_tag)
                        if index != i:
                            whattodo2 = Tags._tags_dict[final_tag]
                            action2 = whattodo2[u'action']
                            if action2 != 'OK' and action2 != 'Other':
                                final_tags2 = whattodo2[u'final tags (semicolon separated)']
                                if final_tag not in final_tags2.split(';'):
                                    chainerror = True
                                    if failchained:
                                        logger.error('Chained rules: %s (%s -> %s) | %s (%s -> %s)' %
                                                     (action, tag, final_tags, action2, final_tag, final_tags2))
            if failchained and chainerror:
                raise ChainRuleError('Chained rules for tags detected!')
        Tags._wildcard_tags = list()
        for tag in Tags._tags_dict:
            if '*' in tag:
                Tags._wildcard_tags.append(tag)
    return Tags._tags_dict, Tags._wildcard_tags
java
private List<CmsSelectWidgetOption> getProjects() {
    List<CmsSelectWidgetOption> projects = new ArrayList<CmsSelectWidgetOption>();
    try {
        String defProject = null;
        if ((m_user != null) && CmsStringUtil.isNotEmptyOrWhitespaceOnly(m_user.getName())) {
            defProject = new CmsUserSettings(m_user).getStartProject();
        }
        if (CmsStringUtil.isEmptyOrWhitespaceOnly(defProject)) {
            defProject = getParamOufqn() + OpenCms.getWorkplaceManager().getDefaultUserSettings().getStartProject();
        }
        if (CmsStringUtil.isEmptyOrWhitespaceOnly(defProject)) {
            defProject = getCms().getRequestContext().getCurrentProject().getName();
        }
        List<CmsProject> projectsList;
        projectsList = OpenCms.getOrgUnitManager().getAllAccessibleProjects(getCms(), getParamOufqn(), false);
        Iterator<CmsProject> itProjects = projectsList.iterator();
        while (itProjects.hasNext()) {
            boolean selected = false;
            CmsProject project = itProjects.next();
            String projectName = project.getName();
            if (projectName.equals(defProject)) {
                selected = true;
            }
            if (isNewUser()) {
                projects.add(
                    new CmsSelectWidgetOption(project.getSimpleName(), selected, project.getSimpleName(), null));
            } else {
                projects.add(new CmsSelectWidgetOption(project.getName(), selected, project.getSimpleName(), null));
            }
        }
    } catch (CmsException e) {
        if (LOG.isErrorEnabled()) {
            LOG.error(e.getLocalizedMessage(), e);
        }
    }
    return projects;
}
java
private void flushCharacters() {
    int indentLength = 0;
    int len = charBuffer.length();
    /*
     * Count characters from end of ignorable whitespace to first end of line we hit
     */
    for (int i = len - 1; i >= 0; i--) {
        char ch = charBuffer.charAt(i);
        switch (ch) {
        case '\n':
        case '\r':
            lastIndent = new Indent(charLineNumber, indentLength);
            charBuffer.setLength(0);
            return;
        case ' ':
        case '\t':
            indentLength++;
            break;
        default:
            /*
             * No end of line found in the trailing whitespace. Leave the indent from the
             * previous ignorable whitespace unchanged.
             */
            charBuffer.setLength(0);
            return;
        }
    }
}
java
public static MatrixAccumulator mkManhattanNormAccumulator() {
    return new MatrixAccumulator() {
        private double result = 0.0;

        @Override
        public void update(int i, int j, double value) {
            result += Math.abs(value);
        }

        @Override
        public double accumulate() {
            double value = result;
            result = 0.0;
            return value;
        }
    };
}
python
def update_dataset(self, dataStr, flatten=False):
    '''
    update class with a data structure.
    :keyword flatten: use this to automatically flatten variables (squeeze dimensions)
    '''
    # Load keys and dimensions
    #########################
    dataDim = dataStr.pop('_dimensions', {})
    attrStr = dataStr.pop('_attributes', {})
    ndims = dataDim.pop('_ndims', 0)
    dimensions = [dataDim.keys(), dataDim.values()]

    keys = dataStr.keys()
    if len(keys) == 0:
        self.warning(2, 'No data loaded')
        return
    self.message(2, 'Loaded variables : ' + str(keys))

    # Check what is the current variable type
    isStructure = True if isinstance(dataStr[keys[0]], dict) else False

    # Shape is inverted wrt to order of dimensions to be consistent with check_variable
    datalen = [list(np.shape(dataStr[key]['data'])[::-1]) for key in keys] if isStructure \
        else [list(np.shape(dataStr[key])[::-1]) for key in keys]

    if isStructure:
        varDim = [list(dataStr[key]['_dimensions'])[1:] for key in keys]
        # Dimensions indices from actual variables' dimensions
        ind = [where_list(vDim, dimensions[0]) for vDim in varDim]
        # Check dimension lengths
        dimOk = [any([enum[1][ii] == dimensions[1][jj] for ii, jj in enumerate(ind[enum[0]])])
                 for enum in enumerate(datalen)]
        if any([not d for d in dimOk]):
            notOk = np.where(~np.array(dimOk))[0]
            print datalen
            self.Error('Problem with {0} variables : {1}'.format(len(notOk),
                                                                 ','.join(np.array(dataStr.keys())[notOk])))
    else:
        # Dimensions indices from variable length
        ind = [where_list(dlen, dimensions[1]) for dlen in datalen]
        if (np.array(ind).sum() == -1) != 0:
            self.Error('At least one variable has not been properly defined')

    # Get correspondance between data structure dimensions and variables
    dimname = [np.array(dimensions[0])[i].tolist() for i in ind]

    curDim, nself = self.get_currentDim()
    createDim = np.array([np.array([w == -1 for w in where_list(j, curDim[0])])
                          for i, j in enumerate(dimname)])
    createDim = np.squeeze(createDim)

    # Get dimensions to be created
    toCreate = np.array([not self.__dict__.has_key(key) for key in keys])

    updateDim = []

    self.message(2, 'Updating object with ' +
                 str(['{0}({1}:{2})'.format(i[0], i[1], i[2]) for i in zip(*(keys, dimname, datalen))]))

    # Update variables available in files
    for enum in enumerate(keys):
        ind = enum[0]
        key = enum[1]

        # Load variable
        ##############
        dum = dataStr.get(key).pop('data') if isStructure else copy.deepcopy(dataStr.get(key))

        if flatten:
            if isinstance(dum, dict):
                dum['data'] = dum['data'].flatten()
            else:
                dum = dum.flatten()

        if not isStructure:
            dum = {'_dimensions': dum._dimensions if hasattr(dum, '_dimensions') else {},
                   '_attributes': dum._attributes if hasattr(dum, '_attributes') else {},
                   'data': dum}
        else:
            dumStr = dataStr.get(key)
            dumStr.update({'data': dum})
            dum = dumStr

        dumDim = dimStr(dimname[ind], datalen[ind])

        # Initialize variable if required
        # if toCreate :
        #     updateDim.append(self.create_Variable(key, dum, dimensions={dimname[ind]: datalen[ind]},
        #                                           toCreate=toCreate[ind], createDim=createDim[ind]))
        updateDim.append(self.create_Variable(key, dum, dimensions=dumDim,
                                              toCreate=toCreate[ind], createDim=createDim[ind]))

    # Extend missing variables
    # missing__keys = list(set(self.par_list).difference(keys))
    # for enum in enumerate(missing__keys):
    #     ind = enum[0]
    #     key = enum[1]
    #     updateDim.append(self.create_Variable(key, np.ma.repeat(self.dist_to_coast_leuliette.fill_value),
    #                                           dimensions=dumDim, toCreate=False, createDim=False))

    # Final sequence
    zipped_upd = zip(*(np.hstack(dimname)[~np.hstack(createDim)], np.hstack(datalen)[~np.hstack(createDim)]))
    updateDim_List = np.array(list(set(tuple(i) for i in np.array(zipped_upd, dtype='|S16').tolist())))  # 2D unique

    zipped_dims = zip(*(np.hstack(dimname)[np.hstack(createDim)], np.hstack(datalen)[np.hstack(createDim)]))
    createDim_list = np.array(list(set(tuple(i) for i in np.array(zipped_dims, dtype='|S16').tolist())))  # 2D unique
    # NB: this will fail if numbers have more than 16 digits

    for dname, dim in createDim_list:
        self.create_Dim(dname, np.int(dim))

    for dname, dim in updateDim_List:
        self.update_Dim(dname, np.int(dim))
python
def write_config(ip, mac, single, double, long, touch):
    """Write the current configuration of a myStrom button."""
    click.echo("Write configuration to device %s" % ip)
    data = {
        'single': single,
        'double': double,
        'long': long,
        'touch': touch,
    }
    request = requests.post(
        'http://{}/{}/{}/'.format(ip, URI, mac), data=data, timeout=TIMEOUT)

    if request.status_code == 200:
        click.echo("Configuration of %s set" % mac)
java
private int[] buildIndex(final int[] counts, int[] positions, int minsupp) {
    // Count the number of frequent items:
    int numfreq = 0;
    for (int i = 0; i < counts.length; i++) {
        if (counts[i] >= minsupp) {
            ++numfreq;
        }
    }
    // Build the index table
    int[] idx = new int[numfreq];
    for (int i = 0, j = 0; i < counts.length; i++) {
        if (counts[i] >= minsupp) {
            idx[j++] = i;
        }
    }
    IntegerArrayQuickSort.sort(idx, (x, y) -> Integer.compare(counts[y], counts[x]));
    Arrays.fill(positions, -1);
    for (int i = 0; i < idx.length; i++) {
        positions[idx[i]] = i;
    }
    return idx;
}
java
private String ipAddressFromNI(String niSpec, String name) {
    String result = "UNKNOWN";
    NetworkInterface ni = null;
    String[] parts = niSpec.split(":");
    String niName = "eth0"; // default NIC name
    Scheme scheme = Scheme.ipv4;
    int index = 0; // default index
    Scope scope = Scope.global; // can be global, linklocal or sitelocal - is global by default

    // Parse up the spec
    for (int idx = 0; idx < parts.length; idx++) {
        switch (idx) {
        case 0:
            niName = parts[idx];
            break;
        case 1:
            String _schemeStr = parts[idx].toLowerCase();
            try {
                scheme = Scheme.valueOf(_schemeStr);
            } catch (Exception e) {
                warn("Error parsing scheme for resolveIP named [" + name + "]. Expecting ipv4 or ipv6 but got ["
                        + _schemeStr + "]. Using default of ipv4.");
                scheme = Scheme.ipv4; // default
            }
            break;
        case 2:
            String scopeTarget = parts[idx].toLowerCase();
            try {
                scope = Scope.valueOf(scopeTarget);
            } catch (Exception e) {
                warn("Error parsing scope for resolveIP named [" + name + "]. Expecting global, sitelocal or linklocal but got ["
                        + scopeTarget + "]. Using default of global.");
                scope = Scope.global; // default
            }
            break;
        case 3:
            try {
                index = Integer.parseInt(parts[idx]);
            } catch (NumberFormatException e) {
                index = 0; // default
            }
            break;
        default:
            break;
        }
    }

    // Find the specified NIC
    try {
        // if the niName is localhost, get the IP address associated with localhost
        if (niName.equalsIgnoreCase("localhost")) {
            if (scope != Scope.sitelocal) {
                warn("resolveIP named [" + name + "] has ni of localhost and will default to scope of sitelocal (or it won't work). Expects sitelocal but got [" + scope + "].");
                scope = Scope.sitelocal; // force scope to site local
            }
            try {
                InetAddress addr = InetAddress.getLocalHost();
                ni = NetworkInterface.getByInetAddress(addr);
            } catch (UnknownHostException e) {
                // This should not happen
                warn("The lookup of the NI for localhost for resolveIP named [" + name + "] caused an exception. Look for odd entries in /etc/hosts.");
                return "UNKNOWN NI";
            }
        } else {
            ni = NetworkInterface.getByName(niName);
        }
    } catch (SocketException e) {
        error("An error occurred looking up the interface named [" + niName + "] for resolveIP named [" + name + "]", e);
        return "UNKNOWN NI";
    }

    // if we have a network interface, then get the right ip
    List<InetAddress> ipv4Addrs = new ArrayList<InetAddress>();
    List<InetAddress> ipv6Addrs = new ArrayList<InetAddress>();
    if (ni != null) {
        // group the two types of addresses
        Enumeration<InetAddress> addrList = ni.getInetAddresses();
        do {
            InetAddress addr = addrList.nextElement();
            // filter out only the type specified (linklocal, sitelocal or global)
            switch (scope) {
            case linklocal:
                if (addr.isLinkLocalAddress()) {
                    if (addr instanceof Inet4Address) ipv4Addrs.add((Inet4Address) addr);
                    if (addr instanceof Inet6Address) ipv6Addrs.add((Inet6Address) addr);
                }
                break;
            case sitelocal:
                if (addr.isSiteLocalAddress()) {
                    if (addr instanceof Inet4Address) ipv4Addrs.add((Inet4Address) addr);
                    if (addr instanceof Inet6Address) ipv6Addrs.add((Inet6Address) addr);
                }
                break;
            case global:
                if (!addr.isSiteLocalAddress() && !addr.isLinkLocalAddress()) {
                    if (addr instanceof Inet4Address) ipv4Addrs.add((Inet4Address) addr);
                    if (addr instanceof Inet6Address) ipv6Addrs.add((Inet6Address) addr);
                }
                break;
            default:
                break;
            }
        } while (addrList.hasMoreElements());
    }

    List<InetAddress> targetAddrs = null;
    switch (scheme) {
    case ipv4:
        targetAddrs = ipv4Addrs;
        break;
    case ipv6:
        targetAddrs = ipv6Addrs;
        break;
    default:
        break;
    }

    // Get a candidate addr from the list
    InetAddress candidateAddr = null;
    if (!targetAddrs.isEmpty()) {
        if (index < targetAddrs.size()) {
            candidateAddr = targetAddrs.get(index);
            result = candidateAddr.getHostAddress();
        } else {
            error("Error getting index [" + index + "] address for resolveIP named [" + name + "]. Index is out of bounds.");
            return "INDEX OUT OF BOUNDS";
        }
    } else {
        error("Empty list of addresses for resolveIP named [" + name + "]");
        return "EMPTY LIST";
    }
    return result;
}
python
def install_dap_from_path(path, update=False, update_allpaths=False, first=True,
                          force=False, nodeps=False, reinstall=False, __ui__=''):
    '''Installs a dap from a given path'''
    will_uninstall = False
    dap_obj = dapi.Dap(path)
    name = dap_obj.meta['package_name']

    if name in get_installed_daps():
        if not update and not reinstall:
            raise DapiLocalError(
                'DAP {name} is already installed. '
                'Run `da pkg list` to see its location, or use --reinstall to ignore this check.'
                .format(name=name))
        elif not update_allpaths and name in get_installed_daps(_install_path()):
            will_uninstall = True
        elif update_allpaths and name in get_installed_daps():
            will_uninstall = True

    if update and update_allpaths:
        install_locations = []
        for pair in get_installed_daps_detailed()[name]:
            install_locations.append(pair['location'])
    else:
        install_locations = [_install_path()]

    # This should not happen unless someone did it on purpose
    for location in install_locations:
        if os.path.isfile(location):
            raise DapiLocalError(
                '{i} is a file, not a directory.'.format(i=_install_path()))

    _dir = tempfile.mkdtemp()
    old_level = logger.getEffectiveLevel()
    logger.setLevel(logging.ERROR)
    ok = dapi.DapChecker.check(dap_obj)
    logger.setLevel(old_level)
    if not ok:
        raise DapiLocalError('The DAP you want to install has errors, not installing.')

    installed = []
    if first:
        if not force and not _is_supported_here(dap_obj.meta):
            raise DapiLocalError(
                '{0} is not supported on this platform (use --force to suppress this check)'.
                format(name))
        deps = set()
        if 'dependencies' in dap_obj.meta and not nodeps:
            for dep in dap_obj.meta['dependencies']:
                dep = _strip_version_from_dependency(dep)
                if dep not in get_installed_daps():
                    deps |= _get_all_dependencies_of(dep, force=force)
            for dep in deps:
                if dep not in get_installed_daps():
                    installed += install_dap(dep, first=False, __ui__=__ui__)

    dap_obj.extract(_dir)

    if will_uninstall:
        uninstall_dap(name, allpaths=update_allpaths, __ui__=__ui__)

    _dapdir = os.path.join(_dir, name + '-' + dap_obj.meta['version'])
    if not os.path.isdir(_install_path()):
        os.makedirs(_install_path())
    os.mkdir(os.path.join(_dapdir, 'meta'))
    os.rename(os.path.join(_dapdir, 'meta.yaml'),
              os.path.join(_dapdir, 'meta', name + '.yaml'))

    for location in install_locations:
        for f in glob.glob(_dapdir + '/*'):
            dst = os.path.join(location, os.path.basename(f))
            if os.path.isdir(f):
                if not os.path.exists(dst):
                    os.mkdir(dst)
                for src_dir, dirs, files in os.walk(f):
                    dst_dir = src_dir.replace(f, dst)
                    if not os.path.exists(dst_dir):
                        os.mkdir(dst_dir)
                    for file_ in files:
                        src_file = os.path.join(src_dir, file_)
                        dst_file = os.path.join(dst_dir, file_)
                        shutil.copyfile(src_file, dst_file)
            else:
                shutil.copyfile(f, dst)

    try:
        shutil.rmtree(_dir)
    except:
        pass

    return [name] + installed
java
public static IntCountsHistogram decodeFromByteBuffer(final ByteBuffer buffer, final long minBarForHighestTrackableValue) {
    return (IntCountsHistogram) decodeFromByteBuffer(buffer, IntCountsHistogram.class, minBarForHighestTrackableValue);
}
java
boolean isNameUnique(String name, String currentJobName) {
    Item item = getItem(name);
    if (null == item) {
        // the candidate name didn't return any items so the name is unique
        return true;
    } else if (item.getName().equals(currentJobName)) {
        // the candidate name returned an item, but the item is the item
        // that the user is configuring so this is ok
        return true;
    } else {
        // the candidate name returned an item, so it is not unique
        return false;
    }
}
java
public boolean canTraverseOutsideSubtree() {
    if (null != m_left && m_left.canTraverseOutsideSubtree())
        return true;
    if (null != m_right && m_right.canTraverseOutsideSubtree())
        return true;
    return false;
}
python
def check_cousins(self, individual_1_id, individual_2_id):
    """
    Check if two family members are cousins.

    If two individuals share any grandparents they are cousins.

    Arguments:
        individual_1_id (str): The id of an individual
        individual_2_id (str): The id of an individual

    Returns:
        bool : True if the individuals are cousins
               False if they are not cousins
    """
    self.logger.debug("Checking if {0} and {1} are cousins".format(
        individual_1_id, individual_2_id
    ))
    # TODO check if any of the parents are siblings
    pass
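The body above is still a TODO; a minimal standalone sketch of the grandparent-intersection check the docstring describes might look like this, where get_parents is a hypothetical callable returning the parent ids of an individual:

    def are_cousins(get_parents, individual_1_id, individual_2_id):
        """Return True if the two individuals share at least one grandparent."""
        def grandparents(individual_id):
            return {gp for parent in get_parents(individual_id) for gp in get_parents(parent)}
        return bool(grandparents(individual_1_id) & grandparents(individual_2_id))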
python
def dry_run_scan(self, scan_id, targets):
    """ Dry runs a scan. """
    os.setsid()
    for _, target in enumerate(targets):
        host = resolve_hostname(target[0])
        if host is None:
            logger.info("Couldn't resolve %s.", target[0])
            continue
        port = self.get_scan_ports(scan_id, target=target[0])
        logger.info("%s:%s: Dry run mode.", host, port)
        self.add_scan_log(scan_id, name='', host=host, value='Dry run result')
    self.finish_scan(scan_id)
python
def generate_with_delta_f_and_max_freq(self, t_num, max_freq, delta_f,
                                       low_frequency_cutoff=None,
                                       cached_mem=None):
    """Generate the template with index t_num using custom length."""
    approximant = self.approximant(t_num)
    # Don't want to use INTERP waveforms in here
    if approximant.endswith('_INTERP'):
        approximant = approximant.replace('_INTERP', '')
    # Using SPAtmplt here is bad as the stored cbrt and logv get
    # recalculated as we change delta_f values. Fall back to TaylorF2
    # in lalsimulation.
    if approximant == 'SPAtmplt':
        approximant = 'TaylorF2'

    if cached_mem is None:
        wav_len = int(max_freq / delta_f) + 1
        cached_mem = zeros(wav_len, dtype=np.complex64)

    if self.has_compressed_waveforms and self.enable_compressed_waveforms:
        htilde = self.get_decompressed_waveform(cached_mem, t_num,
                                                f_lower=low_frequency_cutoff,
                                                approximant=approximant,
                                                df=delta_f)
    else:
        htilde = pycbc.waveform.get_waveform_filter(
            cached_mem, self.table[t_num], approximant=approximant,
            f_lower=low_frequency_cutoff, f_final=max_freq, delta_f=delta_f,
            distance=1. / DYN_RANGE_FAC, delta_t=1. / (2. * max_freq))
    return htilde
python
def sensitivity(imgs, bg=None):
    '''
    Extract pixel sensitivity from a set of homogeneously illuminated images

    This method is detailed in Section 5 of:
    ---
    K.Bedrich, M.Bokalic et al.:
    ELECTROLUMINESCENCE IMAGING OF PV DEVICES:
    ADVANCED FLAT FIELD CALIBRATION, 2017
    ---
    '''
    bg = getBackground(bg)
    for n, i in enumerate(imgs):
        i = imread(i, dtype=float)
        i -= bg
        smooth = fastMean(median_filter(i, 3))
        i /= smooth
        if n == 0:
            out = i
        else:
            out += i
    out /= (n + 1)
    return out
java
void splitBranches(final Element elem, final Branch filter) {
    final List<Element> ditavalRefs = getChildElements(elem, DITAVAREF_D_DITAVALREF);
    if (ditavalRefs.size() > 0) {
        // remove ditavalrefs
        for (final Element branch : ditavalRefs) {
            elem.removeChild(branch);
        }
        // create additional branches after current element
        final List<Element> branches = new ArrayList<>(ditavalRefs.size());
        branches.add(elem);
        final Node next = elem.getNextSibling();
        for (int i = 1; i < ditavalRefs.size(); i++) {
            final Element clone = (Element) elem.cloneNode(true);
            if (next != null) {
                elem.getParentNode().insertBefore(clone, next);
            } else {
                elem.getParentNode().appendChild(clone);
            }
            branches.add(clone);
        }
        // insert ditavalrefs
        for (int i = 0; i < branches.size(); i++) {
            final Element branch = branches.get(i);
            final Element ditavalref = ditavalRefs.get(i);
            branch.insertBefore(ditavalref, branch.getFirstChild());
            final Branch currentFilter = filter.merge(ditavalref);
            processAttributes(branch, currentFilter);
            final Branch childFilter = new Branch(currentFilter.resourcePrefix, currentFilter.resourceSuffix,
                    Optional.empty(), Optional.empty());
            // process children of all branches
            for (final Element child : getChildElements(branch, MAP_TOPICREF)) {
                if (DITAVAREF_D_DITAVALREF.matches(child)) {
                    continue;
                }
                splitBranches(child, childFilter);
            }
        }
    } else {
        processAttributes(elem, filter);
        for (final Element child : getChildElements(elem, MAP_TOPICREF)) {
            splitBranches(child, filter);
        }
    }
}
java
private void buildAttributeValueObject(final String name) {
    attributeValue_5.name = name;
    attributeValue_5.quality = AttrQuality.ATTR_VALID;
    attributeValue_5.data_format = AttrDataFormat.FMT_UNKNOWN;
    attributeValue_5.time = new TimeVal();
    attributeValue_5.r_dim = new AttributeDim();
    attributeValue_5.w_dim = new AttributeDim();
    attributeValue_5.r_dim.dim_x = 1;
    attributeValue_5.r_dim.dim_y = 0;
    attributeValue_5.w_dim.dim_x = 0;
    attributeValue_5.w_dim.dim_y = 0;
    attributeValue_5.value = new AttrValUnion();
    final long now = System.currentTimeMillis();
    attributeValue_5.time.tv_sec = (int) (now / 1000);
    attributeValue_5.time.tv_usec = (int) (now - attributeValue_5.time.tv_sec * 1000) * 1000;
    attributeValue_5.time.tv_nsec = 0;
    attributeValue_5.err_list = new DevError[0];
}
java
public Iterable<Result<DeleteError>> removeObjects(final String bucketName, final Iterable<String> objectNames) {
    return new Iterable<Result<DeleteError>>() {
        @Override
        public Iterator<Result<DeleteError>> iterator() {
            return new Iterator<Result<DeleteError>>() {
                private Result<DeleteError> error;
                private Iterator<DeleteError> errorIterator;
                private boolean completed = false;
                private Iterator<String> objectNameIter = objectNames.iterator();

                private synchronized void populate() {
                    List<DeleteError> errorList = null;
                    try {
                        List<DeleteObject> objectList = new LinkedList<DeleteObject>();
                        int i = 0;
                        while (objectNameIter.hasNext() && i < 1000) {
                            objectList.add(new DeleteObject(objectNameIter.next()));
                            i++;
                        }
                        if (i > 0) {
                            errorList = removeObject(bucketName, objectList);
                        }
                    } catch (InvalidBucketNameException | NoSuchAlgorithmException | InsufficientDataException
                            | IOException | InvalidKeyException | NoResponseException | XmlPullParserException
                            | ErrorResponseException | InternalException e) {
                        this.error = new Result<>(null, e);
                    } finally {
                        if (errorList != null) {
                            this.errorIterator = errorList.iterator();
                        } else {
                            this.errorIterator = new LinkedList<DeleteError>().iterator();
                        }
                    }
                }

                @Override
                public boolean hasNext() {
                    if (this.completed) {
                        return false;
                    }
                    if (this.error == null && this.errorIterator == null) {
                        populate();
                    }
                    if (this.error == null && this.errorIterator != null && !this.errorIterator.hasNext()) {
                        populate();
                    }
                    if (this.error != null) {
                        return true;
                    }
                    if (this.errorIterator.hasNext()) {
                        return true;
                    }
                    this.completed = true;
                    return false;
                }

                @Override
                public Result<DeleteError> next() {
                    if (this.completed) {
                        throw new NoSuchElementException();
                    }
                    if (this.error == null && this.errorIterator == null) {
                        populate();
                    }
                    if (this.error == null && this.errorIterator != null && !this.errorIterator.hasNext()) {
                        populate();
                    }
                    if (this.error != null) {
                        this.completed = true;
                        return this.error;
                    }
                    if (this.errorIterator.hasNext()) {
                        return new Result<>(this.errorIterator.next(), null);
                    }
                    this.completed = true;
                    throw new NoSuchElementException();
                }

                @Override
                public void remove() {
                    throw new UnsupportedOperationException();
                }
            };
        }
    };
}
java
@Override
public ListPhoneNumbersOptedOutResult listPhoneNumbersOptedOut(ListPhoneNumbersOptedOutRequest request) {
    request = beforeClientExecution(request);
    return executeListPhoneNumbersOptedOut(request);
}
java
public static int calculateMod11CheckSum(int[] weights, StringNumber number) {
    int c = calculateChecksum(weights, number, false) % 11;
    if (c == 1) {
        throw new IllegalArgumentException(ERROR_INVALID_CHECKSUM + number);
    }
    return c == 0 ? 0 : 11 - c;
}
java
private void mergePossibleResponse() {
    // TODO only HttpResponse element?
    if (!runtimeStack.isEmpty() && runtimeStack.peek().getTypes().contains(Types.RESPONSE)) {
        mergeReturnElement(runtimeStack.peek());
    }
}
java
public String showPrompt() {
    String prompt = isAnsiEnabled() ? ansiPrompt(PROMPT).toString() : PROMPT;
    return showPrompt(prompt);
}
python
def async_refresh_state(self):
    """Request each state to provide status update."""
    _LOGGER.debug('Setting up extended status')
    ext_status = ExtendedSend(address=self._address,
                              commandtuple=COMMAND_EXTENDED_GET_SET_0X2E_0X00,
                              cmd2=0x02,
                              userdata=Userdata())
    ext_status.set_crc()
    _LOGGER.debug('Sending ext status: %s', ext_status)
    self._send_msg(ext_status)
    _LOGGER.debug('Sending temp status request')
    self.temperature.async_refresh_state()
python
def first_seen(self, first_seen):
    """Set Document first seen."""
    self._group_data['firstSeen'] = self._utils.format_datetime(
        first_seen, date_format='%Y-%m-%dT%H:%M:%SZ'
    )
python
def compare_disks(disk0, disk1, configuration):
    """Compares two disks according to the given configuration."""
    with DiskComparator(disk0, disk1) as comparator:
        results = comparator.compare(
            size=configuration.get('get_file_size', False),
            identify=configuration.get('identify_files', False),
            concurrent=configuration.get('use_concurrency', False))

        if configuration.get('extract_files', False):
            extract = results['created_files'] + results['modified_files']
            files = comparator.extract(1, extract,
                                       path=configuration['results_folder'])
            results.update(files)

        if configuration.get('compare_registries', False):
            results['registry'] = comparator.compare_registry(
                concurrent=configuration.get('use_concurrency', False))

    return results
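A hypothetical invocation might look as follows; the configuration keys are exactly those read by the function above, while the disk paths and results folder are made up for illustration:

python
configuration = {
    'get_file_size': True,
    'identify_files': True,
    'use_concurrency': True,
    'extract_files': True,
    'results_folder': '/tmp/disk_diff',   # hypothetical path
    'compare_registries': False,
}
results = compare_disks('base.qcow2', 'modified.qcow2', configuration)
print(results['created_files'])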
java
public void marshall(GetRateBasedRuleManagedKeysRequest getRateBasedRuleManagedKeysRequest, ProtocolMarshaller protocolMarshaller) {

    if (getRateBasedRuleManagedKeysRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(getRateBasedRuleManagedKeysRequest.getRuleId(), RULEID_BINDING);
        protocolMarshaller.marshall(getRateBasedRuleManagedKeysRequest.getNextMarker(), NEXTMARKER_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public void printStackTrace(PrintStream stream) {
    if (getRootCause() != null) {
        stream.println(this.getMessage());
        getRootCause().printStackTrace(stream);
    } else {
        // Calling this.printStackTrace(stream) here would recurse forever;
        // defer to the superclass implementation instead.
        super.printStackTrace(stream);
    }
}
python
def _parse_season(self, row):
    """
    Parse the season string from the table.

    The season is generally located in the first column of the stats
    tables and should be parsed to denote which season metrics are
    being pulled from.

    Parameters
    ----------
    row : PyQuery object
        A PyQuery object of a single row in a stats table.

    Returns
    -------
    string
        A string representation of the season in the format 'YYYY', such
        as '2017'.
    """
    season = utils._parse_field(PLAYER_SCHEME, row, 'season')
    return season.replace('*', '').replace('+', '')
java
protected boolean matches(File f, boolean isFile) {
    if (fileFilter != null) {
        if (isFile && directoriesOnly) {
            return false;
        } else if (!isFile && filesOnly) {
            return false;
        } else if (fileNameRegex != null) {
            Matcher m = fileNameRegex.matcher(f.getName());
            if (!m.matches()) {
                return false;
            }
        }
    }
    return true;
}
java
public static Optional<SuggestedFix> removeModifiers(
    ModifiersTree originalModifiers, VisitorState state, Set<Modifier> toRemove) {
  SuggestedFix.Builder fix = SuggestedFix.builder();
  List<ErrorProneToken> tokens = state.getTokensForNode(originalModifiers);
  int basePos = ((JCTree) originalModifiers).getStartPosition();
  boolean empty = true;
  for (ErrorProneToken tok : tokens) {
    Modifier mod = getTokModifierKind(tok);
    if (toRemove.contains(mod)) {
      empty = false;
      fix.replace(basePos + tok.pos(), basePos + tok.endPos() + 1, "");
    }
  }
  if (empty) {
    return Optional.empty();
  }
  return Optional.of(fix.build());
}
python
def to_networkx(self, labels=None, edge_labels=False):
    """
    Get a networkx representation of the binary search tree.
    """
    import networkx as nx
    graph = nx.DiGraph()
    for node in self._traverse_nodes():
        u = node.key
        graph.add_node(u)  # minor redundancy: add_edge would add missing nodes

        # Set node properties
        graph.nodes[u]['value'] = node.value
        if labels is not None:
            label = ','.join([str(getattr(node, k)) for k in labels])
            graph.nodes[u]['label'] = label

        if node.left is not None:
            v = node.left.key
            graph.add_node(v)
            graph.add_edge(u, v)
            if edge_labels:
                # networkx 2.x edge-attribute access (the 1.x graph.edge
                # view no longer exists)
                graph.edges[u, v]['label'] = 'L'

        if node.right is not None:
            v = node.right.key
            graph.add_node(v)
            graph.add_edge(u, v)
            if edge_labels:
                graph.edges[u, v]['label'] = 'R'
    return graph
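A sketch of how this might be called, assuming a hypothetical `BinarySearchTree` container exposing `insert` alongside the `to_networkx` method above:

python
tree = BinarySearchTree()             # hypothetical container class
for key in [5, 3, 8, 1, 4]:
    tree.insert(key, value=str(key))  # hypothetical insert signature
graph = tree.to_networkx(labels=['key'], edge_labels=True)
print(graph.nodes(data=True))
print(graph.edges(data=True))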
java
public String convertOBPYoaOrentToString(EDataType eDataType, Object instanceValue) {
    return instanceValue == null ? null : instanceValue.toString();
}
python
def linreg_ols_pinv(y, X, rcond=1e-15):
    """Linear Regression, OLS, by multiplying with Pseudoinverse"""
    import numpy as np
    try:  # multiply with inverse to compute coefficients
        return np.dot(np.linalg.pinv(
            np.dot(X.T, X), rcond=rcond), np.dot(X.T, y))
    except np.linalg.LinAlgError:
        print("LinAlgError: SVD does not converge")
        return None
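A quick self-contained check of the function above on synthetic data (the coefficients are chosen arbitrarily):

python
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(100, 2)
y = X @ np.array([2.0, -1.0]) + 0.01 * rng.randn(100)  # y = 2*x0 - x1 + noise
beta = linreg_ols_pinv(y, X)
print(beta)  # approximately [ 2., -1.]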
java
public static double spearmanRankCorrelationCoefficient(double[] a, double[] b) {
    check(a, b);
    int N = a.length;
    int NcubedMinusN = (N * N * N) - N;

    // Convert a and b into rankings. The last value of this array is the
    // correction factor for computing the correlation based on the number
    // of ties. (Eq. 9.6; p. 239). Note that the ranks are not integers
    // because tied values are assigned the average of their ranks (e.g.,
    // a tie at positions 1 and 2 would be assigned a rank of 1.5).
    double[] rankedA = rank(a);
    double[] rankedB = rank(b);

    double sumDiffs = 0;
    for (int i = 0; i < rankedA.length - 1; ++i) {
        double diff = rankedA[i] - rankedB[i];
        sumDiffs += diff * diff;
    }

    double aCorrectionFactor = rankedA[rankedA.length - 1];
    double bCorrectionFactor = rankedB[rankedB.length - 1];
    double tiesSum = aCorrectionFactor + bCorrectionFactor;

    // Compute Spearman's rho using Eq. 9.7 (p. 239)
    return (NcubedMinusN - (6 * sumDiffs) - ((tiesSum) / 2d))
        / Math.sqrt((NcubedMinusN * NcubedMinusN)
                    - (tiesSum * NcubedMinusN)
                    + (aCorrectionFactor * bCorrectionFactor));
}
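When neither input contains ties, both correction factors are zero and Eq. 9.7 collapses to the textbook form rho = 1 - 6*sum(d^2) / (n^3 - n). A minimal Python sketch of that special case:

python
def spearman_no_ties(a, b):
    """Spearman's rho for paired samples without tied values."""
    n = len(a)
    rank = lambda xs: {x: i for i, x in enumerate(sorted(xs), start=1)}
    ra, rb = rank(a), rank(b)
    sum_d2 = sum((ra[x] - rb[y]) ** 2 for x, y in zip(a, b))
    return 1 - (6.0 * sum_d2) / (n ** 3 - n)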
java
@Override
public String selectMajorCurrency(final String ccy1, final String ccy2) {
    return ranks.getOrDefault(StringUtil.toUpperCase(ccy1), DEFAULT_UNRANKED_VALUE).intValue()
            <= ranks.getOrDefault(StringUtil.toUpperCase(ccy2), DEFAULT_UNRANKED_VALUE).intValue()
        ? ccy1 : ccy2;
}
python
def update_or_create(cls, append_lists=True, with_status=False,
                     remove_members=False, **kwargs):
    """
    Update or create group entries. If the group exists, the members
    will be updated. Set append_lists=True to add new members to the
    list, or False to reset the list to the provided members. If
    setting remove_members, this will override append_lists if set.

    :param bool append_lists: add to existing members, if any
    :param bool remove_members: remove specified members instead of
        appending or overwriting
    :param dict kwargs: keyword arguments to satisfy the `create`
        constructor if the group needs to be created.
    :raises CreateElementFailed: could not create element with reason
    :return: element instance by type
    :rtype: Element
    """
    was_created, was_modified = False, False
    element = None
    try:
        element = cls.get(kwargs.get('name'))
        was_modified = element.update_members(
            kwargs.get('members', []),
            append_lists=append_lists,
            remove_members=remove_members)
    except ElementNotFound:
        element = cls.create(
            kwargs.get('name'),
            members=kwargs.get('members', []))
        was_created = True

    if with_status:
        return element, was_modified, was_created
    return element
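Usage might look like the following; the `Group` element type and the member names are illustrative, not confirmed from this API:

python
group, was_modified, was_created = Group.update_or_create(
    name='web-servers',
    members=['host-1', 'host-2'],
    append_lists=True,
    with_status=True)
if was_created:
    print('group created')
elif was_modified:
    print('members updated')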
java
@Override
public Response toResponse(Throwable ex) {
    return Response.status(Status.INTERNAL_SERVER_ERROR)
        .entity(new ErrorMessage(Status.INTERNAL_SERVER_ERROR.getStatusCode(), ex.getMessage()))
        .type(MediaType.APPLICATION_JSON_TYPE)
        .build();
}
java
public static synchronized void suppressMethod(Class<?> clazz, boolean excludePrivateMethods) {
    Method[] methods = null;

    if (excludePrivateMethods) {
        methods = clazz.getMethods();
    } else {
        methods = clazz.getDeclaredMethods();
    }

    for (Method method : methods) {
        MockRepository.addMethodToSuppress(method);
    }
}
python
def minimum_image_dr(self, r1, r2, cutoff=None):
    """
    Calculate the shortest distance between two points in the cell,
    accounting for periodic boundary conditions.

    Args:
        r1 (np.array): fractional coordinates of point r1.
        r2 (np.array): fractional coordinates of point r2.
        cutoff (:obj:`float`, optional): if set, return zero if the
            minimum distance is greater than `cutoff`. Defaults to None.

    Returns:
        (float): The distance between r1 and r2.
    """
    delta_r_vector = self.minimum_image(r1, r2)
    return self.dr(np.zeros(3), delta_r_vector, cutoff)
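The minimum-image helper this delegates to is conventionally implemented by wrapping each component of the fractional displacement into [-0.5, 0.5]; a standard sketch of that technique (not necessarily the class's exact implementation):

python
import numpy as np

def minimum_image(r1, r2):
    """Minimum-image displacement between fractional coordinates."""
    delta = np.asarray(r2) - np.asarray(r1)
    return delta - np.round(delta)  # wrap each component into [-0.5, 0.5]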
python
def clear_highest_numeric_score(self):
    """Clears the highest numeric score.

    raise: NoAccess - ``Metadata.isRequired()`` or
        ``Metadata.isReadOnly()`` is ``true``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for osid.grading.GradeSystemForm.clear_lowest_numeric_score
    if (self.get_highest_numeric_score_metadata().is_read_only() or
            self.get_highest_numeric_score_metadata().is_required()):
        raise errors.NoAccess()
    self._my_map['highestNumericScore'] = self._highest_numeric_score_default