language
stringclasses
2 values
func_code_string
stringlengths
63
466k
python
def where_has(self, relation, extra, operator='>=', count=1):
    """
    Add a relationship count condition to the query, constrained by
    extra where clauses.

    :param relation: The name of the relation to count
    :type relation: str
    :param extra: Extra query constraints applied to the relation
    :type extra: Builder or callable
    :param operator: The comparison operator
    :type operator: str
    :param count: The relation count to compare against
    :type count: int
    :rtype: Builder
    """
    # Delegate to has() with an "and" boolean combinator and the extra
    # constraint callback.
    return self.has(relation, operator, count, 'and', extra)
java
/**
 * Updates a client of a profile. Every request parameter is optional;
 * only the supplied values are applied.
 *
 * @param model Spring MVC model (unused here, required by the mapping)
 * @param profileIdentifier profile id or friendly identifier
 * @param clientUUID UUID of the client to update
 * @param active new active flag, or null to leave unchanged
 * @param friendlyName new friendly name, or null to leave unchanged
 * @param reset when true, resets the client
 * @return map with the refreshed "client" entry
 * @throws Exception if the identifier cannot be resolved or the update fails
 */
@RequestMapping(value = "/api/profile/{profileIdentifier}/clients/{clientUUID}", method = RequestMethod.POST)
public @ResponseBody HashMap<String, Object> updateClient(Model model,
        @PathVariable("profileIdentifier") String profileIdentifier,
        @PathVariable("clientUUID") String clientUUID,
        @RequestParam(required = false) Boolean active,
        @RequestParam(required = false) String friendlyName,
        @RequestParam(required = false) Boolean reset) throws Exception {
    Integer profileId = ControllerUtils.convertProfileIdentifier(profileIdentifier);

    if (active != null) {
        logger.info("Active: {}", active);
        clientService.updateActive(profileId, clientUUID, active);
    }
    if (friendlyName != null) {
        clientService.setFriendlyName(profileId, clientUUID, friendlyName);
    }
    if (reset != null && reset) {
        clientService.reset(profileId, clientUUID);
    }

    // Return the refreshed client state to the caller.
    HashMap<String, Object> valueHash = new HashMap<String, Object>();
    valueHash.put("client", clientService.findClient(clientUUID, profileId));
    return valueHash;
}
java
/**
 * Stores the on-screen center point of a web element, translating its
 * web-page coordinates into screen pixels via the WebView's scale and
 * screen position.
 */
private void setLocation(WebElement webElement, WebView webView, int x, int y, int width, int height) {
    float scale = webView.getScale();
    // Screen coordinates of the WebView's top-left corner.
    int[] webViewOrigin = new int[2];
    webView.getLocationOnScreen(webViewOrigin);
    // Center of the element, scaled from web coordinates to screen pixels.
    int centerX = (int) (webViewOrigin[0] + (x + (Math.floor(width / 2))) * scale);
    int centerY = (int) (webViewOrigin[1] + (y + (Math.floor(height / 2))) * scale);
    webElement.setLocationX(centerX);
    webElement.setLocationY(centerY);
}
python
def param_set(name, value, retries=3):
    '''Set a MAVLink parameter, retrying on failure.

    :param name: parameter name (normalised to upper case)
    :param value: value to set
    :param retries: number of attempts before giving up
    '''
    upper_name = name.upper()
    return mpstate.mav_param.mavset(mpstate.master(), upper_name, value,
                                    retries=retries)
java
/**
 * Checks whether the request is fully received and decodes form data.
 * Returns false if any uploaded file failed; otherwise initialises
 * {@code formData} (populated only for POST/PUT form or multipart
 * requests) and returns true.
 */
public boolean ready() {
    // Not ready if any uploaded file reported an error.
    for (VertxFileUpload file : files) {
        if (file.getErrorIfAny() != null) {
            return false;
        }
    }

    // Always start from a fresh form-data map.
    formData = new HashMap<>();

    String contentType = request.headers().get(HeaderNames.CONTENT_TYPE);
    if (contentType == null) {
        return true;
    }

    contentType = HttpUtils.getContentTypeFromContentTypeAndCharacterSetting(contentType);
    boolean isForm = contentType.equalsIgnoreCase(MimeTypes.FORM)
            || contentType.equalsIgnoreCase(MimeTypes.MULTIPART);
    if (HttpUtils.isPostOrPut(request) && isForm) {
        // Copy the decoded form attributes into our own map.
        for (String key : request.formAttributes().names()) {
            formData.put(key, request.formAttributes().getAll(key));
        }
    }
    return true;
}
java
/**
 * Gets custom labels for a corporation's contacts.
 *
 * @param corporationId corporation id
 * @param datasource server name to source data from
 * @param ifNoneMatch ETag from a previous request
 * @param token access token
 * @return the list of contact labels
 * @throws ApiException on request failure
 */
public List<CorporationContactsLabelsResponse> getCorporationsCorporationIdContactsLabels(Integer corporationId,
        String datasource, String ifNoneMatch, String token) throws ApiException {
    // Delegate to the WithHttpInfo variant and unwrap the payload.
    return getCorporationsCorporationIdContactsLabelsWithHttpInfo(corporationId, datasource, ifNoneMatch, token)
            .getData();
}
python
def _teardown_redundancy_router_gw_connectivity(self, context, router,
                                                router_db, plugging_driver):
    """To be called in update_router() if the router gateway is to change
    BEFORE router has been updated in DB.

    Tears down the gateway ports of all redundancy routers backing an
    HA-enabled router and removes the HA group for the current gateway,
    so new ports/groups can be created after the gateway change.

    :param context: request context
    :param router: router dict from the update request
    :param router_db: DB model of the user-visible router
    :param plugging_driver: hosting-device plugging driver, may be None
    """
    if not router[ha.ENABLED]:
        # No HA currently enabled so we're done
        return
    e_context = context.elevated()
    # since gateway is about to change the ha group for the current gateway
    # is removed, a new one will be created later
    self._delete_ha_group(e_context, router_db.gw_port_id)
    # teardown connectivity for the gw ports on the redundancy routers
    # and remove those ports as new ones will be created later
    rr_ids = []
    for r_b_db in router_db.redundancy_bindings:
        if plugging_driver is not None:
            plugging_driver.teardown_logical_port_connectivity(
                e_context, r_b_db.redundancy_router.gw_port,
                r_b_db.redundancy_router.hosting_info.hosting_device_id)
        # Clearing the gateway and disabling HA on the redundancy router
        # removes its gw port; notifications are suppressed here and sent
        # in one batch below.
        self._update_router_no_notify(
            e_context, r_b_db.redundancy_router_id,
            {'router': {EXTERNAL_GW_INFO: None, ha.ENABLED: False}})
        rr_ids.append(r_b_db.redundancy_router_id)
    # Single batched notification for all updated redundancy routers.
    self.notify_routers_updated(e_context, rr_ids)
java
/**
 * Attempts Gerrit HTTP form-based login ("/login/") with the configured
 * credentials.
 *
 * @return the extracted Gerrit auth token, or absent when no credentials
 *         are configured or none could be extracted
 * @throws IOException on transport failure
 * @throws HttpStatusException on unexpected HTTP status
 */
private Optional<String> tryGerritHttpFormAuth(HttpClientBuilder client, HttpContext httpContext)
        throws IOException, HttpStatusException {
    // Form login only makes sense when credentials are configured.
    if (!authData.isLoginAndPasswordAvailable()) {
        return Optional.absent();
    }
    String loginUrl = authData.getHost() + "/login/";
    HttpPost loginRequest = new HttpPost(loginUrl);
    List<BasicNameValuePair> formFields = Lists.newArrayList(
            new BasicNameValuePair("username", authData.getLogin()),
            new BasicNameValuePair("password", authData.getPassword()));
    loginRequest.setEntity(new UrlEncodedFormEntity(formFields, Consts.UTF_8));
    HttpResponse loginResponse = httpRequestExecutor.execute(client, loginRequest, httpContext);
    return extractGerritAuth(loginResponse);
}
java
public void loadXmlConfiguration() throws SAXException, IOException { URL baseUrl = m_baseFolder.toURI().toURL(); if (LOG.isDebugEnabled()) { LOG.debug(Messages.get().getBundle().key(Messages.LOG_BASE_URL_1, baseUrl)); } // first load the base configuration loadXmlConfiguration(baseUrl, this); // now iterate all sub-configurations Iterator<I_CmsXmlConfiguration> i = m_configurations.iterator(); while (i.hasNext()) { loadXmlConfiguration(baseUrl, i.next()); } // remove the old backups removeOldBackups(MAX_BACKUP_DAYS); }
java
/**
 * Emits a debug-trace record marking the start of postInvoke processing
 * for the given method invocation.
 */
public static void tracePostInvokeBegins(EJSDeployedSupport s, EJSWrapperBase wrapper) {
    // Only pay the formatting cost when debug tracing is actually enabled.
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        StringBuffer sbuf = new StringBuffer();
        sbuf.append(MthdPostInvokeEntry_Type_Str);
        sbuf.append(DataDelimiter);
        sbuf.append(MthdPostInvokeEntry_Type);
        sbuf.append(DataDelimiter);
        writeDeployedSupportInfo(s, sbuf, wrapper, null);
        Tr.debug(tc, sbuf.toString());
    }
}
python
def _filter_execs(self, isSubroutine):
    """Filters the executables in the dictionary by their type.

    :param isSubroutine: when True keep only Subroutine instances,
        otherwise keep only Function instances
    :return: dict of matching executables keyed by name
    """
    wanted = Subroutine if isSubroutine else Function
    return {key: value for key, value in self.executables.items()
            if isinstance(value, wanted)}
java
/**
 * Returns a {@link VoltTableRow} accessor for the row at the given index.
 * A memoized (row index, buffer offset) pair makes sequential access O(1)
 * per call; seeking to an index in front of the memoized position rescans
 * from the start of the row data.
 *
 * @param index zero-based row index
 * @return row accessor positioned at the requested row
 * @throws IndexOutOfBoundsException if index is outside [0, row count)
 */
public final VoltTableRow fetchRow(int index) {
    assert(verifyTableInvariants());
    // check bounds
    if ((index < 0) || (index >= m_rowCount)) {
        throw new IndexOutOfBoundsException("index = " + index + "; rows = " + m_rowCount);
    }
    // if no memoized value or looking in front of the memoized value, reset
    // to the first row
    if ((m_memoizedRowOffset == NO_MEMOIZED_ROW_OFFSET) || (index < m_memoizedRowOffset)) {
        m_memoizedRowOffset = 0;
        m_memoizedBufferOffset = m_rowStart + ROW_COUNT_SIZE;
    }
    // walk forward row by row until the requested index is reached
    while (m_memoizedRowOffset < index) {
        // add 4 bytes as the row size is non-inclusive
        m_memoizedBufferOffset += m_buffer.getInt(m_memoizedBufferOffset) + ROW_HEADER_SIZE;
        m_memoizedRowOffset++;
    }
    Row retval = new Row(m_memoizedBufferOffset + ROW_HEADER_SIZE);
    retval.m_activeRowIndex = index;
    return retval;
}
java
/**
 * Chooses initial medoids in the spirit of k-means++ seeding: the first
 * medoid is picked uniformly at random, each later one with probability
 * proportional to its distance from the closest medoid chosen so far.
 * On return y[i] holds the index of the nearest medoid for sample i and
 * d[i] the distance to it.
 *
 * NOTE(review): "Math" here appears to be smile's math utility class
 * (randomInt/random/sum are not java.lang.Math methods) -- confirm the
 * file's imports before refactoring.
 *
 * @param distance the distance functor
 * @param data the samples
 * @param medoids output array receiving the k chosen medoids
 * @param y output array of nearest-medoid indices per sample
 * @param d output array of distances to the nearest medoid
 * @return the distortion (sum of distances to each sample's nearest medoid)
 */
public static <T> double seed(smile.math.distance.Distance<T> distance, T[] data, T[] medoids, int[] y, double[] d) {
    int n = data.length;
    int k = medoids.length;
    // First medoid: uniformly random sample.
    T medoid = data[Math.randomInt(n)];
    medoids[0] = medoid;

    Arrays.fill(d, Double.MAX_VALUE);

    // pick the next center
    for (int j = 1; j < k; j++) {
        // Loop over the samples and compare them to the most recent center. Store
        // the distance from each sample to its closest center in scores.
        for (int i = 0; i < n; i++) {
            // compute the distance between this sample and the current center
            double dist = distance.d(data[i], medoid);
            if (dist < d[i]) {
                d[i] = dist;
                y[i] = j - 1;
            }
        }

        // Sample the next medoid with probability proportional to d[i]
        // (roulette-wheel selection over the distance mass).
        double cutoff = Math.random() * Math.sum(d);
        double cost = 0.0;
        int index = 0;
        for (; index < n; index++) {
            cost += d[index];
            if (cost >= cutoff) {
                break;
            }
        }

        medoid = data[index];
        medoids[j] = medoid;
    }

    // Final pass: fold the last chosen medoid into d and y.
    for (int i = 0; i < n; i++) {
        // compute the distance between this sample and the current center
        double dist = distance.d(data[i], medoid);
        if (dist < d[i]) {
            d[i] = dist;
            y[i] = k - 1;
        }
    }

    // Distortion = sum of each sample's distance to its nearest medoid.
    double distortion = 0.0;
    for (int i = 0; i < n; ++i) {
        distortion += d[i];
    }

    return distortion;
}
java
/**
 * Creates a gRPC channel to the given Bigtable host using the port,
 * negotiation mode and user agent from the supplied options.
 *
 * @param host target hostname
 * @param options Bigtable options (port, plaintext flag, user agent)
 * @param interceptors client interceptors applied to every call
 * @return the configured {@link ManagedChannel}
 * @throws SSLException if TLS setup fails
 */
public static ManagedChannel createNettyChannel(String host,
        BigtableOptions options, ClientInterceptor ... interceptors) throws SSLException {
    LOG.info("Creating new channel for %s", host);
    if (LOG.getLog().isDebugEnabled()) {
        // Log the caller's stack so unexpected channel creation can be traced.
        LOG.debug(Throwables.getStackTraceAsString(new Throwable()));
    }
    // Ideally, this should be ManagedChannelBuilder.forAddress(...) rather than an explicit
    // call to NettyChannelBuilder. Unfortunately, that doesn't work for shaded artifacts.
    ManagedChannelBuilder<?> builder = ManagedChannelBuilder
        .forAddress(host, options.getPort());
    if (options.usePlaintextNegotiation()) {
        // NOTE: usePlaintext(true) is deprecated in newer versions of grpc (1.11.0).
        // usePlaintext() is the preferred approach, but won't work with older versions.
        // This means that plaintext negotiation can't be used with Beam.
        builder.usePlaintext();
    }
    return builder
        .idleTimeout(Long.MAX_VALUE, TimeUnit.SECONDS)
        .maxInboundMessageSize(MAX_MESSAGE_SIZE)
        .userAgent(BigtableVersionInfo.CORE_USER_AGENT + "," + options.getUserAgent())
        .intercept(interceptors)
        .build();
}
python
def add_instance(self, name, properties):
    # type: (str, dict) -> None
    """
    Stores the description of a component instance.

    The given properties are stored as is.

    :param name: Instance name
    :param properties: Instance properties
    :raise NameError: Already known instance name
    """
    known = self.__instances
    if name in known:
        # Duplicate registration is an error.
        raise NameError(name)
    # Store properties "as-is"
    known[name] = properties
python
def parse_atoms(self):
    '''All ATOM lines are parsed even though only one per residue needs to be
    parsed. The reason for parsing all the lines is just to sanity-checks that
    the ATOMs within one residue are consistent with each other.

    Builds self.atom_to_seqres_sequence_maps, mapping each chain id to a
    SequenceMap from ATOM residue ids to SEQRES indices.
    (Python 2 code: uses types.IntType and dict.iteritems.)
    '''
    # There must be exactly one atom_siteCategory element in the PDBML file.
    atom_site_header_tag = self.main_tag.getElementsByTagName("PDBx:atom_siteCategory")
    assert(len(atom_site_header_tag) == 1)
    atom_site_header_tag = atom_site_header_tag[0]
    atom_site_tags = atom_site_header_tag.getElementsByTagName("PDBx:atom_site")
    residue_map = {}
    residues_read = {}
    int_type = types.IntType
    for t in atom_site_tags:
        r, seqres, ResidueAA, Residue3AA = PDBML_slow.parse_atom_site(t, self.modified_residues)
        if r:
            # skip certain ACE residues
            if not(self.pdb_id in cases_with_ACE_residues_we_can_ignore and Residue3AA == 'ACE'):
                full_residue_id = str(r)
                if residues_read.get(full_residue_id):
                    # Sanity check: every ATOM of a residue must agree on the
                    # residue type and SEQRES index.
                    assert(residues_read[full_residue_id] == (r.ResidueAA, seqres))
                else:
                    residues_read[full_residue_id] = (r.ResidueAA, seqres)
                    residue_map[r.Chain] = residue_map.get(r.Chain, {})
                    assert(type(seqres) == int_type)
                    residue_map[r.Chain][str(r)] = seqres
    ## Create SequenceMap objects to map the ATOM Sequences to the SEQRES Sequences
    atom_to_seqres_sequence_maps = {}
    for chain_id, atom_seqres_mapping in residue_map.iteritems():
        atom_to_seqres_sequence_maps[chain_id] = SequenceMap.from_dict(atom_seqres_mapping)
    self.atom_to_seqres_sequence_maps = atom_to_seqres_sequence_maps
java
/**
 * Maps the boolean update outcome onto an HTTP response: 200 with an
 * "updated" body on success, 304 (no body) when nothing changed.
 */
private ResponseEntity<Object> translateState(boolean selectedTemplateUpdated) {
    return selectedTemplateUpdated
            ? new ResponseEntity<>("updated", HttpStatus.OK)
            : new ResponseEntity<>(HttpStatus.NOT_MODIFIED);
}
python
def MPTfileCSV(file_or_path):
    """Simple function to open MPT files as csv.DictReader objects

    Checks for the correct headings, skips any comments and returns a
    csv.DictReader object and a list of comments.

    :param file_or_path: path to an MPT file, or an already-open text file
    :return: tuple of (csv.DictReader over the data rows, list of comment lines)
    :raise ValueError: if the file does not look like an EC-Lab ASCII file,
        the header-count line is missing/malformed, or the column headers
        are not one of the recognised layouts
    """
    if isinstance(file_or_path, str):
        mpt_file = open(file_or_path, 'r')
    else:
        mpt_file = file_or_path

    magic = next(mpt_file)
    if magic.rstrip() != 'EC-Lab ASCII FILE':
        raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

    # Raw string so '\d' / '\s' are real regex escapes (non-raw escapes are
    # deprecated); also fail with a clear error instead of AttributeError
    # when the header-count line is malformed.
    nb_headers_match = re.match(r'Nb header lines : (\d+)\s*$', next(mpt_file))
    if nb_headers_match is None:
        raise ValueError("Missing 'Nb header lines' line in EC-Lab file")
    nb_headers = int(nb_headers_match.group(1))
    if nb_headers < 3:
        raise ValueError("Too few header lines: %d" % nb_headers)

    # The 'magic number' line, the 'Nb headers' line and the column headers
    # make three lines. Every additional line is a comment line.
    comments = [next(mpt_file) for i in range(nb_headers - 3)]

    mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')

    # Known column layouts emitted by EC-Lab.
    expected_fieldnames = (
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h",
         "P/W", "<I>/mA", "(Q-Qo)/mA.h", "x"],
        ['mode', 'ox/red', 'error', 'control changes', 'Ns changes',
         'counter inc.', 'time/s', 'control/V', 'Ewe/V', 'dq/mA.h',
         '<I>/mA', '(Q-Qo)/mA.h', 'x'],
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V", "Ewe/V", "I/mA",
         "dQ/mA.h", "P/W"],
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V", "Ewe/V", "<I>/mA",
         "dQ/mA.h", "P/W"])
    if mpt_csv.fieldnames not in expected_fieldnames:
        raise ValueError("Unrecognised headers for MPT file format")

    return mpt_csv, comments
java
/**
 * Generated JFlex scanner loop for the JSON lexer: resumes the DFA from the
 * current lexical state, consumes input (refilling the buffer at EOF) and
 * dispatches the matched action, returning the next {@link Yytoken} or null
 * at end of input.
 *
 * NOTE(review): generated code -- tokens left intact, only formatting and
 * comments adjusted. Do not hand-edit; regenerate from the .flex spec.
 *
 * @return the next token, or null at EOF
 * @throws java.io.IOException if reading the input fails
 * @throws ParseException on an unexpected character or escape sequence
 */
public Yytoken yylex() throws java.io.IOException, ParseException {
    int zzInput;
    int zzAction;

    // cached fields:
    int zzCurrentPosL;
    int zzMarkedPosL;
    int zzEndReadL = zzEndRead;
    char [] zzBufferL = zzBuffer;
    char [] zzCMapL = ZZ_CMAP;

    int [] zzTransL = ZZ_TRANS;
    int [] zzRowMapL = ZZ_ROWMAP;
    int [] zzAttrL = ZZ_ATTRIBUTE;

    while (true) {
        zzMarkedPosL = zzMarkedPos;

        yychar+= zzMarkedPosL-zzStartRead;

        zzAction = -1;

        zzCurrentPosL = zzCurrentPos = zzStartRead = zzMarkedPosL;

        zzState = ZZ_LEXSTATE[zzLexicalState];

        zzForAction: {
            while (true) {

                if (zzCurrentPosL < zzEndReadL)
                    zzInput = zzBufferL[zzCurrentPosL++];
                else if (zzAtEOF) {
                    zzInput = YYEOF;
                    break zzForAction;
                }
                else {
                    // store back cached positions
                    zzCurrentPos = zzCurrentPosL;
                    zzMarkedPos = zzMarkedPosL;
                    boolean eof = zzRefill();
                    // get translated positions and possibly new buffer
                    zzCurrentPosL = zzCurrentPos;
                    zzMarkedPosL = zzMarkedPos;
                    zzBufferL = zzBuffer;
                    zzEndReadL = zzEndRead;
                    if (eof) {
                        zzInput = YYEOF;
                        break zzForAction;
                    }
                    else {
                        zzInput = zzBufferL[zzCurrentPosL++];
                    }
                }
                int zzNext = zzTransL[ zzRowMapL[zzState] + zzCMapL[zzInput] ];
                if (zzNext == -1) break zzForAction;
                zzState = zzNext;

                int zzAttributes = zzAttrL[zzState];
                if ( (zzAttributes & 1) == 1 ) {
                    zzAction = zzState;
                    zzMarkedPosL = zzCurrentPosL;
                    if ( (zzAttributes & 8) == 8 ) break zzForAction;
                }

            }
        }

        // store back cached position
        zzMarkedPos = zzMarkedPosL;

        switch (zzAction < 0 ? zzAction : ZZ_ACTION[zzAction]) {
            case 11: { sb.append(yytext()); }
            case 25: break;
            case 4: { sb = null; sb = new StringBuffer(); yybegin(STRING_BEGIN); }
            case 26: break;
            case 16: { sb.append('\b'); }
            case 27: break;
            case 6: { return new Yytoken(Yytoken.TYPE_RIGHT_BRACE,null); }
            case 28: break;
            case 23: { Boolean val=Boolean.valueOf(yytext()); return new Yytoken(Yytoken.TYPE_VALUE, val); }
            case 29: break;
            case 22: { return new Yytoken(Yytoken.TYPE_VALUE, null); }
            case 30: break;
            case 13: { yybegin(YYINITIAL);return new Yytoken(Yytoken.TYPE_VALUE, sb.toString()); }
            case 31: break;
            case 12: { sb.append('\\'); }
            case 32: break;
            case 21: { Double val=Double.valueOf(yytext()); return new Yytoken(Yytoken.TYPE_VALUE, val); }
            case 33: break;
            case 1: { throw new ParseException(yychar, ParseException.ERROR_UNEXPECTED_CHAR, new Character(yycharat(0))); }
            case 34: break;
            case 8: { return new Yytoken(Yytoken.TYPE_RIGHT_SQUARE,null); }
            case 35: break;
            case 19: { sb.append('\r'); }
            case 36: break;
            case 15: { sb.append('/'); }
            case 37: break;
            case 10: { return new Yytoken(Yytoken.TYPE_COLON,null); }
            case 38: break;
            case 14: { sb.append('"'); }
            case 39: break;
            case 5: { return new Yytoken(Yytoken.TYPE_LEFT_BRACE,null); }
            case 40: break;
            case 17: { sb.append('\f'); }
            case 41: break;
            case 24: { try{ int ch=Integer.parseInt(yytext().substring(2),16); sb.append((char)ch); } catch(Exception e){ throw new ParseException(yychar, ParseException.ERROR_UNEXPECTED_EXCEPTION, e); } }
            case 42: break;
            case 20: { sb.append('\t'); }
            case 43: break;
            case 7: { return new Yytoken(Yytoken.TYPE_LEFT_SQUARE,null); }
            case 44: break;
            case 2: { Long val=Long.valueOf(yytext()); return new Yytoken(Yytoken.TYPE_VALUE, val); }
            case 45: break;
            case 18: { sb.append('\n'); }
            case 46: break;
            case 9: { return new Yytoken(Yytoken.TYPE_COMMA,null); }
            case 47: break;
            case 3: { }
            case 48: break;
            default:
                if (zzInput == YYEOF && zzStartRead == zzCurrentPos) {
                    zzAtEOF = true;
                    return null;
                }
                else {
                    zzScanError(ZZ_NO_MATCH);
                }
        }
    }
}
java
/**
 * Convenience overload of parseDuration: durations default to millisecond
 * resolution.
 */
public static long parseDuration(Object configAlias, String propertyKey, Object obj, long defaultValue) {
    return parseDuration(configAlias, propertyKey, obj, defaultValue, TimeUnit.MILLISECONDS);
}
java
/**
 * Converts a MultiPoint transfer object into a {@link MultiPoint} geometry.
 *
 * @param input the transfer object, may be null
 * @param crsId fallback CRS id (resolved against the input)
 * @return the geometry, or null when input is null
 */
public MultiPoint fromTransferObject(MultiPointTo input, CrsId crsId) {
    if (input == null) {
        return null;
    }
    crsId = getCrsId(input, crsId);
    isValid(input);
    // Convert each coordinate of the transfer object into a Point.
    int count = input.getCoordinates().length;
    Point[] points = new Point[count];
    int i = 0;
    while (i < count) {
        points[i] = createPoint(input.getCoordinates()[i], crsId);
        i++;
    }
    return new MultiPoint(points);
}
java
void materialize() { // already materialized? if (cachefill < 0) { return; } // Compute minimum and maximum double min = Double.MAX_VALUE, max = Double.MIN_VALUE; for (int i = 0; i < cachefill; i++) { min = Math.min(min, cachec[i]); max = Math.max(max, cachec[i]); } // use the LinearScale magic to round to "likely suiteable" step sizes. // TODO: extract into a reusable function? LinearScale scale = new LinearScale(min, max); min = scale.getMin(); max = scale.getMax(); this.base = min; this.max = max; this.binsize = (max - min) / this.destsize; // initialize array this.data = new double[this.destsize << 1]; size = destsize; // re-insert data we have final int end = cachefill; cachefill = -1; // So reinsert works! for (int i = 0; i < end; i++) { increment(cachec[i], cachev[i]); } // delete cache, signal that we're initialized cachec = null; cachev = null; }
python
def y(self):
    """
    Returns a dictionary with all the current values for the Y of the grid.

    Keys for the dictionary: property, min, max, step, base, expression
    Types: property=str, min=float, max=float, step=float, base=float, expression=str

    :return: the dictionary with the parameters
    :rtype: dict
    """
    str_sig = "()Ljava/lang/String;"
    # (result key, Java getter, JNI signature) triples, in output order.
    getters = [
        ("property", "getYProperty", str_sig),
        ("min", "getYMin", "()D"),
        ("max", "getYMax", "()D"),
        ("step", "getYStep", "()D"),
        ("base", "getYBase", "()D"),
        ("expression", "getYExpression", str_sig),
    ]
    return {key: javabridge.call(self.jobject, method, sig)
            for key, method, sig in getters}
java
/**
 * Reads the raw message-type field and maps it onto the corresponding
 * {@code TrmFirstContactMessageType} constant.
 */
public final TrmFirstContactMessageType getMessageType() {
    return TrmFirstContactMessageType.getTrmFirstContactMessageType(
            jmo.getIntField(TrmFirstContactAccess.MESSAGETYPE));
}
java
/**
 * Gets the specified virtual network gateway by resource group,
 * blocking on the async service call and unwrapping the response body.
 *
 * @param resourceGroupName the resource group name
 * @param virtualNetworkGatewayName the gateway name
 * @return the gateway resource
 */
public VirtualNetworkGatewayInner getByResourceGroup(String resourceGroupName, String virtualNetworkGatewayName) {
    return getByResourceGroupWithServiceResponseAsync(resourceGroupName, virtualNetworkGatewayName)
            .toBlocking()
            .single()
            .body();
}
java
/**
 * Creates and opens a new HylaFAX client connection for the pool:
 * connects to the host, logs in (user, optional password), applies the
 * configured transfer mode/type and optionally enables admin operations.
 * On any failure the half-initialized client is released before the
 * exception is rethrown/wrapped.
 *
 * @return the connected client
 */
@Override
protected HylaFAXClient createResourceImpl() {
    HylaFAXClient client=null;
    try {
        //create new instance
        client=this.createHylaFAXClient();

        //open host
        client.open(this.host,this.port);

        //set user
        client.user(this.userName);

        //set transfer mode and type
        client.mode(this.mode);
        client.type(this.type);

        //set password
        if(this.password!=null) {
            client.pass(this.password);
        }

        if(this.enableAdminOperations) {
            //enable admin operations (reuses the same password)
            client.admin(this.password);
        }
    } catch(RuntimeException exception) {
        //release client before propagating
        this.releaseResource(client);
        throw exception;
    } catch(Exception exception) {
        //release client, then wrap checked exceptions
        this.releaseResource(client);
        throw new FaxException("General error.",exception);
    }

    return client;
}
java
/**
 * Entry rule for OpAdd.
 * Generated ANTLR parser code (InternalXbase.g:434) -- do not hand-edit;
 * regenerate from the grammar instead.
 */
public final void entryRuleOpAdd() throws RecognitionException {
    try {
        // InternalXbase.g:434:1: ( ruleOpAdd EOF )
        // InternalXbase.g:435:1: ruleOpAdd EOF
        {
            if ( state.backtracking==0 ) {
                before(grammarAccess.getOpAddRule());
            }
            pushFollow(FOLLOW_1);
            ruleOpAdd();
            state._fsp--;
            if (state.failed) return ;
            if ( state.backtracking==0 ) {
                after(grammarAccess.getOpAddRule());
            }
            match(input,EOF,FOLLOW_2);
            if (state.failed) return ;
        }
    }
    catch (RecognitionException re) {
        reportError(re);
        recover(input,re);
    }
    finally {
    }
    return ;
}
java
/**
 * Returns the angle in radians of the vector from p1 to p2, where each
 * point is packed into a single int decoded by get_x()/get_y().
 *
 * Axis-aligned cases are handled explicitly with WaltzUtil.PI; the general
 * case falls through to Math.atan2. The degenerate (0,0) delta returns 0.0.
 * NOTE(review): presumably WaltzUtil.PI == Math.PI, in which case this
 * whole branch chain is equivalent to a single atan2 call -- confirm
 * before simplifying.
 */
private static double get_angle(final int p1, final int p2) {
    int delta_x, delta_y;
    double ret = 0.0;
    /*
     * Calculate (x2 - x1) and (y2 - y1). The points are passed in the form
     * x1y1 and x2y2. get_x() and get_y() are passed these points and return
     * the x and y values respectively. For example, get_x(1020) returns 10.
     */
    delta_x = get_x( p2 ) - get_x( p1 );
    delta_y = get_y( p2 ) - get_y( p1 );
    if ( delta_x == 0 ) {
        // vertical line: +/- pi/2 depending on direction
        if ( delta_y > 0 ) {
            ret = WaltzUtil.PI / 2;
        } else if ( delta_y < 0 ) {
            ret = -WaltzUtil.PI / 2;
        }
    } else if ( delta_y == 0 ) {
        // horizontal line: 0 or pi depending on direction
        if ( delta_x > 0 ) {
            ret = 0.0;
        } else if ( delta_x < 0 ) {
            ret = WaltzUtil.PI;
        }
    } else {
        ret = Math.atan2( delta_y, delta_x );
    }
    return ret;
}
java
/**
 * Gets the next page of detector definitions for a site.
 *
 * @param nextPageLink the NextLink from the previous successful list call
 * @return an Observable emitting one {@link Page} of results per response
 */
public Observable<Page<DetectorDefinitionInner>> getSiteDetectorNextAsync(final String nextPageLink) {
    return getSiteDetectorNextWithServiceResponseAsync(nextPageLink)
        .map(new Func1<ServiceResponse<Page<DetectorDefinitionInner>>, Page<DetectorDefinitionInner>>() {
            @Override
            public Page<DetectorDefinitionInner> call(ServiceResponse<Page<DetectorDefinitionInner>> response) {
                // Unwrap the REST envelope and surface only the page body.
                return response.body();
            }
        });
}
python
def enrich_fields(cls, fields, eitem):
    """Enrich the fields property of an issue.

    Loops through all properties in issue['fields'], using those that are
    relevant to enrich eitem with new properties. Those properties are user
    defined, depending on options configured in Jira. For example, if
    SCRUM is activated, we have a field named "Story Points".

    :param fields: fields property of an issue
    :param eitem: enriched item, which will be modified adding more properties
    """
    for field in fields:
        # Jira user-defined fields all share the 'customfield_' prefix.
        if field.startswith('customfield_'):
            if type(fields[field]) is dict:
                if 'name' in fields[field]:
                    if fields[field]['name'] == "Story Points":
                        eitem['story_points'] = fields[field]['value']
                    elif fields[field]['name'] == "Sprint":
                        value = fields[field]['value']
                        if value:
                            # The sprint value is serialized as a single string of
                            # "key=value" pairs; pull out the pieces of interest.
                            sprint = value[0].partition(",name=")[2].split(',')[0]
                            sprint_start = value[0].partition(",startDate=")[2].split(',')[0]
                            sprint_end = value[0].partition(",endDate=")[2].split(',')[0]
                            sprint_complete = value[0].partition(",completeDate=")[2].split(',')[0]
                            eitem['sprint'] = sprint
                            # Dates may be the literal "<null>"; normalise them.
                            eitem['sprint_start'] = cls.fix_value_null(sprint_start)
                            eitem['sprint_end'] = cls.fix_value_null(sprint_end)
                            eitem['sprint_complete'] = cls.fix_value_null(sprint_complete)
java
public static File[] getLogFiles(File[] logDirList,long snapshotZxid) { List<File> files = Util.sortDataDir(logDirList, "log", true); long logZxid = 0; // Find the log file that starts before or at the same time as the // zxid of the snapshot for (File f : files) { long fzxid = Util.getZxidFromName(f.getName(), "log"); if (fzxid > snapshotZxid) { continue; } // the files // are sorted with zxid's if (fzxid > logZxid) { logZxid = fzxid; } } List<File> v=new ArrayList<File>(5); for (File f : files) { long fzxid = Util.getZxidFromName(f.getName(), "log"); if (fzxid < logZxid) { continue; } v.add(f); } return v.toArray(new File[0]); }
java
/**
 * Calculates how far a chair projection's reference axis deviates from
 * the horizontal, so the depiction can be aligned left-to-right.
 *
 * @param points ring atom coordinates
 * @param turns turn directions around the ring
 * @param projection the projection being drawn
 * @return offset vector (1 - dx, dy) from the unit horizontal axis;
 *         (0, 0) for non-chair projections
 */
private Point2d horizontalOffset(Point2d[] points, Turn[] turns, Projection projection) {
    // Haworth must currently be drawn vertically, I have seen them drawn
    // slanted but it's difficult to determine which way the projection
    // is relative
    if (projection != Projection.Chair)
        return new Point2d(0, 0);
    // the atoms either side of a central atom are our reference
    int offset = chairCenterOffset(turns);
    int prev = (offset + 5) % 6;
    int next = (offset + 7) % 6;
    // and the axis formed by these atoms is our horizontal reference which
    // we normalise
    double deltaX = points[prev].x - points[next].x;
    double deltaY = points[prev].y - points[next].y;
    double mag = Math.sqrt(deltaX * deltaX + deltaY * deltaY);
    deltaX /= mag;
    deltaY /= mag;
    // we now ensure the reference always points left to right (presumes no
    // vertical chairs)
    if (deltaX < 0) {
        deltaX = -deltaX;
        deltaY = -deltaY;
    }
    // horizontal = <1,0> so the offset is the difference from this
    return new Point2d(1 - deltaX, deltaY);
}
java
/**
 * Generated EMF reflective isSet check for the TextFidelity features
 * (do not hand-edit; regenerate from the model).
 */
@Override
public boolean eIsSet(int featureID) {
    switch (featureID) {
        case AfplibPackage.TEXT_FIDELITY__STP_TXT_EX:
            // set iff the value differs from the (possibly null) default
            return STP_TXT_EX_EDEFAULT == null ? stpTxtEx != null : !STP_TXT_EX_EDEFAULT.equals(stpTxtEx);
        case AfplibPackage.TEXT_FIDELITY__REP_TXT_EX:
            return REP_TXT_EX_EDEFAULT == null ? repTxtEx != null : !REP_TXT_EX_EDEFAULT.equals(repTxtEx);
    }
    return super.eIsSet(featureID);
}
java
/**
 * Interpolates the definition template, replacing each '{...}' variable
 * (in order of appearance) with the corresponding entry of 'values'.
 *
 * NOTE(review): assumes 'values' holds at least as many entries as there
 * are '{' occurrences in the definition -- confirm getValues() guarantees
 * this, otherwise values.get(index++) can throw IndexOutOfBoundsException.
 *
 * @return the interpolated string; "" when there is no definition
 */
public String interpolate() {
    getValues();
    if (definition == null) {
        return "";
    }
    int variableStart = definition.indexOf("{");
    if (variableStart < 0) {
        // No variables at all: the definition is returned verbatim.
        return definition;
    }
    int index = 0;
    int variableEnd = 0;
    StringBuilder sb = new StringBuilder();
    while (variableStart >= 0) {
        // Copy the literal text before the variable, then the variable's value.
        sb.append(definition.substring(variableEnd, variableStart));
        variableEnd = getIndexForEndOfVariable(definition, variableStart) + 1;
        variableStart = definition.indexOf("{", variableEnd);
        sb.append(values.get(index++).getValue());
    }
    // Trailing literal text after the last variable.
    if (variableEnd < definition.length()) {
        sb.append(definition.substring(variableEnd));
    }
    return sb.toString();
}
java
/**
 * Exposes this object's identity as a single-entry metadata map keyed by
 * {@code META_KEY_ID}.
 */
public Map<String, String> toMetaData() {
    Map<String, String> metaData = new HashMap<String, String>();
    metaData.put(META_KEY_ID, id);
    return metaData;
}
java
/**
 * Formats a decimal value for a report using localisation settings taken
 * from the additional-parameters map.
 *
 * @param pAddParam additional parameters; read keys: "decSepv" and
 *        "decGrSepv" (separator strings), "reportDp" and "digInGr"
 *        (Integer decimal places / digits per group)
 * @param pVal the value to print
 * @return the formatted number string
 */
public final String prn(final Map<String, Object> pAddParam, final BigDecimal pVal) {
    return this.srvNumberToString.print(pVal.toString(),
        (String) pAddParam.get("decSepv"),
        (String) pAddParam.get("decGrSepv"),
        (Integer) pAddParam.get("reportDp"),
        (Integer) pAddParam.get("digInGr"));
}
python
def get_address_transactions(self, account_id, address_id, **params):
    """List transactions for an address.

    https://developers.coinbase.com/api/v2#list-address39s-transactions
    """
    # GET /v2/accounts/{account_id}/addresses/{address_id}/transactions
    response = self._get('v2', 'accounts', account_id,
                         'addresses', address_id, 'transactions',
                         params=params)
    return self._make_api_object(response, Transaction)
java
/**
 * Deletes the specified file from a task's directory on the compute node,
 * blocking until the service call completes.
 *
 * @param jobId the id of the job containing the task
 * @param taskId the id of the task whose file is deleted
 * @param filePath path to the file to delete
 * @param recursive whether to delete children of a directory
 * @param fileDeleteFromTaskOptions additional operation options
 */
public void deleteFromTask(String jobId, String taskId, String filePath, Boolean recursive, FileDeleteFromTaskOptions fileDeleteFromTaskOptions) {
    deleteFromTaskWithServiceResponseAsync(jobId, taskId, filePath, recursive, fileDeleteFromTaskOptions)
            .toBlocking()
            .single()
            .body();
}
python
def set_user_avatar(self, username, avatar):
    """Set a user's avatar.

    :param username: the user to set the avatar for
    :param avatar: ID of the avatar to set
    """
    params = {'username': username}
    url = self._get_url('user/avatar')
    self._set_avatar(params, url, avatar)
python
def embedManifestDllCheck(target, source, env):
    """Function run by embedManifestDllCheckAction to check for existence of manifest
    and other conditions, and embed the manifest by calling embedManifestDllAction if so."""
    if env.get('WINDOWS_EMBED_MANIFEST', 0):
        # MSVC emits the manifest next to the DLL as '<dll>.manifest'.
        manifestSrc = target[0].get_abspath() + '.manifest'
        if os.path.exists(manifestSrc):
            # Run the embedding action directly; a non-zero result is an error.
            ret = (embedManifestDllAction) ([target[0]],None,env)
            if ret:
                raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0]))
            return ret
        else:
            print('(embed: no %s.manifest found; not embedding.)'%str(target[0]))
    # Nothing to embed (feature disabled or no manifest): report success.
    return 0
python
def transform_source(text):
    '''Replaces instances of

        repeat n:

    by

        for __VAR_i in range(n):

    where __VAR_i is a string that does not appear elsewhere
    in the code sample.
    '''
    loop_keyword = 'repeat'
    nb = text.count(loop_keyword)
    if nb == 0:
        # Fast path: nothing to rewrite.
        return text
    var_names = get_unique_variable_names(text, nb)
    toks = tokenize.generate_tokens(StringIO(text).readline)
    result = []
    replacing_keyword = False
    for toktype, tokvalue, _, _, _ in toks:
        if toktype == tokenize.NAME and tokvalue == loop_keyword:
            # Start of a 'repeat n:' statement: emit 'for VAR in range('.
            result.extend([
                (tokenize.NAME, 'for'),
                (tokenize.NAME, var_names.pop()),
                (tokenize.NAME, 'in'),
                (tokenize.NAME, 'range'),
                (tokenize.OP, '(')
            ])
            replacing_keyword = True
        elif replacing_keyword and tokvalue == ':':
            # End of the repeat header: close the range( call.
            # NOTE(review): assumes no ':' occurs inside the repeat
            # expression itself (e.g. a slice or dict literal) -- confirm.
            result.extend([
                (tokenize.OP, ')'),
                (tokenize.OP, ':')
            ])
            replacing_keyword = False
        else:
            result.append((toktype, tokvalue))
    return tokenize.untokenize(result)
java
/**
 * Adds one project row to the result map, keyed by project ID with the
 * project name as the value.
 */
protected void processProjectListItem(Map<Integer, String> result, Row row) {
    result.put(row.getInteger("PROJ_ID"), row.getString("PROJ_NAME"));
}
java
/**
 * Clears this node: removes all user data, detaches every child (by
 * repeatedly overwriting slot 0 until none remain) and then recursively
 * clears the detached children.
 */
@Override
public void clear() {
    removeAllUserData();
    if (this.children != null) {
        // Snapshot the children before detaching them, so they can still
        // be cleared recursively afterwards.
        final List<N> nodes = new ArrayList<>(this.children);
        while (!this.children.isEmpty()) {
            // setChildAt(..., null) removes the child from this node.
            setChildAt(0, null);
        }
        for (final N child : nodes) {
            child.clear();
        }
        nodes.clear();
    }
}
java
/**
 * Replaces the entire table contents with this single data object and
 * notifies the model listeners.
 */
public void setData (Object data) {
    _data.clear();
    _data.add(data);
    _model.fireTableDataChanged();
}
python
def set_image(self, img, bgr=False):
    '''set the currently displayed image

    :param img: OpenCV (legacy cv API) image to display
    :param bgr: if True, the image is BGR and is converted to RGB first
    '''
    if not self.is_alive():
        # Viewer window has gone away; drop the frame.
        return
    if bgr:
        # Clone before converting so the caller's image is not modified.
        img = cv.CloneImage(img)
        cv.CvtColor(img, img, cv.CV_BGR2RGB)
    # Hand the frame to the GUI thread via the input queue.
    self.in_queue.put(MPImageData(img))
python
def run(self, batch: Batch, train: bool=False, stream: StreamWrapper=None) -> Batch:
    """
    Run feed-forward pass with the given batch using all the models, aggregate and return the results.

    .. warning::
        :py:class:`Ensemble` can not be trained.

    :param batch: batch to be processed
    :param train: ``True`` if this batch should be used for model update, ``False`` otherwise
    :param stream: stream wrapper (useful for precise buffer management)
    :return: aggregated results dict
    :raise ValueError: if the ``train`` flag is set to ``True``
    """
    if train:
        raise ValueError('Ensemble model cannot be trained.')
    self._load_models()

    # run all the models on the same batch (never in train mode)
    batch_outputs = [model.run(batch, False, stream) for model in self._models]

    # aggregate the outputs, one configured output name at a time
    aggregated = {}
    for output_name in self._outputs:
        output_values = [batch_output[output_name] for batch_output in batch_outputs]
        if self._aggregation == 'mean':
            # element-wise mean across models
            aggregated[output_name] = np.mean(output_values, axis=0)
        elif self._aggregation == 'major_vote':
            # flatten each model's output, vote element-wise, then restore
            # the original per-model output shape
            output_values_arr = np.array(output_values)
            output = major_vote(output_values_arr.reshape((output_values_arr.shape[0], -1)))
            aggregated[output_name] = np.array(output).reshape(output_values_arr[0].shape)
    return aggregated
python
def sparql(self, select='*', body=None, inject_prefixes=None, single_column=False):
    """
    Execute a SPARQL query against this ontology's named graph.

    The query is specified using `select` and `body` parameters.
    The argument for the Named Graph is injected into the query.

    The select parameter should be either '*' or a list of vars (not
    prefixed with '?').

    - If '*' is passed, then the result is a list of dicts, { $var: {value: $val } }
    - If a list of vars is passed, then the result is a list of lists
    - Unless single_column=True, in which case the results are a simple list of
      values from the first var

    The inject_prefixes argument can be used to inject a list of prefixes -
    these are expanded using the prefixcommons library

    Bug fixes vs the previous version:
    - the column check tested the empty accumulator (`isinstance(cols, list)`
      was always True) instead of `select`, so passing a list of vars crashed
      when building the SELECT clause;
    - multi-column results were returned as one flattened column-major list
      instead of one list per binding as documented.
    """
    if inject_prefixes is None:
        inject_prefixes = []
    namedGraph = get_named_graph(self.handle)

    if select is None or select == '*':
        # '*' keeps the raw binding dicts unless a single column was requested.
        cols = [] if single_column else None
        select_val = '*'
    else:
        # Accept either a single var name or a list of var names.
        if isinstance(select, list):
            cols = select
        else:
            cols = [select]
        select_val = ", ".join(['?' + c for c in cols])

    prefixes = ""
    if inject_prefixes is not None:
        plist = ["prefix {}: <{}> ".format(p, expand_uri(p + ":"))
                 for p in inject_prefixes if p != "" and p is not None]
        prefixes = "\n".join(plist)

    query = """
    {prefixes}
    SELECT {s} WHERE {{
    GRAPH <{g}>  {{
    {b}
    }}
    }}
    """.format(prefixes=prefixes, s=select_val, b=body, g=namedGraph)

    bindings = run_sparql(query)
    if len(bindings) == 0:
        return []

    if cols is None:
        # '*' without single_column: raw binding dicts.
        return bindings
    if single_column:
        # First var only, as a flat list of values.
        c = list(bindings[0].keys())[0]
        return [r[c]['value'] for r in bindings]
    # One row (list of values, in `cols` order) per binding, as documented.
    return [[r[c]['value'] for c in cols] for r in bindings]
java
/**
 * Writes the given content to a file using the supplied character encoding.
 * <p>
 * Improvement: uses try-with-resources so both the underlying stream and the
 * writer are reliably closed (the original never closed the PrintWriter and
 * only closed the raw stream in a finally block).
 *
 * @param content  the text to write
 * @param file     the destination file (created/overwritten)
 * @param encoding the character encoding name, e.g. "UTF-8"
 * @throws org.osgl.exception.UnexpectedException wrapping any I/O failure
 */
public static void write(CharSequence content, File file, String encoding) {
    try (OutputStream os = new FileOutputStream(file);
         PrintWriter printWriter = new PrintWriter(new OutputStreamWriter(os, encoding))) {
        printWriter.print(content);
        // Flush before close so a failure surfaces inside the try block.
        printWriter.flush();
        os.flush();
    } catch (IOException e) {
        throw E.unexpected(e);
    }
}
python
def get_criteria(self, sess, model, advx, y, batch_size=BATCH_SIZE):
    """
    Returns a dictionary mapping the name of each criterion to a NumPy
    array containing the value of that criterion for each adversarial
    example.
    Subclasses can add extra criteria by implementing the `extra_criteria`
    method.

    :param sess: tf.session.Session
    :param model: cleverhans.model.Model
    :param advx: numpy array containing the adversarial examples made so far
      by earlier work in the bundling process
    :param y: numpy array containing true labels
    :param batch_size: int, batch size
    """
    # Subclass-provided extra criterion names and their factory (may be empty).
    names, factory = self.extra_criteria()
    factory = _CriteriaFactory(model, factory)
    # NOTE(review): `devices` is not defined in this method — presumably a
    # module-level list of compute devices; confirm it is in scope.
    results = batch_eval_multi_worker(sess, factory, [advx, y], batch_size=batch_size, devices=devices)
    # The two built-in criteria always come first, matching the order in
    # which the factory emits result arrays.
    names = ['correctness', 'confidence'] + names
    out = dict(safe_zip(names, results))
    return out
java
/**
 * Returns an iterator over the current elements in ranked order.
 * <p>
 * The sorted view is rebuilt lazily: it is only reconstructed from the
 * backing map when {@code elementSetUnsorted} has been flagged, and the
 * rebuild happens under the {@code elementMap} lock so concurrent updates
 * see a consistent snapshot. The returned iterator operates on the
 * (concurrent) sorted set captured inside the lock.
 *
 * @return an iterator over the sorted elements
 */
private Iterator<ConcurrentServiceReferenceElement<T>> elements() {
    Collection<ConcurrentServiceReferenceElement<T>> set;
    synchronized (elementMap) {
        // Rebuild the sorted view only when the underlying map has changed.
        if (elementSetUnsorted) {
            elementSet = new ConcurrentSkipListSet<ConcurrentServiceReferenceElement<T>>(elementMap.values());
            elementSetUnsorted = false;
        }
        set = elementSet;
    }
    return set.iterator();
}
python
def subtrees_for_phrase(self, phrase_type):
    """
    Returns subtrees corresponding to all phrases matching a given phrase type

    :param phrase_type: POS such as "NP", "VP", "det", etc.
    :type phrase_type: str
    :return: a list of NLTK.Tree.Subtree instances
    :rtype: list of NLTK.Tree.Subtree
    """
    wanted = phrase_type.lower()
    matches = []
    # Walk every subtree of the parse and keep the ones whose node label
    # matches the requested phrase type (case-insensitive).
    for subtree in self.parse.subtrees():
        if subtree.node.lower() == wanted:
            matches.append(subtree)
    return matches
java
/**
 * Deregisters the given connection manager from the shared registry and
 * shuts the registry down once the last manager is gone.
 * <p>
 * NOTE(review): {@code remove(...) != null} implies {@code connectionManagers}
 * is a {@code Map} keyed by the manager instance — confirm at the field
 * declaration (not visible here).
 *
 * @param connectionManager the manager to deregister
 * @return {@code true} if the manager was registered and has been removed
 */
public static boolean removeConnectionManager(HttpClientConnectionManager connectionManager) {
    boolean wasRemoved = connectionManagers.remove(connectionManager) != null;
    // Last manager removed: tear down shared resources.
    if (connectionManagers.isEmpty()) {
        shutdown();
    }
    return wasRemoved;
}
python
def an_text_url(identifiant, code):
    """Build the National Assembly document URL for a document identifier.

    Port of the PHP ``urlOpaque($identifiant, $codeType)`` helper used by the
    National Assembly website: the identifier is decoded with a regex, the
    legislature number, adopted-text marker and document number are
    extracted, and the matching directory/prefix/suffix triple is looked up
    to build the final ``.asp`` URL.

    :param identifiant: document identifier, e.g. ``PRJLANR5L15B0002``
    :param code: document type code used when the identifier itself does not
        force the type (``BTC``/``BTA`` markers take precedence)
    :raises Exception: when the resolved document type is unknown
    """
    datas = {
        'PRJL': {
            'repertoire': 'projets',
            'prefixe': 'pl',
            'suffixe': '',
        },
        'PION': {
            'repertoire': 'propositions',
            'prefixe': 'pion',
            'suffixe': '',
        },
        'PNRECOMENQ': {
            'repertoire': 'propositions',
            'prefixe': 'pion',
            'suffixe': '',
        },
        'PNREAPPART341': {
            'repertoire': 'propositions',
            'prefixe': 'pion',
            'suffixe': '',
        },
        'PNREMODREGLTAN': {
            'repertoire': 'propositions',
            'prefixe': 'pion',
            'suffixe': '',
        },
        'AVCE': {
            'repertoire': 'projets',
            'prefixe': 'pl',
            'suffixe': '-ace',
        },
        'ETDI': {
            'repertoire': 'projets',
            'prefixe': 'pl',
            'suffixe': '-ei',
        },
        'ACIN': {
            'repertoire': 'projets',
            'prefixe': 'pl',
            'suffixe': '-ai',
        },
        'LETT': {
            'repertoire': 'projets',
            'prefixe': 'pl',
            'suffixe': '-l',
        },
        'PNRETVXINSTITEUROP': {
            'repertoire': 'europe/resolutions',
            'prefixe': 'ppe',
            'suffixe': '',
        },
        'PNRE': {
            'repertoire': 'propositions',
            'prefixe': 'pion',
            'suffixe': '',
        },
        'RION': {
            'repertoire': '',
            'prefixe': '',
            'suffixe': '',
        },
        'TCOM': {
            'repertoire': 'ta-commission',
            'prefixe': 'r',
            'suffixe': '-a0',
        },
        'TCOMMODREGLTAN': {
            'repertoire': 'ta-commission',
            'prefixe': 'r',
            'suffixe': '-a0',
        },
        'TCOMTVXINSTITEUROP': {
            'repertoire': 'ta-commission',
            'prefixe': 'r',
            'suffixe': '-a0',
        },
        'TCOMCOMENQ': {
            'repertoire': 'ta-commission',
            'prefixe': 'r',
            'suffixe': '-a0',
        },
        'TADO': {
            'repertoire': 'ta',
            'prefixe': 'ta',
            'suffixe': '',
        },
        # NOT IN NATIONAL ASSEMBLY PHP CODE
        'RAPP': {
            'repertoire': 'rapports',
            'prefixe': 'r',
            'suffixe': '',
        },
        'RINF': {
            'repertoire': 'rapports',
            'prefixe': 'r',
            'suffixe': '',
        }
    }

    # Groups: 1=doc code, 2=chamber letters, 3=republic, 4=L/S marker,
    # 5=legislature, 6=adopted-text marker (BTC/BTA/...), 7=number.
    match = re.match(r'(.{4})([ANS]*)(R[0-9])([LS]*)([0-9]*)([BTACP]*)(.*)', identifiant)

    legislature = match.group(5)
    ta_marker = match.group(6)
    num = match.group(7)

    # The adopted-text marker overrides the caller-supplied type code.
    if ta_marker == 'BTC':
        doc_type = 'TCOM'
    elif ta_marker == 'BTA':
        doc_type = 'TADO'
    else:
        doc_type = code

    host = "http://www.assemblee-nationale.fr/"

    if doc_type not in datas:  # ex: ALCNANR5L15B0002 (allocution du président)
        raise Exception('Unknown document type for %s' % identifiant)

    entry = datas[doc_type]
    return "".join([host, legislature, "/", entry['repertoire'], "/",
                    entry['prefixe'], num, entry['suffixe'], ".asp"])
java
/**
 * Parses a search type string into the corresponding {@link SearchType}.
 * Matching is case-insensitive.
 * <p>
 * Improvement: lower-cases the input once instead of on every comparison,
 * and uses a switch for the dispatch.
 *
 * @param type the search type name, e.g. "tsmeta" or "lookup"
 * @return the matching search type
 * @throws IllegalArgumentException if the type is null, empty or unknown
 */
public static SearchType parseSearchType(final String type) {
    if (type == null || type.isEmpty()) {
        throw new IllegalArgumentException("Type provided was null or empty");
    }
    switch (type.toLowerCase()) {
        case "tsmeta":
            return SearchType.TSMETA;
        case "tsmeta_summary":
            return SearchType.TSMETA_SUMMARY;
        case "tsuids":
            return SearchType.TSUIDS;
        case "uidmeta":
            return SearchType.UIDMETA;
        case "annotation":
            return SearchType.ANNOTATION;
        case "lookup":
            return SearchType.LOOKUP;
        default:
            throw new IllegalArgumentException("Unknown type: " + type);
    }
}
python
def _list_ports(self):
    """
    Generate the list of ports displayed in the client.

    If the compute has sent a list we return it (used by nodes where you
    can not personnalize the port naming); otherwise ports are derived from
    the node type and its properties.
    """
    self._ports = []

    # Some special cases
    if self._node_type == "atm_switch":
        atm_port = set()
        # Mapping is like {"1:0:100": "10:0:200"}
        for source, dest in self._properties["mappings"].items():
            atm_port.add(int(source.split(":")[0]))
            atm_port.add(int(dest.split(":")[0]))
        atm_port = sorted(atm_port)
        for port in atm_port:
            self._ports.append(PortFactory("{}".format(port), 0, 0, port, "atm"))
        return

    elif self._node_type == "frame_relay_switch":
        frame_relay_port = set()
        # Mapping is like {"1:101": "10:202"}
        for source, dest in self._properties["mappings"].items():
            frame_relay_port.add(int(source.split(":")[0]))
            frame_relay_port.add(int(dest.split(":")[0]))
        frame_relay_port = sorted(frame_relay_port)
        for port in frame_relay_port:
            self._ports.append(PortFactory("{}".format(port), 0, 0, port, "frame_relay"))
        return
    elif self._node_type == "dynamips":
        self._ports = DynamipsPortFactory(self._properties)
        return
    elif self._node_type == "docker":
        for adapter_number in range(0, self._properties["adapters"]):
            self._ports.append(PortFactory("eth{}".format(adapter_number), 0, adapter_number, 0, "ethernet",
                                           short_name="eth{}".format(adapter_number)))
    elif self._node_type in ("ethernet_switch", "ethernet_hub"):
        # Basic node we don't want to have adapter number
        port_number = 0
        for port in self._properties["ports_mapping"]:
            self._ports.append(PortFactory(port["name"], 0, 0, port_number, "ethernet",
                                           short_name="e{}".format(port_number)))
            port_number += 1
    # BUG FIX: the original used `in ("vpcs")`, which is substring membership
    # in the *string* "vpcs" (so "v", "cs", "pc", ... all matched). A
    # one-element tuple is required for the intended membership test.
    elif self._node_type in ("vpcs",):
        self._ports.append(PortFactory("Ethernet0", 0, 0, 0, "ethernet", short_name="e0"))
    elif self._node_type in ("cloud", "nat"):
        port_number = 0
        for port in self._properties["ports_mapping"]:
            self._ports.append(PortFactory(port["name"], 0, 0, port_number, "ethernet",
                                           short_name=port["name"]))
            port_number += 1
    else:
        # Generic case: derive ports from the adapter/segment configuration.
        self._ports = StandardPortFactory(self._properties, self._port_by_adapter, self._first_port_name,
                                          self._port_name_format, self._port_segment_size)
java
/**
 * Serializes a session to the given stream: a fixed header (id and the
 * session timestamps/intervals) written with a DataOutputStream, followed
 * by the attribute count and each attribute name/value pair written with
 * an ObjectOutputStream.
 * <p>
 * Bug fix: the ObjectOutputStream is now flushed before returning —
 * previously its buffered attribute data could never reach {@code os},
 * truncating the persisted session.
 *
 * @param os   destination stream (not closed by this method)
 * @param id   the session id
 * @param data the session data to persist
 * @throws IOException if writing fails
 */
private void save(OutputStream os, String id, SessionData data) throws IOException {
    DataOutputStream out = new DataOutputStream(os);
    out.writeUTF(id);
    out.writeLong(data.getCreationTime());
    out.writeLong(data.getAccessedTime());
    out.writeLong(data.getLastAccessedTime());
    out.writeLong(data.getExpiryTime());
    out.writeLong(data.getMaxInactiveInterval());

    // Snapshot the key set so the count written matches the entries written.
    List<String> keys = new ArrayList<>(data.getKeys());
    out.writeInt(keys.size());

    ObjectOutputStream oos = new ObjectOutputStream(out);
    for (String name : keys) {
        oos.writeUTF(name);
        oos.writeObject(data.getAttribute(name));
    }
    // Push buffered object data through to the underlying stream.
    oos.flush();
}
python
def keycmp(a, b, pth=()):
    """Recursively verify that every key in nested dict `b` also exists at
    the same level of reference dict `a`.

    The key path is accumulated in `pth` for error reporting. If an unknown
    key is encountered in `b`, an `UnknownKeyError` exception is raised. If
    a non-dict value is encountered in `b` for which the corresponding value
    in `a` is a dict, an `InvalidValueError` exception is raised."""
    for key, bval in b.items():
        # Key present in b but absent from the reference tree.
        if key not in a:
            raise UnknownKeyError(pth + (key,))
        aval = a[key]
        if isinstance(aval, dict):
            if isinstance(bval, dict):
                # Both sides hold dicts: descend one level.
                keycmp(aval, bval, pth + (key,))
            else:
                # Reference expects a dict here, b supplies something else.
                raise InvalidValueError(pth + (key,))
python
def transition(self, data, year, linked_tables=None):
    """
    Add or remove rows from a table based on population targets.

    Parameters
    ----------
    data : pandas.DataFrame
        Rows will be removed from or added to this table.
    year : int
        Year number that will be passed to `transitioner`.
    linked_tables : dict of tuple, optional
        Dictionary of (table, 'column name') pairs. The column name
        should match the index of `data`. Indexes in `data` that
        are copied or removed will also be copied and removed in
        linked tables. They dictionary keys are used in the
        returned `updated_links`.

    Returns
    -------
    updated : pandas.DataFrame
        Table with rows removed or added.
    added : pandas.Series
        Indexes of new rows in `updated`.
    updated_links : dict of pandas.DataFrame

    """
    logger.debug('start: transition')
    linked_tables = linked_tables or {}
    updated_links = {}

    with log_start_finish('add/remove rows', logger):
        # The transitioner decides which rows to add/copy/remove for this year.
        updated, added, copied, removed = self.transitioner(data, year)

    # Propagate the same additions/removals to every linked table so their
    # foreign keys stay consistent with `updated`.
    for table_name, (table, col) in linked_tables.items():
        logger.debug('updating linked table {}'.format(table_name))
        updated_links[table_name] = \
            _update_linked_table(table, col, added, copied, removed)

    logger.debug('finish: transition')
    return updated, added, updated_links
java
/**
 * Records a difference: keeps at most {@code DIFFERENCE_COUNT_MAX} entries
 * in memory while always streaming every difference to the report writer.
 *
 * @param difference the difference to record
 * @throws DiffException if the report file cannot be written
 */
@Override
public void add(Difference difference) {
    differenceCount += 1;
    // Cap the in-memory list so huge diffs do not exhaust memory; the
    // counter itself keeps growing past the cap.
    if(differenceCount <= DIFFERENCE_COUNT_MAX) {
        list.add(difference);
    }
    try {
        writer.write(difference.toString());
    } catch (IOException e) {
        throw new DiffException("Failed to write difference to report file", e);
    }
}
java
/**
 * Creates a Places API nearby-search request centred on the given location.
 *
 * @param context  the API context used to execute the request
 * @param location the latitude/longitude to search around
 * @return a request ready for further configuration and execution
 */
public static NearbySearchRequest nearbySearchQuery(GeoApiContext context, LatLng location) {
    NearbySearchRequest request = new NearbySearchRequest(context);
    request.location(location);
    return request;
}
python
def result(self):
    """ Get the result for a job. This will block if the job is incomplete.

    Returns:
      The result for the Job.
    Raises:
      An exception if the Job resulted in an exception.
    """
    # Block until the job has finished one way or another.
    self.wait()
    error = self._fatal_error
    if error:
        raise error
    return self._result
python
def _get_drive_api(credentials):
    """
    For a given set of credentials, return a drive API object.
    """
    http = httplib2.Http()
    # Wrap the transport so every request carries the OAuth credentials.
    http = credentials.authorize(http)
    service = discovery.build('drive', 'v2', http=http)
    # duck punch service obj. with credentials so callers can re-use them.
    service.credentials = credentials
    return service
java
/**
 * Opens a download stream for a file staged in Azure blob storage,
 * transparently decrypting it when client-side encryption is enabled.
 * Failed attempts are retried up to the configured maximum.
 *
 * @param connection            session, passed to the Azure error handler
 * @param command               command that triggered the download
 * @param parallelism           requested parallelism (not used in this path)
 * @param remoteStorageLocation Azure container name
 * @param stageFilePath         blob path within the container
 * @param stageRegion           stage region (not used in this path)
 * @return an input stream over the (possibly decrypted) blob content
 * @throws SnowflakeSQLException if the download ultimately fails
 */
@Override
public InputStream downloadToStream(SFSession connection, String command, int parallelism,
                                    String remoteStorageLocation, String stageFilePath,
                                    String stageRegion) throws SnowflakeSQLException {
    int retryCount = 0;
    do {
        try {
            CloudBlobContainer container = azStorageClient.getContainerReference(remoteStorageLocation);
            CloudBlob blob = container.getBlockBlobReference(stageFilePath);

            InputStream stream = blob.openInputStream();
            Map<String, String> userDefinedMetadata = blob.getMetadata();

            // The encryption key and IV were stored as blob metadata at
            // upload time.
            AbstractMap.SimpleEntry<String, String> encryptionData =
                parseEncryptionData(userDefinedMetadata.get(AZ_ENCRYPTIONDATAPROP));

            String key = encryptionData.getKey();
            String iv = encryptionData.getValue();
            if (this.isEncrypting() && this.getEncryptionKeySize() <= 256) {
                // Encrypted stage: both key and IV must be present.
                if (key == null || iv == null) {
                    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
                        ErrorCode.INTERNAL_ERROR.getMessageCode(),
                        "File metadata incomplete");
                }
                try {
                    return EncryptionProvider.decryptStream(stream, key, iv, encMat);
                } catch (Exception ex) {
                    logger.error("Error in decrypting file", ex);
                    throw ex;
                }
            } else {
                return stream;
            }
        } catch (Exception ex) {
            logger.debug("Downloading unsuccessful {}", ex);
            // NOTE(review): handleAzureException is expected to rethrow once
            // retries are exhausted (possibly after backoff/credential
            // renewal) — confirm, since the loop otherwise relies on the
            // retryCount bound below.
            handleAzureException(ex, ++retryCount, "download", connection, command, this);
        }
    } while (retryCount < getMaxRetries());

    // Should be unreachable: the handler above either succeeds or throws.
    throw new SnowflakeSQLException(SqlState.INTERNAL_ERROR,
        ErrorCode.INTERNAL_ERROR.getMessageCode(),
        "Unexpected: download unsuccessful without exception!");
}
java
/**
 * Static factory for an {@link ImmutableMapJsonDeserializer}.
 *
 * @param keyDeserializer   deserializer for map keys
 * @param valueDeserializer deserializer for map values
 * @param <K>               key type
 * @param <V>               value type
 * @return a new deserializer instance
 */
public static <K, V> ImmutableMapJsonDeserializer<K, V> newInstance(KeyDeserializer<K> keyDeserializer,
                                                                    JsonDeserializer<V> valueDeserializer) {
    return new ImmutableMapJsonDeserializer<>(keyDeserializer, valueDeserializer);
}
java
/**
 * Renders the body into an in-memory buffer and returns it decoded as a
 * UTF-8 string.
 *
 * @return the body content as a string
 * @throws IOException if writing the body fails
 */
public String printBody() throws IOException {
    try (ByteArrayOutputStream buffer = new ByteArrayOutputStream()) {
        this.printBody(buffer);
        return new Utf8String(buffer.toByteArray()).asString();
    }
}
python
def read_cpp_source_file(self, source_file):
    """
    Reads C++ source file and returns declarations tree

    :param source_file: path to C++ source file
    :type source_file: str
    """
    xml_file = ''
    try:
        ffname = self.__file_full_name(source_file)
        self.logger.debug("Reading source file: [%s].", ffname)
        # Declarations are cached per (file, configuration) pair.
        decls = self.__dcache.cached_value(ffname, self.__config)
        if not decls:
            self.logger.debug(
                "File has not been found in cache, parsing...")
            # Run the external parser, producing a temporary XML file.
            xml_file = self.create_xml_file(ffname)
            decls, files = self.__parse_xml_file(xml_file)
            self.__dcache.update(
                ffname, self.__config, decls, files)
        else:
            self.logger.debug((
                "File has not been changed, reading declarations " +
                "from cache."))
    except Exception:
        # Best-effort cleanup of the temporary XML file before re-raising;
        # xml_file is '' (falsy) when we served from cache or failed early.
        if xml_file:
            utils.remove_file_no_raise(xml_file, self.__config)
        raise
    # Normal path: remove the temporary XML file as well.
    if xml_file:
        utils.remove_file_no_raise(xml_file, self.__config)
    return decls
java
/**
 * Returns a child cell style bound to the data style for the given cell
 * type, or the base style unchanged when this format defines no data style
 * for that type.
 *
 * @param style the base cell style
 * @param type  the cell type whose data style should be applied
 * @return the derived style, or {@code style} when no data style applies
 */
public TableCellStyle addChildCellStyle(final TableCellStyle style, final TableCell.Type type) {
    final DataStyle dataStyle = this.format.getDataStyle(type);
    return dataStyle == null ? style : this.stylesContainer.addChildCellStyle(style, dataStyle);
}
python
def get_comments_for_commentor_and_reference(self, resource_id, reference_id): """Gets a list of comments corresponding to a resource and reference ``Id``. arg: resource_id (osid.id.Id): the ``Id`` of the resource arg: reference_id (osid.id.Id): the ``Id`` of the reference return: (osid.commenting.CommentList) - the returned ``CommentList`` raise: NullArgument - ``resource_id`` or ``reference_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.relationship.RelationshipLookupSession.get_relationships_for_peers # NOTE: This implementation currently ignores plenary and effective views collection = JSONClientValidated('commenting', collection='Comment', runtime=self._runtime) result = collection.find( dict({'referenceId': str(resource_id), 'commentorId': str(reference_id)}, **self._view_filter())).sort('_id', ASCENDING) return objects.CommentList(result, runtime=self._runtime)
python
def fixer(base, target):
    """Parse data from fixer.io.

    :param base: base currency code, e.g. ``'EUR'``
    :param target: target currency code whose rate is returned
    :return: the ``base`` -> ``target`` exchange rate as a
        :class:`decimal.Decimal`
    :raises KeyError: if the response does not contain a rate for ``target``
    """
    # NOTE(review): plain HTTP against the legacy fixer.io host — the
    # service has since moved/requires an API key; confirm this endpoint
    # still responds.
    api_url = 'http://api.fixer.io/latest'
    resp = requests.get(
        api_url,
        params={
            'base': base,
            'symbols': target,
        },
        timeout=1,  # fail fast rather than block the caller
    )
    data = resp.json()
    return decimal.Decimal(data['rates'][target])
python
def cr_login(self, response):
    """Apply the auth state from a login API response to this session.

    :param response: decoded login response; must contain ``'auth'`` and
        ``'user'`` entries
    :raises ApiLoginFailure: if the session is still not logged in after
        applying the response
    """
    # Persist the auth token for subsequent requests and cache user data.
    self._state_params['auth'] = response['auth']
    self._user_data = response['user']
    if not self.logged_in:
        raise ApiLoginFailure(response)
python
def retrieve_ocsps(self, cert, issuer):
    """
    Fetches (and caches) OCSP responses for a certificate, if fetching
    is enabled.

    :param cert: An asn1crypto.x509.Certificate object
    :param issuer: An asn1crypto.x509.Certificate object of cert's issuer
    :raises SoftFailError: on network failure when revocation mode is
        "soft-fail" (the error is also recorded in
        ``self._soft_fail_exceptions``)
    :return: A list of asn1crypto.ocsp.OCSPResponse objects
    """
    # When fetching is disabled, only locally supplied responses are used.
    if not self._allow_fetching:
        return self._ocsps

    # Fetch at most once per certificate (keyed by issuer+serial).
    if cert.issuer_serial not in self._fetched_ocsps:
        try:
            ocsp_response = ocsp_client.fetch(
                cert,
                issuer,
                **self._ocsp_fetch_params
            )
            self._fetched_ocsps[cert.issuer_serial] = [ocsp_response]

            # Responses can contain certificates that are useful in validating the
            # response itself. We can use these since they will be validated using
            # the local trust roots.
            self._extract_ocsp_certs(ocsp_response)

        except (URLError, socket.error) as e:
            # Cache the failure so we do not retry for this certificate.
            self._fetched_ocsps[cert.issuer_serial] = []

            if self._revocation_mode == "soft-fail":
                self._soft_fail_exceptions.append(e)
                raise SoftFailError()
            else:
                raise

    return self._fetched_ocsps[cert.issuer_serial]
python
def func_accepts_var_args(func):
    """
    Return True if function 'func' accepts positional arguments *args.
    """
    if six.PY2:
        # getargspec()[1] is the *args name, or None when absent.
        return inspect.getargspec(func)[1] is not None

    parameters = inspect.signature(func).parameters.values()
    return any(p.kind == p.VAR_POSITIONAL for p in parameters)
python
def __connect_to_bus(self, bus):
    """ Attempt to connect to an I2C bus """
    def connect(bus_num):
        try:
            self.log.debug("Attempting to connect to bus %s..." % bus_num)
            self.bus = smbus.SMBus(bus_num)
            self.log.debug("Success")
        except IOError:
            self.log.debug("Failed")
            raise

    # An explicit bus number is used as-is; failures propagate.
    if bus is not None:
        connect(bus)
        return

    # No bus given: try bus 0 first, then fall back to bus 1. A failure on
    # bus 1 propagates to the caller.
    try:
        connect(0)
    except IOError:
        connect(1)
java
/**
 * Returns the last element of the given stream.
 * Note: consumes the whole stream into a list.
 *
 * @param stream the stream to consume; must contain at least one element
 * @param <T>    element type
 * @return the final element of the stream
 */
public static <T> T getLast(@javax.annotation.Nonnull final Stream<T> stream) {
    final List<T> items = stream.collect(Collectors.toList());
    return items.get(items.size() - 1);
}
java
/**
 * Removes descendant elements of {@code target} matching the given class.
 * Delegates to {@code execute} with a null id selector — the exact matching
 * semantics of {@code clazz} are defined there (not visible here).
 *
 * @param target the root element whose descendants are processed
 * @param clazz  the class name to match
 * @param <T>    concrete JAXB element type
 */
public static <T extends AbstractJaxb> void removeDescendants(T target, String clazz) {
    execute(target, null, clazz);
}
java
/**
 * Checks whether the given attribute is present (has a value) in the
 * Android style namespace of the attribute set.
 *
 * @param attrs     the attribute set to inspect
 * @param attribute the attribute name to look for
 * @return true if the attribute has a value
 */
public static boolean isHaveAttribute(AttributeSet attrs, String attribute) {
    return attrs.getAttributeValue(Ui.androidStyleNameSpace, attribute) != null;
}
java
/**
 * Parses a date value against a list of patterns, returning the first
 * successful parse.
 *
 * @param dateValue   the date string to parse; must not be null
 * @param dateFormats the patterns to try, or null for DEFAULT_PATTERNS
 * @param startDate   start of the two-digit-year window, or null for
 *                    DEFAULT_TWO_DIGIT_YEAR_START
 * @return the parsed date, or null if no pattern matched
 */
public static Date parseDate(
        final String dateValue,
        final String[] dateFormats,
        final Date startDate) {
    notNull(dateValue, "Date value");
    final String[] localDateFormats = dateFormats != null ? dateFormats : DEFAULT_PATTERNS;
    final Date localStartDate = startDate != null ? startDate : DEFAULT_TWO_DIGIT_YEAR_START;
    String v = dateValue;
    // trim single quotes around date if present
    // see issue #5279
    if (v.length() > 1 && v.startsWith("'") && v.endsWith("'")) {
        v = v.substring (1, v.length() - 1);
    }

    for (final String dateFormat : localDateFormats) {
        // NOTE(review): DateFormatHolder presumably hands out per-thread
        // SimpleDateFormat instances (SimpleDateFormat is not thread-safe)
        // — confirm at its declaration.
        final SimpleDateFormat dateParser = DateFormatHolder.formatFor(dateFormat);
        dateParser.set2DigitYearStart(localStartDate);
        final ParsePosition pos = new ParsePosition(0);
        final Date result = dateParser.parse(v, pos);
        // A non-zero index means the parser consumed input, i.e. matched.
        if (pos.getIndex() != 0) {
            return result;
        }
    }
    return null;
}
java
/**
 * Combines the hashes of all configured key fields of {@code value} into a
 * single hash code. Each field's hash is mixed in with a position-dependent
 * salt so that permuted field values produce different codes.
 *
 * @param value the record to hash
 * @return the combined hash code
 * @throws NullKeyFieldException        if a key field is null
 * @throws KeyFieldOutOfBoundsException if a key position is out of range
 */
@SuppressWarnings("unchecked")
@Override
public int hash(T value) {
    int i = 0;
    try {
        int code = this.comparators[0].hash(value.getFieldNotNull(keyPositions[0]));
        for (i = 1; i < this.keyPositions.length; i++) {
            code *= HASH_SALT[i & 0x1F]; // salt code with (i % HASH_SALT.length)-th salt component
            code += this.comparators[i].hash(value.getFieldNotNull(keyPositions[i]));
        }
        return code;
    } catch (NullFieldException nfex) {
        throw new NullKeyFieldException(nfex);
    } catch (IndexOutOfBoundsException iobex) {
        // `i` holds the position of the offending key field.
        throw new KeyFieldOutOfBoundsException(keyPositions[i]);
    }
}
python
def get_plugin_module(self, plugin_name):
    """Returns the module containing the plugin of the given name.

    :param plugin_name: name of a registered plugin
    :raises RezPluginError: if no plugin of that name is registered
    """
    if plugin_name in self.plugin_modules:
        return self.plugin_modules[plugin_name]
    raise RezPluginError("Unrecognised %s plugin: '%s'"
                         % (self.pretty_type_name, plugin_name))
python
def sort_by(items, attr):
    """
    General sort filter - sorts by either attribute or key.
    """
    def sort_key(item):
        # Prefer attribute access; fall back to item lookup for mappings.
        try:
            return getattr(item, attr)
        except AttributeError:
            try:
                return item[attr]
            except TypeError:
                # Unsubscriptable item: surface the original AttributeError.
                getattr(item, attr)

    return sorted(items, key=sort_key)
java
/**
 * Selects a random k-subset of the integers {@code 0..n-1} (where
 * {@code k == a.length}) and stores it, sorted, into {@code a}.
 * <p>
 * NOTE(review): the (A)–(G) step labels suggest this follows the RANKSB
 * algorithm (Nijenhuis &amp; Wilf, "Combinatorial Algorithms") working in
 * 1-based values that are shifted down by one at the end — confirm against
 * the cited reference.
 *
 * @param n      the size of the universe; elements are drawn from 0..n-1
 * @param a      output array; its length k determines the subset size
 * @param random the randomness source
 * @return {@code a}, filled with the chosen subset
 */
public static int[] subset(final int n, final int[] a, final Random random) {
    requireNonNull(random, "Random");
    requireNonNull(a, "Sub set array");

    final int k = a.length;

    checkSubSet(n, k);

    // Early return.
    if (a.length == n) {
        for (int i = 0; i < k; ++i) a[i] = i;
        return a;
    }

    // (A): Initialize a[i] to "zero" point for bin Ri.
    for (int i = 0; i < k; ++i) {
        a[i] = (i*n)/k;
    }

    // (B)
    int l = 0, x = 0;
    for (int c = 0; c < k; ++c) {
        do {
            // Choose random x;
            x = 1 + nextX(random, n - 1);

            // determine range Rl;
            l = (x*k - 1)/n;
        } while (a[l] >= x); // accept or reject.

        ++a[l];
    }
    int s = k;

    // (C) Move a[i] of nonempty bins to the left.
    int m = 0, p = 0;
    for (int i = 0; i < k; ++i) {
        if (a[i] == (i*n)/k) {
            a[i] = 0;
        } else {
            ++p;
            m = a[i];
            a[i] = 0;
            a[p - 1] = m;
        }
    }

    // (D) Determine l, set up space for Bl.
    int ds = 0;
    for (; p > 0; --p) {
        l = 1 + (a[p - 1]*k - 1)/n;
        ds = a[p - 1] - ((l - 1)*n)/k;
        a[p - 1] = 0;
        a[s - 1] = l;
        s -= ds;
    }

    // (E) If a[l] != 0, a new bin is to be processed.
    int r = 0, m0 = 0;
    for (int ll = 1; ll <= k; ++ll) {
        l = k + 1 - ll;

        if (a[l - 1] != 0) {
            r = l;
            m0 = 1 + ((a[l - 1] - 1)*n)/k;
            m = (a[l-1]*n)/k - m0 + 1;
        }

        // (F) Choose a random x.
        x = m0 + nextX(random, m - 1);
        int i = l + 1;

        // (G) Check x against previously entered elements in bin;
        //     increment x as it jumps over elements <= x.
        while (i <= r && x >= a[i - 1]) {
            ++x;
            a[i- 2] = a[i - 1];
            ++i;
        }

        a[i - 2] = x;
        --m;
    }

    // Shift the 1-based working values down to 0-based results.
    for (int i = 0; i < a.length; ++i) a[i] -= 1;
    return a;
}
python
def _process_function(chaining, routine): """Chain function which returns a function. :param routine: routine to process. :return: routine embedding execution function. """ def processing(*args, **kwargs): """Execute routine with input args and kwargs and add reuslt in chaining.___. :param tuple args: routine varargs. :param dict kwargs: routine kwargs. :return: chaining chaining. :rtype: Chaining """ result = routine(*args, **kwargs) chaining.___.append(result) return chaining return processing
java
/**
 * Registers a handler for the given path prefix. Synchronized so
 * concurrent registrations update the matcher one at a time.
 *
 * @param path    the path prefix to match
 * @param handler the handler to invoke for matching requests; must not be null
 * @return this handler, for chaining
 */
public synchronized PathHandler addPrefixPath(final String path, final HttpHandler handler) {
    Handlers.handlerNotNull(handler);
    pathMatcher.addPrefixPath(path, handler);
    return this;
}
python
def collect(self):
    """
    Collect s3 bucket stats

    For every configured S3 instance/bucket pair, fetches the total bucket
    size and publishes it once per configured byte unit as
    ``<bucket>.size.<unit>``.
    """
    # boto is an optional dependency; bail out if it failed to import.
    if boto is None:
        self.log.error("Unable to import boto python module")
        return {}

    for s3instance in self.config['s3']:
        self.log.info("S3: byte_unit: %s" % self.config['byte_unit'])
        aws_access = self.config['s3'][s3instance]['aws_access_key']
        aws_secret = self.config['s3'][s3instance]['aws_secret_key']
        for bucket_name in self.config['s3'][s3instance]['buckets']:
            bucket = self.getBucket(aws_access, aws_secret, bucket_name)

            # collect bucket size once, then convert per requested unit
            total_size = self.getBucketSize(bucket)
            for byte_unit in self.config['byte_unit']:
                new_size = diamond.convertor.binary.convert(
                    value=total_size,
                    oldUnit='byte',
                    newUnit=byte_unit
                )
                self.publish("%s.size.%s" % (bucket_name, byte_unit), new_size)
python
def country_code_for_valid_region(region_code):
    """Returns the country calling code for a specific region.

    For example, this would be 1 for the United States, and 64 for New
    Zealand. Assumes the region is already valid.

    Arguments:
    region_code -- The region that we want to get the country calling code for.

    Returns the country calling code for the region denoted by region_code.

    Raises an exception if no metadata is registered for the region.
    """
    # Region codes are stored upper-case in the metadata table.
    metadata = PhoneMetadata.metadata_for_region(region_code.upper(), None)
    if metadata is None:
        raise Exception("Invalid region code %s" % region_code)
    return metadata.country_code
java
/**
 * Sequences this choice with another applicative, discarding this value and
 * keeping the second. Delegates to the {@link Monad} default implementation
 * and coerces the result back to {@code Choice4}.
 *
 * @param appB the applicative whose value is kept
 * @param <E>  the type of the kept value
 * @return the second applicative's value as a {@code Choice4}
 */
@Override
public <E> Choice4<A, B, C, E> discardL(Applicative<E, Choice4<A, B, C, ?>> appB) {
    return Monad.super.discardL(appB).coerce();
}
java
/**
 * Sets the reference map, taking a defensive copy of the outer map
 * (or clearing the field when null is passed).
 * <p>
 * NOTE(review): the copy is shallow — the {@code List<UniqueId>} values
 * remain shared with the caller's map; confirm callers do not mutate them
 * afterwards.
 *
 * @param references map of references, or null to clear
 */
public void setReferences(Map<String, List<UniqueId>> references) {
    if (references != null) {
        this.references = new HashMap<>(references);
    } else {
        this.references = null;
    }
}
java
/**
 * Fetches and deserializes the music label with the given id from the
 * fanart.tv API.
 * <p>
 * Fix: corrected the "fauled" typo in the mapping-failure message and
 * removed the redundant pre-initialized local.
 *
 * @param id the music label id
 * @return the deserialized music label
 * @throws FanartTvException if the response cannot be mapped
 */
public FTMusicLabel getMusicLabel(String id) throws FanartTvException {
    URL url = ftapi.getImageUrl(BaseType.LABEL, id);
    String page = requestWebPage(url);

    try {
        return mapper.readValue(page, FTMusicLabel.class);
    } catch (IOException ex) {
        throw new FanartTvException(ApiExceptionType.MAPPING_FAILED,
            "failed to get Music Label with ID " + id, url, ex);
    }
}
python
def _OpenFileObject(self, path_spec):
    """Opens the file-like object defined by path specification.

    Args:
      path_spec (PathSpec): path specification.

    Returns:
      pysmraw.handle: a file-like object or None.

    Raises:
      PathSpecError: if the path specification is invalid.
    """
    if not path_spec.HasParent():
        raise errors.PathSpecError(
            'Unsupported path specification without parent.')

    parent_path_spec = path_spec.parent

    file_system = resolver.Resolver.OpenFileSystem(
        parent_path_spec, resolver_context=self._resolver_context)

    # Note that we cannot use pysmraw's glob function since it does not
    # handle the file system abstraction dfvfs provides.
    segment_file_path_specs = raw.RawGlobPathSpec(file_system, path_spec)
    if not segment_file_path_specs:
        return None

    if parent_path_spec.IsSystemLevel():
        # Typically the file-like object cache should have room for 127 items.
        self._resolver_context.SetMaximumNumberOfFileObjects(
            len(segment_file_path_specs) + 127)

    # Open every segment file; the raw handle reads them as one image.
    file_objects = []
    for segment_file_path_spec in segment_file_path_specs:
        file_object = resolver.Resolver.OpenFileObject(
            segment_file_path_spec, resolver_context=self._resolver_context)
        file_objects.append(file_object)

    raw_handle = pysmraw.handle()
    raw_handle.open_file_objects(file_objects)
    return raw_handle
python
def info(self, event=None, *args, **kw):
    """
    Process event and call :meth:`logging.Logger.info` with the result.
    """
    # Skip the (potentially expensive) event processing entirely when the
    # INFO level is disabled.
    if self._logger.isEnabledFor(logging.INFO):
        kw = self._add_base_info(kw)
        kw['level'] = "info"
        return self._proxy_to_logger('info', event, *args, **kw)
    return None
python
def find_document_type_by_name(self, entity_name, active='Y', match_case=True):
    """
    search document types by name and active(Y/N) status

    :param entity_name: entity name
    :param active: 'Y'/'N' active flag the entries must match
    :param match_case: when False, the name comparison is case-insensitive
    :return: iterator (``filter`` object) over the matching entries
    """
    entries = self.get_dictionary('Document_Type_DE')

    if match_case:
        predicate = lambda e: e['Active'] == active and e['EntryName'].find(entity_name) >= 0
    else:
        needle = entity_name.lower()
        predicate = lambda e: e['Active'] == active and e['EntryName'].lower().find(needle) >= 0

    return filter(predicate, entries)
java
/**
 * Collects the options of every option group into a single list.
 *
 * @param includeDisabled whether disabled options are included
 * @return a new list containing all groups' options, in group order
 */
public List<Option> getOptions(boolean includeDisabled) {
    List< Option> result = new ArrayList<>();
    for (OptionGroup group : optionGroups.values()) {
        result.addAll(group.getOptions(includeDisabled));
    }
    return result;
}
java
/**
 * Removes a message listener's subscription to a topic.
 * <p>
 * The listener and its pub/sub wrapper are removed while holding the
 * per-topic subscription lock; the actual UNSUBSCRIBE command is issued
 * outside the lock on the wrapper that was removed.
 *
 * @param topic           the topic name
 * @param messageListener the listener to remove
 * @return true if the listener was subscribed and has been unsubscribed
 */
@Override
public boolean unsubscribe(String topic, MessageListener messageListener) {
    Set<MessageListener> subcription = topicSubscriptions.get(topic);
    boolean unsubscribe = false;
    WrappedJedisPubSub wrappedJedisPubSub = null;
    if (subcription != null) {
        synchronized (subcription) {
            if (subcription.remove(messageListener)) {
                wrappedJedisPubSub = topicSubscriptionMappings.remove(messageListener);
                if (wrappedJedisPubSub != null) {
                    unsubscribe = true;
                }
            }
        }
    }
    if (unsubscribe) {
        byte[] topicName = SafeEncoder.encode(topic);
        wrappedJedisPubSub.unsubscribe(topicName);
        return true;
    } else {
        return false;
    }
}
python
def volume_list(search_opts=None, profile=None, **kwargs):
    '''
    List storage volumes

    search_opts
        Dictionary of search options

    profile
        Profile to use

    CLI Example:

    .. code-block:: bash

        salt '*' nova.volume_list search_opts='{"display_name": "myblock"}' profile=openstack

    '''
    # Authenticate against the configured profile, then delegate the listing
    # to the underlying client.
    conn = _auth(profile, **kwargs)
    return conn.volume_list(search_opts=search_opts)
java
/**
 * Returns the cached entry for {@code key}, creating it via
 * {@code entryGetter} on first access.
 * <p>
 * Uses the get / putIfAbsent pattern instead of {@code Map.computeIfAbsent}
 * so the entry creation runs before touching the map; under a race, the
 * losing thread's freshly built entry is discarded and the winner's entry
 * is returned.
 *
 * @param key the cache key
 * @return the entry associated with the key
 */
private Entry<V> computeIfAbsent(K key) {
    Entry<V> v = map.get(key);
    if (v == null) {
        Entry<V> tmp = entryGetter.apply(key);
        v = map.putIfAbsent(key, tmp);
        if (v == null) {
            // Our entry won the race (or there was no race).
            v = tmp;
        }
    }
    return v;
}
java
/**
 * Logs the user out by removing the user principal from the subject.
 *
 * @return true always, once the principal (if any) has been removed
 * @throws LoginException if the subject is read-only and cannot be modified
 */
@Override
public boolean logout() throws LoginException {
    if (mSubject.isReadOnly()) {
        throw new LoginException("logout Failed: Subject is Readonly.");
    }
    // mUser may be null if login never completed; nothing to remove then.
    if (mUser != null) {
        mSubject.getPrincipals().remove(mUser);
    }
    return true;
}
python
def verify_cb(conn, cert, errnum, depth, ok):
    """
    The default OpenSSL certificate verification callback.

    Returns ``ok`` unchanged when verification succeeded and raises
    ``SecurityError`` otherwise.
    """
    if ok:
        return ok
    raise SecurityError("Could not verify CA certificate {0}"
                        .format(cert.get_subject()))
java
public boolean handleControlWord(RtfCtrlWordData ctrlWordData) { boolean result = true; // just let fonttbl fall through and set last ctrl word object. if(ctrlWordData.ctrlWord.equals("f")) { this.setFontNumber(ctrlWordData.param); result=true;} if(ctrlWordData.ctrlWord.equals("fcharset")) { this.setCharset(ctrlWordData.param); result=true; } // font families if(ctrlWordData.ctrlWord.equals("fnil")) { this.setFontFamily("roman"); result=true; } if(ctrlWordData.ctrlWord.equals("froman")) { this.setFontFamily("roman"); result=true; } if(ctrlWordData.ctrlWord.equals("fswiss")) { this.setFontFamily("swiss"); result=true; } if(ctrlWordData.ctrlWord.equals("fmodern")) { this.setFontFamily("modern"); result=true; } if(ctrlWordData.ctrlWord.equals("fscript")) { this.setFontFamily("script"); result=true; } if(ctrlWordData.ctrlWord.equals("fdecor")) { this.setFontFamily("decor"); result=true; } if(ctrlWordData.ctrlWord.equals("ftech")) { this.setFontFamily("tech"); result=true; } if(ctrlWordData.ctrlWord.equals("fbidi")) { this.setFontFamily("bidi"); result=true; } // pitch if(ctrlWordData.ctrlWord.equals("fprq")) { this.setPitch(ctrlWordData.param); result=true; } // bias if(ctrlWordData.ctrlWord.equals("fbias")) { this.setBias(ctrlWordData.param); result=true; } // theme font information if(ctrlWordData.ctrlWord.equals("flomajor")) { this.setThemeFont("flomajor"); result= true; } if(ctrlWordData.ctrlWord.equals("fhimajor")) { this.setThemeFont("fhimajor"); result= true; } if(ctrlWordData.ctrlWord.equals("fdbmajor")) { this.setThemeFont("fdbmajor"); result= true; } if(ctrlWordData.ctrlWord.equals("fbimajor")) { this.setThemeFont("fbimajor"); result= true; } if(ctrlWordData.ctrlWord.equals("flominor")) { this.setThemeFont("flominor"); result= true; } if(ctrlWordData.ctrlWord.equals("fhiminor")) { this.setThemeFont("fhiminor"); result= true; } if(ctrlWordData.ctrlWord.equals("fdbminor")) { this.setThemeFont("fdbminor"); result= true; } 
if(ctrlWordData.ctrlWord.equals("fbiminor")) { this.setThemeFont("fbiminor"); result= true; } // panose if(ctrlWordData.ctrlWord.equals("panose")) {state = SETTING_PANOSE; result = true; } // \*\fname // <font name> #PCDATA if(ctrlWordData.ctrlWord.equals("fname")) {state = SETTING_FONTNAME; result = true; } // \*\falt if(ctrlWordData.ctrlWord.equals("falt")) { state = SETTING_ALTERNATE; result = true; } // \*\fontemb if(ctrlWordData.ctrlWord.equals("fontemb")) { state = SETTING_FONT_EMBED; result = true; } // font type if(ctrlWordData.ctrlWord.equals("ftnil")) { this.setTrueType("ftnil"); result= true; } if(ctrlWordData.ctrlWord.equals("fttruetype")) { this.setTrueType("fttruetype"); result= true; } // \*\fontfile if(ctrlWordData.ctrlWord.equals("fontemb")) { state = SETTING_FONT_FILE; result = true; } // codepage if(ctrlWordData.ctrlWord.equals("cpg")) { this.setCodePage(ctrlWordData.param); result= true; } this.lastCtrlWord = ctrlWordData; return result; }
java
/**
 * Returns (and caches) the generated abstract master class for the given
 * storable type and feature set. The requested features are first pruned to
 * those the type actually supports, then expanded with implied features, and
 * the resulting (type, features) pair is used as the cache key.
 *
 * @param type     storable type to generate a master class for
 * @param features requested master features; may be null for none. The caller's
 *                 set is not modified (it is cloned before adjustment).
 * @return the generated abstract class implementing {@code type}
 * @throws SupportException if the storable type cannot be supported
 * @throws IllegalArgumentException if the type is malformed
 */
public static <S extends Storable> Class<? extends S> getAbstractClass(Class<S> type, EnumSet<MasterFeature> features)
    throws SupportException, IllegalArgumentException
{
    StorableInfo<S> info = StorableIntrospector.examine(type);

    // Work on a private copy so the caller's set is never mutated.
    if (features == null) {
        features = EnumSet.noneOf(MasterFeature.class);
    } else {
        features = features.clone();
    }

    // Remove any features which don't apply.
    {
        boolean anySequences = false;
        boolean doNormalize = false;

        // Only scan the properties if one of the property-dependent
        // features was actually requested.
        if (features.contains(MasterFeature.INSERT_SEQUENCES) ||
            features.contains(MasterFeature.NORMALIZE))
        {
            for (StorableProperty<S> property : info.getAllProperties().values()) {
                // Derived and join properties have no storage of their own.
                if (property.isDerived() || property.isJoin()) {
                    continue;
                }
                if (!anySequences) {
                    if (property.getSequenceName() != null) {
                        anySequences = true;
                    }
                }
                if (!doNormalize) {
                    // NORMALIZE only matters for BigDecimal properties.
                    if (BigDecimal.class.isAssignableFrom(property.getType())) {
                        doNormalize = true;
                    }
                }
                // Both facts established; no need to scan further.
                if (anySequences && doNormalize) {
                    break;
                }
            }

            if (!anySequences) {
                features.remove(MasterFeature.INSERT_SEQUENCES);
            }
            if (!doNormalize) {
                features.remove(MasterFeature.NORMALIZE);
            }
        }

        // VERSIONING requires a version property on the type.
        if (info.getVersionProperty() == null) {
            features.remove(MasterFeature.VERSIONING);
        }

        // PARTITIONING requires a non-empty partition key.
        if (info.getPartitionKey() == null || info.getPartitionKey().getProperties().size() == 0) {
            features.remove(MasterFeature.PARTITIONING);
        }
    }

    // Add implied features.

    if (features.contains(MasterFeature.VERSIONING)) {
        // Implied feature.
        features.add(MasterFeature.UPDATE_FULL);
    }

    if (alwaysHasTxn(INSERT_OP, features)) {
        // Implied feature.
        features.add(MasterFeature.INSERT_TXN);
    }
    if (alwaysHasTxn(UPDATE_OP, features)) {
        // Implied feature.
        features.add(MasterFeature.UPDATE_TXN);
    }
    if (alwaysHasTxn(DELETE_OP, features)) {
        // Implied feature.
        features.add(MasterFeature.DELETE_TXN);
    }

    if (requiresTxnForUpdate(INSERT_OP, features)) {
        // Implied feature.
        features.add(MasterFeature.INSERT_TXN_FOR_UPDATE);
    }
    if (requiresTxnForUpdate(UPDATE_OP, features)) {
        // Implied feature.
        features.add(MasterFeature.UPDATE_TXN_FOR_UPDATE);
    }
    if (requiresTxnForUpdate(DELETE_OP, features)) {
        // Implied feature.
        features.add(MasterFeature.DELETE_TXN_FOR_UPDATE);
    }

    // The cache key captures the type together with the resolved features.
    Object key = KeyFactory.createKey(new Object[] {type, features});

    synchronized (cCache) {
        Class<? extends S> abstractClass = (Class<? extends S>) cCache.get(key);
        if (abstractClass != null) {
            return abstractClass;
        }
        // Miss: generate the class while holding the cache lock so
        // each key is generated at most once.
        abstractClass =
            new MasterStorableGenerator<S>(type, features).generateAndInjectClass();
        cCache.put(key, abstractClass);
        return abstractClass;
    }
}
java
public static void initTEAPPS (@Nonnull final ValidationExecutorSetRegistry aRegistry) { ValueEnforcer.notNull (aRegistry, "Registry"); final boolean bNotDeprecated = false; // No Schematrons here aRegistry.registerValidationExecutorSet (ValidationExecutorSet.create (VID_TEAPPS_272, "TEAPPSXML " + VID_TEAPPS_272.getVersion (), bNotDeprecated, ValidationExecutorXSD.create (new ClassPathResource ("/schemas/TEAPPSXMLv272_schema_INVOICES.xsd")))); aRegistry.registerValidationExecutorSet (ValidationExecutorSet.create (VID_TEAPPS_30, "TEAPPSXML " + VID_TEAPPS_30.getVersion (), bNotDeprecated, ValidationExecutorXSD.create (new ClassPathResource ("/schemas/teappsxmlv30_schema_invoices_0.xsd")))); }