language: stringclasses (2 values: java, python)
func_code_string: stringlengths (63 to 466k characters)
java
public static void main(String[] args)
        throws InvalidKeyException, NoSuchAlgorithmException, InvalidEndpointException,
        InvalidPortException, InvalidBucketNameException, InsufficientDataException,
        NoResponseException, ErrorResponseException, InternalException,
        InvalidArgumentException, IOException, XmlPullParserException {
    /* play.min.io for test and development. */
    MinioClient minioClient = new MinioClient("https://play.min.io:9000",
            "Q3AM3UQ867SPQQA43P2F", "zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG");

    /* Amazon S3: */
    // MinioClient minioClient = new MinioClient("https://s3.amazonaws.com",
    //         "YOUR-ACCESSKEYID", "YOUR-SECRETACCESSKEY");

    String objectName = "my-objectname";
    String bucketName = "my-bucketname";
    File file = new File("my-filename");
    InputStream pis = new BufferedInputStream(new ProgressStream("Uploading... ",
            ProgressBarStyle.ASCII, new FileInputStream(file)));
    minioClient.putObject(bucketName, objectName, pis, pis.available(), "application/octet-stream");
    pis.close();
    System.out.println("my-objectname is uploaded successfully");
}
python
def setdefault(self: dict, other: dict):
    """Like .update() but values in self take priority."""
    for k, v in other.items():
        self.setdefault(k, v)  # dict.setdefault only sets k if it is missing
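A quick sanity check of the helper above, using plain dicts (the key names here are illustrative only):

```python
defaults = {"timeout": 30, "retries": 3}
config = {"timeout": 5}

setdefault(config, defaults)  # fills in only the missing keys
assert config == {"timeout": 5, "retries": 3}
```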
java
private void render(SVG.Group obj) {
    debug("Group render");

    updateStyleForElement(state, obj);
    if (!display())
        return;

    if (obj.transform != null) {
        canvas.concat(obj.transform);
    }

    checkForClipPath(obj);

    boolean compositing = pushLayer();
    renderChildren(obj, true);
    if (compositing)
        popLayer(obj);

    updateParentBoundingBox(obj);
}
java
public String generateSourceMap() throws IOException {
    final String sourceMethod = "generateSourceMap"; //$NON-NLS-1$
    final boolean isTraceLogging = log.isLoggable(Level.FINER);
    if (isTraceLogging) {
        log.entering(sourceClass, sourceMethod, new Object[]{name});
    }
    String result = null;
    lineLengths = getLineLengths();
    Compiler compiler = new Compiler();
    sgen = new SourceMapGeneratorV3();
    compiler.initOptions(compiler_options);
    currentLine = currentChar = 0;
    Node node = compiler.parse(JSSourceFile.fromCode(name, source));
    if (compiler.hasErrors()) {
        if (log.isLoggable(Level.WARNING)) {
            JSError[] errors = compiler.getErrors();
            for (JSError error : errors) {
                log.logp(Level.WARNING, sourceClass, sourceMethod, error.toString());
            }
        }
    }
    if (node != null) {
        processNode(node);
        StringWriter writer = new StringWriter();
        sgen.appendTo(writer, name);
        result = writer.toString();
    }
    sgen = null;
    lineLengths = null;
    if (isTraceLogging) {
        log.exiting(sourceClass, sourceMethod, result);
    }
    return result;
}
python
def get_service_framework_id(service_name, inactive=False, completed=False):
    """Get the framework ID for a service

    :param service_name: the service name
    :type service_name: str
    :param inactive: whether to include inactive services
    :type inactive: bool
    :param completed: whether to include completed services
    :type completed: bool
    :return: a framework id
    :rtype: str, or None
    """
    service = get_service(service_name, inactive, completed)
    if service is not None and service['id']:
        return service['id']
    return None
java
public int readAll(CharBuffer buf, int length) throws IOException {
    int len = buf.length();

    buf.length(len + length);

    int readLength = readAll(buf.buffer(), len, length);

    if (readLength < 0) {
        // nothing was read; restore the original buffer length
        buf.length(len);
        return -1;
    } else if (readLength < length) {
        // short read; trim the buffer to what was actually read
        buf.length(len + readLength);
    }

    return readLength;
}
python
def zlist(columns, items, print_columns=None, text="", title="",
          width=DEFAULT_WIDTH, height=ZLIST_HEIGHT, timeout=None):
    """
    Display a list of values

    :param columns: a list of column names
    :type columns: list of strings
    :param items: a list of values
    :type items: list of strings
    :param print_columns: index of a column (return just the values from this column)
    :type print_columns: int (None if all the columns)
    :param text: text inside the window
    :type text: str
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    :return: A row of values from the table
    :rtype: list
    """
    dialog = ZList(columns, items, print_columns, text, title,
                   width, height, timeout)
    dialog.run()
    return dialog.response
java
public void setNAME(Integer newNAME) {
    Integer oldNAME = name;
    name = newNAME;
    if (eNotificationRequired())
        eNotify(new ENotificationImpl(this, Notification.SET,
                AfplibPackage.BEGIN_SEGMENT_COMMAND__NAME, oldNAME, name));
}
java
@Check
public void checkContainerType(SarlEvent event) {
    final XtendTypeDeclaration declaringType = event.getDeclaringType();
    if (declaringType != null) {
        final String name = canonicalName(declaringType);
        assert name != null;
        error(MessageFormat.format(Messages.SARLValidator_32, name),
                event, null, INVALID_NESTED_DEFINITION);
    }
}
python
def match_concept(self, string):
    '''Find all matches in this :class:`Bottle` for ``string`` and
    return the best match.'''
    matches = self.match_all_concepts(string)
    if len(matches) > 0:
        return matches[0]
    return None
java
public void process(DocumentAndOp doc, Analyzer analyzer) throws IOException {
    if (doc.getOp() == DocumentAndOp.Op.DELETE
            || doc.getOp() == DocumentAndOp.Op.UPDATE) {
        deleteList.add(doc.getTerm());
    }

    if (doc.getOp() == DocumentAndOp.Op.INSERT
            || doc.getOp() == DocumentAndOp.Op.UPDATE) {
        if (writer == null) {
            // analyzer is null because we specify an analyzer with addDocument
            writer = createWriter();
        }
        writer.addDocument(doc.getDocument(), analyzer);
        numDocs++;
    }
}
java
public void setSortMode(final SortMode sortMode) {
    getOrCreateComponentModel().sortMode = sortMode == null ? SortMode.NONE : sortMode;
}
python
def update_name(self, force=False, create_term=False, report_unchanged=True):
    """Generate the Root.Name term from DatasetName, Version, Origin, Time and Space"""

    updates = []

    self.ensure_identifier()

    name_term = self.find_first('Root.Name')

    if not name_term:
        if create_term:
            name_term = self['Root'].new_term('Root.Name', '')
        else:
            updates.append("No Root.Name, can't update name")
            return updates

    orig_name = name_term.value
    identifier = self.get_value('Root.Identifier')
    datasetname = self.get_value('Root.Dataset')

    if datasetname:
        name = self._generate_identity_name()

        if name != orig_name or force:
            name_term.value = name
            updates.append("Changed Name")
        else:
            if report_unchanged:
                updates.append("Name did not change")

    elif not orig_name:
        if not identifier:
            updates.append("Failed to find DatasetName term or Identity term. Giving up")
        else:
            updates.append("Setting the name to the identifier")
            name_term.value = identifier

    elif orig_name == identifier:
        if report_unchanged:
            updates.append("Name did not change")

    else:
        # There is no DatasetName, so we can't generate a name, and Root.Name
        # is not empty, so we should not set it to the identity.
        updates.append("No Root.Dataset, so can't update the name")

    return updates
java
@Override
public boolean fileCreated(File file) throws WatchingException {
    if (WatcherUtils.isInDirectory(file, internalSources)) {
        processDirectory(internalSources, destinationForInternals);
    } else if (WatcherUtils.isInDirectory(file, externalSources)) {
        processDirectory(externalSources, destinationForExternals);
    }
    return true;
}
java
private <T> InternalProviderImpl installInternalProvider(Class<T> clazz, String bindingName,
        InternalProviderImpl<? extends T> internalProvider, boolean isBound, boolean isTestProvider) {
    if (bindingName == null) {
        if (isBound) {
            return installUnNamedProvider(mapClassesToUnNamedBoundProviders, clazz,
                    internalProvider, isTestProvider);
        } else {
            return installUnNamedProvider(mapClassesToUnNamedUnBoundProviders, clazz,
                    internalProvider, isTestProvider);
        }
    } else {
        return installNamedProvider(mapClassesToNamedBoundProviders, clazz, bindingName,
                internalProvider, isTestProvider);
    }
}
java
public void removeConstraint(final GVRConstraint gvrConstraint) {
    mPhysicsContext.runOnPhysicsThread(new Runnable() {
        @Override
        public void run() {
            if (contains(gvrConstraint)) {
                NativePhysics3DWorld.removeConstraint(getNative(), gvrConstraint.getNative());
                mPhysicsObject.remove(gvrConstraint.getNative());
            }
        }
    });
}
java
public static TranslatableComponent make(final @NonNull String key,
        final @NonNull List<Component> args, final @NonNull Consumer<Builder> consumer) {
    final Builder builder = builder(key).args(args);
    consumer.accept(builder);
    return builder.build();
}
python
def get_data(self): """ reads data from current WD measurement.txt or magic_measurements.txt depending on data model and sorts it into main measurements data structures given bellow: Data - {specimen: { zijdblock:[[treatment temp-str,dec-float, inc-float, mag_moment-float, ZI-float, meas_flag-str ('b','g'), method_codes-str]], zijdblock_geo:[[treatment temp-str,dec-float, inc-float, mag_moment-float, ZI-float, meas_flag-str ('b','g'), method_codes-str]], zijdblock_tilt:[[treatment temp-str,dec-float, inc-float, mag_moment-float, ZI-float, meas_flag-str ('b','g'), method_codes-str]], zijdblock_lab_treatments: [str], zijdblock_steps: [str], measurement_flag: [str ('b','g')], mag_meas_data_index: [int], csds: [float], pars: {}, zdata: array.shape = 2x2 (float), zdata_geo: array.shape = 2x2 (float), zdata_tilt: array.shape = 2x2 (float), vector_diffs: [float], vds: float }} Data_hierarchy - {specimen: { study: {} locations: {} sites: {} samples: {} specimens: {} sample_of_specimen: {} site_of_specimen: {} site_of_sample: {} location_of_site: {} location_of_specimen: {} study_of_specimen: {} expedition_name_of_specimen: {} }} """ # ------------------------------------------------ # Read magic measurement file and sort to blocks # ------------------------------------------------ # All meas data information is stored in Data[secimen]={} Data = {} Data_hierarchy = {} Data_hierarchy['study'] = {} Data_hierarchy['locations'] = {} Data_hierarchy['sites'] = {} Data_hierarchy['samples'] = {} Data_hierarchy['specimens'] = {} Data_hierarchy['sample_of_specimen'] = {} Data_hierarchy['site_of_specimen'] = {} Data_hierarchy['site_of_sample'] = {} Data_hierarchy['location_of_site'] = {} Data_hierarchy['location_of_specimen'] = {} Data_hierarchy['study_of_specimen'] = {} Data_hierarchy['expedition_name_of_specimen'] = {} if self.data_model == 3: if 'measurements' not in self.con.tables: self.user_warning( "Measurement data file is empty and the GUI cannot start, aborting") return Data, Data_hierarchy if self.con.tables['measurements'].df.empty: self.user_warning( "Measurement data file is empty and the GUI cannot start, aborting") return Data, Data_hierarchy # extract specimen data from measurements table if not len(self.spec_data): specs = self.con.tables['measurements'].df['specimen'].unique() df = pd.DataFrame(index=specs, columns=['specimen']) df.index.name = 'specimen_name' df['specimen'] = specs self.con.tables['specimens'].df = df self.spec_data = df if not len(self.spec_data): self.user_warning( "Measurement data file does not seem to have specimen data and the GUI cannot start, aborting") return Data, Data_hierarchy if 'sample' not in self.spec_data.columns or 'sample' not in self.samp_data.columns: if 'specimen' not in self.spec_data.columns: self.spec_data['specimen'] = self.con.tables['measurements'].df['specimen'] self.spec_data.set_index('specimen', inplace=True) self.spec_data['specimen'] = self.spec_data.index ui_dialog = demag_dialogs.user_input( self, ["# of characters to remove"], heading="Sample data could not be found attempting to generate sample names by removing characters from specimen names") self.show_dlg(ui_dialog) ui_data = ui_dialog.get_values() try: samp_ncr = int(ui_data[1]["# of characters to remove"]) except ValueError: self.user_warning( "Invalid input, specimen names will be used for sample names instead") samp_ncr = 0 self.spec_data['sample'] = [x[:-samp_ncr] for x in self.spec_data['specimen']] self.samp_data['sample'] = self.spec_data['sample'] 
self.samp_data.set_index('sample', inplace=True) self.samp_data['sample'] = self.samp_data.index if 'site' not in self.samp_data.columns or 'site' not in self.site_data.columns: ui_dialog = demag_dialogs.user_input( self, ["# of characters to remove", "site delimiter"], heading="No Site Data found attempting to create site names from specimen names") self.show_dlg(ui_dialog) ui_data = ui_dialog.get_values() try: site_ncr = int(ui_data[1]["# of characters to remove"]) self.samp_data['site'] = [x[:-site_ncr] for x in self.spec_data['specimen']] except ValueError: try: sd = ui_data[1]["site delimiter"] self.samp_data['site'] = [ x.split(sd)[0] for x in self.spec_data['specimen']] except ValueError: self.samp_data['site'] = [ x for x in self.spec_data['specimen']] self.site_data['site'] = self.samp_data['site'] self.site_data.drop_duplicates(inplace=True) self.site_data.set_index('site', inplace=True) self.site_data['site'] = self.site_data.index if 'location' not in self.site_data.columns or 'location' not in self.loc_data.columns: ui_dialog = demag_dialogs.user_input( self, ["location name for all sites"], heading="No Location found") self.show_dlg(ui_dialog) ui_data = ui_dialog.get_values() self.site_data['location'] = ui_data[1]["location name for all sites"] self.loc_data['location'] = self.site_data['location'] self.loc_data.drop_duplicates(inplace=True) self.loc_data.set_index('location', inplace=True) self.loc_data['location'] = self.loc_data.index # add data to other dataframes self.con.propagate_location_to_measurements() self.con.propagate_location_to_specimens() # get measurement data from contribution object meas_container = self.con.tables['measurements'] meas_data3_0 = meas_container.df meas_data3_0.replace({'specimen': {nan: 'unknown'}, 'sample': {nan: 'unknown'}, 'site': { nan: 'unknown'}, 'location': {nan: 'unknown'}}, inplace=True) meas_data3_0['specimen'] = meas_data3_0['specimen'].apply(str) meas_data3_0['sample'] = meas_data3_0['sample'].apply(str) meas_data3_0['site'] = meas_data3_0['site'].apply(str) meas_data3_0['location'] = meas_data3_0['location'].apply(str) # do some filtering # if 'location' in meas_data3_0.columns: # if any(meas_data3_0['location'].isnull()): # print("-W- Some measurements are missing location data, and will not be used") # meas_data3_0 = meas_data3_0[meas_data3_0['location'].notnull()] # meas_data3_0.replace({'location':float('nan')},'unknown',inplace=True) # if 'site' in meas_data3_0.columns: # if any(meas_data3_0['site'].isnull()): # print("-W- Some measurements are missing site data, and will not be used") # meas_data3_0 = meas_data3_0[meas_data3_0['site'].notnull()] # meas_data3_0.replace({'site':float('nan')},'unknown',inplace=True) # if 'sample' in meas_data3_0.columns: # if any(meas_data3_0['sample'].isnull()): # print("-W- Some measurements are missing sample data, and will not be used") # meas_data3_0 = meas_data3_0[meas_data3_0['sample'].notnull()] # meas_data3_0.replace({'sample':float('nan')},'unknown',inplace=True) # if 'specimen' in meas_data3_0.columns: # missing = meas_data3_0[meas_data3_0['specimen'].isnull()] # if len(missing): # print("-W- {} measurements are missing specimen data, and will not be used".format(missing)) # meas_data3_0 = meas_data3_0[meas_data3_0['specimen'].notnull()] # meas_data3_0.replace({'specimen':float('nan')},'unknown',inplace=True) # col_names = ['specimen', 'sample', 'site', 'location'] # for col_name in col_names: # if col_name in meas_data3_0.columns: # pruned = 
meas_data3_0[meas_data3_0[col_name].apply(cb.not_null)] # num_missing = len(meas_data3_0) - len(pruned) # if num_missing: # msg = "{} measurements cannot be associated with a {} and will be excluded\nTry using Pmag GUI (step 3) to make sure you have provided the full chain from specimen to location.".format(num_missing, col_name) # pw.simple_warning(msg) # print("-W- {} measurements are missing {} data and will be excluded".format(num_missing, col_name)) # meas_data3_0 = pruned Mkeys = ['magn_moment', 'magn_volume', 'magn_mass'] # fish out all the relavent data meas_data3_0 = meas_data3_0[meas_data3_0['method_codes'].str.contains( 'LT-NO|LT-AF-Z|LT-T-Z|LT-M-Z|LT-LT-Z') == True] if not len(meas_data3_0): self.user_warning("Your measurements table contains none of the required method codes to run Demag GUI: [LT-NO, LT-AF-Z, LT-T-Z, LT-M-Z, LT-LT-Z]") return {}, {} # now convert back to 2.5 changing only those keys that are necessary for thellier_gui meas_con_dict = map_magic.get_thellier_gui_meas_mapping( meas_data3_0, output=2) intensity_col = cb.get_intensity_col(meas_data3_0) if not intensity_col: self.user_warning("Your measurements table must have one of the following columns to run Demag GUI: 'magn_moment', 'magn_volume', 'magn_mass',or 'magn_uncal'") return {}, {} print('-I- Using {} for intensity'.format(intensity_col)) self.intensity_col = meas_con_dict[intensity_col] meas_data2_5 = meas_data3_0.rename(columns=meas_con_dict) # make a list of dictionaries to maintain backward compatibility mag_meas_data = meas_data2_5.to_dict("records") else: # data model 2.5 try: print(("-I- Read magic file %s" % self.magic_file)) except ValueError: self.magic_measurement = self.choose_meas_file() print(("-I- Read magic file %s" % self.magic_file)) mag_meas_data, file_type = pmag.magic_read(self.magic_file) if file_type != "magic_measurements": self.user_warning("You have selected data model 2.5, but your measurements file is either not in 2.5, or is not a measurements file.\n{} has file type: {}".format(self.magic_file, file_type)) return {}, {} self.mag_meas_data = self.merge_pmag_recs(mag_meas_data) # get list of unique specimen names with measurement data CurrRec = [] sids = pmag.get_specs(self.mag_meas_data) # specimen ID's for s in sids: if s not in list(Data.keys()): Data[s] = {} Data[s]['zijdblock'] = [] Data[s]['zijdblock_geo'] = [] Data[s]['zijdblock_tilt'] = [] Data[s]['zijdblock_lab_treatments'] = [] Data[s]['pars'] = {} Data[s]['csds'] = [] Data[s]['zijdblock_steps'] = [] Data[s]['measurement_flag'] = [] # a list of points 'g' or 'b' # index in original magic_measurements.txt Data[s]['mag_meas_data_index'] = [] Data[s]['measurement_names'] = [] prev_s = None cnt = -1 # list of excluded lab protocols. 
copied from pmag.find_dmag_rec(s,data) self.excluded_methods = ["LP-AN-ARM", "LP-AN-TRM", "LP-ARM-AFD", "LP-ARM2-AFD", "LP-TRM-AFD", "LP-TRM", "LP-TRM-TD", "LP-X", "LP-PI-ARM"] self.included_methods = [ "LT-NO", "LT-AF-Z", "LT-T-Z", "LT-M-Z", "LT-LT-Z"] # self.mag_meas_data.sort(key=meas_key) # asiigned default values for NRM if len(self.mag_meas_data) > 0 and self.intensity_col in list(self.mag_meas_data[0].keys()): NRM = float(self.mag_meas_data[0][self.intensity_col]) for rec in self.mag_meas_data: # if "measurement_number" in rec.keys() and str(rec['measurement_number']) == '1' and "magic_method_codes" in rec.keys() and "LT-NO" not in rec["magic_method_codes"].split(':'): # NRM = 1 #not really sure how to handle this case but assume that data is already normalized cnt += 1 # index counter s = rec["er_specimen_name"] if "er_sample_name" in list(rec.keys()): sample = rec["er_sample_name"] else: sample = '' if "er_site_name" in list(rec.keys()): site = rec["er_site_name"] else: site = '' if "er_location_name" in list(rec.keys()): location = rec["er_location_name"] else: location = '' expedition_name = "" if "er_expedition_name" in list(rec.keys()): expedition_name = rec["er_expedition_name"] methods = rec["magic_method_codes"].replace( " ", "").strip("\n").split(":") LP_methods = [] LT_methods = [] for k in ['zdata', 'zdata_geo', 'zdata_tilt', 'vector_diffs']: if k not in Data[s]: Data[s][k] = [] for i in range(len(methods)): methods[i] = methods[i].strip() if 'measurement_flag' not in list(rec.keys()): rec['measurement_flag'] = 'g' SKIP = True lab_treatment = "" for meth in methods: if 'DIR' in meth: SKIP = False if meth in self.included_methods: lab_treatment = meth SKIP = False if "LP" in meth: LP_methods.append(meth) for meth in self.excluded_methods: if meth in methods: SKIP = True break if SKIP: continue tr, LPcode, measurement_step_unit = "", "", "" if "LT-NO" in methods: tr = 0 if prev_s != s and self.intensity_col in rec: try: NRM = float(rec[self.intensity_col]) except ValueError: NRM = 1 for method in methods: if "AF" in method: LPcode = "LP-DIR-AF" measurement_step_unit = "mT" if "TRM" in method: LPcode = "LP-DIR-T" measurement_step_unit = "C" elif "LT-AF-Z" in methods: try: tr = float(rec["treatment_ac_field"])*1e3 # (mT) except ValueError: print(("Could not convert ac field for measurement, was given %s, skipping" % rec["treatment_ac_field"])) continue measurement_step_unit = "mT" # in magic its T in GUI its mT LPcode = "LP-DIR-AF" elif "LT-T-Z" in methods or "LT-LT-Z" in methods: try: tr = float(rec["treatment_temp"])-273. # celsius except ValueError: print( ("Could not convert temperature for measurement, was given %s, skipping" % rec["treatment_temp"])) continue measurement_step_unit = "C" # in magic its K in GUI its C LPcode = "LP-DIR-T" elif "LT-M-Z" in methods: # temporary for microwave tr = float(rec["measurement_number"]) else: # attempt to determine from treatment data if all(im not in methods for im in self.included_methods): if 'treatment_temp' in list(rec.keys()) and not str(rec['treatment_temp']).isalpha() and rec['treatment_temp'] != '' and float(rec['treatment_temp']) > 0: tr = float(rec["treatment_temp"])-273. 
# celsius measurement_step_unit = "C" # in magic its K in GUI its C LPcode = "LP-DIR-T" elif 'treatment_ac_field' in list(rec.keys()) and not str(rec['treatment_ac_field']).isalpha() and rec['treatment_ac_field'] != '' and float(rec['treatment_ac_field']) > 0: tr = float(rec["treatment_ac_field"])*1e3 # (mT) measurement_step_unit = "mT" # in magic its T in GUI its mT LPcode = "LP-DIR-AF" else: tr = 0 if prev_s != s and self.intensity_col in rec: try: NRM = float(rec[self.intensity_col]) except ValueError: NRM = 1 for method in methods: if "AF" in method: LPcode = "LP-DIR-AF" measurement_step_unit = "mT" if "TRM" in method: LPcode = "LP-DIR-T" measurement_step_unit = "C" else: tr = float(rec["measurement_number"]) if prev_s != s and len(Data[s]['zijdblock']) > 0: NRM = Data[s]['zijdblock'][0][3] ZI = 0 if tr != "": Data[s]['mag_meas_data_index'].append( cnt) # magic_measurement file intex if not int(self.data_model) == 2: try: Data[s]['measurement_names'].append(rec['measurement']) except KeyError: Data[s]['measurement_names'].append(rec['measurement_number']) Data[s]['zijdblock_lab_treatments'].append(lab_treatment) if measurement_step_unit != "": if 'measurement_step_unit' in list(Data[s].keys()): if measurement_step_unit not in Data[s]['measurement_step_unit'].split(":"): Data[s]['measurement_step_unit'] = Data[s]['measurement_step_unit'] + \ ":"+measurement_step_unit else: Data[s]['measurement_step_unit'] = measurement_step_unit dec, inc, inten = "", "", "" if "measurement_dec" in list(rec.keys()) and cb.not_null(rec["measurement_dec"], False): dec = float(rec["measurement_dec"]) else: continue if "measurement_inc" in list(rec.keys()) and cb.not_null(rec["measurement_inc"], False): inc = float(rec["measurement_inc"]) else: continue if self.intensity_col in list(rec.keys()) and cb.not_null(rec[self.intensity_col], False): intensity = float(rec[self.intensity_col]) else: intensity = 1. # just assume a normal vector if 'magic_instrument_codes' not in list(rec.keys()): rec['magic_instrument_codes'] = '' if 'measurement_csd' in list(rec.keys()): csd = str(rec['measurement_csd']) else: csd = '' Data[s]['zijdblock'].append( [tr, dec, inc, intensity, ZI, rec['measurement_flag'], rec['magic_instrument_codes']]) Data[s]['csds'].append(csd) DIR = [dec, inc, intensity/NRM] cart = pmag.dir2cart(DIR) Data[s]['zdata'].append(array([cart[0], cart[1], cart[2]])) if 'magic_experiment_name' in list(Data[s].keys()) and Data[s]['magic_experiment_name'] != rec["magic_experiment_name"]: print(("-E- ERROR: specimen %s has more than one demagnetization experiment name. You need to merge them to one experiment-name?" 
% (s))) if float(tr) == 0 or float(tr) == 273: Data[s]['zijdblock_steps'].append("0") elif measurement_step_unit == "C": Data[s]['zijdblock_steps'].append( "%.0f%s" % (tr, measurement_step_unit)) else: Data[s]['zijdblock_steps'].append( "%.1f%s" % (tr, measurement_step_unit)) # -------------- if 'magic_experiment_name' in list(rec.keys()): Data[s]['magic_experiment_name'] = rec["magic_experiment_name"] if "magic_instrument_codes" in list(rec.keys()): Data[s]['magic_instrument_codes'] = rec['magic_instrument_codes'] Data[s]["magic_method_codes"] = LPcode # -------------- # ""good" or "bad" data # -------------- flag = 'g' if 'measurement_flag' in list(rec.keys()): if str(rec["measurement_flag"]) == 'b': flag = 'b' Data[s]['measurement_flag'].append(flag) # gegraphic coordinates try: sample_azimuth = float( self.Data_info["er_samples"][sample]['sample_azimuth']) sample_dip = float( self.Data_info["er_samples"][sample]['sample_dip']) d_geo, i_geo = pmag.dogeo( dec, inc, sample_azimuth, sample_dip) # if d_geo or i_geo is null, we can't do geographic coordinates # otherwise, go ahead if not any([np.isnan(val) for val in [d_geo, i_geo]]): Data[s]['zijdblock_geo'].append( [tr, d_geo, i_geo, intensity, ZI, rec['measurement_flag'], rec['magic_instrument_codes']]) DIR = [d_geo, i_geo, intensity/NRM] cart = pmag.dir2cart(DIR) Data[s]['zdata_geo'].append([cart[0], cart[1], cart[2]]) except (IOError, KeyError, ValueError, TypeError) as e: pass # if prev_s != s: # print( "-W- can't find sample_azimuth,sample_dip for sample %s"%sample) # tilt-corrected coordinates try: sample_bed_dip_direction = float( self.Data_info["er_samples"][sample]['sample_bed_dip_direction']) sample_bed_dip = float( self.Data_info["er_samples"][sample]['sample_bed_dip']) d_tilt, i_tilt = pmag.dotilt( d_geo, i_geo, sample_bed_dip_direction, sample_bed_dip) Data[s]['zijdblock_tilt'].append( [tr, d_tilt, i_tilt, intensity, ZI, rec['measurement_flag'], rec['magic_instrument_codes']]) DIR = [d_tilt, i_tilt, intensity/NRM] cart = pmag.dir2cart(DIR) Data[s]['zdata_tilt'].append([cart[0], cart[1], cart[2]]) except (IOError, KeyError, TypeError, ValueError, UnboundLocalError) as e: pass # if prev_s != s: # printd("-W- can't find tilt-corrected data for sample %s"%sample) if len(Data[s]['zdata']) > 1: Data[s]['vector_diffs'].append( sqrt(sum((array(Data[s]['zdata'][-2])-array(Data[s]['zdata'][-1]))**2))) # --------------------- # hierarchy is determined from magic_measurements.txt # --------------------- if sample not in list(Data_hierarchy['samples'].keys()): Data_hierarchy['samples'][sample] = {} Data_hierarchy['samples'][sample]['specimens'] = [] if site not in list(Data_hierarchy['sites'].keys()): Data_hierarchy['sites'][site] = {} Data_hierarchy['sites'][site]['samples'] = [] Data_hierarchy['sites'][site]['specimens'] = [] if location not in list(Data_hierarchy['locations'].keys()): Data_hierarchy['locations'][location] = {} Data_hierarchy['locations'][location]['sites'] = [] Data_hierarchy['locations'][location]['samples'] = [] Data_hierarchy['locations'][location]['specimens'] = [] if 'this study' not in list(Data_hierarchy['study'].keys()): Data_hierarchy['study']['this study'] = {} Data_hierarchy['study']['this study']['sites'] = [] Data_hierarchy['study']['this study']['samples'] = [] Data_hierarchy['study']['this study']['specimens'] = [] if s not in Data_hierarchy['samples'][sample]['specimens']: Data_hierarchy['samples'][sample]['specimens'].append(s) if s not in Data_hierarchy['sites'][site]['specimens']: 
Data_hierarchy['sites'][site]['specimens'].append(s) if s not in Data_hierarchy['locations'][location]['specimens']: Data_hierarchy['locations'][location]['specimens'].append(s) if s not in Data_hierarchy['study']['this study']['specimens']: Data_hierarchy['study']['this study']['specimens'].append(s) if sample not in Data_hierarchy['sites'][site]['samples']: Data_hierarchy['sites'][site]['samples'].append(sample) if sample not in Data_hierarchy['locations'][location]['samples']: Data_hierarchy['locations'][location]['samples'].append(sample) if sample not in Data_hierarchy['study']['this study']['samples']: Data_hierarchy['study']['this study']['samples'].append(sample) if site not in Data_hierarchy['locations'][location]['sites']: Data_hierarchy['locations'][location]['sites'].append(site) if site not in Data_hierarchy['study']['this study']['sites']: Data_hierarchy['study']['this study']['sites'].append(site) # Data_hierarchy['specimens'][s]=sample Data_hierarchy['sample_of_specimen'][s] = sample Data_hierarchy['site_of_specimen'][s] = site Data_hierarchy['site_of_sample'][sample] = site Data_hierarchy['location_of_site'][site] = location Data_hierarchy['location_of_specimen'][s] = location if expedition_name != "": Data_hierarchy['expedition_name_of_specimen'][s] = expedition_name prev_s = s print("-I- done sorting meas data") self.specimens = list(Data.keys()) for s in self.specimens: if len(Data[s]['zdata']) > 0: Data[s]['vector_diffs'].append( sqrt(sum(array(Data[s]['zdata'][-1])**2))) # last vector of the vds vds = sum(Data[s]['vector_diffs']) # vds calculation Data[s]['vector_diffs'] = array(Data[s]['vector_diffs']) Data[s]['vds'] = vds Data[s]['zdata'] = array(Data[s]['zdata']) Data[s]['zdata_geo'] = array(Data[s]['zdata_geo']) Data[s]['zdata_tilt'] = array(Data[s]['zdata_tilt']) return(Data, Data_hierarchy)
python
def merge_tags(left, right, factory=Tags):
    """Merge two sets of tags into a new troposphere object

    Args:
        left (Union[dict, troposphere.Tags]): dictionary or Tags object to be
            merged with lower priority
        right (Union[dict, troposphere.Tags]): dictionary or Tags object to be
            merged with higher priority
        factory (type): Type of object to create. Defaults to the troposphere
            Tags class.
    """
    if isinstance(left, Mapping):
        tags = dict(left)
    elif hasattr(left, 'tags'):
        tags = _tags_to_dict(left.tags)
    else:
        tags = _tags_to_dict(left)

    if isinstance(right, Mapping):
        tags.update(right)
    elif hasattr(right, 'tags'):
        tags.update(_tags_to_dict(right.tags))
    else:
        tags.update(_tags_to_dict(right))

    return factory(**tags)
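A minimal sketch of the precedence rule above, assuming the function is importable and called with plain dict inputs and `factory=dict`, so the troposphere-specific branches are skipped:

```python
base = {"env": "dev", "team": "data"}
override = {"env": "prod"}

merged = merge_tags(base, override, factory=dict)
assert merged == {"env": "prod", "team": "data"}  # right side wins on conflicts
```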
java
public static SecretKeyFactory getSecretKeyFactory(String algorithm) {
    final Provider provider = GlobalBouncyCastleProvider.INSTANCE.getProvider();

    SecretKeyFactory keyFactory;
    try {
        keyFactory = (null == provider) //
                ? SecretKeyFactory.getInstance(getMainAlgorithm(algorithm)) //
                : SecretKeyFactory.getInstance(getMainAlgorithm(algorithm), provider);
    } catch (NoSuchAlgorithmException e) {
        throw new CryptoException(e);
    }
    return keyFactory;
}
java
public final void parse(final InputStream in, final ContentHandler handler)
        throws IOException, ParserException {
    parse(new InputStreamReader(in), handler);
}
java
public ActionForm createActionForm(ActionServlet servlet)
        throws IllegalAccessException, InstantiationException {
    // Create a new form bean instance
    if (getDynamic()) {
        return super.createActionForm(servlet);
    }

    try {
        ReloadableClassHandler rch =
                Handlers.get(servlet.getServletContext()).getReloadableClassHandler();
        Object obj = rch.newInstance(getType());
        assert obj instanceof ActionForm : obj.getClass().getName();
        ActionForm form = (ActionForm) obj;
        form.setServlet(servlet);
        return form;
    } catch (ClassNotFoundException e) {
        _log.error("Could not find form bean class " + getType(), e);
        return null;
    }
}
python
def parse_authentication_request(self, request_body, http_headers=None):
    # type: (str, Optional[Mapping[str, str]]) -> oic.oic.message.AuthorizationRequest
    """
    Parses and verifies an authentication request.

    :param request_body: urlencoded authentication request
    :param http_headers: http headers
    """
    auth_req = AuthorizationRequest().deserialize(request_body)

    for validator in self.authentication_request_validators:
        validator(auth_req)

    logger.debug('parsed authentication_request: %s', auth_req)
    return auth_req
python
def reset(self, hard=False):
    '''Reset the card dispenser, either soft or hard based on the boolean 2nd arg'''
    if hard:
        self.sendcommand(Vendapin.RESET, 1, 0x01)
        time.sleep(2)
    else:
        self.sendcommand(Vendapin.RESET)
        time.sleep(2)
        # parse the reply
        response = self.receivepacket()
        print('Vendapin.reset(soft): ' + str(response))
python
def save_form(self, form, request, **resources):
    """Save self form."""
    resource = yield from super(PWAdminHandler, self).save_form(form, request, **resources)
    resource.save()
    return resource
java
protected final MemorySegment nextSegment(MemorySegment current, int posInSegment) throws IOException {
    if (current != null) {
        writeSegment(current, posInSegment, false);
    }

    final MemorySegment next = this.writer.getNextReturnedBlock();
    this.blockCount++;
    return next;
}
java
Object replaceObject(ResourceRefConfigFactory resRefConfigFactory) {
    DSConfig config = dsConfig.get();
    String filter = config.jndiName == null || config.jndiName.startsWith("java:")
            ? FilterUtils.createPropertyFilter("config.displayId", config.id)
            : FilterUtils.createPropertyFilter(ResourceFactory.JNDI_NAME, config.jndiName);

    ResourceRefConfig resRefConfig = resRefInfo == null
            ? null
            : resRefConfigFactory.createResourceRefConfig(DataSource.class.getName());
    if (resRefInfo != null) {
        resRefConfig.setBranchCoupling(resRefInfo.getBranchCoupling());
        resRefConfig.setCommitPriority(resRefInfo.getCommitPriority());
        resRefConfig.setIsolationLevel(resRefInfo.getIsolationLevel());
        resRefConfig.setJNDIName(resRefInfo.getJNDIName());
        resRefConfig.setLoginConfigurationName(resRefInfo.getLoginConfigurationName());
        resRefConfig.setResAuthType(resRefInfo.getAuth());
        resRefConfig.setSharingScope(resRefInfo.getSharingScope());
    }

    return new SerializedDataSourceWrapper(filter, resRefConfig);
}
java
protected boolean householderPivot(int j) {
    final double u[] = dataQR[j];

    // find the largest value in this column
    // this is used to normalize the column and mitigate overflow/underflow
    final double max = QrHelperFunctions_DDRM.findMax(u, j, numRows - j);

    if (max <= singularThreshold * maxValueAbs) {
        return false;
    } else {
        // computes tau and normalizes u by max
        tau = QrHelperFunctions_DDRM.computeTauAndDivide(j, numRows, u, max);

        // divide u by u_0
        double u_0 = u[j] + tau;
        QrHelperFunctions_DDRM.divideElements(j + 1, numRows, u, u_0);

        gamma = u_0 / tau;
        tau *= max;
        u[j] = -tau;
    }

    gammas[j] = gamma;
    return true;
}
java
public static String digest(ByteBuffer script) {
    try {
        MessageDigest md = MessageDigest.getInstance("SHA1");
        md.update(script);
        return new String(Base16.encode(md.digest(), false));
    } catch (NoSuchAlgorithmException e) {
        throw new RedisException("JVM does not support SHA1");
    }
}
python
def create_mapping(record, keys):
    """Create a field mapping for use in API updates and creates.

    Args:
        record (BaseModel): Record that should be mapped.
        keys (list[str]): Fields that should be mapped as keys.

    Returns:
        dict: Dictionary with keys:

            * ``field_mappings``: Field mappings as required by API.
            * ``data``: Ordered data dictionary for input record.
    """
    ordered = OrderedDict()
    field_mappings = []
    for key, value in record.items():
        ordered[key] = value
        field_mappings.append({
            'columnNumber': len(ordered),  # Five9 is not zero indexed.
            'fieldName': key,
            'key': key in keys,
        })
    return {
        'field_mappings': field_mappings,
        'data': ordered,
        'fields': list(ordered.values()),
    }
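A usage sketch for the mapper above; the record here is a plain OrderedDict standing in for the BaseModel the docstring mentions, and the field names are illustrative:

```python
from collections import OrderedDict

record = OrderedDict([("number1", "5551234567"), ("first_name", "Ada")])
mapping = create_mapping(record, keys=["number1"])

assert mapping["field_mappings"][0] == {
    "columnNumber": 1,  # 1-based, per the Five9 comment above
    "fieldName": "number1",
    "key": True,
}
assert mapping["fields"] == ["5551234567", "Ada"]
```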
java
public static Optional<Path> createTempFilePathAsOpt(Path path) {
    debug(log, "createTempFilePathAsOpt", path);
    String[] prefixSuffix = JMFiles.getPrefixSuffix(path.toFile());
    try {
        return Optional
                .of(Files.createTempFile(prefixSuffix[0], prefixSuffix[1]))
                .filter(JMPath.ExistFilter)
                .map(JMPathOperation::deleteOnExit);
    } catch (Exception e) {
        return JMExceptionManager.handleExceptionAndReturnEmptyOptional(log, e,
                "createTempFilePathAsOpt", path);
    }
}
python
def load(self, skey, sdesc, sdict=None, loaders=None, merge=False, writeback=False):
    '''
    Loads a dictionary into current settings

    :param skey: Type of data to load. Is used to reference the data
        in the files sections within settings
    :param sdesc: Either filename of yaml-file to load or further description of
        imported data when `sdict` is used
    :param dict sdict: Directly pass data as dictionary instead of loading
        it from a yaml-file.
        Make sure to set `skey` and `sdesc` accordingly
    :param list loaders: Append custom loaders to the YAML-loader.
    :param merge: Merge received data into current settings or
        place it under `skey` within meta
    :param writeback: Write the loaded (and merged/imported) result back
        to the original file.
        This is used to generate the summary files
    :returns: The loaded (or directly passed) content

    .. seealso:: |yaml_loaders|
    '''
    y = sdict if sdict else read_yaml(sdesc, add_constructor=loaders)
    if y and isinstance(y, dict):
        if not sdict:
            self.__settings['files'].update({skey: sdesc})
        if merge:
            self.__settings = dict_merge(self.__settings, y)
        else:
            self.__settings[skey] = y
        shell_notify(
            'load %s data and %s it into settings' % (
                'got' if sdict else 'read',
                'merged' if merge else 'imported'
            ),
            more=dict(skey=skey, sdesc=sdesc, merge=merge, writeback=writeback),
            verbose=self.__verbose
        )
    if writeback and y != self.__settings:
        write_yaml(sdesc, self.__settings)
    return y
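`dict_merge` is imported from elsewhere in that project; a plausible minimal sketch of the recursive merge it performs (incoming values win on conflicts) could look like this, though the real implementation may differ:

```python
def dict_merge(base: dict, incoming: dict) -> dict:
    # recursively merge two dicts; values from `incoming` take priority
    out = dict(base)
    for k, v in incoming.items():
        if isinstance(v, dict) and isinstance(out.get(k), dict):
            out[k] = dict_merge(out[k], v)
        else:
            out[k] = v
    return out

assert dict_merge({"a": {"x": 1}}, {"a": {"y": 2}}) == {"a": {"x": 1, "y": 2}}
```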
python
def _find_start_time(hdr, s_freq):
    """Find the start time, usually in STC, but if that's not correct, use ERD

    Parameters
    ----------
    hdr : dict
        header with stc (and stamps) and erd
    s_freq : int
        sampling frequency

    Returns
    -------
    datetime
        either from stc or from erd

    Notes
    -----
    Sometimes, but rather rarely, there is a mismatch between the time in the
    stc and the time in the erd. For some reason, the time in the stc is way
    off (by hours), which is clearly not correct. We can try to reconstruct
    the actual time by looking at the ERD time (of any file apart from the
    first one) and computing the original time back based on the offset of
    the number of samples in stc. For some reason, this is not the same for
    all the ERD, but the jitter is in the order of 1-2s, which is acceptable
    for our purposes (probably, but be careful about the notes).
    """
    start_time = hdr['stc']['creation_time']

    for one_stamp in hdr['stamps']:
        if one_stamp['segment_name'].decode() == hdr['erd']['filename']:
            offset = one_stamp['start_stamp']
            break

    erd_time = (hdr['erd']['creation_time'] -
                timedelta(seconds=offset / s_freq)).replace(microsecond=0)

    stc_erd_diff = (start_time - erd_time).total_seconds()
    if stc_erd_diff > START_TIME_TOL:
        lg.warn('Time difference between ERD and STC is {} s so using ERD time'
                ' at {}'.format(stc_erd_diff, erd_time))
        start_time = erd_time

    return start_time
java
private static boolean isCollapsibleValue(Node value, boolean isLValue) {
    switch (value.getToken()) {
        case GETPROP:
            // Do not collapse GETPROPs on arbitrary objects, because they may be
            // implemented as setter functions, and oftentimes setter functions fail
            // on native objects. This is OK for "THIS" objects, because we assume
            // that they are non-native.
            return !isLValue || value.getFirstChild().isThis();

        case NAME:
            return true;

        default:
            return NodeUtil.isImmutableValue(value);
    }
}
java
public static String replaceAll(final String input, final String pattern, final String replacement) {
    final StringBuilder result = new StringBuilder();
    int startIndex = 0;
    int newIndex;
    while ((newIndex = input.indexOf(pattern, startIndex)) >= 0) {
        result.append(input, startIndex, newIndex);
        result.append(replacement);
        startIndex = newIndex + pattern.length();
    }
    result.append(input.substring(startIndex));
    return result.toString();
}
python
def _set_fabric_isl(self, v, load=False): """ Setter method for fabric_isl, mapped from YANG variable /interface/fortygigabitethernet/fabric/fabric_isl (container) If this variable is read-only (config: false) in the source YANG file, then _set_fabric_isl is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_fabric_isl() directly. YANG Description: Configure the Fabric Protocol ISL parameters """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=fabric_isl.fabric_isl, is_container='container', presence=False, yang_name="fabric-isl", rest_name="isl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric isl status ', u'alt-name': u'isl', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """fabric_isl must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=fabric_isl.fabric_isl, is_container='container', presence=False, yang_name="fabric-isl", rest_name="isl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Fabric isl status ', u'alt-name': u'isl', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-fcoe', defining_module='brocade-fcoe', yang_type='container', is_config=True)""", }) self.__fabric_isl = t if hasattr(self, '_set'): self._set()
java
public void plusNanoSeconds(long delta) {
    if (delta != 0) {
        long result = getNanoSecond() + delta;
        setNanoSecond((int) Math.floorMod(result, 1000000000));
        plusSeconds(Math.floorDiv(result, 1000000000));
    }
}
java
private void buildMetaZoneNames(MethodSpec.Builder method, TimeZoneData data) {
    CodeBlock.Builder code = CodeBlock.builder();
    code.beginControlFlow("\nthis.$L = new $T<$T, $T>() {",
            "metazoneNames", HASHMAP, STRING, TIMEZONE_NAMES);
    for (MetaZoneInfo info : data.metaZoneInfo) {
        if (info.nameLong == null && info.nameShort == null) {
            continue;
        }
        code.add("\nput($S, new $T($S, ", info.zone, TIMEZONE_NAMES, info.zone);
        if (info.nameLong == null) {
            code.add("\n null,");
        } else {
            code.add("\n new $T.Name($S, $S, $S),", TIMEZONE_NAMES,
                    info.nameLong.generic, info.nameLong.standard, info.nameLong.daylight);
        }
        if (info.nameShort == null) {
            code.add("\n null");
        } else {
            code.add("\n new $T.Name($S, $S, $S)", TIMEZONE_NAMES,
                    info.nameShort.generic, info.nameShort.standard, info.nameShort.daylight);
        }
        code.add("));\n");
    }
    code.endControlFlow("}");
    method.addCode(code.build());
}
java
public ZealotKhala andNotLike(String field, Object value) {
    return this.doLike(ZealotConst.AND_PREFIX, field, value, true, false);
}
java
public void setTextValue(String v) {
    if (BodyCitation_Type.featOkTst && ((BodyCitation_Type) jcasType).casFeat_textValue == null)
        jcasType.jcas.throwFeatMissing("textValue", "ch.epfl.bbp.uima.types.BodyCitation");
    jcasType.ll_cas.ll_setStringValue(addr, ((BodyCitation_Type) jcasType).casFeatCode_textValue, v);
}
python
def get_package_by_key(self, package_keyname, mask=None):
    """Get a single package with a given key.

    If no package is found, a SoftLayerError is raised.

    :param package_keyname: string representing the package key name we are interested in.
    :param string mask: Mask to specify the properties we want to retrieve
    """
    _filter = {'keyName': {'operation': package_keyname}}
    packages = self.package_svc.getAllObjects(mask=mask, filter=_filter)
    if len(packages) == 0:
        raise exceptions.SoftLayerError(
            "Package {} does not exist".format(package_keyname))
    return packages.pop()
python
def get_resources(cls):
    """Returns Ext Resources."""
    plural_mappings = resource_helper.build_plural_mappings(
        {}, RESOURCE_ATTRIBUTE_MAP)
    if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
        attr.PLURALS.update(plural_mappings)
    return resource_helper.build_resource_info(plural_mappings,
                                               RESOURCE_ATTRIBUTE_MAP,
                                               bc.constants.L3)
python
def system_which(command, mult=False):
    """Emulates the system's which. Returns None if not found."""
    _which = "which -a" if not os.name == "nt" else "where"
    os.environ = {
        vistir.compat.fs_str(k): vistir.compat.fs_str(val)
        for k, val in os.environ.items()
    }
    result = None
    try:
        c = delegator.run("{0} {1}".format(_which, command))
        try:
            # Which Not found…
            if c.return_code == 127:
                click.echo(
                    "{}: the {} system utility is required for Pipenv to find Python installations properly."
                    "\n  Please install it.".format(
                        crayons.red("Warning", bold=True), crayons.red(_which)
                    ),
                    err=True,
                )
            assert c.return_code == 0
        except AssertionError:
            result = fallback_which(command, allow_global=True)
    except TypeError:
        if not result:
            result = fallback_which(command, allow_global=True)
    else:
        if not result:
            result = next(iter([c.out, c.err]), "").split("\n")
            result = next(iter(result)) if not mult else result
            return result
    if not result:
        result = fallback_which(command, allow_global=True)
    result = [result] if mult else result
    return result
python
def utc2et(utcstr):
    """
    Convert an input time from Calendar or Julian Date format, UTC,
    to ephemeris seconds past J2000.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/utc2et_c.html

    :param utcstr: Input time string, UTC.
    :type utcstr: str
    :return: Output epoch, ephemeris seconds past J2000.
    :rtype: float
    """
    utcstr = stypes.stringToCharP(utcstr)
    et = ctypes.c_double()
    libspice.utc2et_c(utcstr, ctypes.byref(et))
    return et.value
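A usage sketch for the wrapper above via the public SpiceyPy API; note that CSPICE requires a leapseconds kernel to be furnished first, and the kernel path here is hypothetical:

```python
import spiceypy as spice

spice.furnsh("naif0012.tls")  # hypothetical local path to a leapseconds kernel
et = spice.utc2et("2023-01-01T00:00:00")
print(et)  # ephemeris seconds past J2000
```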
java
protected SchemaInformedGrammar getSchemaInformedElementFragmentGrammar() throws EXIException { // unique qname map Map<QName, List<XSElementDeclaration>> uniqueNamedElements = getUniqueNamedElements(); if (elementFragment0 != null) { return elementFragment0; } // 8.5.3 Schema-informed Element Fragment Grammar SchemaInformedGrammar elementFragment1 = new SchemaInformedElement(); elementFragment0 = new SchemaInformedFirstStartTag(elementFragment1); // // ElementFragment 1 : // SE ( F0 ) ElementFragment 1 0 // SE ( F1 ) ElementFragment 1 1 // ... // SE ( Fm-1 ) ElementFragment 1 m-1 // SE ( * ) ElementFragment 1 m // EE m+1 // CH [untyped value] ElementFragment 1 m+2 /* * The variable m in the grammar above represents the number of unique * element qnames declared in the schema. The variables F0 , F1 , ... * Fm-1 represent these qnames sorted lexicographically, first by * local-name, then by uri. If there is more than one element declared * with the same qname, the qname is included only once. If all such * elements have the same type name and {nillable} property value, their * content is evaluated according to specific grammar for that element * declaration. Otherwise, their content is evaluated according to the * relaxed Element Fragment grammar described above. */ List<QName> uniqueNamedElementsList = new ArrayList<QName>(); Iterator<QName> iter = uniqueNamedElements.keySet().iterator(); while (iter.hasNext()) { uniqueNamedElementsList.add(iter.next()); } Collections.sort(uniqueNamedElementsList, qnameSort); for (int i = 0; i < uniqueNamedElementsList.size(); i++) { QName fm = uniqueNamedElementsList.get(i); StartElement se; List<XSElementDeclaration> elements = uniqueNamedElements.get(fm); if (elements.size() == 1 || isSameElementGrammar(elements)) { // se = getStartElement(elements.get(0)); se = translatElementDeclarationToFSA(elements.get(0)); } else { // content is evaluated according to the relaxed Element // Fragment grammar se = createStartElement(fm); // new StartElement(fm); se.setGrammar(elementFragment0); } elementFragment1.addProduction(se, elementFragment1); } // SE ( * ) ElementFragment 1 m elementFragment1.addProduction(START_ELEMENT_GENERIC, elementFragment1); // EE m+1 elementFragment1.addTerminalProduction(END_ELEMENT); // CH [untyped value] ElementFragment 1 m+2 elementFragment1.addProduction(CHARACTERS_GENERIC, elementFragment1); // ElementFragment 0 : // AT ( A 0 ) [schema-typed value] ElementFragment 0 0 // AT ( A 1 ) [schema-typed value] ElementFragment 0 1 // ... // AT (A n-1) [schema-typed value] ElementFragment 0 n-1 // AT ( * ) ElementFragment 0 n // SE ( F0 ) ElementFragment 1 n+1 // SE ( F1 ) ElementFragment 1 n+2 // ... // SE ( Fm-1 ) ElementFragment 1 n+m // SE ( * ) ElementFragment 1 n+m+1 // EE n+m+2 // CH [untyped value] ElementFragment 1 n+m+3 /* * The variable n in the grammar above represents the number of unique * qnames given to explicitly declared attributes in the schema. The * variables A 0 , A 1 , ... A n-1 represent these qnames sorted * lexicographically, first by local-name, then by uri. If there is more * than one attribute declared with the same qname, the qname is * included only once. If all such attributes have the same schema type * name, their value is represented using that type. Otherwise, their * value is represented as a String. 
*/ List<QName> uniqueNamedAttributeList = new ArrayList<QName>(); // create unique qname map Map<QName, List<XSAttributeDeclaration>> uniqueNamedAttributes = new HashMap<QName, List<XSAttributeDeclaration>>(); Iterator<XSAttributeDeclaration> atts = attributePool.keySet() .iterator(); while (atts.hasNext()) { XSAttributeDeclaration atDecl = atts.next(); QName atQname = new QName(atDecl.getNamespace(), atDecl.getName()); if (uniqueNamedAttributes.containsKey(atQname)) { uniqueNamedAttributes.get(atQname).add(atDecl); } else { List<XSAttributeDeclaration> list = new ArrayList<XSAttributeDeclaration>(); list.add(atDecl); uniqueNamedAttributes.put(atQname, list); uniqueNamedAttributeList.add(atQname); } } // add global attributes XSNamedMap nm = xsModel .getComponents(XSConstants.ATTRIBUTE_DECLARATION); for (int i = 0; i < nm.getLength(); i++) { XSAttributeDeclaration atDecl = (XSAttributeDeclaration) nm.item(i); QName atQname = new QName(atDecl.getNamespace(), atDecl.getName()); if (uniqueNamedAttributes.containsKey(atQname)) { uniqueNamedAttributes.get(atQname).add(atDecl); } else { List<XSAttributeDeclaration> list = new ArrayList<XSAttributeDeclaration>(); list.add(atDecl); uniqueNamedAttributes.put(atQname, list); uniqueNamedAttributeList.add(atQname); } } Collections.sort(uniqueNamedAttributeList, qnameSort); for (int i = 0; i < uniqueNamedAttributeList.size(); i++) { QName an = uniqueNamedAttributeList.get(i); Attribute at; List<XSAttributeDeclaration> attributes = uniqueNamedAttributes .get(an); if (attributes.size() == 1 || isSameAttributeGrammar(attributes)) { at = getAttribute(attributes.get(0)); } else { // represented as a String // at = new Attribute(an); at = createAttribute(an, BuiltIn.getDefaultDatatype()); // BuiltIn.DEFAULT_VALUE_NAME, } elementFragment0.addProduction(at, elementFragment0); } // AT ( * ) ElementFragment 0 n elementFragment0.addProduction(ATTRIBUTE_GENERIC, elementFragment0); // SE ( F0 ) ElementFragment 1 n+1 // .. for (int i = 0; i < uniqueNamedElementsList.size(); i++) { QName fm = uniqueNamedElementsList.get(i); StartElement se; List<XSElementDeclaration> elements = uniqueNamedElements.get(fm); if (elements.size() == 1 || isSameElementGrammar(elements)) { // se = getStartElement(elements.get(0)); se = translatElementDeclarationToFSA(elements.get(0)); } else { // content is evaluated according to the relaxed Element // Fragment grammar se = createStartElement(fm); // new StartElement(fm); se.setGrammar(elementFragment0); } elementFragment0.addProduction(se, elementFragment1); } // SE ( * ) ElementFragment 1 n+m+1 elementFragment0.addProduction(START_ELEMENT_GENERIC, elementFragment1); // EE n+m+2 elementFragment0.addTerminalProduction(END_ELEMENT); // CH [untyped value] ElementFragment 1 n+m+3 elementFragment0.addProduction(CHARACTERS_GENERIC, elementFragment1); SchemaInformedGrammar elementFragmentEmpty1 = new SchemaInformedElement(); SchemaInformedFirstStartTagGrammar elementFragmentEmpty0 = new SchemaInformedFirstStartTag( elementFragmentEmpty1); // ElementFragmentTypeEmpty 0 : // AT ( A 0 ) [schema-typed value] ElementFragmentTypeEmpty 0 0 // AT ( A 1 ) [schema-typed value] ElementFragmentTypeEmpty 0 1 // ... 
// AT ( A n-1 ) [schema-typed value] ElementFragmentTypeEmpty 0 n-1 // AT ( * ) ElementFragmentTypeEmpty 0 n // EE n+1 for (int i = 0; i < uniqueNamedAttributeList.size(); i++) { QName an = uniqueNamedAttributeList.get(i); Attribute at; List<XSAttributeDeclaration> attributes = uniqueNamedAttributes .get(an); if (attributes.size() == 1 || isSameAttributeGrammar(attributes)) { at = getAttribute(attributes.get(0)); } else { // represented as a String // at = new Attribute(an); at = createAttribute(an, BuiltIn.getDefaultDatatype()); // BuiltIn.DEFAULT_VALUE_NAME, } elementFragmentEmpty0.addProduction(at, elementFragmentEmpty0); } elementFragmentEmpty0.addProduction(ATTRIBUTE_GENERIC, elementFragmentEmpty0); elementFragmentEmpty0.addTerminalProduction(END_ELEMENT); // ElementFragmentTypeEmpty 1 : // EE 0 elementFragmentEmpty1.addTerminalProduction(END_ELEMENT); /* * As with all schema informed element grammars, the schema-informed * element fragment grammar is augmented with additional productions * that describe events that may occur in an EXI stream, but are not * explicity declared in the schema. The process for augmenting the * grammar is described in 8.5.4.4 Undeclared Productions. For the * purposes of this process, the schema-informed element fragment * grammar is treated as though it is created from an element * declaration with a {nillable} property value of true and a type * declaration that has named sub-types, and ElementFragmentTypeEmpty is * used to serve as the TypeEmpty of the type in the process. */ elementFragment0.setNillable(true); elementFragment0.setTypeEmpty(elementFragmentEmpty0); elementFragment0.setTypeCastable(true); return elementFragment0; }
java
public static DoubleChromosome of(final DoubleGene... genes) {
    checkGeneRange(Stream.of(genes).map(DoubleGene::range));
    return new DoubleChromosome(ISeq.of(genes), IntRange.of(genes.length));
}
java
@Deprecated
@Override
public JsonParser createJsonParser(Reader r) throws IOException, JsonParseException {
    return createParser(r);
}
python
def get_absolute(self, points):
    """Given a set of points geo referenced to this instance,
    return the points as absolute values.
    """
    # remember if we got a list
    is_list = isinstance(points, list)

    points = ensure_numeric(points, num.float)
    if len(points.shape) == 1:
        # One point has been passed
        msg = 'Single point must have two elements'
        if not len(points) == 2:
            raise ValueError(msg)

    msg = 'Input must be an N x 2 array or list of (x,y) values. '
    msg += 'I got an %d x %d array' % points.shape
    if not points.shape[1] == 2:
        raise ValueError(msg)

    # Add geo ref to points
    if not self.is_absolute():
        points = copy.copy(points)  # Don't destroy input
        points[:, 0] += self.xllcorner
        points[:, 1] += self.yllcorner

    if is_list:
        points = points.tolist()

    return points
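The offset step above reduces to one vectorized addition; a standalone NumPy sketch with a hypothetical lower-left corner:

```python
import numpy as np

xllcorner, yllcorner = 308500.0, 6189000.0  # hypothetical georeference origin
relative = np.array([[0.0, 0.0], [10.0, 5.0]])
absolute = relative + [xllcorner, yllcorner]  # broadcasts over the N x 2 array
print(absolute)  # [[308500. 6189000.] [308510. 6189005.]]
```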
python
def _meanprecision(D, tol=1e-7, maxiter=None):
    '''Mean and precision alternating method for MLE of Dirichlet
    distribution'''
    N, K = D.shape
    logp = log(D).mean(axis=0)
    a0 = _init_a(D)
    s0 = a0.sum()
    if s0 < 0:
        a0 = a0/s0
        s0 = 1
    elif s0 == 0:
        a0 = ones(a0.shape) / len(a0)
        s0 = 1
    m0 = a0/s0

    # Start updating
    if maxiter is None:
        maxiter = MAXINT
    for i in xrange(maxiter):
        a1 = _fit_s(D, a0, logp, tol=tol)
        s1 = sum(a1)
        a1 = _fit_m(D, a1, logp, tol=tol)
        m = a1/s1
        # if norm(a1-a0) < tol:
        if abs(loglikelihood(D, a1) - loglikelihood(D, a0)) < tol:  # much faster
            return a1
        a0 = a1
    raise Exception('Failed to converge after {} iterations, values are {}.'
                    .format(maxiter, a1))
java
public void clearUserValues() {
    ArrayList<String> buckets = getBuckets();
    for (String bucket : buckets) {
        setCreditCount(bucket, 0);
    }
    setBuckets(new ArrayList<String>());

    ArrayList<String> actions = getActions();
    for (String action : actions) {
        setActionTotalCount(action, 0);
        setActionUniqueCount(action, 0);
    }
    setActions(new ArrayList<String>());
}
java
public CmsUUID addForbiddenParentFolder(String parentFolder, String reason) {
    CmsUUID id = new CmsUUID();
    m_forbiddenParentFolders.put(id, new ForbiddenFolderEntry(parentFolder, reason));
    return id;
}
java
public final void xmlRequestWriteRawDataOnSystemOut() throws IOException {
    FormattedWriter writer = new FormattedWriter(new OutputStreamWriter(System.out));
    xmlWriteRawOn(writer, false);
    writer.flush();
}
python
def coarsen(self, windows, func, boundary='exact', side='left'):
    """
    Apply
    """
    windows = {k: v for k, v in windows.items() if k in self.dims}
    if not windows:
        return self.copy()

    reshaped, axes = self._coarsen_reshape(windows, boundary, side)
    if isinstance(func, str):
        name = func
        func = getattr(duck_array_ops, name, None)
        if func is None:
            raise NameError('{} is not a valid method.'.format(name))
    return type(self)(self.dims, func(reshaped, axis=axes), self._attrs)
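The reshape-then-reduce idea behind `_coarsen_reshape` (a private helper not shown here), sketched standalone with NumPy for a 1-D array and a window of 3:

```python
import numpy as np

x = np.arange(12.0)
window = 3
coarse = x.reshape(-1, window).mean(axis=1)  # reduce over the window axis
print(coarse)  # [ 1.  4.  7. 10.]
```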
java
public static Label.Builder format(final Label.Builder label) {
    for (final MapFieldEntry.Builder entryBuilder : label.getEntryBuilderList()) {
        final List<String> valueList = new ArrayList<>(entryBuilder.getValueList());
        entryBuilder.clearValue();
        for (String value : valueList) {
            entryBuilder.addValue(format(value));
        }
    }
    return label;
}
java
public static void deleteCommerceSubscriptionEntry(long commerceSubscriptionEntryId)
        throws com.liferay.portal.kernel.exception.PortalException {
    getService().deleteCommerceSubscriptionEntry(commerceSubscriptionEntryId);
}
python
def do_GET(self):
        """Handler for GET requests.

        Checks the request headers first: if this is a websocket upgrade
        request, a websocket handler is instantiated and we return early.
        """
        if "Upgrade" in self.headers:
            if self.headers['Upgrade'] == 'websocket':
                # pass our arguments on to the websocket handler; otherwise it
                # would lose the last message and be unable to handshake
                ws = WebSocketsHandler(self.headers, self.request, self.client_address, self.server)
                return

        do_process = False
        if self.server.auth is None:
            do_process = True
        else:
            if 'Authorization' not in self.headers or self.headers['Authorization'] is None:
                self._log.info("Authenticating")
                self.do_AUTHHEAD()
                self.wfile.write(encode_text('no auth header received'))
            elif self.headers['Authorization'] == 'Basic ' + self.server.auth.decode():
                do_process = True
            else:
                self.do_AUTHHEAD()
                self.wfile.write(encode_text(self.headers['Authorization']))
                self.wfile.write(encode_text('not authenticated'))

        if do_process:
            path = str(unquote(self.path))
            # noinspection PyBroadException
            try:
                self._instance()  # build the page (call main()) in user code, if not built yet
                with self.update_lock:
                    # build the root page once if necessary
                    if 'root' not in self.page.children['body'].children:
                        self._log.info('built UI (path=%s)' % path)
                        self.set_root_widget(self.main(*self.server.userdata))
                self._process_all(path)
            except:
                self._log.error('error processing GET request', exc_info=True)
python
def _read_ini_config(ini_file_contents: str) -> Dict[str, str]: """ Parses the given ini file contents and converts to a dictionary of key/value pairs. :param ini_file_contents: the contents of the ini file :return: dictionary where the variable names are key and their values are the values """ config = configparser.ConfigParser() config.optionxform = str config.read_string(_FAKE_SECTION + ini_file_contents) items = {} for section in config.sections(): items.update(dict(config[section].items())) return items
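The prepended fake section is needed because configparser refuses input without a section header; a self-contained demonstration (with a hypothetical _FAKE_SECTION value) follows:

# Self-contained demo of the prepended-section trick used above.
import configparser

_FAKE_SECTION = '[fake]\n'   # hypothetical; any section name works
contents = 'HOST=localhost\nPORT=8080\n'

config = configparser.ConfigParser()
config.optionxform = str     # preserve key case
config.read_string(_FAKE_SECTION + contents)
items = {k: v for s in config.sections() for k, v in config[s].items()}
print(items)                 # {'HOST': 'localhost', 'PORT': '8080'}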
python
def _prepare_wsdl_objects(self): """ Preps the WSDL data structures for the user. """ self.DeletionControlType = self.client.factory.create('DeletionControlType') self.TrackingId = self.client.factory.create('TrackingId') self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')
java
public static void main(String[] args) { String icuApiVer; if (ICU_VERSION.getMajor() <= 4) { if (ICU_VERSION.getMinor() % 2 != 0) { // Development mile stone int major = ICU_VERSION.getMajor(); int minor = ICU_VERSION.getMinor() + 1; if (minor >= 10) { minor -= 10; major++; } icuApiVer = "" + major + "." + minor + "M" + ICU_VERSION.getMilli(); } else { icuApiVer = ICU_VERSION.getVersionString(2, 2); } } else { if (ICU_VERSION.getMinor() == 0) { // Development mile stone icuApiVer = "" + ICU_VERSION.getMajor() + "M" + ICU_VERSION.getMilli(); } else { icuApiVer = ICU_VERSION.getVersionString(2, 2); } } System.out.println("International Components for Unicode for Java " + icuApiVer); System.out.println(""); System.out.println("Implementation Version: " + ICU_VERSION.getVersionString(2, 4)); System.out.println("Unicode Data Version: " + UNICODE_VERSION.getVersionString(2, 4)); System.out.println("CLDR Data Version: " + LocaleData.getCLDRVersion().getVersionString(2, 4)); System.out.println("Time Zone Data Version: " + getTZDataVersion()); }
python
def build_taxonomy(self, level, namespace, predicate, value): """ :param level: info, safe, suspicious or malicious :param namespace: Name of analyzer :param predicate: Name of service :param value: value :return: dict """ return { 'level': level, 'namespace': namespace, 'predicate': predicate, 'value': value }
python
def do_random(context, seq): """Return a random item from the sequence.""" try: return random.choice(seq) except IndexError: return context.environment.undefined('No random item, sequence was empty.')
python
def gen_gmfs(gmf_set): """ Generate GMF nodes from a gmf_set :param gmf_set: a sequence of GMF objects with attributes imt, sa_period, sa_damping, event_id and containing a list of GMF nodes with attributes gmv and location. The nodes are sorted by lon/lat. """ for gmf in gmf_set: gmf_node = Node('gmf') gmf_node['IMT'] = gmf.imt if gmf.imt == 'SA': gmf_node['saPeriod'] = str(gmf.sa_period) gmf_node['saDamping'] = str(gmf.sa_damping) gmf_node['ruptureId'] = gmf.event_id sorted_nodes = sorted(gmf) gmf_node.nodes = ( Node('node', dict(gmv=n.gmv, lon=n.location.x, lat=n.location.y)) for n in sorted_nodes) yield gmf_node
java
public void createPageControlsPostconfig() {
        try {
            getMainPageButton("projectFromArchiveRadio").setSelection(true);
            getMainPageButton("projectFromArchiveRadio").setEnabled(false);
            getMainPageButton("projectFromDirectoryRadio").setSelection(false);
            getMainPageButton("projectFromDirectoryRadio").setEnabled(false);
            getMainPageButton("browseDirectoriesButton").setEnabled(false);
            getMainPageButton("browseArchivesButton").setEnabled(false);

            invokeMainPageMethod("archiveRadioSelected");

            Control pathfield = getMainPageField("archivePathField");
            if (pathfield instanceof Text) {
                ((Text) pathfield).setText(this.inputPath);
            } else if (pathfield instanceof Combo) {
                ((Combo) pathfield).setText(this.inputPath);
            }
            pathfield.setEnabled(false);
        } catch (Exception e) {
            VdmUIPlugin.log("Failed to configure WizardProjectsImportPage through reflection", e);
        }
    }
python
def split_str(string): """Split string in half to return two strings""" split = string.split(' ') return ' '.join(split[:len(split) // 2]), ' '.join(split[len(split) // 2:])
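A quick usage check, assuming split_str is in scope; odd word counts leave the extra word in the second half:

left, right = split_str('one two three four five')
print(left)    # 'one two'
print(right)   # 'three four five'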
python
def call_agent_side(self, method, *args, **kwargs):
        '''
        Call the method, wrap it in a Deferred and bind an error handler.
        '''
        assert not self._finalize_called, ("Attempt to call agent side code "
                                           "after finalize() method has been "
                                           "called. Method: %r" % (method, ))
        ensure_state = kwargs.pop('ensure_state', None)
        d = defer.Deferred(canceller=self._cancel_agent_side_call)
        self._agent_jobs.append(d)
        if ensure_state:
            # call the method only if the state check passes
            d.addCallback(
                lambda _: (self._ensure_state(ensure_state) and
                           method(*args, **kwargs)))
        else:
            d.addCallback(defer.drop_param, method, *args, **kwargs)
        d.addErrback(self._error_handler, method)
        d.addBoth(defer.bridge_param, self._remove_agent_job, d)
        time.call_next(d.callback, None)
        return d
python
def visit_IfExp(self, node): ''' Resulting node alias to either branch >>> from pythran import passmanager >>> pm = passmanager.PassManager('demo') >>> module = ast.parse('def foo(a, b, c): return a if c else b') >>> result = pm.gather(Aliases, module) >>> Aliases.dump(result, filter=ast.IfExp) (a if c else b) => ['a', 'b'] ''' self.visit(node.test) rec = [self.visit(n) for n in (node.body, node.orelse)] return self.add(node, set.union(*rec))
java
public DataDescriptor parentDescriptor() { if (type == TYPE_INVALID) { return null; } else if (dataIds != null) { return new DataDescriptor(dataPath); } else { if (dataPath.length == 1) return null; String[] myPath = new String[dataPath.length - 1]; System.arraycopy(dataPath, 0, myPath, 0, myPath.length); return new DataDescriptor(myPath); } }
python
def add_nodes(self, nodes, attr_dict=None, **attr):
        """Adds multiple nodes to the graph, along with any related
            attributes of the nodes.

        :param nodes: iterable container to either references of the nodes
                    OR tuples of (node reference, attribute dictionary);
                    if an attribute dictionary is provided in the tuple,
                    its values will override both attr_dict's and attr's
                    values.
        :param attr_dict: dictionary of attributes shared by all the nodes.
        :param attr: keyword arguments of attributes of the node;
                    attr's values will override attr_dict's values
                    if both are provided.
        See also:
            add_node

        Examples:
        ::

            >>> H = DirectedHypergraph()
            >>> attributes = {"label": "positive"}
            >>> node_list = ["A",
                             ("B", {"label": "negative"}),
                             ("C", {"root": True})]
            >>> H.add_nodes(node_list, attributes)

        """
        attr_dict = self._combine_attribute_arguments(attr_dict, attr)

        for node in nodes:
            # Note: This won't behave properly if the node is actually a tuple
            if type(node) is tuple:
                # See ("B", {"label": "negative"}) in the documentation example
                new_node, node_attr_dict = node
                # Create a new dictionary and load it with node_attr_dict and
                # attr_dict, with the former (node_attr_dict) taking precedence
                new_dict = attr_dict.copy()
                new_dict.update(node_attr_dict)
                self.add_node(new_node, new_dict)
            else:
                # See "A" in the documentation example
                self.add_node(node, attr_dict.copy())
python
def value_get(method_name): """ Creates a getter that will call value's method with specified name using the context's key as first argument. @param method_name: the name of a method belonging to the value. @type method_name: str """ def value_get(value, context, **_params): method = getattr(value, method_name) return _get(method, context["key"], (), {}) return value_get
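A hypothetical demo: _get is assumed to be a module-level helper that applies method(key, *args, **kwargs); a trivial stand-in is defined here so the snippet runs on its own, provided it lives in the same namespace as value_get:

# Stand-in for the module's _get helper (an assumption, not the real one).
def _get(method, key, args, kwargs):
    return method(key, *args, **kwargs)

getter = value_get('get')                      # will call value.get(context['key'])
print(getter({'a': 1, 'b': 2}, {'key': 'a'}))  # 1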
java
static <I, O> CipherExecutor<I, O> getInstance() { if (INSTANCE == null) { synchronized (NoOpCipherExecutor.class) { if (INSTANCE == null) { INSTANCE = new NoOpCipherExecutor<>(); } } } return INSTANCE; }
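The same lazily-initialized singleton idea, sketched in Python with an explicit lock (note that in Java the INSTANCE field must also be declared volatile for double-checked locking to be safe):

# Python sketch of double-checked locking for a lazy singleton.
import threading

_instance = None
_lock = threading.Lock()

def get_instance():
    global _instance
    if _instance is None:             # first check, lock-free fast path
        with _lock:
            if _instance is None:     # second check, under the lock
                _instance = object()  # stand-in for the real executor
    return _instance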
java
public static ApiOvhCore getInstance() {
        ApiOvhCore core = new ApiOvhCore();
        core._consumerKey = core.getConsumerKeyOrNull();
        if (core._consumerKey == null) {
            File file = ApiOvhConfigBasic.getOvhConfig();
            String location = ApiOvhConfigBasic.configFiles;
            if (file != null)
                location = file.getAbsolutePath();
            String url = "";
            String CK = "";
            try {
                OvhCredential credential = core.requestToken(null);
                url = credential.validationUrl;
                CK = credential.consumerKey;
            } catch (Exception e) {
                log.error("Failed to request a new Credential", e);
            }
            log.error("activate the CK {} here: {}", CK, url);
            throw new NullPointerException("no 'consumer_key' present in " + location + " or environment 'OVH_CONSUMER_KEY', activate the CK '" + CK + "' here: " + url);
        }
        return core;
    }
python
def influx_query_(self, q): """ Runs an Influx db query """ if self.influx_cli is None: self.err( self.influx_query_, "No database connected. Please initialize a connection") return try: return self.influx_cli.query(q) except Exception as e: self.err(e, self.influx_query_, "Can not query database")
java
@VisibleForTesting static void propagateMetadataFromProps(Map<String, String> metaData, Props inputProps, String nodeType, String nodeName, Logger logger) {
    if (null == metaData || null == inputProps || null == logger ||
        Strings.isNullOrEmpty(nodeType) || Strings.isNullOrEmpty(nodeName)) {
      throw new IllegalArgumentException("Input params should not be null or empty.");
    }

    // Backward compatibility: unless the user specifies this property, it will be absent
    // from flows and jobs; if so, do a no-op like before
    if (!inputProps.containsKey(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE)) {
      return;
    }

    final String propsToPropagate = inputProps.getString(AZKABAN_EVENT_REPORTING_PROPERTIES_TO_PROPAGATE);
    if (Strings.isNullOrEmpty(propsToPropagate)) {
      // Nothing to propagate
      logger.info(String.format("No properties to propagate to metadata for %s: %s", nodeType, nodeName));
      return;
    } else {
      logger.info(String.format("Propagating: %s to metadata for %s: %s", propsToPropagate, nodeType, nodeName));
    }

    final List<String> propsToPropagateList = SPLIT_ON_COMMA.splitToList(propsToPropagate);
    for (String propKey : propsToPropagateList) {
      if (!inputProps.containsKey(propKey)) {
        logger.warn(String.format("%s does not contain the %s property; "
            + "skipping propagation to metadata", nodeName, propKey));
        continue;
      }
      metaData.put(propKey, inputProps.getString(propKey));
    }
  }
java
public static Optional<OffsetRange<CharOffset>> charOffsetsOfWholeString(String s) { if (s.isEmpty()) { return Optional.absent(); } return Optional.of(charOffsetRange(0, s.length() - 1)); }
java
private NodeStruct nodesWithDataTakeUnsafe() { LOG.entering("OperatorTopologyStructImpl", "nodesWithDataTakeUnsafe"); try { final NodeStruct child = nodesWithData.take(); LOG.exiting("OperatorTopologyStructImpl", "nodesWithDataTakeUnsafe", child); return child; } catch (final InterruptedException e) { throw new RuntimeException("InterruptedException while waiting to take data from nodesWithData queue", e); } }
python
def show_pricing(kwargs=None, call=None): ''' Show pricing for a particular profile. This is only an estimate, based on unofficial pricing sources. .. versionadded:: 2015.8.0 CLI Examples: .. code-block:: bash salt-cloud -f show_pricing my-digitalocean-config profile=my-profile ''' profile = __opts__['profiles'].get(kwargs['profile'], {}) if not profile: return {'Error': 'The requested profile was not found'} # Make sure the profile belongs to DigitalOcean provider = profile.get('provider', '0:0') comps = provider.split(':') if len(comps) < 2 or comps[1] != 'digitalocean': return {'Error': 'The requested profile does not belong to DigitalOcean'} raw = {} ret = {} sizes = avail_sizes() ret['per_hour'] = decimal.Decimal(sizes[profile['size']]['price_hourly']) ret['per_day'] = ret['per_hour'] * 24 ret['per_week'] = ret['per_day'] * 7 ret['per_month'] = decimal.Decimal(sizes[profile['size']]['price_monthly']) ret['per_year'] = ret['per_week'] * 52 if kwargs.get('raw', False): ret['_raw'] = raw return {profile['profile']: ret}
java
@Override public void deleteUser(String username) throws AuthenticationException { String user = file_store.getProperty(username); if (user == null) { throw new AuthenticationException("User '" + username + "' not found."); } file_store.remove(username); try { saveUsers(); } catch (IOException e) { throw new AuthenticationException("Error deleting user: ", e); } }
java
public static List<ConnectionParams> manyFromConfig(ConfigParams config, boolean configAsDefault) { List<ConnectionParams> result = new ArrayList<ConnectionParams>(); // Try to get multiple connections first ConfigParams connections = config.getSection("connections"); if (connections.size() > 0) { List<String> connectionSections = connections.getSectionNames(); for (String section : connectionSections) { ConfigParams connection = connections.getSection(section); result.add(new ConnectionParams(connection)); } } // Then try to get a single connection else { ConfigParams connection = config.getSection("connection"); if (connection.size() > 0) result.add(new ConnectionParams(connection)); // Apply default if possible else if (configAsDefault) result.add(new ConnectionParams(config)); } return result; }
python
def creates_cycle(connections, test): """ Returns true if the addition of the 'test' connection would create a cycle, assuming that no cycle already exists in the graph represented by 'connections'. """ i, o = test if i == o: return True visited = {o} while True: num_added = 0 for a, b in connections: if a in visited and b not in visited: if b == i: return True visited.add(b) num_added += 1 if num_added == 0: return False
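The function is pure, so it can be exercised directly on a small feed-forward graph:

# 1 -> 2 -> 3 is acyclic; adding 3 -> 1 would close a cycle.
connections = [(1, 2), (2, 3)]
print(creates_cycle(connections, (3, 1)))   # True
print(creates_cycle(connections, (1, 3)))   # False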
python
def remove_listener(self, registration_id):
        """
        Removes the specified membership listener.

        :param registration_id: (str), registration id of the listener to be deleted.
        :return: (bool), ``true`` if the registration is removed, ``false`` otherwise.
        """
        try:
            self.listeners.pop(registration_id)
            return True
        except KeyError:
            return False
python
def create(self, name, plan, zone, service_type='mongodb', instance_type='mongodb_sharded', version='2.4.6'): """Create an ObjectRocket instance. :param str name: The name to give to the new instance. :param int plan: The plan size of the new instance. :param str zone: The zone that the new instance is to exist in. :param str service_type: The type of service that the new instance is to provide. :param str instance_type: The instance type to create. :param str version: The version of the service the new instance is to provide. """ # Build up request data. url = self._url request_data = { 'name': name, 'service': service_type, 'plan': plan, 'type': instance_type, 'version': version, 'zone': zone } # Call to create an instance. response = requests.post( url, data=json.dumps(request_data), **self._default_request_kwargs ) # Log outcome of instance creation request. if response.status_code == 200: logger.info('Successfully created a new instance with: {}'.format(request_data)) else: logger.info('Failed to create instance with: {}'.format(request_data)) logger.info('Response: [{0}] {1}'.format(response.status_code, response.content)) data = self._get_response_data(response) return self._concrete_instance(data)
java
public static RouteMeta build(RouteType type, Class<?> destination, String path, String group, int priority, int extra) { return new RouteMeta(type, null, destination, null, path, group, null, priority, extra); }
java
public static Builder from(URI swaggerUri) { Validate.notNull(swaggerUri, "swaggerUri must not be null"); String scheme = swaggerUri.getScheme(); if (scheme != null && swaggerUri.getScheme().startsWith("http")) { try { return from(swaggerUri.toURL()); } catch (MalformedURLException e) { throw new RuntimeException("Failed to convert URI to URL", e); } } else if (scheme != null && swaggerUri.getScheme().startsWith("file")) { return from(Paths.get(swaggerUri)); } else { return from(URIUtils.convertUriWithoutSchemeToFileScheme(swaggerUri)); } }
python
def tile_images(img1, img2, mask1, mask2, opts): """Combine two images into one by tiling them. ``mask1`` and ``mask2`` provide optional masks for alpha-blending; pass None to avoid. Fills unused areas with ``opts.bgcolor``. Puts a ``opts.spacing``-wide bar with a thin line of ``opts.sepcolor`` color between them. ``opts.orientation`` can be 'lr' for left-and-right, 'tb' for top-and-bottom, or 'auto' for automatic. """ w1, h1 = img1.size w2, h2 = img2.size if opts.orientation == 'auto': opts.orientation = pick_orientation(img1, img2, opts.spacing) B, S = opts.border, opts.spacing if opts.orientation == 'lr': w, h = (B + w1 + S + w2 + B, B + max(h1, h2) + B) pos1 = (B, (h - h1) // 2) pos2 = (B + w1 + S, (h - h2) // 2) separator_line = [(B + w1 + S//2, 0), (B + w1 + S//2, h)] else: w, h = (B + max(w1, w2) + B, B + h1 + S + h2 + B) pos1 = ((w - w1) // 2, B) pos2 = ((w - w2) // 2, B + h1 + S) separator_line = [(0, B + h1 + S//2), (w, B + h1 + S//2)] img = Image.new('RGBA', (w, h), opts.bgcolor) img.paste(img1, pos1, mask1) img.paste(img2, pos2, mask2) ImageDraw.Draw(img).line(separator_line, fill=opts.sepcolor) return img
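A hedged driver for tile_images using Pillow; opts is just an attribute bag here, and all field values are assumptions chosen to exercise the left-right branch:

# Hypothetical usage; field names mirror those read by tile_images.
from types import SimpleNamespace
from PIL import Image

opts = SimpleNamespace(orientation='lr', border=4, spacing=10,
                       bgcolor=(255, 255, 255, 255), sepcolor=(128, 128, 128))
img1 = Image.new('RGBA', (60, 40), (255, 0, 0, 255))
img2 = Image.new('RGBA', (50, 70), (0, 0, 255, 255))
combined = tile_images(img1, img2, None, None, opts)
combined.save('side-by-side.png')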
java
@Override public void afterCompletion(HttpServletRequest request, HttpServletResponse response, Object handler, Exception ex) { lepManager.endThreadContext(); }
python
def server_to_dict(server): """ Returns the :class:`dict` representation of a server object. The returned :class:`dict` is meant to be consumed by :class:`~bang.deployers.cloud.ServerDeployer` objects. """ return { A.server.ID: server.id, A.server.PUBLIC_IPS: [server.public_dns_name], A.server.PRIVATE_IPS: [server.private_dns_name], }
python
def _remove_column(self, name):
        """
        Removes a column from the blueprint.

        :param name: The column name
        :type name: str

        :rtype: Blueprint
        """
        self._columns = list(filter(lambda c: c.name != name, self._columns))

        return self
java
public boolean bind () { // open our TCP listening ports int successes = 0; for (int port : _ports) { try { acceptConnections(port); successes++; } catch (IOException ioe) { log.warning("Failure listening to socket", "hostname", _bindHostname, "port", port, ioe); } } return successes > 0; }
java
@Override public AuthenticationInfo beforeAllAttempts(Collection<? extends Realm> realms, AuthenticationToken token) throws AuthenticationException { final SimpleAuthenticationInfo authenticationInfo = (SimpleAuthenticationInfo) super.beforeAllAttempts(realms, token); authenticationInfo.setPrincipals(new PrincipalCollectionWithSinglePrincipalForApplicationUserInAnyRealm()); return authenticationInfo; }
java
void apMessage(AnnotationProcessingError ex) { log.printLines(PrefixKind.JAVAC, "msg.proc.annotation.uncaught.exception"); ex.getCause().printStackTrace(log.getWriter(WriterKind.NOTICE)); }
java
public Collection<ScmConfiguration> parseRepositoriesFile(String fileName, String scmType, String scmPpk, String scmUser, String scmPassword) {
        try (InputStream is = new FileInputStream(fileName)) {
            String jsonText = IOUtils.toString(is);
            JSONObject json = new JSONObject(jsonText);
            JSONArray arr = json.getJSONArray(SCM_REPOSITORIES);
            List<ScmConfiguration> configurationList = new LinkedList<>();
            arr.forEach(scm -> {
                JSONObject obj = (JSONObject) scm;
                String url = obj.getString(URL);
                String branch = obj.getString(BRANCH);
                String tag = obj.getString(Constants.TAG);
                configurationList.add(new ScmConfiguration(scmType, scmUser, scmPassword, scmPpk, url, branch, tag, null, false, 1));
            });
            return configurationList;
        } catch (FileNotFoundException e) {
            logger.error("File not found: {}", fileName);
        } catch (IOException e) {
            logger.error("Error reading file: {}", e.getMessage());
        }
        return null;
    }
python
async def reset(self): """ reset the tournament on Challonge |methcoro| Note: |from_api| Reset a tournament, clearing all of its scores and attachments. You can then add/remove/edit participants before starting the tournament again. Raises: APIException """ params = { 'include_participants': 1 if AUTO_GET_PARTICIPANTS else 0, 'include_matches': 1 if AUTO_GET_MATCHES else 0 } res = await self.connection('POST', 'tournaments/{}/reset'.format(self._id), **params) self._refresh_from_json(res)
python
def list_roles():
    """ List existing roles """
    click.echo(green('\nListing roles:'))
    click.echo(green('-' * 40))

    with get_app().app_context():
        roles = Role.query.all()

    if not roles:
        click.echo(red('No roles found'))
        return

    for index, role in enumerate(roles):
        click.echo('{}. {}: {}'.format(
            index + 1,
            yellow(role.handle),
            role.title
        ))

    click.echo()
python
def save_data(self, trigger_id, **data):
        """
        let's save the data

        :param trigger_id: trigger ID from which to save data
        :param data: the data to check to be used and save
        :type trigger_id: int
        :type data: dict
        :return: the status of the save statement
        :rtype: boolean
        """
        if self.token:
            title = self.set_title(data)
            body = self.set_content(data)
            # get the details of this trigger
            trigger = Github.objects.get(trigger_id=trigger_id)

            # check that at least one API access remains
            # before trying to create an issue
            limit = self.gh.ratelimit_remaining
            if limit > 1:
                # repo goes to "owner"
                # project goes to "repository"
                r = self.gh.create_issue(trigger.repo, trigger.project, title, body)
            else:
                # rate limit reached
                logger.warning("Rate limit reached")
                update_result(trigger_id, msg="Rate limit reached", status=True)
                # put the data that could not be published
                # in Github yet back into the cache
                cache.set('th_github_' + str(trigger_id), data, version=2)
                return True
            sentence = 'github {} created'.format(r)
            logger.debug(sentence)
            status = True
        else:
            sentence = "no token or link provided for trigger ID {}".format(trigger_id)
            logger.critical(sentence)
            update_result(trigger_id, msg=sentence, status=False)
            status = False
        return status
python
def analyze_insertions(fa, threads=6):
    """
    - find ORFs using Prodigal
    - find introns using cmscan (vs Rfam intron database)
    - check that ORFs and introns overlap with the insertion region
    - plot insertion length versus model position for each insertion
      (based on insertion type)
    """
    safe, sequences, id2name, names, insertions = analyze_fa(fa)
    seqs = seq_info(names, id2name, insertions, sequences)
    seqs, orfs = find_orfs(safe, seqs)
    seqs = find_introns(safe, seqs, sequences, threads)
    seqs = seqs2bool(seqs)
    seqs = annotate_orfs(orfs, seqs, threads)
    return seqs, id2name
python
def lookup_asset_types(self, sids): """ Retrieve asset types for a list of sids. Parameters ---------- sids : list[int] Returns ------- types : dict[sid -> str or None] Asset types for the provided sids. """ found = {} missing = set() for sid in sids: try: found[sid] = self._asset_type_cache[sid] except KeyError: missing.add(sid) if not missing: return found router_cols = self.asset_router.c for assets in group_into_chunks(missing): query = sa.select((router_cols.sid, router_cols.asset_type)).where( self.asset_router.c.sid.in_(map(int, assets)) ) for sid, type_ in query.execute().fetchall(): missing.remove(sid) found[sid] = self._asset_type_cache[sid] = type_ for sid in missing: found[sid] = self._asset_type_cache[sid] = None return found
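The cache-hits-then-batch-fetch shape here is reusable on its own; a database-free sketch of the same pattern (all names hypothetical):

# Generic sketch: memoize hits, batch-fetch misses, default the rest to None.
def lookup_many(keys, cache, fetch_batch, chunk_size=1000):
    found, missing = {}, set()
    for k in keys:
        if k in cache:
            found[k] = cache[k]
        else:
            missing.add(k)
    todo = list(missing)
    for i in range(0, len(todo), chunk_size):
        for k, v in fetch_batch(todo[i:i + chunk_size]):
            missing.discard(k)
            found[k] = cache[k] = v
    for k in missing:              # keys the backend does not know
        found[k] = cache[k] = None
    return found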
python
def remove(property_name, system=False): """ Remove a configuration property/value setting from the config file. :param property_name: The name of the property to remove. :keyword system: Set to True to modify the system configuration file. If not set, the user config file will be modified. """ config_filename = \ _SYSTEM_CONFIG_FILE if system is True else _USER_CONFIG_FILE config = _read_config(config_filename) section = _MAIN_SECTION_NAME config.remove_option(section, property_name) _write_config(config, config_filename)
python
def txt(self, txt, h=None, at_x=None, to_x=None, change_style=None, change_size=None):
        """Print a string at a defined position.

        ``at_x`` sets the x position directly; ``to_x`` applies only if
        ``at_x`` is None, and when used it forces align='R'.
        """
        h = h or self.height
        self._change_props(change_style, change_size)
        align = 'L'
        w = None
        if at_x is None:
            if to_x is not None:
                align = 'R'
                self.oPdf.set_x(0)
                w = to_x
        else:
            self.oPdf.set_x(at_x)
        if w is None:
            w = self.oPdf.get_string_width(txt)
        self.oPdf.cell(w, h=h, txt=txt, align=align)
java
@Override public Set<String> convert(String value) { //shouldn't ever get called but if it does, the best we can do is return a String set List<String> list = Arrays.asList(ConversionManager.split(value)); Set<String> set = new HashSet<>(list); return set; }
python
def grouper(iterable, n): """ Slice up `iterable` into iterables of `n` items. :param iterable: Iterable to splice. :param n: Number of items per slice. :returns: iterable of iterables """ it = iter(iterable) while True: chunk = itertools.islice(it, n) try: first = next(chunk) except StopIteration: return yield itertools.chain([first], chunk)
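Usage, showing that each chunk must be consumed before advancing and that the final chunk is simply shorter:

for chunk in grouper(range(7), 3):
    print(list(chunk))   # [0, 1, 2] then [3, 4, 5] then [6]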