Dataset columns: "Unnamed: 0" (int64, values 0–389k), "code" (string, lengths 26–79.6k), "docstring" (string, lengths 1–46.9k).
11,800
def nmap_smb_vulnscan(): service_search = ServiceSearch() services = service_search.get_services(ports=[], tags=[], up=True) services = [service for service in services] service_dict = {} for service in services: service.add_tag() service_dict[str(service.address)] = service nmap_args = "-Pn -n --disable-arp-ping --script smb-security-mode.nse,smb-vuln-ms17-010.nse -p 445".split(" ") if services: result = nmap(nmap_args, [str(s.address) for s in services]) parser = NmapParser() report = parser.parse_fromstring(result) smb_signing = 0 ms17 = 0 for nmap_host in report.hosts: for script_result in nmap_host.scripts_results: script_result = script_result.get(, {}) service = service_dict[str(nmap_host.address)] if script_result.get(, ) == : print_success("({}) SMB Signing disabled".format(nmap_host.address)) service.add_tag() smb_signing += 1 if script_result.get(, {}).get(, ) == : print_success("({}) Vulnerable for MS17-010".format(nmap_host.address)) service.add_tag() ms17 += 1 service.update(tags=service.tags) print_notification("Completed, tag added to systems with smb signing disabled, tag added to systems that did not apply MS17-010.") stats = {: smb_signing, : ms17, : len(services)} Logger().log(, .format(len(services)), stats) else: print_notification("No services found to scan.")
Scans available smb services in the database for smb signing and ms17-010.
11,801
def main(command_line=True, **kwargs): def fix_separation(filename, new_filename): old_file = open(filename, ) data = old_file.readlines() new_data = [] for line in data: new_line = line.replace(, ) new_line = new_line.replace(, ) new_data.append(new_line) new_file = open(new_filename, ) for s in new_data: new_file.write(s) old_file.close() new_file.close() return new_filename def old_fix_separation(filename, new_filename): old_file = open(filename, ) data = old_file.readlines() new_data = [] for line in data: new_line = [] for i in line.split(): if in i[1:]: lead_char = if i[0] == else if lead_char: v = i[1:].split() else: v = i.split() new_line.append(lead_char + v[0]) new_line.append( + v[1]) else: new_line.append(i) new_line = (.join(new_line)) + new_data.append(new_line) new_file = open(new_filename, ) for s in new_data: new_file.write(s) new_file.close() old_file.close() return new_filename noave=0 volume=2.5**3 inst="" samp_con,Z=,"" missing=1 demag="N" er_location_name="unknown" citation= args=sys.argv meth_code="LP-NO" version_num=pmag.get_version() dir_path= MagRecs=[] samp_file = meas_file = mag_file = if command_line: if in sys.argv: ind = sys.argv.index() dir_path=sys.argv[ind+1] if in sys.argv: ind = sys.argv.index() input_dir_path = sys.argv[ind+1] else: input_dir_path = dir_path output_dir_path = dir_path if "-h" in args: print(main.__doc__) return False if in args: ind=args.index("-F") meas_file = args[ind+1] if in args: ind = args.index("-fsa") samp_file = args[ind+1] if samp_file[0]!=: samp_file = os.path.join(input_dir_path, samp_file) try: open(samp_file,) ErSamps,file_type=pmag.magic_read(samp_file) except: print(samp_file,) print() if in args: ind = args.index("-f") mag_file= args[ind+1] if "-loc" in args: ind=args.index("-loc") er_location_name=args[ind+1] if "-A" in args: noave=1 if not command_line: dir_path = kwargs.get(, ) input_dir_path = kwargs.get(, dir_path) output_dir_path = dir_path meas_file = kwargs.get(, ) mag_file = kwargs.get(, ) samp_file = kwargs.get(, ) specnum = kwargs.get(, 1) samp_con = kwargs.get(, ) if len(str(samp_con)) > 1: samp_con, Z = samp_con.split() else: Z = er_location_name = kwargs.get(, ) noave = kwargs.get(, 0) meth_code = kwargs.get(, "LP-NO") meth_code=meth_code+":FS-C-DRILL-IODP:SP-SS-C:SO-V" meth_code=meth_code.strip(":") if mag_file: mag_file = os.path.join(input_dir_path, mag_file) samp_file = os.path.join(input_dir_path, samp_file) meas_file = os.path.join(output_dir_path, meas_file) if not mag_file: print("You must provide an IODP_jr6 format file") return False, "You must provide an IODP_jr6 format file" if not os.path.exists(mag_file): print(.format(mag_file)) return False, .format(mag_file) if not os.path.exists(samp_file): print("Your input directory:\n{}\nmust contain an er_samples.txt file, or you must explicitly provide one".format(input_dir_path)) return False, "Your input directory:\n{}\nmust contain an er_samples.txt file, or you must explicitly provide one".format(input_dir_path) temp = os.path.join(output_dir_path, ) fix_separation(mag_file, temp) samples, filetype = pmag.magic_read(samp_file) with open(temp, ) as finput: lines = finput.readlines() os.remove(temp) for line in lines: MagRec = {} line = line.split() spec_text_id = line[0].split()[1] SampRecs=pmag.get_dictitem(samples,,spec_text_id,) if len(SampRecs)>0: MagRec[]=SampRecs[0][] MagRec[]=MagRec[] MagRec[]=MagRec[] MagRec["er_citation_names"]="This study" MagRec[]=er_location_name MagRec[]=version_num MagRec["treatment_temp"]= % (273) 
MagRec["measurement_temp"]= % (273) MagRec["measurement_flag"]= MagRec["measurement_standard"]= MagRec["measurement_number"]= MagRec["treatment_ac_field"]= volume=float(SampRecs[0][]) x = float(line[4]) y = float(line[3]) negz = float(line[2]) cart=np.array([x,y,-negz]).transpose() direction = pmag.cart2dir(cart).transpose() expon = float(line[5]) magn_volume = direction[2] * (10.0**expon) moment = magn_volume * volume MagRec["measurement_magn_moment"]=str(moment) MagRec["measurement_magn_volume"]=str(magn_volume) MagRec["measurement_dec"]=%(direction[0]) MagRec["measurement_inc"]=%(direction[1]) step = line[1] if step == : meas_type="LT-NO" elif step[0:2] == : meas_type="LT-AF-Z" treat=float(step[2:]) MagRec["treatment_ac_field"]= %(treat*1e-3) elif step[0:2] == : meas_type="LT-T-Z" treat=float(step[2:]) MagRec["treatment_temp"]= % (treat+273.) elif step[0:3]==: meas_type="LT-AF-I" treat=float(row[][3:]) MagRec["treatment_ac_field"]= %(treat*1e-3) MagRec["treatment_dc_field"]= %(50e-6) MagRec["measurement_description"]= elif step[0:3]==: meas_type="LT-IRM" treat=float(step[3:]) MagRec["treatment_dc_field"]= %(treat*1e-3) else: print(,row) return False, ,row MagRec[]=meas_type MagRecs.append(MagRec.copy()) else: print(,row[]) MagOuts=pmag.measurements_methods(MagRecs,noave) file_created, error_message = pmag.magic_write(meas_file,MagOuts,) if file_created: return True, meas_file else: return False,
NAME iodp_jr6_magic.py DESCRIPTION converts shipboard .jr6 format files to magic_measurements format files SYNTAX iodp_jr6_magic.py [command line options] OPTIONS -h: prints the help message and quits. -f FILE: specify input file, or -F FILE: specify output file, default is magic_measurements.txt -fsa FILE: specify er_samples.txt file for sample name lookup , default is 'er_samples.txt' -loc HOLE : specify hole name (U1456A) -A: don't average replicate measurements INPUT JR6 .jr6 format file
11,802
def use(cls, ec): if isinstance(ec, (str, unicode)): m = get_model(cls._alias, ec, signal=False) else: m = cls._use(ec) return m
use will duplicate a new Model class and bind it to ec. ec is an Engine name or a Session object.
11,803
def is_orderable(cls): if not getattr(settings, , None): return False labels = resolve_labels(cls) if labels[] in settings.ORDERABLE_MODELS: return settings.ORDERABLE_MODELS[labels[]] return False
Checks if the provided class is specified as orderable in settings.ORDERABLE_MODELS. If it is, return its settings.
11,804
def ensure_matplotlib_figure(obj): import matplotlib from matplotlib.figure import Figure if obj == matplotlib.pyplot: obj = obj.gcf() elif not isinstance(obj, Figure): if hasattr(obj, "figure"): obj = obj.figure if not isinstance(obj, Figure): raise ValueError( "Only matplotlib.pyplot or matplotlib.pyplot.Figure objects are accepted.") if not obj.gca().has_data(): raise ValueError( "You attempted to log an empty plot, pass a figure directly or ensure the global plot isn't closed.") return obj
Extract the current figure from a matplotlib object or return the object if it's a figure. raises ValueError if the object can't be converted.
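A small usage sketch (assuming matplotlib is installed and the ensure_matplotlib_figure helper above is in scope):

import matplotlib.pyplot as plt

plt.plot([0, 1, 2], [0, 1, 4])
fig = ensure_matplotlib_figure(plt)   # extracts the current Figure from the pyplot module
print(type(fig))                      # <class 'matplotlib.figure.Figure'>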
11,805
def set( self, key, value, loader_identifier=None, tomlfy=False, dotted_lookup=True, is_secret=False, ): if "." in key and dotted_lookup is True: return self._dotted_set( key, value, loader_identifier=loader_identifier, tomlfy=tomlfy ) value = parse_conf_data(value, tomlfy=tomlfy) key = key.strip().upper() existing = getattr(self, key, None) if existing is not None and existing != value: value = self._merge_before_set(key, existing, value, is_secret) if isinstance(value, dict): value = DynaBox(value, box_it_up=True) setattr(self, key, value) self.store[key] = value self._deleted.discard(key) if loader_identifier and loader_identifier in self.loaded_by_loaders: self.loaded_by_loaders[loader_identifier][key] = value elif loader_identifier: self.loaded_by_loaders[loader_identifier] = {key: value} elif loader_identifier is None: self._defaults[key] = value
Set a value storing references for the loader :param key: The key to store :param value: The value to store :param loader_identifier: Optional loader name e.g: toml, yaml etc. :param tomlfy: Bool define if value is parsed by toml (defaults False) :param is_secret: Bool define if secret values is hidden on logs.
11,806
def _write_init_models(self, filenames): self.write(destination=self.output_directory, filename="__init__.py", template_name="__init_model__.py.tpl", filenames=self._prepare_filenames(filenames), class_prefix=self._class_prefix, product_accronym=self._product_accronym, header=self.header_content)
Write init file Args: filenames (dict): dict of filename and classes
11,807
def link(self, stream_instance):
    if isinstance(stream_instance, collections.Iterable):
        self.input_stream = stream_instance
    elif getattr(stream_instance, 'output_stream', None):
        self.input_stream = stream_instance.output_stream
    else:
        # error message text reconstructed; the original literal was lost
        raise RuntimeError('Cannot link to object of type %s' % type(stream_instance))
Set my input stream
11,808
def _is_gs_folder(cls, result): return (cls.is_key(result) and result.size == 0 and result.name.endswith(cls._gs_folder_suffix))
Return ``True`` if GS standalone folder object. GS will create a 0 byte ``<FOLDER NAME>_$folder$`` key as a pseudo-directory place holder if there are no files present.
11,809
def buttonbox(msg="", title=" ", choices=("Button[1]", "Button[2]", "Button[3]"), image=None, root=None, default_choice=None, cancel_choice=None): global boxRoot, __replyButtonText, buttonsFrame if default_choice is None: default_choice = choices[0] __replyButtonText = choices[0] if root: root.withdraw() boxRoot = Toplevel(master=root) boxRoot.withdraw() else: boxRoot = Tk() boxRoot.withdraw() boxRoot.title(title) boxRoot.iconname() boxRoot.geometry(st.rootWindowPosition) boxRoot.minsize(400, 100) messageFrame = Frame(master=boxRoot) messageFrame.pack(side=TOP, fill=BOTH) if image: tk_Image = None try: tk_Image = ut.load_tk_image(image) except Exception as inst: print(inst) if tk_Image: imageFrame = Frame(master=boxRoot) imageFrame.pack(side=TOP, fill=BOTH) label = Label(imageFrame, image=tk_Image) label.image = tk_Image label.pack(side=TOP, expand=YES, fill=X, padx=, pady=) buttonsFrame = Frame(master=boxRoot) buttonsFrame.pack(side=TOP, fill=BOTH) messageWidget = Message(messageFrame, text=msg, width=400) messageWidget.configure( font=(st.PROPORTIONAL_FONT_FAMILY, st.PROPORTIONAL_FONT_SIZE)) messageWidget.pack(side=TOP, expand=YES, fill=X, padx=, pady=) __put_buttons_in_buttonframe(choices, default_choice, cancel_choice) boxRoot.deiconify() boxRoot.mainloop() boxRoot.destroy() if root: root.deiconify() return __replyButtonText
Display a msg, a title, an image, and a set of buttons. The buttons are defined by the members of the choices list. :param str msg: the msg to be displayed :param str title: the window title :param list choices: a list or tuple of the choices to be displayed :param str image: Filename of image to display :param str default_choice: The choice you want highlighted when the gui appears :param str cancel_choice: If the user presses the 'X' close, which button should be pressed :return: the text of the button that the user selected
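A hedged usage sketch (assuming a desktop session with Tk available and the buttonbox implementation above importable):

# blocks until the user clicks a button, then returns that button's text
answer = buttonbox(
    msg="Save changes before quitting?",
    title="Confirm",
    choices=("Save", "Discard", "Cancel"),
    default_choice="Save",
    cancel_choice="Cancel",
)
print("User chose:", answer)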
11,810
def draw_flat_samples(**kwargs):
    # keyword names restored from the docstring below
    nsamples = kwargs.get('nsamples', 1)
    min_mass = kwargs.get('min_mass', 1.)
    max_mass = kwargs.get('max_mass', 2.)
    m1 = np.random.uniform(min_mass, max_mass, nsamples)
    m2 = np.random.uniform(min_mass, max_mass, nsamples)
    return np.maximum(m1, m2), np.minimum(m1, m2)
Draw samples uniformly distributed in mass. Parameters ---------- **kwargs Keyword arguments giving the model parameters and the number of samples. Returns ------- array The first (larger) mass array The second (smaller) mass
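A minimal usage sketch (assuming the draw_flat_samples function above is in scope):

import numpy as np

# draw 5 pairs with masses uniform between 1 and 2;
# the first returned array always holds the larger of each pair
m_primary, m_secondary = draw_flat_samples(nsamples=5, min_mass=1.0, max_mass=2.0)
assert np.all(m_primary >= m_secondary)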
11,811
def safe_urlencode(params, doseq=0): if IS_PY3: return urlencode(params, doseq) if hasattr(params, "items"): params = params.items() new_params = [] for k, v in params: k = k.encode("utf-8") if isinstance(v, (list, tuple)): new_params.append((k, [force_bytes(i) for i in v])) else: new_params.append((k, force_bytes(v))) return urlencode(new_params, doseq)
UTF-8-safe version of urlencode. The stdlib urlencode prior to Python 3.x chokes on UTF-8 values which can't fall back to ASCII.
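A short usage sketch, assuming the safe_urlencode wrapper above is importable:

# on Python 2 the non-ASCII value would trip the stdlib urlencode;
# the wrapper encodes keys and values to UTF-8 bytes first
params = {"q": u"café", "tags": ["a", "b"]}
print(safe_urlencode(params, doseq=1))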
11,812
def handle_json_GET_routepatterns(self, params): schedule = self.server.schedule route = schedule.GetRoute(params.get(, None)) if not route: self.send_error(404) return time = int(params.get(, 0)) date = params.get(, "") sample_size = 3 pattern_id_trip_dict = route.GetPatternIdTripDict() patterns = [] for pattern_id, trips in pattern_id_trip_dict.items(): time_stops = trips[0].GetTimeStops() if not time_stops: continue has_non_zero_trip_type = False; trips_with_service = [] for trip in trips: service_id = trip.service_id service_period = schedule.GetServicePeriod(service_id) if date and not service_period.IsActiveOn(date): continue trips_with_service.append(trip) if trip[] and trip[] != : has_non_zero_trip_type = True start_sample_index = len(trips) for i, trip in enumerate(trips): if trip.GetStartTime() >= time: start_sample_index = i break num_after_sample = num_trips - (start_sample_index + sample_size) if num_after_sample < 0: num_after_sample = 0 start_sample_index = num_trips - sample_size sample = [] for t in trips[start_sample_index:start_sample_index + sample_size]: sample.append( (t.GetStartTime(), t.trip_id) ) patterns.append((name, pattern_id, start_sample_index, sample, num_after_sample, (0,1)[has_non_zero_trip_type])) patterns.sort() return patterns
Given a route_id generate a list of patterns of the route. For each pattern include some basic information and a few sample trips.
11,813
def start(self): self._events_to_write = [] self._new_contracts_to_write = [] @events.on(SmartContractEvent.CONTRACT_CREATED) @events.on(SmartContractEvent.CONTRACT_MIGRATED) def call_on_success_event(sc_event: SmartContractEvent): self.on_smart_contract_created(sc_event) @events.on(SmartContractEvent.RUNTIME_NOTIFY) def call_on_event(sc_event: NotifyEvent): self.on_smart_contract_event(sc_event) Blockchain.Default().PersistCompleted.on_change += self.on_persist_completed
Handle EventHub events for SmartContract decorators
11,814
def update_table(self, tablename, throughput=None, global_indexes=None, index_updates=None): kwargs = { : tablename } all_attrs = set() if throughput is not None: kwargs[] = throughput.schema() if index_updates is not None: updates = [] for update in index_updates: all_attrs.update(update.get_attrs()) updates.append(update.serialize()) kwargs[] = updates elif global_indexes is not None: kwargs[] = [ { : { : key, : value.schema(), } } for key, value in six.iteritems(global_indexes) ] if all_attrs: attr_definitions = [attr.definition() for attr in all_attrs] kwargs[] = attr_definitions return self.call(, **kwargs)
Update the throughput of a table and/or global indexes Parameters ---------- tablename : str Name of the table to update throughput : :class:`~dynamo3.fields.Throughput`, optional The new throughput of the table global_indexes : dict, optional DEPRECATED. Use index_updates now. Map of index name to :class:`~dynamo3.fields.Throughput` index_updates : list of :class:`~dynamo3.fields.IndexUpdate`, optional List of IndexUpdates to perform
11,815
def getChecked(self): attrs = [] layout = self.layout() for i in range(layout.count()): w = layout.itemAt(i).widget() if w.isChecked(): attrs.append(str(w.text())) return attrs
Gets the checked attributes :returns: list<str> -- checked attribute names
11,816
def get_passage(self, objectId, subreference): passage = self.resolver.getTextualNode( textId=objectId, subreference=subreference, metadata=True ) return passage
Retrieve the passage identified by the parameters :param objectId: Collection Identifier :type objectId: str :param subreference: Subreference of the passage :type subreference: str :return: An object bearing metadata and its text :rtype: InteractiveTextualNode
11,817
def stage_name(self):
    if 'stage_name' in self.data and self.data.stage_name:
        return self.data.get('stage_name')
    else:
        return self.stage.data.name
Get the stage name of the current job instance. Because a job instance can be created in different ways that yield differently shaped data, we have to check where to read the stage name from. :return: stage name.
11,818
def _write_family(family, filename): with open(filename, ) as f: for detection in family.detections: det_str = for key in detection.__dict__.keys(): if key == and detection.__dict__[key] is not None: value = str(detection.event.resource_id) elif key in [, , ]: value = format(detection.__dict__[key], ).rstrip() else: value = str(detection.__dict__[key]) det_str += key + + value + f.write(det_str + ) return
Write a family to a csv file. :type family: :class:`eqcorrscan.core.match_filter.Family` :param family: Family to write to file :type filename: str :param filename: File to write to.
11,819
def _get_MAP_spikes(F, c_hat, theta, dt, tol=1E-6, maxiter=100, verbosity=0): npix, nt = F.shape sigma, alpha, beta, lamb, gamma = theta alpha_ss = np.dot(alpha, alpha) c = np.dot(alpha, F) - np.dot(alpha, beta) scale_var = 1. / (2 * sigma * sigma) lD = lamb * dt grad_lnprior = np.zeros(nt, dtype=DTYPE) grad_lnprior[1:] = lD grad_lnprior[:-1] -= lD * gamma z = 1. n_hat = c_hat[1:] - gamma * c_hat[:-1] res = c - alpha_ss * c_hat LL_best = _post_LL(n_hat, res, scale_var, lD, z) LL_barrier = LL_best nloop1 = 0 terminate_interior = False % maxiter) terminate_interior = True nloop1 += 1 z /= Z_FAC return n_hat, c_hat, LL_best
Used internally by deconvolve to compute the maximum a posteriori spike train for a given set of fluorescence traces and model parameters. See the documentation for deconvolve for the meaning of the arguments Returns: n_hat_best, c_hat_best, LL_best
11,820
def _socket_reconnect_and_wait_ready(self): logger.info("Start connecting: host={}; port={};".format(self.__host, self.__port)) with self._lock: self._status = ContextStatus.Connecting ret, msg, conn_id = self._net_mgr.connect((self.__host, self.__port), self, 5) if ret == RET_OK: self._conn_id = conn_id else: logger.warning(msg) if ret == RET_OK: while True: with self._lock: if self._sync_req_ret is not None: if self._sync_req_ret.ret == RET_OK: self._status = ContextStatus.Ready else: ret, msg = self._sync_req_ret.ret, self._sync_req_ret.msg self._sync_req_ret = None break sleep(0.01) if ret == RET_OK: ret, msg = self.on_api_socket_reconnected() else: self._wait_reconnect() return RET_OK,
sync_socket & async_socket recreate :return: (ret, msg)
11,821
def pick(self, filenames: Iterable[str]) -> str: filenames = sorted(filenames, reverse=True) for priority in sorted(self.rules.keys(), reverse=True): patterns = self.rules[priority] for pattern in patterns: for filename in filenames: if pattern.search(filename): return filename return filenames[0]
Pick one filename based on priority rules.
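A non-runnable sketch of the documented behaviour, assuming `picker` is an instance of the class defining pick() above, with `rules` mapping an integer priority to a list of compiled regex patterns (the class name here is hypothetical):

import re

picker = FilenamePicker()   # hypothetical class exposing the pick() method above
picker.rules = {10: [re.compile(r"\.whl$")], 5: [re.compile(r"\.tar\.gz$")]}

# the .whl pattern wins because its priority (10) is checked first
print(picker.pick(["pkg-1.0.tar.gz", "pkg-1.0-py3-none-any.whl"]))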
11,822
def solveConsKinkedR(solution_next, IncomeDstn, LivPrb, DiscFac, CRRA, Rboro, Rsave,
                     PermGroFac, BoroCnstArt, aXtraGrid, vFuncBool, CubicBool):
    solver = ConsKinkedRsolver(solution_next, IncomeDstn, LivPrb,
                               DiscFac, CRRA, Rboro, Rsave, PermGroFac, BoroCnstArt,
                               aXtraGrid, vFuncBool, CubicBool)
    solver.prepareToSolve()
    solution = solver.solve()
    return solution
Solves a single period consumption-saving problem with CRRA utility and risky income (subject to permanent and transitory shocks), and different interest factors on borrowing and saving. Restriction: Rboro >= Rsave. Currently cannot construct a cubic spline consumption function, only linear. Can gen- erate a value function if requested. Parameters ---------- solution_next : ConsumerSolution The solution to next period's one period problem. IncomeDstn : [np.array] A list containing three arrays of floats, representing a discrete approximation to the income process between the period being solved and the one immediately following (in solution_next). Order: event probabilities, permanent shocks, transitory shocks. LivPrb : float Survival probability; likelihood of being alive at the beginning of the succeeding period. DiscFac : float Intertemporal discount factor for future utility. CRRA : float Coefficient of relative risk aversion. Rboro: float Interest factor on assets between this period and the succeeding period when assets are negative. Rsave: float Interest factor on assets between this period and the succeeding period when assets are positive. PermGroFac : float Expected permanent income growth factor at the end of this period. BoroCnstArt: float or None Borrowing constraint for the minimum allowable assets to end the period with. If it is less than the natural borrowing constraint, then it is irrelevant; BoroCnstArt=None indicates no artificial bor- rowing constraint. aXtraGrid: np.array Array of "extra" end-of-period asset values-- assets above the absolute minimum acceptable level. vFuncBool: boolean An indicator for whether the value function should be computed and included in the reported solution. CubicBool: boolean Indicator for whether the solver should use cubic or linear interpolation. Returns ------- solution_now : ConsumerSolution The solution to the single period consumption-saving problem. Includes a consumption function cFunc (using cubic or linear splines), a marginal value function vPfunc, a minimum acceptable level of normalized market resources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin and MPCmax. It might also have a value function vFunc.
11,823
def main(argv=None): arguments = cli_common(__doc__, argv=argv) es_export = ESExporter(arguments[], arguments[]) es_export.export() if argv is not None: return es_export
ben-elastic entry point
11,824
def get_sources(src_dir=, ending=): return [os.path.join(src_dir, fnm) for fnm in os.listdir(src_dir) if fnm.endswith(ending)]
Function to get a list of files ending with `ending` in `src_dir`.
11,825
def command_gen(self, *names): if not names: sys.exit() for name in names: name, count = name, 0 if in name: name, count = name.split(, 1) count = int(count) create = self.generators[name] print(.format(name, count)) create(self.session, count) self.session.commit()
Runs generator functions. Run `docs` generator function:: ./manage.py sqla:gen docs Run `docs` generator function with `count=10`:: ./manage.py sqla:gen docs:10
11,826
def pdftojpg(filehandle, meta):
    # key names and defaults restored from the docstring below
    resolution = meta.get('resolution', 300)
    width = meta.get('width', 1080)
    bgcolor = Color(meta.get('bgcolor', 'white'))
    stream = BytesIO()
    with Image(blob=filehandle.stream, resolution=resolution) as img:
        img.background_color = bgcolor
        img.alpha_channel = False
        img.format = 'jpeg'
        ratio = width / img.width
        img.resize(width, int(ratio * img.height))
        img.compression_quality = 90
        img.save(file=stream)
    stream.seek(0)
    filehandle.stream = stream
    return filehandle
Converts a PDF to a JPG and places it back onto the FileStorage instance passed to it as a BytesIO object. Optional meta arguments are: * resolution: int or (int, int) used for wand to determine resolution, defaults to 300. * width: new width of the image for resizing, defaults to 1080 * bgcolor: new background color, defaults to 'white'
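A hedged usage sketch (assuming a Flask/werkzeug-style FileStorage input and that Wand with its ImageMagick backend is installed; the filename is illustrative):

from io import BytesIO
from werkzeug.datastructures import FileStorage

with open("report.pdf", "rb") as fh:            # hypothetical input file
    storage = FileStorage(stream=BytesIO(fh.read()), filename="report.pdf")

converted = pdftojpg(storage, {"resolution": 150, "width": 720, "bgcolor": "white"})
# converted.stream now holds JPEG bytes ready to be saved or uploaded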
11,827
def filter(self, query: Query, entity: type) -> Tuple[Query, Any]: new_query = query c_filter_list = [] for child in self._childs: new_query, f_list = child.filter(new_query, entity) c_filter_list.append(f_list) return ( new_query, self._method(*c_filter_list) )
Apply the `_method` to all childs of the node. :param query: The sqlachemy query. :type query: Query :param entity: The entity model of the query. :type entity: type :return: A tuple with in first place the updated query and in second place the list of filters to apply to the query. :rtype: Tuple[Query, Any]
11,828
def clear(self): self.grid = [[EMPTY for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]
Clears grid to be EMPTY
11,829
def writeRunSetInfoToLog(self, runSet): runSetInfo = "\n\n" if runSet.name: runSetInfo += runSet.name + "\n" runSetInfo += "Run set {0} of {1} with options and propertyfile \n\n".format( runSet.index, len(self.benchmark.run_sets), " ".join(runSet.options), runSet.propertyfile) titleLine = self.create_output_line(runSet, "inputfile", "status", "cpu time", "wall time", "host", self.benchmark.columns, True) runSet.simpleLine = "-" * (len(titleLine)) runSetInfo += titleLine + "\n" + runSet.simpleLine + "\n" self.txt_file.append(runSetInfo)
This method writes the information about a run set into the txt_file.
11,830
def dmp_path(regex, kwargs=None, name=None, app_name=None):
    return PagePattern(regex, kwargs, name, app_name)
Creates a DMP-style, convention-based pattern that resolves to various view functions based on the 'dmp_page' value. The following should exist as 1) regex named groups or 2) items in the kwargs dict: dmp_app Should resolve to a name in INSTALLED_APPS. If missing, defaults to DEFAULT_APP. dmp_page The page name, which should resolve to a module: project_dir/{dmp_app}/views/{dmp_page}.py If missing, defaults to DEFAULT_PAGE. dmp_function The function name (or View class name) within the module. If missing, defaults to 'process_request' dmp_urlparams The urlparams string to parse. If missing, defaults to ''. The reason for this convenience function is to be similar to Django functions like url(), re_path(), and path().
11,831
def ex6_2(n): x = np.zeros(len(n)) for k, nn in enumerate(n): if nn >= -2 and nn <= 5: x[k] = 8 - nn return x
Generate a triangle pulse as described in Example 6-2 of Chapter 6. You need to supply an index array n that covers at least [-2, 5]. The function returns the hard-coded signal of the example. Parameters ---------- n : time index ndarray covering at least -2 to +5. Returns ------- x : ndarray of signal samples in x Examples -------- >>> import numpy as np >>> import matplotlib.pyplot as plt >>> from sk_dsp_comm import sigsys as ss >>> n = np.arange(-5,8) >>> x = ss.ex6_2(n) >>> plt.stem(n,x) # creates a stem plot of x vs n
11,832
def addResource(self, key, filePath, text): url = self.root + "/addresource" params = { "f": "json", "token" : self._securityHandler.token, "key" : key, "text" : text } files = {} files[] = filePath res = self._post(url=url, param_dict=params, files=files, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port) return res
The add resource operation allows the administrator to add a file resource, for example, the organization's logo or custom banner. The resource can be used by any member of the organization. File resources use storage space from your quota and are scanned for viruses. Inputs: key - The name the resource should be stored under. filePath - path of file to upload text - Some text to be written (for example, JSON or JavaScript) directly to the resource from a web client.
11,833
def reading_order(e1, e2): b1 = e1.bbox b2 = e2.bbox if round(b1[y0]) == round(b2[y0]) or round(b1[y1]) == round(b2[y1]): return float_cmp(b1[x0], b2[x0]) return float_cmp(b1[y0], b2[y0])
A comparator to sort bboxes from top to bottom, left to right
11,834
def _adjustSyllabification(adjustedPhoneList, syllableList): i = 0 retSyllableList = [] for syllableNum, syllable in enumerate(syllableList): j = len(syllable) if syllableNum == len(syllableList) - 1: j = len(adjustedPhoneList) - i tmpPhoneList = adjustedPhoneList[i:i + j] numBlanks = -1 phoneList = tmpPhoneList[:] while numBlanks != 0: numBlanks = tmpPhoneList.count(u"") if numBlanks > 0: tmpPhoneList = adjustedPhoneList[i + j:i + j + numBlanks] phoneList.extend(tmpPhoneList) j += numBlanks for k, phone in enumerate(phoneList): if phone == u"": syllable.insert(k, u"") i += j retSyllableList.append(syllable) return retSyllableList
Inserts spaces into a syllable if needed Originally the phone list and syllable list contained the same number of phones. But the adjustedPhoneList may have some insertions which are not accounted for in the syllableList.
11,835
def publish(self, topic, obj, reference_message=None): logging.debug("Publishing topic (%s): \n%s" % (topic, obj)) e = Event( data=obj, type=topic, ) if hasattr(obj, "sender"): e.sender = obj.sender if reference_message: original_incoming_event_hash = None if hasattr(reference_message, "original_incoming_event_hash"): original_incoming_event_hash = reference_message.original_incoming_event_hash elif hasattr(reference_message, "source") and hasattr(reference_message.source, "hash"): original_incoming_event_hash = reference_message.source.hash elif hasattr(reference_message, "source") and hasattr(reference_message.source, "original_incoming_event_hash"): original_incoming_event_hash = reference_message.source.original_incoming_event_hash elif hasattr(reference_message, "hash"): original_incoming_event_hash = reference_message.hash if original_incoming_event_hash: e.original_incoming_event_hash = original_incoming_event_hash return self.publish_to_backend( self._localize_topic(topic), self.encrypt(e) )
Sends an object out over the pubsub connection, properly formatted, and conforming to the protocol. Handles pickling for the wire, etc. This method should *not* be subclassed.
11,836
def _getch_unix(_getall=False): import sys, termios fd = sys.stdin.fileno() old_settings = termios.tcgetattr(fd) chars = [] try: newattr = list(old_settings) newattr[3] &= ~termios.ICANON newattr[3] &= ~termios.ECHO newattr[6][termios.VMIN] = 1 newattr[6][termios.VTIME] = 0 termios.tcsetattr(fd, termios.TCSANOW, newattr) ch = sys.stdin.read(1) chars = [ch] if _getall: newattr = termios.tcgetattr(fd) newattr[6][termios.VMIN] = 0 newattr[6][termios.VTIME] = 0 termios.tcsetattr(fd, termios.TCSANOW, newattr) while True: ch = sys.stdin.read(1) if ch != : chars.append(ch) else: break finally: termios.tcsetattr(fd, termios.TCSADRAIN, old_settings) if _getall: return chars else: return chars[0]
# --- current algorithm --- # 1. switch to char-by-char input mode # 2. turn off echo # 3. wait for at least one char to appear # 4. read the rest of the character buffer (_getall=True) # 5. return list of characters (_getall on) # or a single char (_getall off)
11,837
async def send_nym(self, did: str, verkey: str = None, alias: str = None, role: Role = None) -> None: LOGGER.debug( , did, verkey, alias, role) if not ok_did(did): LOGGER.debug(, did) raise BadIdentifier(.format(did)) req_json = await ledger.build_nym_request(self.did, did, verkey, alias, (role or Role.USER).token()) await self._sign_submit(req_json) LOGGER.debug()
Send input anchor's cryptonym (including DID, verification key, plus optional alias and role) to the distributed ledger. Raise BadLedgerTxn on failure, BadIdentifier for bad DID, or BadRole for bad role. :param did: anchor DID to send to ledger :param verkey: optional anchor verification key :param alias: optional alias :param role: anchor role on the ledger (default value of USER)
11,838
def parse_legacy_argstring(argstring): arg1arg2item1item2 argstring = argstring.replace(, ) argstring = argstring.replace(, ) argstring = argstring.replace(, ) argbits = shlex.split(argstring) args = [] arg_buff = [] list_buff = [] in_list = False for bit in argbits: if bit == and not in_list: in_list = True continue elif bit == and in_list: in_list = False args.append(list_buff) list_buff = [] continue elif bit == : if not in_list and arg_buff: args.append(.join(arg_buff)) arg_buff = [] continue bit = bit.replace(, ) bit = bit.replace(, ) bit = bit.replace(, ) if in_list: list_buff.append(bit) else: arg_buff.append(bit) if arg_buff: args.append(.join(arg_buff)) return args
Preparses CLI input: ``arg1,arg2`` => ``['arg1', 'arg2']`` ``[item1, item2],arg2`` => ``[['item1', 'item2'], arg2]``
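A short illustration of the documented behaviour (assuming parse_legacy_argstring above is in scope):

# plain comma-separated arguments become a flat list
print(parse_legacy_argstring("arg1,arg2"))             # ['arg1', 'arg2']

# a bracketed group becomes a nested list
print(parse_legacy_argstring("[item1, item2],arg2"))   # [['item1', 'item2'], 'arg2']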
11,839
def handle_read(self): if self.state == STATE_DEAD: return global nr_handle_read nr_handle_read += 1 new_data = self._handle_read_chunk() if self.debug: self.print_debug(b + new_data) if self.handle_read_fast_case(self.read_buffer): return lf_pos = new_data.find(b) if lf_pos >= 0: lf_pos += len(self.read_buffer) - len(new_data) elif self.state is STATE_NOT_STARTED and \ options.password is not None and \ b in self.read_buffer.lower(): self.dispatch_write(.format(options.password).encode()) self.read_buffer = b return while lf_pos >= 0: line = self.read_buffer[:lf_pos + 1] if callbacks.process(line): pass elif self.state in (STATE_IDLE, STATE_RUNNING): self.print_lines(line) elif self.state is STATE_NOT_STARTED: self.read_in_state_not_started += line if b in line: msg = line.strip(b) + b self.disconnect() elif b in line: msg = b else: msg = None if msg: self.print_lines(msg + b b) self.read_buffer = self.read_buffer[lf_pos + 1:] if self.handle_read_fast_case(self.read_buffer): return lf_pos = self.read_buffer.find(b) if self.state is STATE_NOT_STARTED and not self.init_string_sent: self.dispatch_write(self.init_string) self.init_string_sent = True
We got some output from a remote shell; this is one of the state-machine handlers.
11,840
def config_diff(args): config_1 = config_get(args).splitlines() args.project = args.Project args.workspace = args.Workspace cfg_1_name = args.config if args.Config is not None: args.config = args.Config if args.Namespace is not None: args.namespace = args.Namespace config_2 = config_get(args).splitlines() if not args.verbose: config_1 = skip_cfg_ver(config_1) config_2 = skip_cfg_ver(config_2) return list(unified_diff(config_1, config_2, cfg_1_name, args.config, lineterm=))
Compare method configuration definitions across workspaces. Ignores methodConfigVersion if the verbose argument is not set
11,841
def create_build(self, tarball_url, env=None, app_name=None): data = { : { : tarball_url } } if env: data[] = {: env} if app_name: data[] = {: app_name} return self.api_request(, , data=data)
Creates an app-setups build. Returns response data as a dict. :param tarball_url: URL of a tarball containing an ``app.json``. :param env: Dict containing environment variable overrides. :param app_name: Name of the Heroku app to create. :returns: Response data as a ``dict``.
11,842
def compact_interval_string(value_list):
    if not value_list:
        return ''
    value_list.sort()
    interval_list = []
    curr = []
    for val in value_list:
        if curr and (val > curr[-1] + 1):
            interval_list.append((curr[0], curr[-1]))
            curr = [val]
        else:
            curr.append(val)
    if curr:
        interval_list.append((curr[0], curr[-1]))
    # format strings restored from the docstring example "1-5,8,12-15"
    return ','.join([
        '{}-{}'.format(pair[0], pair[1]) if pair[0] != pair[1] else str(pair[0])
        for pair in interval_list
    ])
Compact a list of integers into a comma-separated string of intervals. Args: value_list: A list of sortable integers such as a list of numbers Returns: A compact string representation, such as "1-5,8,12-15"
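A quick check of the documented behaviour, assuming the function above is in scope:

print(compact_interval_string([1, 2, 3, 4, 5, 8, 12, 13, 14, 15]))  # "1-5,8,12-15"
print(compact_interval_string([7]))                                 # "7"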
11,843
def write(self, inline):
    frame = inspect.currentframe().f_back
    if frame:
        mod = frame.f_globals.get('__name__')
    else:
        mod = sys._getframe(0).f_globals.get('__name__')
    if mod not in self.modulenames:
        self.stdout.write(inline)
Write a line to stdout if it isn't in a blacklist Try to get the name of the calling module to see if we want to filter it. If there is no calling module, use current frame in case there's a traceback before there is any calling module
11,844
def rollout(self, batch_info: BatchInfo, model: RlModel, number_of_steps: int) -> Rollout: assert not model.is_recurrent, "Replay env roller does not support recurrent models" accumulator = TensorAccumulator() episode_information = [] for step_idx in range(number_of_steps): step = model.step(self.last_observation) if self.action_noise is not None: step[] = self.action_noise(step[], batch_info=batch_info) replay_extra_information = {} accumulator.add(, self.last_observation_cpu) for name, tensor in step.items(): tensor_cpu = tensor.cpu() accumulator.add(name, tensor_cpu) if name != : replay_extra_information[name] = tensor_cpu.numpy() actions_numpy = step[].detach().cpu().numpy() new_obs, new_rewards, new_dones, new_infos = self.environment.step(actions_numpy) self.replay_buffer.store_transition( frame=self.last_observation_cpu.numpy(), action=actions_numpy, reward=new_rewards, done=new_dones, extra_info=replay_extra_information ) if self.ret_rms is not None: self.accumulated_returns = new_rewards + self.discount_factor * self.accumulated_returns self.ret_rms.update(self.accumulated_returns) dones_tensor = torch.from_numpy(new_dones.astype(np.float32)).clone() accumulator.add(, dones_tensor) if self.action_noise is not None: self.action_noise.reset_training_state(dones_tensor, batch_info=batch_info) self.accumulated_returns = self.accumulated_returns * (1.0 - new_dones.astype(np.float32)) self.last_observation_cpu = torch.from_numpy(new_obs).clone() self.last_observation = self.last_observation_cpu.to(self.device) if self.ret_rms is not None: new_rewards = np.clip(new_rewards / np.sqrt(self.ret_rms.var + 1e-8), -self.clip_obs, self.clip_obs) accumulator.add(, torch.from_numpy(new_rewards.astype(np.float32)).clone()) episode_information.append(new_infos) accumulated_tensors = accumulator.result() return Trajectories( num_steps=accumulated_tensors[].size(0), num_envs=accumulated_tensors[].size(1), environment_information=episode_information, transition_tensors=accumulated_tensors, rollout_tensors={} ).to_transitions()
Calculate env rollout
11,845
def _sendline(self, line): self.lines = [] try: self._read() except socket.error: logging.debug() logger.debug(, line) self._write(line + ) time.sleep(0.5)
Send exactly one line to the device. Args: line str: data sent to the device
11,846
def decorate(decorator_cls, *args, **kwargs): global _wrappers wrapper_cls = _wrappers.get(decorator_cls, None) if wrapper_cls is None: class PythonWrapper(decorator_cls): pass wrapper_cls = PythonWrapper wrapper_cls.__name__ = decorator_cls.__name__ + "PythonWrapper" _wrappers[decorator_cls] = wrapper_cls def decorator(fn): wrapped = wrapper_cls(fn, *args, **kwargs) _update_wrapper(wrapped, fn) return wrapped return decorator
Creates a decorator function that applies the decorator_cls that was passed in.
11,847
def import_orm(self): orm = {} data_source = self.config[][] mv_grid_districts_name = self.config[data_source][] mv_stations_name = self.config[data_source][] lv_load_areas_name = self.config[data_source][] lv_grid_district_name = self.config[data_source][] lv_stations_name = self.config[data_source][] conv_generators_name = self.config[data_source][] re_generators_name = self.config[data_source][] from egoio.db_tables import model_draft as orm_model_draft, \ supply as orm_supply, \ demand as orm_demand, \ grid as orm_grid if data_source == : orm[] = orm_model_draft.__getattribute__(mv_grid_districts_name) orm[] = orm_model_draft.__getattribute__(mv_stations_name) orm[] = orm_model_draft.__getattribute__(lv_load_areas_name) orm[] = orm_model_draft.__getattribute__(lv_grid_district_name) orm[] = orm_model_draft.__getattribute__(lv_stations_name) orm[] = orm_model_draft.__getattribute__(conv_generators_name) orm[] = orm_model_draft.__getattribute__(re_generators_name) orm[] = 1 == 1 orm[] = 1 == 1 orm[] = 1 == 1 orm[] = 1 == 1 orm[] = 1 == 1 orm[] = 1 == 1 orm[] = 1 == 1 elif data_source == : orm[] = orm_grid.__getattribute__(mv_grid_districts_name) orm[] = orm_grid.__getattribute__(mv_stations_name) orm[] = orm_demand.__getattribute__(lv_load_areas_name) orm[] = orm_grid.__getattribute__(lv_grid_district_name) orm[] = orm_grid.__getattribute__(lv_stations_name) orm[] = orm_supply.__getattribute__(conv_generators_name) orm[] = orm_supply.__getattribute__(re_generators_name) orm[] = self.config[data_source][] orm[] =\ orm[].version == orm[] orm[] = \ orm[].version == orm[] orm[] =\ orm[].version == orm[] orm[] =\ orm[].version == orm[] orm[] =\ orm[].version == orm[] orm[] =\ orm[].columns.version == orm[] orm[] =\ orm[].columns.version == orm[] else: logger.error("Invalid data source {} provided. Please re-check the file " "`config_db_tables.cfg`".format(data_source)) raise NameError("{} is no valid data source!".format(data_source)) return orm
Import ORM classes for oedb access depending on the data source configured in self.config, which is loaded from 'config_db_tables.cfg'. Returns ------- dict Dictionary of ORM classes and version filters.
11,848
def dumps(self): r string = "" if self.row_height is not None: row_height = Command(, arguments=[ NoEscape(r), self.row_height]) string += row_height.dumps() + if self.col_space is not None: col_space = Command(, arguments=[ NoEscape(r), self.col_space]) string += col_space.dumps() + return string + super().dumps()
r"""Turn the Latex Object into a string in Latex format.
11,849
def visit_List(self, node: ast.List) -> List[Any]: if isinstance(node.ctx, ast.Store): raise NotImplementedError("Can not compute the value of a Store on a list") result = [self.visit(node=elt) for elt in node.elts] self.recomputed_values[node] = result return result
Visit the elements and assemble the results into a list.
11,850
def reindex_like(self, other, method=None, tolerance=None, copy=True): indexers = alignment.reindex_like_indexers(self, other) return self.reindex(indexers=indexers, method=method, copy=copy, tolerance=tolerance)
Conform this object onto the indexes of another object, filling in missing values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to pandas.Index objects, which provides coordinates upon which to index the variables in this dataset. The indexes on this other object need not be the same as the indexes on this dataset. Any mis-matched index values will be filled in with NaN, and any mis-matched dimension names will simply be ignored. method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional Method to use for filling index values from other not found in this dataset: * None (default): don't fill gaps * pad / ffill: propagate last valid index value forward * backfill / bfill: propagate next valid index value backward * nearest: use nearest valid index value (requires pandas>=0.16) tolerance : optional Maximum distance between original and new labels for inexact matches. The values of the index at the matching locations must satisfy the equation ``abs(index[indexer] - target) <= tolerance``. Requires pandas>=0.17. copy : bool, optional If ``copy=True``, data in the return value is always copied. If ``copy=False`` and reindexing is unnecessary, or can be performed with only slice operations, then the output may share memory with the input. In either case, a new xarray object is always returned. Returns ------- reindexed : Dataset Another dataset, with this dataset's data but coordinates from the other object. See Also -------- Dataset.reindex align
11,851
def get(self, url=None, delimiter="/"): params = {: delimiter} bucket, obj_key = _parse_url(url) if bucket: params[] = bucket else: return self.call("ListBuckets", response_data_key="Buckets") if obj_key: params[] = obj_key objects = self.call("ListObjects", response_data_key="Contents", **params) if objects: for obj in objects: obj[] = "s3://{0}/{1}".format(bucket, obj[]) return objects
Path is an s3 url. Omitting the path or providing "s3://" as the path will return a list of all buckets. Otherwise, all subdirectories and their contents will be shown.
11,852
def get_id_constraints(pkname, pkey): if isinstance(pkname, str): return {pkname: pkey} else: return dict(zip(pkname, pkey))
Returns primary key constraints. :pkname: if a string, returns a dict with {pkname: pkey}; otherwise pkname and pkey must be iterables of matching length.
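Two quick examples of the documented behaviour (assuming the helper above is in scope):

print(get_id_constraints('id', 42))                  # {'id': 42}
print(get_id_constraints(('org', 'user'), (7, 99)))  # {'org': 7, 'user': 99}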
11,853
def _get_lane_properties(self, node): lane_name = self.get_lane(node.get()) lane_data = {: lane_name} for a in self.xpath(".//bpmn:lane[@name=]/*/*/" % lane_name): lane_data[a.attrib[]] = a.attrib[].strip() return lane_data
Parses the given XML node Args: node (xml): XML node. .. code-block:: xml <bpmn2:lane id="Lane_8" name="Lane 8"> <bpmn2:extensionElements> <camunda:properties> <camunda:property value="foo,bar" name="perms"/> </camunda:properties> </bpmn2:extensionElements> </bpmn2:lane> Returns: {'perms': 'foo,bar'}
11,854
def get_volume_object_info(self, location): param = {: location} data = self._api.get(url=self._URL[].format( id=self.id), params=param).json() return VolumeObject(api=self._api, **data)
Fetches information about a single volume object (usually a file). :param location: object location :return: VolumeObject
11,855
def start(self, exceptions): if not self._interval: return False self._running.set() with self._lock: self._threshold = 0 self._reads_since_check = 0 self._writes_since_check = 0 self._exceptions = exceptions LOGGER.debug() return self._start_new_timer()
Start the Heartbeat Checker. :param list exceptions: :return:
11,856
def GetFormattedMessages(self, event): event_formatter = self.GetEventFormatter(event) if not event_formatter: return None, None return event_formatter.GetMessages(self._formatter_mediator, event)
Retrieves the formatted messages related to the event. Args: event (EventObject): event. Returns: tuple: containing: str: full message string or None if no event formatter was found. str: short message string or None if no event formatter was found.
11,857
def generic_visit(self, node): if (isinstance(node, ast.stmt) and not isinstance(node, ast.FunctionDef)): new_node = self.wrap_with_try(node) if isinstance(node, self.ast_try_except): self.try_except_handler(node) return new_node super(ErrorsCatchTransformer, self).generic_visit(node) return new_node return super(ErrorsCatchTransformer, self).generic_visit(node)
Surround node statement with a try/except block to catch errors. This method is called for every node of the parsed code, and only changes statement lines. Args: node (ast.AST): node statement to surround.
11,858
def calculate(self, **state): T = state[] y = state[] x = amount_fractions(y) return super().calculate(T=T, x=x)
Calculate dynamic viscosity at the specified temperature and composition: :param T: [K] temperature :param y: [mass fraction] composition dictionary , e.g. \ {'SiO2': 0.25, 'CaO': 0.25, 'MgO': 0.25, 'FeO': 0.25} :returns: [Pa.s] dynamic viscosity The **state parameter contains the keyword argument(s) specified above\ that are used to describe the state of the material.
11,859
def search_directory(self, **kwargs): search_response = self.request(, kwargs) result = {} items = { "account": zobjects.Account.from_dict, "domain": zobjects.Domain.from_dict, "dl": zobjects.DistributionList.from_dict, "cos": zobjects.COS.from_dict, "calresource": zobjects.CalendarResource.from_dict } for obj_type, func in items.items(): if obj_type in search_response: if isinstance(search_response[obj_type], list): result[obj_type] = [ func(v) for v in search_response[obj_type]] else: result[obj_type] = func(search_response[obj_type]) return result
SearchAccount is deprecated, using SearchDirectory :param query: Query string - should be an LDAP-style filter string (RFC 2254) :param limit: The maximum number of accounts to return (0 is default and means all) :param offset: The starting offset (0, 25, etc) :param domain: The domain name to limit the search to :param applyCos: applyCos - Flag whether or not to apply the COS policy to account. Specify 0 (false) if only requesting attrs that aren't inherited from COS :param applyConfig: whether or not to apply the global config attrs to account. specify 0 (false) if only requesting attrs that aren't inherited from global config :param sortBy: Name of attribute to sort on. Default is the account name. :param types: Comma-separated list of types to return. Legal values are: accounts|distributionlists|aliases|resources|domains|coses (default is accounts) :param sortAscending: Whether to sort in ascending order. Default is 1 (true) :param countOnly: Whether response should be count only. Default is 0 (false) :param attrs: Comma-seperated list of attrs to return ("displayName", "zimbraId", "zimbraAccountStatus") :return: dict of list of "account" "alias" "dl" "calresource" "domain" "cos"
11,860
def update(self, obj, **kwargs): "Update the tree item when the object name changes" child = self.tree.FindItem(self.root, kwargs[]) if DEBUG: print "update child", child, kwargs if child: self.tree.ScrollTo(child) self.tree.SetCurrentItem(child) self.tree.SelectItem(child) child.Selected = True self.tree.SetItemText(child, obj.name, 0)
Update the tree item when the object name changes
11,861
def mtie_phase_fast(phase, rate=1.0, data_type="phase", taus=None): rate = float(rate) phase = np.asarray(phase) k_max = int(np.floor(np.log2(len(phase)))) phase = phase[0:pow(2, k_max)] assert len(phase) == pow(2, k_max) taus = [ pow(2,k) for k in range(k_max)] print("taus N=", len(taus), " ",taus) devs = np.zeros(len(taus)) deverrs = np.zeros(len(taus)) ns = np.zeros(len(taus)) taus_used = np.array(taus) mtie_max = np.zeros((len(phase)-1, k_max)) mtie_min = np.zeros((len(phase)-1, k_max)) for kidx in range(k_max): k = kidx+1 imax = len(phase)-pow(2, k)+1 tie = np.zeros(imax) ns[kidx]=imax for i in range(imax): if k == 1: mtie_max[i, kidx] = max(phase[i], phase[i+1]) mtie_min[i, kidx] = min(phase[i], phase[i+1]) else: p = int(pow(2, k-1)) mtie_max[i, kidx] = max(mtie_max[i, kidx-1], mtie_max[i+p, kidx-1]) mtie_min[i, kidx] = min(mtie_min[i, kidx-1], mtie_min[i+p, kidx-1]) tie[i] = mtie_max[i, kidx] - mtie_min[i, kidx] devs[kidx] = np.amax(tie) devs = np.array(devs) print("devs N=",len(devs)," ",devs) print("taus N=", len(taus_used), " ",taus_used) return remove_small_ns(taus_used, devs, deverrs, ns)
fast binary decomposition algorithm for MTIE See: STEFANO BREGNI "Fast Algorithms for TVAR and MTIE Computation in Characterization of Network Synchronization Performance"
11,862
def trust_key(keyid=None, fingerprint=None, trust_level=None, user=None): s keychain to access, defaults to user Salt is running as. Passing the user as ``salt`` will set the GnuPG home directory to the ``/etc/salt/gpgkeys``. CLI Example: .. code-block:: bash salt gpg.trust_key keyid= trust_level= salt gpg.trust_key fingerprint= trust_level= salt gpg.trust_key keys=3FAD9F1E trust_level= user= resmessageexpiredunknownnot_trustedmarginallyfullyultimatelyresmessageOnly specify one argument, fingerprint or keyidfingerprintresmessageFingerprint not found for keyid {0}fingerprintresmessageKeyID {0} not in GPG keychainresmessageRequired argument, fingerprint or keyidERROR: Valid trust levels - {0},{0}:{1}\n--import-ownertrustsaltconfig.getconfig_dirgpgkeys--homedirrootcmd.run_allretcoderesmessagestderrstderr\dstderrfingerprintmessageChanging ownership trust from {0} to {1}.fingerprintmessageSetting ownership trust to {0}.messagestderr'] return ret
Set the trust level for a key in GPG keychain keyid The keyid of the key to set the trust level for. fingerprint The fingerprint of the key to set the trust level for. trust_level The trust level to set for the specified key, must be one of the following: expired, unknown, not_trusted, marginally, fully, ultimately user Which user's keychain to access, defaults to user Salt is running as. Passing the user as ``salt`` will set the GnuPG home directory to the ``/etc/salt/gpgkeys``. CLI Example: .. code-block:: bash salt '*' gpg.trust_key keyid='3FAD9F1E' trust_level='marginally' salt '*' gpg.trust_key fingerprint='53C96788253E58416D20BCD352952C84C3252192' trust_level='not_trusted' salt '*' gpg.trust_key keys=3FAD9F1E trust_level='ultimately' user='username'
11,863
def _block(self, rdd, bsize, dtype): return rdd.mapPartitions(lambda x: _block_tuple(x, dtype, bsize))
Execute the blocking process on the given rdd. Parameters ---------- rdd : pyspark.rdd.RDD Distributed data to block bsize : int or None The desired size of the blocks Returns ------- rdd : pyspark.rdd.RDD Blocked rdd.
11,864
def _pop_import_LOAD_ATTRs(module_name, queue): popped = popwhile(is_a(instrs.LOAD_ATTR), queue, side=) if popped: expected = module_name.split(, maxsplit=1)[1] actual = .join(map(op.attrgetter(), popped)) if expected != actual: raise DecompilationError( "Decompiling import of module %s, but LOAD_ATTRS imply %s" % ( expected, actual, ) ) return popped
Pop LOAD_ATTR instructions for an import of the form:: import a.b.c as d which should generate bytecode like this:: 1 0 LOAD_CONST 0 (0) 3 LOAD_CONST 1 (None) 6 IMPORT_NAME 0 (a.b.c.d) 9 LOAD_ATTR 1 (b) 12 LOAD_ATTR 2 (c) 15 LOAD_ATTR 3 (d) 18 STORE_NAME 3 (d)
11,865
def on_all_ok(self): out_ddb = self.merge_ddb_files() return self.Results(node=self, returncode=0, message="DDB merge done")
This method is called when all tasks reach S_OK. It runs `mrgddb` in sequential on the local machine to produce the final DDB file in the outdir of the `Work`.
11,866
def linear_reaction_coefficients(model, reactions=None): linear_coefficients = {} reactions = model.reactions if not reactions else reactions try: objective_expression = model.solver.objective.expression coefficients = objective_expression.as_coefficients_dict() except AttributeError: return linear_coefficients for rxn in reactions: forward_coefficient = coefficients.get(rxn.forward_variable, 0) reverse_coefficient = coefficients.get(rxn.reverse_variable, 0) if forward_coefficient != 0: if forward_coefficient == -reverse_coefficient: linear_coefficients[rxn] = float(forward_coefficient) return linear_coefficients
Coefficient for the reactions in a linear objective. Parameters ---------- model : cobra model the model object that defined the objective reactions : list an optional list for the reactions to get the coefficients for. All reactions if left missing. Returns ------- dict A dictionary where the key is the reaction object and the value is the corresponding coefficient. Empty dictionary if there are no linear terms in the objective.
11,867
def describe_alarms(self, action_prefix=None, alarm_name_prefix=None, alarm_names=None, max_records=None, state_value=None, next_token=None): params = {} if action_prefix: params[] = action_prefix if alarm_name_prefix: params[] = alarm_name_prefix elif alarm_names: self.build_list_params(params, alarm_names, ) if max_records: params[] = max_records if next_token: params[] = next_token if state_value: params[] = state_value return self.get_list(, params, [(, MetricAlarms)])[0]
Retrieves alarms with the specified names. If no name is specified, all alarms for the user are returned. Alarms can be retrieved by using only a prefix for the alarm name, the alarm state, or a prefix for any action. :type action_prefix: string :param action_prefix: The action name prefix. :type alarm_name_prefix: string :param alarm_name_prefix: The alarm name prefix. AlarmNames cannot be specified if this parameter is specified. :type alarm_names: list :param alarm_names: A list of alarm names to retrieve information for. :type max_records: int :param max_records: The maximum number of alarm descriptions to retrieve. :type state_value: string :param state_value: The state value to be used in matching alarms. :type next_token: string :param next_token: The token returned by a previous call to indicate that there is more data. :rtype: list
11,868
def _check_hla_alleles( alleles, valid_alleles=None): require_iterable_of(alleles, string_types, "HLA alleles") missing_alleles = [ allele for allele in alleles if allele not in valid_alleles ] if len(missing_alleles) > 0: raise UnsupportedAllele( "Unsupported HLA alleles: %s" % missing_alleles) return list(alleles)
Given a list of HLA alleles and an optional list of valid HLA alleles, return a set of alleles that we will pass into the MHC binding predictor.
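To show the validation pattern without pulling in the predictor package, here is the same filter written in plain Python; the allele strings and the use of ValueError in place of UnsupportedAllele are illustrative only.

valid_alleles = {"HLA-A*02:01", "HLA-B*07:02"}
requested = ["HLA-A*02:01", "HLA-C*07:01"]

# Collect anything the predictor does not support and fail loudly.
try:
    missing = [allele for allele in requested if allele not in valid_alleles]
    if missing:
        raise ValueError("Unsupported HLA alleles: %s" % missing)
except ValueError as err:
    print(err)  # Unsupported HLA alleles: ['HLA-C*07:01']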
11,869
def which(program, add_win_suffixes=True):
    def is_exe(fpath):
        return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
    fpath, fname = os.path.split(program)
    if add_win_suffixes and platform.system().lower() == 'windows' and not (
            fname.endswith('.exe') or fname.endswith('.bat')):
        fnames = [fname + '.exe', fname + '.bat']
    else:
        fnames = [fname]
    for i in fnames:
        if fpath:
            exe_file = os.path.join(fpath, i)
            if is_exe(exe_file):
                return exe_file
        else:
            for path in os.environ['PATH'].split(os.pathsep):
                exe_file = os.path.join(path, i)
                if is_exe(exe_file):
                    return exe_file
    return None
Mimic 'which' command behavior. Adapted from https://stackoverflow.com/a/377028
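A quick check of the lookup, assuming the function above is in scope; the program name is arbitrary.

# Resolve an executable the same way a shell would.
interpreter = which("python3")
if interpreter is None:
    print("python3 is not on PATH")
else:
    print("python3 resolves to", interpreter)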
11,870
def prettify_json_file(file_list):
    for json_file in set(file_list):
        if not json_file.endswith(".json"):
            logger.log_warning("Only JSON file format can be prettified, skip: {}".format(json_file))
            continue
        logger.color_print("Start to prettify JSON file: {}".format(json_file), "GREEN")
        dir_path = os.path.dirname(json_file)
        file_name, file_suffix = os.path.splitext(os.path.basename(json_file))
        outfile = os.path.join(dir_path, "{}.pretty.json".format(file_name))
        with io.open(json_file, 'r', encoding='utf-8') as stream:
            try:
                obj = json.load(stream)
            except ValueError as e:
                raise SystemExit(e)
        with io.open(outfile, 'w', encoding='utf-8') as out:
            json.dump(obj, out, indent=4, separators=(',', ': '))
            out.write('\n')
        print("success: {}".format(outfile))
prettify JSON testcase format
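A small end-to-end run, assuming the function above is called inside its own module (it relies on that module's logger); the file name is made up.

import json

# Write a compact JSON file, then emit demo.pretty.json next to it.
with open("demo.json", "w") as handle:
    json.dump({"name": "demo", "steps": [1, 2, 3]}, handle)

prettify_json_file(["demo.json"])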
11,871
def latexify(obj, **kwargs):
    if hasattr(obj, '__pk_latex__'):
        return obj.__pk_latex__(**kwargs)
    if isinstance(obj, text_type):
        from .unicode_to_latex import unicode_to_latex
        return unicode_to_latex(obj)
    if isinstance(obj, bool):
        # There is no well-defined LaTeX rendering of a boolean.
        raise ValueError('no well-defined LaTeXification of bool %r' % obj)
    if isinstance(obj, float):
        nplaces = kwargs.get('nplaces')
        if nplaces is None:
            return '$%f$' % obj
        return '$%.*f$' % (nplaces, obj)
    if isinstance(obj, int):
        return '$%d$' % obj
    if isinstance(obj, binary_type):
        if all(c in _printable_ascii for c in obj):
            return obj.decode('ascii')
        raise ValueError('no safe LaTeXification of binary string %r' % obj)
    raise ValueError("can't LaTeXify %r" % obj)
Render an object in LaTeX appropriately.
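A few representative calls, assuming the function above is importable; the exact output strings are decided by the library, so the comments only indicate the general shape of the result.

print(latexify(3))                # an integer wrapped as LaTeX math, e.g. '$3$'
print(latexify(2.5, nplaces=2))   # a float rendered with two decimal places
print(latexify("50% of cases"))   # a text string with LaTeX-special characters escaped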
11,872
def _asarray(self, vec): shape = self.domain[0, 0].shape + self.pshape arr = np.empty(shape, dtype=self.domain.dtype) for i, xi in enumerate(vec): for j, xij in enumerate(xi): arr[..., i, j] = xij.asarray() return arr
Convert ``x`` to an array. Here the indices are changed such that the "outer" indices come last in order to have the access order as `numpy.linalg.svd` needs it. This is the inverse of `_asvector`.
11,873
def file_download(context, id, file_id, target): dci_file.download(context, id=id, file_id=file_id, target=target)
file_download(context, id, file_id, target) Download a job file >>> dcictl job-download-file [OPTIONS] :param string id: ID of the job to download the file from [required] :param string file_id: ID of the job file to download [required] :param string target: Destination file [required]
11,874
def add(addon, dev, interactive): application = get_current_application() application.add( addon, dev=dev, interactive=interactive )
Add a dependency. Examples: $ django add dynamic-rest==1.5.0 + dynamic-rest == 1.5.0
11,875
def get_locale(): * ret = lc_ctl = salt.utils.systemd.booted(__context__) if lc_ctl and not (__grains__[] in [] and __grains__[] in [12]): ret = (_parse_dbus_locale() if dbus is not None else _localectl_status()[]).get(, ) else: if in __grains__[]: cmd = elif in __grains__[]: cmd = elif in __grains__[]: cmd = elif in __grains__[]: cmd = return __salt__[](cmd).strip() elif in __grains__[]: cmd = else: return ret
Get the current system locale CLI Example: .. code-block:: bash salt '*' locale.get_locale
11,876
def libvlc_media_list_player_new(p_instance):
    f = _Cfunctions.get('libvlc_media_list_player_new', None) or \
        _Cfunction('libvlc_media_list_player_new', ((1,),), class_result(MediaListPlayer),
                   ctypes.c_void_p, Instance)
    return f(p_instance)
Create new media_list_player. @param p_instance: libvlc instance. @return: media list player instance or NULL on error.
11,877
def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None, metrics=(), obj=None, feval=None, maximize=False, early_stopping_rounds=None, fpreproc=None, as_pandas=True, verbose_eval=None, show_stdv=True, seed=0, callbacks=None, shuffle=True): if stratified is True and not SKLEARN_INSTALLED: raise XGBoostError() if isinstance(metrics, str): metrics = [metrics] if isinstance(params, list): _metrics = [x[1] for x in params if x[0] == ] params = dict(params) if in params: params[] = _metrics else: params = dict((k, v) for k, v in params.items()) if (not metrics) and in params: if isinstance(params[], list): metrics = params[] else: metrics = [params[]] params.pop("eval_metric", None) results = {} cvfolds = mknfold(dtrain, nfold, params, seed, metrics, fpreproc, stratified, folds, shuffle) callbacks = [] if callbacks is None else callbacks if early_stopping_rounds is not None: callbacks.append(callback.early_stop(early_stopping_rounds, maximize=maximize, verbose=False)) if isinstance(verbose_eval, bool) and verbose_eval: callbacks.append(callback.print_evaluation(show_stdv=show_stdv)) else: if isinstance(verbose_eval, int): callbacks.append(callback.print_evaluation(verbose_eval, show_stdv=show_stdv)) callbacks_before_iter = [ cb for cb in callbacks if cb.__dict__.get(, False)] callbacks_after_iter = [ cb for cb in callbacks if not cb.__dict__.get(, False)] for i in range(num_boost_round): for cb in callbacks_before_iter: cb(CallbackEnv(model=None, cvfolds=cvfolds, iteration=i, begin_iteration=0, end_iteration=num_boost_round, rank=0, evaluation_result_list=None)) for fold in cvfolds: fold.update(i, obj) res = aggcv([f.eval(i, feval) for f in cvfolds]) for key, mean, std in res: if key + not in results: results[key + ] = [] if key + not in results: results[key + ] = [] results[key + ].append(mean) results[key + ].append(std) try: for cb in callbacks_after_iter: cb(CallbackEnv(model=None, cvfolds=cvfolds, iteration=i, begin_iteration=0, end_iteration=num_boost_round, rank=0, evaluation_result_list=res)) except EarlyStopException as e: for k in results: results[k] = results[k][:(e.best_iteration + 1)] break if as_pandas: try: import pandas as pd results = pd.DataFrame.from_dict(results) except ImportError: pass return results
Cross-validation with given parameters. Parameters ---------- params : dict Booster params. dtrain : DMatrix Data to be trained. num_boost_round : int Number of boosting iterations. nfold : int Number of folds in CV. stratified : bool Perform stratified sampling. folds : a KFold or StratifiedKFold instance or list of fold indices Sklearn KFolds or StratifiedKFolds object. Alternatively may explicitly pass sample indices for each fold. For ``n`` folds, **folds** should be a length ``n`` list of tuples. Each tuple is ``(in,out)`` where ``in`` is a list of indices to be used as the training samples for the ``n`` th fold and ``out`` is a list of indices to be used as the testing samples for the ``n`` th fold. metrics : string or list of strings Evaluation metrics to be watched in CV. obj : function Custom objective function. feval : function Custom evaluation function. maximize : bool Whether to maximize feval. early_stopping_rounds: int Activates early stopping. CV error needs to decrease at least every <early_stopping_rounds> round(s) to continue. Last entry in evaluation history is the one from best iteration. fpreproc : function Preprocessing function that takes (dtrain, dtest, param) and returns transformed versions of those. as_pandas : bool, default True Return pd.DataFrame when pandas is installed. If False or pandas is not installed, return np.ndarray verbose_eval : bool, int, or None, default None Whether to display the progress. If None, progress will be displayed when np.ndarray is returned. If True, progress will be displayed at boosting stage. If an integer is given, progress will be displayed at every given `verbose_eval` boosting stage. show_stdv : bool, default True Whether to display the standard deviation in progress. Results are not affected, and always contains std. seed : int Seed used to generate the folds (passed to numpy.random.seed). callbacks : list of callback functions List of callback functions that are applied at end of each iteration. It is possible to use predefined callbacks by using :ref:`Callback API <callback_api>`. Example: .. code-block:: python [xgb.callback.reset_learning_rate(custom_rates)] shuffle : bool Shuffle data before creating folds. Returns ------- evaluation history : list(string)
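A minimal cross-validation run on synthetic data, assuming xgboost (and optionally pandas) is installed; the data and parameters are illustrative only.

import numpy as np
import xgboost as xgb

# Toy binary classification problem.
X = np.random.rand(200, 5)
y = (X[:, 0] + X[:, 1] > 1.0).astype(int)
dtrain = xgb.DMatrix(X, label=y)

params = {"objective": "binary:logistic", "max_depth": 3, "eta": 0.3}
history = xgb.cv(params, dtrain, num_boost_round=20, nfold=3,
                 metrics="error", early_stopping_rounds=5, seed=0)
# With pandas installed this is a DataFrame of train/test error means and stds.
print(history)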
11,878
def resample_melody_series(times, frequencies, voicing, times_new, kind='linear'):
    # If the timebases are already identical, pass the data through unchanged.
    if times.shape == times_new.shape and np.allclose(times, times_new):
        return frequencies, voicing.astype(np.bool)
    if not (np.allclose(np.diff(times), np.diff(times).mean()) or
            (np.allclose(np.diff(times[1:]), np.diff(times[1:]).mean()) and
             frequencies[0] == frequencies[1])):
        warnings.warn(
            "Non-uniform timescale passed to resample_melody_series. Pitch "
            "will be linearly interpolated, which will result in undesirable "
            "behavior if silences are indicated by missing values. Silences "
            "should be indicated by nonpositive frequency values.")
    # Round to avoid floating-point mismatch between the two time grids.
    times = np.round(times, 10)
    times_new = np.round(times_new, 10)
    if kind != 'zero' and kind != 'nearest':
        # Hold unvoiced (zero) frames at the previous pitch so interpolation
        # does not glide through silences, then re-zero them afterwards.
        frequencies_held = np.array(frequencies)
        for n, frequency in enumerate(frequencies):
            if frequency == 0:
                frequencies_held[n] = frequencies_held[n - 1]
        frequencies_resampled = scipy.interpolate.interp1d(
            times, frequencies_held, kind)(times_new)
        frequency_mask = scipy.interpolate.interp1d(
            times, frequencies, 'zero')(times_new)
        frequencies_resampled *= (frequency_mask != 0)
    else:
        frequencies_resampled = scipy.interpolate.interp1d(
            times, frequencies, kind)(times_new)
    # Voicing is resampled with zero-order hold and returned as boolean.
    voicing_resampled = scipy.interpolate.interp1d(times, voicing, 'zero')(times_new)
    return frequencies_resampled, voicing_resampled.astype(np.bool)
Resamples frequency and voicing time series to a new timescale. Maintains any zero ("unvoiced") values in frequencies. If ``times`` and ``times_new`` are equivalent, no resampling will be performed. Parameters ---------- times : np.ndarray Times of each frequency value frequencies : np.ndarray Array of frequency values, >= 0 voicing : np.ndarray Boolean array which indicates voiced or unvoiced times_new : np.ndarray Times to resample frequency and voicing sequences to kind : str kind parameter to pass to scipy.interpolate.interp1d. (Default value = 'linear') Returns ------- frequencies_resampled : np.ndarray Frequency array resampled to new timebase voicing_resampled : np.ndarray, dtype=bool Boolean voicing array resampled to new timebase
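A toy resampling call, assuming the function above (and scipy) is available; the pitch track is synthetic.

import numpy as np

# One second of pitch on a 10 ms grid: 220 Hz for the first half, silence after.
times = np.arange(0, 1.0, 0.01)
frequencies = np.where(times < 0.5, 220.0, 0.0)
voicing = frequencies > 0

# Resample onto a slightly finer, non-aligned grid.
times_new = np.arange(0, 1.0, 0.0058)
freqs_new, voicing_new = resample_melody_series(times, frequencies, voicing, times_new)
print(freqs_new.shape, voicing_new.dtype)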
11,879
def set_eol_chars(self, text): if not is_text_string(text): text = to_text_string(text) eol_chars = sourcecode.get_eol_chars(text) is_document_modified = eol_chars is not None and self.eol_chars is not None self.eol_chars = eol_chars if is_document_modified: self.document().setModified(True) if self.sig_eol_chars_changed is not None: self.sig_eol_chars_changed.emit(eol_chars)
Set widget end-of-line (EOL) characters from text (analyzes text)
11,880
def flip(self): self._load() groups = self.config.keys() tabular = {} for g in groups: config = self.config[g] for k in config: r = tabular.get(k, {}) r[g] = config[k] tabular[k] = r return tabular
Provide flip view to compare how key/value pair is defined in each environment for administrative usage. :rtype: dict
11,881
def contains_rva(self, rva): if (self.next_section_virtual_address is not None and self.next_section_virtual_address > self.VirtualAddress and VirtualAddress_adj + size > self.next_section_virtual_address): size = self.next_section_virtual_address - VirtualAddress_adj return VirtualAddress_adj <= rva < VirtualAddress_adj + size
Check whether the section contains the address provided.
11,882
def find_best_root(self, force_positive=True, slope=None): self._calculate_averages() best_root = {"chisq": np.inf} for n in self.tree.find_clades(): if n==self.tree.root: continue tv = self.tip_value(n) bv = self.branch_value(n) var = self.branch_variance(n) x, chisq = self._optimal_root_along_branch(n, tv, bv, var, slope=slope) if (chisq<best_root["chisq"]): tmpQ = self.propagate_averages(n, tv, bv*x, var*x) \ + self.propagate_averages(n, tv, bv*(1-x), var*(1-x), outgroup=True) reg = base_regression(tmpQ, slope=slope) if reg["slope"]>=0 or (force_positive==False): best_root = {"node":n, "split":x} best_root.update(reg) if not in best_root: print("TreeRegression.find_best_root: No valid root found!", force_positive) return None if in best_root: deriv = [] n = best_root["node"] tv = self.tip_value(n) bv = self.branch_value(n) var = self.branch_variance(n) for dx in [-0.001, 0.001]: y = min(1.0, max(0.0, best_root["split"]+dx)) tmpQ = self.propagate_averages(n, tv, bv*y, var*y) \ + self.propagate_averages(n, tv, bv*(1-y), var*(1-y), outgroup=True) reg = base_regression(tmpQ, slope=slope) deriv.append([y,reg[], tmpQ[tavgii], tmpQ[davgii]]) estimator_hessian = np.zeros((3,3)) estimator_hessian[:2,:2] = best_root[] estimator_hessian[2,2] = (deriv[0][1] + deriv[1][1] - 2.0*best_root[])/(deriv[0][0] - deriv[1][0])**2 estimator_hessian[0,2] = estimator_hessian[2,0] estimator_hessian[1,2] = estimator_hessian[2,1] best_root[] = estimator_hessian best_root[] = np.linalg.inv(estimator_hessian) return best_root
determine the position on the tree that minimizes the bilinear product of the inverse covariance and the data vectors. Returns ------- best_root : (dict) dictionary with the node, the fraction `x` at which the branch is to be split, and the regression parameters
11,883
def pipe_dateformat(context=None, _INPUT=None, conf=None, **kwargs):
    conf = DotDict(conf)
    loop_with = kwargs.pop('with', None)
    date_format = conf.get('format', **kwargs)
    for item in _INPUT:
        _with = item.get(loop_with, **kwargs) if loop_with else item
        try:
            date_string = time.strftime(date_format, _with)
        except TypeError as e:
            if context and context.verbose:
                print 'Error formatting date: %s' % item
                print e
            continue
        else:
            yield date_string
Formats a datetime value. Loopable. Parameters ---------- context : pipe2py.Context object _INPUT : pipedatebuilder pipe like object (iterable of date timetuples) conf : { 'format': {'value': <'%B %d, %Y'>}, 'timezone': {'value': <'EST'>} } Yields ------ _OUTPUT : formatted dates
11,884
def add(self, device): if not isinstance(device, Device): raise TypeError() self.__devices.append(device)
Add device.
11,885
def connect(self, From, to, protocolName, clientFactory, chooser): publicIP = self._determinePublicIP() A = dict(From=From, to=to, protocol=protocolName) if self.service.dispatcher is not None: A[] = (publicIP, self.service.sharedUDPPortnum) else: log.msg("dispatcher unavailable when connecting") D = self.callRemote(Inbound, **A) def _connected(answer): listenersD = defer.maybeDeferred(chooser, answer[]) def gotListeners(listeners): allConnectionAttempts = [] for listener in listeners: d = self.attemptConnectionMethods( listener[], listener[], From, to, protocolName, clientFactory, ) allConnectionAttempts.append(d) return defer.DeferredList(allConnectionAttempts) listenersD.addCallback(gotListeners) def finishedAllAttempts(results): succeededAny = False failures = [] if not results: return Failure(NoAttemptsMade( "there was no available path for connections " "(%r->%r/%s)" % (From, to, protocolName))) for succeeded, result in results: if succeeded: succeededAny = True randomConnection = result break else: failures.append(result) if not succeededAny: return Failure( AttemptsFailed( [failure.getBriefTraceback() for failure in failures] ) ) return randomConnection return listenersD.addCallback(finishedAllAttempts) return D.addCallback(_connected)
Issue an INBOUND command, creating a virtual connection to the peer, given identifying information about the endpoint to connect to, and a protocol factory. @param clientFactory: a *Client* ProtocolFactory instance which will generate a protocol upon connect. @return: a Deferred which fires with the protocol instance that was connected, or fails with AttemptsFailed if the connection was not possible.
11,886
def pickle_dump(self): with open(os.path.join(self.workdir, self.PICKLE_FNAME), mode="wb") as fh: pickle.dump(self, fh)
Save the status of the object in pickle format.
11,887
def mcp_als(X, rank, mask, random_state=None, init=, **options): optim_utils._check_cpd_inputs(X, rank) U, _ = optim_utils._get_initial_ktensor(init, X, rank, random_state, scale_norm=False) result = FitResult(U, , **options) normX = np.linalg.norm((X * mask)) while result.still_optimizing: for n in range(X.ndim): U.rebalance() unf = unfold(X, n) m = unfold(mask, n) components = [U[j] for j in range(X.ndim) if j != n] krt = khatri_rao(components).T lhs_stack = np.matmul(m[:, None, :] * krt[None, :, :], krt.T[None, :, :]) rhs_stack = np.dot(unf * m, krt.T)[:, :, None] U[n] = np.linalg.solve(lhs_stack, rhs_stack).reshape(X.shape[n], rank) obj = linalg.norm(mask * (U.full() - X)) / normX result.update(obj) return result.finalize()
Fits CP Decomposition with missing data using Alternating Least Squares (ALS). Parameters ---------- X : (I_1, ..., I_N) array_like A tensor with ``X.ndim >= 3``. rank : integer The `rank` sets the number of components to be computed. mask : (I_1, ..., I_N) array_like A binary tensor with the same shape as ``X``. All entries equal to zero correspond to held out or missing data in ``X``. All entries equal to one correspond to observed entries in ``X`` and the decomposition is fit to these datapoints. random_state : integer, ``RandomState``, or ``None``, optional (default ``None``) If integer, sets the seed of the random number generator; If RandomState instance, random_state is the random number generator; If None, use the RandomState instance used by ``numpy.random``. init : str, or KTensor, optional (default ``'randn'``). Specifies initial guess for KTensor factor matrices. If ``'randn'``, Gaussian random numbers are used to initialize. If ``'rand'``, uniform random numbers are used to initialize. If KTensor instance, a copy is made to initialize the optimization. options : dict, specifying fitting options. tol : float, optional (default ``tol=1E-5``) Stopping tolerance for reconstruction error. max_iter : integer, optional (default ``max_iter = 500``) Maximum number of iterations to perform before exiting. min_iter : integer, optional (default ``min_iter = 1``) Minimum number of iterations to perform before exiting. max_time : integer, optional (default ``max_time = np.inf``) Maximum computational time before exiting. verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``) Display progress. Returns ------- result : FitResult instance Object which holds the fitted results. It provides the factor matrices in form of a KTensor, ``result.factors``. Notes ----- Fitting CP decompositions with missing data can be exploited to perform cross-validation. References ---------- Williams, A. H. "Solving Least-Squares Regression with Missing Data." http://alexhwilliams.info/itsneuronalblog/2018/02/26/censored-lstsq/
11,888
def display_task_progress( self, instance, project, region, request_id=None, user=None, poll_interval=60): total_completed = 0 while True: task_results = self.client.get_task_data( instance, project, region, request_id=request_id, user=user) tasks = {task[]: task for task in task_results} completed_tasks = set() pending_tasks = set() for task in tasks.values(): if task.get() is not None: completed_tasks.add(task[]) else: pending_tasks.add(task[]) if len(completed_tasks) > total_completed or not completed_tasks: total_completed = len(completed_tasks) print(.format( len(completed_tasks), len(pending_tasks))) print() for task_id in completed_tasks: self._print_task_data(tasks[task_id]) print() for task_id in pending_tasks: self._print_task_data(tasks[task_id]) if len(completed_tasks) == len(task_results) and completed_tasks: print(.format(len(task_results))) return time.sleep(poll_interval)
Displays the overall progress of tasks in a Turbinia job. Args: instance (string): The name of the Turbinia instance project (string): The project containing the disk to process region (string): Region where turbinia is configured. request_id (string): The request ID provided by Turbinia. user (string): The username to filter tasks by. poll_interval (int): The interval at which to poll for new results.
11,889
def is_valid_filename(filename, return_ext=False):
    ext = Path(filename).suffixes
    if len(ext) > 2:
        logg.warn('Your filename has more than two extensions: {}, '
                  'only considering the last two: {}'.format(ext, ext[-2:]))
        ext = ext[-2:]
    # Compressed plain-text files: e.g. '.csv.gz' or '.txt.bz2'.
    if len(ext) == 2 and ext[0][1:] in text_exts and ext[1][1:] in ('gz', 'bz2'):
        return ext[0][1:] if return_ext else True
    elif ext and ext[-1][1:] in avail_exts:
        return ext[-1][1:] if return_ext else True
    elif ''.join(ext) == '.soft.gz':
        return 'soft.gz' if return_ext else True
    elif ''.join(ext) == '.mtx.gz':
        return 'mtx.gz' if return_ext else True
    else:
        if return_ext:
            raise ValueError('"{}" does not end on a valid extension.\n'
                             'Please provide one of the available extensions:\n{}'
                             .format(filename, avail_exts))
        else:
            return False
Check whether the argument is a filename.
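These calls only inspect the file name, not the disk; what counts as valid depends on the module-level sets of known extensions, so the expected results in the comments are indicative only.

print(is_valid_filename("counts.csv.gz", return_ext=True))   # 'csv', if csv is a known text extension
print(is_valid_filename("matrix.mtx.gz", return_ext=True))   # 'mtx.gz'
print(is_valid_filename("archive.zip"))                      # False for an unknown extension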
11,890
def fuzzy_get_value(obj, approximate_key, default=None, **kwargs): dict_obj = OrderedDict(obj) try: return dict_obj[list(dict_obj.keys())[int(approximate_key)]] except (ValueError, IndexError): pass return fuzzy_get(dict_obj, approximate_key, key_and_value=False, **kwargs)
Like fuzzy_get, but assume the obj is dict-like and return the value without the key Notes: Argument order is in reverse order relative to `fuzzywuzzy.process.extractOne()` but in the same order as get(self, key) method on dicts Arguments: obj (dict-like): object to run the get method on using the key that is most similar to one within the dict approximate_key (str): key to look for a fuzzy match within the dict keys default (obj): the value to return if a similar key cannote be found in the `possible_keys` similarity (str): fractional similiarity between the approximate_key and the dict key (0.9 means 90% of characters must be identical) tuple_joiner (str): Character to use as delimitter/joiner between tuple elements. Used to create keys of any tuples to be able to use fuzzywuzzy string matching on it. key_and_value (bool): Whether to return both the key and its value (True) or just the value (False). Default is the same behavior as dict.get (i.e. key_and_value=False) dict_keys (list of str): if you already have a set of keys to search, this will save this funciton a little time and RAM Examples: >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e')}, 'sail') == set(['e']) True >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'SLR') 2.7 >>> fuzzy_get_value({'seller': 2.7, 'sailor': set('e'), 'camera': object()}, 'I') == set(['e']) True >>> fuzzy_get_value({'word': tuple('word'), 'noun': tuple('noun')}, 'woh!', similarity=.3) ('w', 'o', 'r', 'd') >>> df = pd.DataFrame(np.arange(6*2).reshape(2,6), columns=('alpha','beta','omega','begin','life','end')) >>> fuzzy_get_value(df, 'life')[0], fuzzy_get(df, 'omega')[0] (4, 2)
11,891
def render(self, template, context=None, at_paths=None, at_encoding=anytemplate.compat.ENCODING, **kwargs): kwargs = self.filter_options(kwargs, self.render_valid_options()) paths = anytemplate.utils.mk_template_paths(template, at_paths) if context is None: context = {} LOGGER.debug("Render template %s %s context, options=%s", template, "without" if context is None else "with a", str(kwargs)) return self.render_impl(template, context, at_paths=paths, at_encoding=at_encoding, **kwargs)
:param template: Template file path :param context: A dict or dict-like object to instantiate given template file :param at_paths: Template search paths :param at_encoding: Template encoding :param kwargs: Keyword arguments passed to the template engine to render templates with specific features enabled. :return: Rendered string
11,892
def covar(X, remove_mean=False, modify_data=False, weights=None, sparse_mode=, sparse_tol=0.0): w, s, M = moments_XX(X, remove_mean=remove_mean, weights=weights, modify_data=modify_data, sparse_mode=sparse_mode, sparse_tol=sparse_tol) return M / float(w)
Computes the covariance matrix of X Computes .. math: C_XX &=& X^\top X while exploiting zero or constant columns in the data matrix. WARNING: Directly use moments_XX if you can. This function does an additional constant-matrix multiplication and does not return the mean. Parameters ---------- X : ndarray (T, M) Data matrix remove_mean : bool True: remove column mean from the data, False: don't remove mean. modify_data : bool If remove_mean=True, the mean will be removed in the data matrix X, without creating an independent copy. This option is faster but might lead to surprises because your input array is changed. weights : None or ndarray(T, ) weights assigned to each trajectory point of X. If None, all data points have weight one. If ndarray, each data point is assigned a separate weight. sparse_mode : str one of: * 'dense' : always use dense mode * 'sparse' : always use sparse mode if possible * 'auto' : automatic sparse_tol: float Threshold for considering column to be zero in order to save computing effort when the data is sparse or almost sparse. If max(abs(X[:, i])) < sparse_tol, then row i (and also column i if Y is not given) of the covariance matrix will be set to zero. If Y is given and max(abs(Y[:, i])) < sparse_tol, then column i of the covariance matrix will be set to zero. Returns ------- C_XX : ndarray (M, M) Covariance matrix of X See also -------- moments_XX
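A quick numerical sanity check, assuming the function above is importable in the current scope; note that numpy.cov normalises by T-1 while covar divides by the total weight T, so the two only agree up to a small tolerance.

import numpy as np

X = np.random.randn(1000, 3)
C = covar(X, remove_mean=True)
# Compare against numpy's estimator (which uses T-1 in the denominator).
print(np.allclose(C, np.cov(X, rowvar=False), atol=1e-2))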
11,893
def asset_path(path, format_kwargs={}, keep_slash=False):
    if format_kwargs:
        path = path.format_map(format_kwargs)
    has_slash = path.endswith(os.sep)
    if ':' in path:
        package_name, *rel_path = path.split(':', 1)
    else:
        package_name, rel_path = path, ()
    try:
        package = importlib.import_module(package_name)
    except ImportError:
        raise ValueError(
            'Could not import package {package_name} for asset path {path}'
            .format_map(locals()))
    if not hasattr(package, '__file__'):
        raise ValueError(
            "Can't compute asset path for namespace package: {package_name}"
            .format_map(locals()))
    # Resolve the asset relative to the package directory, re-appending the
    # trailing slash only when keep_slash is set (see the doctests below).
    package_dir = os.path.dirname(os.path.normpath(package.__file__))
    path = os.path.normpath(os.path.join(package_dir, *rel_path))
    if has_slash and keep_slash:
        path = '{path}{slash}'.format(path=path, slash=os.sep)
    return path
Get absolute path to asset in package. ``path`` can be just a package name like 'package' or it can be a package name and a relative file system path like 'package:util'. If ``path`` ends with a slash, it will be stripped unless ``keep_slash`` is set (for use with ``rsync``, for example). >>> file_path = os.path.normpath(__file__) >>> dir_name = os.path.dirname(file_path) >>> file_name = os.path.basename(file_path) >>> os.chdir(dir_name) >>> >>> asset_path('runcommands.util') == dir_name True >>> asset_path('runcommands.util:path.py') == file_path True >>> asset_path('runcommands.util:{name}.py', format_kwargs={'name': 'path'}) == file_path True >>> asset_path('runcommands.util:dir/') == (dir_name + '/dir') True >>> asset_path('runcommands.util:dir/', keep_slash=True) == (dir_name + '/dir/') True
11,894
def price_options(S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=10000):
    import numpy as np
    from math import exp, sqrt
    h = 1.0/days
    const1 = exp((r-0.5*sigma**2)*h)
    const2 = sigma*sqrt(h)
    stock_price = S*np.ones(paths, dtype='float64')
    stock_price_sum = np.zeros(paths, dtype='float64')
    for j in range(days):
        growth_factor = const1*np.exp(const2*np.random.standard_normal(paths))
        stock_price = stock_price*growth_factor
        stock_price_sum = stock_price_sum + stock_price
    stock_price_avg = stock_price_sum/days
    zeros = np.zeros(paths, dtype='float64')
    r_factor = exp(-r*h*days)
    euro_put = r_factor*np.mean(np.maximum(zeros, K-stock_price))
    asian_put = r_factor*np.mean(np.maximum(zeros, K-stock_price_avg))
    euro_call = r_factor*np.mean(np.maximum(zeros, stock_price-K))
    asian_call = r_factor*np.mean(np.maximum(zeros, stock_price_avg-K))
    return (euro_call, euro_put, asian_call, asian_put)
Price European and Asian options using a Monte Carlo method. Parameters ---------- S : float The initial price of the stock. K : float The strike price of the option. sigma : float The volatility of the stock. r : float The risk free interest rate. days : int The number of days until the option expires. paths : int The number of Monte Carlo paths used to price the option. Returns ------- A tuple of (E. call, E. put, A. call, A. put) option prices.
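Pricing at-the-money options with the defaults; the Monte Carlo estimates fluctuate with the random seed and number of paths, so the printed values are only indicative.

euro_call, euro_put, asian_call, asian_put = price_options(
    S=100.0, K=100.0, sigma=0.25, r=0.05, days=260, paths=20000)
print("European call/put:", euro_call, euro_put)
print("Asian    call/put:", asian_call, asian_put)
# Averaging over the path damps volatility, so the Asian prices sit below the European ones.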
11,895
def _add_constraints(self, relation): expression = relation.expression constr_count = sum(True for _ in expression.value_sets()) if constr_count == 0: return [] row_indices = count(swiglpk.glp_add_rows(self._p, constr_count)) names = [] for i, value_set in zip(row_indices, expression.value_sets()): value_set = list(value_set) var_indices = swiglpk.intArray(1 + len(value_set)) var_values = swiglpk.doubleArray(1 + len(value_set)) for j, (variable, coeff) in enumerate(value_set): var_indices[1 + j] = self._variables[variable] var_values[1 + j] = float(coeff) swiglpk.glp_set_mat_row( self._p, i, len(value_set), var_indices, var_values) if relation.sense == RelationSense.Greater: swiglpk.glp_set_row_bnds( self._p, i, swiglpk.GLP_LO, -float(expression.offset), 0) elif relation.sense == RelationSense.Less: swiglpk.glp_set_row_bnds( self._p, i, swiglpk.GLP_UP, 0, -float(expression.offset)) else: swiglpk.glp_set_row_bnds( self._p, i, swiglpk.GLP_FX, -float(expression.offset), 0) names.append(i) self._do_presolve = True return names
Add the given relation as one or more constraints. Return a list of the names of the constraints added.
11,896
def format_path(path):
    if path[1]:
        port = ':' + path[1]
    else:
        port = ''
    if type(path[0]) is str:
        # A path that is already a single string is returned as-is (plus port).
        return path[0] + port
    if path[0][0] == '/':
        # Absolute paths start with a lone '/' component; avoid doubling it.
        return '/' + '/'.join(path[0][1:]) + port
    return '/'.join(path[0]) + port
Formats a path as a string, placing / between each component. @param path A path in rtctree format, as a tuple with the port name as the second component. Examples: >>> format_path((['localhost:30000', 'manager', 'comp0.rtc'], None)) 'localhost:30000/manager/comp0.rtc' >>> format_path((['localhost', 'manager', 'comp0.rtc'], 'in')) 'localhost/manager/comp0.rtc:in' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], None)) '/localhost/manager/comp0.rtc' >>> format_path((['/', 'localhost', 'manager', 'comp0.rtc'], 'in')) '/localhost/manager/comp0.rtc:in' >>> format_path((['manager', 'comp0.rtc'], None)) 'manager/comp0.rtc' >>> format_path((['comp0.rtc'], None)) 'comp0.rtc'
11,897
def add_pegasus_profile(self, namespace, key, value): self.__pegasus_profile.append((str(namespace),str(key),str(value)))
Add a Pegasus profile to this job which will be written to the dax as <profile namespace="NAMESPACE" key="KEY">VALUE</profile> This can be used to add classads to particular jobs in the DAX @param namespace: A valid Pegasus namespace, e.g. condor. @param key: The name of the attribute. @param value: The value of the attribute.
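A sketch of how the profile ends up in the DAX, assuming `job` is an instance of the class this method belongs to; the namespace/key/value triple is only an example.

# Ask Condor for 2 GB of memory for this job.
job.add_pegasus_profile("condor", "request_memory", "2048")
# Serialises roughly as: <profile namespace="condor" key="request_memory">2048</profile>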
11,898
def repackage_to_staging(output_path): import google.datalab.ml as ml package_root = os.path.join(os.path.dirname(__file__), ) setup_py = os.path.join(os.path.dirname(__file__), ) staging_package_url = os.path.join(output_path, , ) ml.package_and_copy(package_root, setup_py, staging_package_url) return staging_package_url
Repackage it from local installed location and copy it to GCS.
11,899
def create_qualification_type(Name=None, Keywords=None, Description=None, QualificationTypeStatus=None, RetryDelayInSeconds=None, Test=None, AnswerKey=None, TestDurationInSeconds=None, AutoGranted=None, AutoGrantedValue=None): pass
The CreateQualificationType operation creates a new Qualification type, which is represented by a QualificationType data structure. See also: AWS API Documentation :example: response = client.create_qualification_type( Name='string', Keywords='string', Description='string', QualificationTypeStatus='Active'|'Inactive', RetryDelayInSeconds=123, Test='string', AnswerKey='string', TestDurationInSeconds=123, AutoGranted=True|False, AutoGrantedValue=123 ) :type Name: string :param Name: [REQUIRED] The name you give to the Qualification type. The type name is used to represent the Qualification to Workers, and to find the type using a Qualification type search. It must be unique across all of your Qualification types. :type Keywords: string :param Keywords: One or more words or phrases that describe the Qualification type, separated by commas. The keywords of a type make the type easier to find during a search. :type Description: string :param Description: [REQUIRED] A long description for the Qualification type. On the Amazon Mechanical Turk website, the long description is displayed when a Worker examines a Qualification type. :type QualificationTypeStatus: string :param QualificationTypeStatus: [REQUIRED] The initial status of the Qualification type. Constraints: Valid values are: Active | Inactive :type RetryDelayInSeconds: integer :param RetryDelayInSeconds: The number of seconds that a Worker must wait after requesting a Qualification of the Qualification type before the worker can retry the Qualification request. Constraints: None. If not specified, retries are disabled and Workers can request a Qualification of this type only once, even if the Worker has not been granted the Qualification. It is not possible to disable retries for a Qualification type after it has been created with retries enabled. If you want to disable retries, you must delete existing retry-enabled Qualification type and then create a new Qualification type with retries disabled. :type Test: string :param Test: The questions for the Qualification test a Worker must answer correctly to obtain a Qualification of this type. If this parameter is specified, TestDurationInSeconds must also be specified. Constraints: Must not be longer than 65535 bytes. Must be a QuestionForm data structure. This parameter cannot be specified if AutoGranted is true. Constraints: None. If not specified, the Worker may request the Qualification without answering any questions. :type AnswerKey: string :param AnswerKey: The answers to the Qualification test specified in the Test parameter, in the form of an AnswerKey data structure. Constraints: Must not be longer than 65535 bytes. Constraints: None. If not specified, you must process Qualification requests manually. :type TestDurationInSeconds: integer :param TestDurationInSeconds: The number of seconds the Worker has to complete the Qualification test, starting from the time the Worker requests the Qualification. :type AutoGranted: boolean :param AutoGranted: Specifies whether requests for the Qualification type are granted immediately, without prompting the Worker with a Qualification test. Constraints: If the Test parameter is specified, this parameter cannot be true. :type AutoGrantedValue: integer :param AutoGrantedValue: The Qualification value to use for automatically granted Qualifications. This parameter is used only if the AutoGranted parameter is true. 
:rtype: dict :return: { 'QualificationType': { 'QualificationTypeId': 'string', 'CreationTime': datetime(2015, 1, 1), 'Name': 'string', 'Description': 'string', 'Keywords': 'string', 'QualificationTypeStatus': 'Active'|'Inactive', 'Test': 'string', 'TestDurationInSeconds': 123, 'AnswerKey': 'string', 'RetryDelayInSeconds': 123, 'IsRequestable': True|False, 'AutoGranted': True|False, 'AutoGrantedValue': 123 } }