Columns:
  id — int64 (values 11 – 59.9k)
  original — string (lengths 33 – 150k)
  modified — string (lengths 37 – 150k)
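Each row below pairs an "original" code snippet with its "modified" counterpart under an integer id. A minimal sketch of how such a table could be iterated, assuming the rows are exported as JSON Lines with exactly these three columns (the file name code_pairs.jsonl is hypothetical, not part of the dataset):

import json

# Iterate the (original, modified) code pairs from a JSON Lines export of this table.
# The file name is an assumption for illustration only.
with open("code_pairs.jsonl", encoding="utf-8") as handle:
    for line in handle:
        row = json.loads(line)
        pair_id = row["id"]          # int64 identifier
        original = row["original"]   # code before the change
        modified = row["modified"]   # code after the change
        print(pair_id, len(original), len(modified))

Any export with the same three fields (CSV, parquet) would work the same way; only the reader changes.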
32,339
def main() -> None:  # pragma: no cover
    demisto_params = demisto.params()
    url = urljoin(demisto_params['url'], '/api/v4/')
    events_collection_management = {
        'groups_ids': argToList(demisto_params.get('group_ids', '')),
        'projects_ids': argToList(demisto_params.get('project_ids', '')),
        'event_types': ['groups', 'projects']
    }
    user_ids = demisto_params.get('user_ids', '').split(',')
    headers = {'PRIVATE-TOKEN': demisto_params.get('api_key', {}).get('credentials', {}).get('password')}
    request_object = {
        'method': Method.GET,
        'url': url,
        'headers': headers,
    }
    last_run = demisto.getLastRun()
    if ('groups', 'projects', 'events') not in last_run:
        last_run = dateparser.parse(demisto_params['after'].strip()).isoformat()
        last_run = {
            'groups': last_run,
            'projects': last_run,
            'events': last_run,
        }
    else:
        last_run = last_run
    options = IntegrationOptions(**demisto_params)
    request = IntegrationHTTPRequest(**request_object)
    client = Client(request, options, last_run)
    get_events = GetEvents(client, options)
    command = demisto.command()
    try:
        events = []
        for event_type in events_collection_management['event_types']:
            for obj_id in events_collection_management[f'{event_type}_ids']:
                call_url_suffix = f'{event_type}/{obj_id}/audit_events'
                get_events.client.request.url = url + call_url_suffix
                get_events.client.page = 1
                get_events.client.event_type = event_type
                events.extend(get_events.run())
        get_events.client.event_type = 'events'
        for obj_id in user_ids:
            get_events.client.request.url = url + f'users/{obj_id}/events'
            get_events.client.page = 1
            events.extend(get_events.run())
        if command == 'test-module':
            return_results('ok')
            return
        for event in events:
            if 'details' in event:
                for action in ['add', 'change', 'remove']:
                    if action in event['details']:
                        event['details']['action'] = f'{action}_{event["details"][action]}'
                        event['details']['action_type'] = action
                        event["details"]['action_category'] = event['details'][action]
                        break
        if command == 'gitlab-get-events':
            command_results = CommandResults(
                readable_output=tableToMarkdown('gitlab Logs', events, headerTransform=pascalToSpace),
                raw_response=events,
            )
            return_results(command_results)
        elif command == 'fetch-events':
            demisto.setLastRun(get_events.get_last_run(events))
            demisto_params['push_events'] = True
            if demisto_params.get('push_events'):
                send_events_to_xsiam(events, demisto_params.get('vendor', 'gitlab'), demisto_params.get('product', 'gitlab'))
        else:
            return_error(f'Command not found: {command}')
    except Exception as exc:
        return_error(f'Failed to execute {command} command.\nError:\n{str(exc)}', error=exc)
def main() -> None:  # pragma: no cover
    demisto_params = demisto.params()
    url = urljoin(demisto_params['url'], '/api/v4/')
    events_collection_management = {
        'groups_ids': argToList(demisto_params.get('group_ids', '')),
        'projects_ids': argToList(demisto_params.get('project_ids', '')),
        'event_types': ['groups', 'projects']
    }
    user_ids = demisto_params.get('user_ids', '').split(',')
    headers = {'PRIVATE-TOKEN': demisto_params.get('api_key', {}).get('password')}
    request_object = {
        'method': Method.GET,
        'url': url,
        'headers': headers,
    }
    last_run = demisto.getLastRun()
    if ('groups', 'projects', 'events') not in last_run:
        last_run = dateparser.parse(demisto_params['after'].strip()).isoformat()
        last_run = {
            'groups': last_run,
            'projects': last_run,
            'events': last_run,
        }
    else:
        last_run = last_run
    options = IntegrationOptions(**demisto_params)
    request = IntegrationHTTPRequest(**request_object)
    client = Client(request, options, last_run)
    get_events = GetEvents(client, options)
    command = demisto.command()
    try:
        events = []
        for event_type in events_collection_management['event_types']:
            for obj_id in events_collection_management[f'{event_type}_ids']:
                call_url_suffix = f'{event_type}/{obj_id}/audit_events'
                get_events.client.request.url = url + call_url_suffix
                get_events.client.page = 1
                get_events.client.event_type = event_type
                events.extend(get_events.run())
        get_events.client.event_type = 'events'
        for obj_id in user_ids:
            get_events.client.request.url = url + f'users/{obj_id}/events'
            get_events.client.page = 1
            events.extend(get_events.run())
        if command == 'test-module':
            return_results('ok')
            return
        for event in events:
            if 'details' in event:
                for action in ['add', 'change', 'remove']:
                    if action in event['details']:
                        event['details']['action'] = f'{action}_{event["details"][action]}'
                        event['details']['action_type'] = action
                        event["details"]['action_category'] = event['details'][action]
                        break
        if command == 'gitlab-get-events':
            command_results = CommandResults(
                readable_output=tableToMarkdown('gitlab Logs', events, headerTransform=pascalToSpace),
                raw_response=events,
            )
            return_results(command_results)
        elif command == 'fetch-events':
            demisto.setLastRun(get_events.get_last_run(events))
            demisto_params['push_events'] = True
            if demisto_params.get('push_events'):
                send_events_to_xsiam(events, demisto_params.get('vendor', 'gitlab'), demisto_params.get('product', 'gitlab'))
        else:
            return_error(f'Command not found: {command}')
    except Exception as exc:
        return_error(f'Failed to execute {command} command.\nError:\n{str(exc)}', error=exc)
31,356
def list_persons_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Get persons list from TOPdesk"""
    persons = client.get_list_with_query(list_type="persons",
                                         start=args.get('start', None),
                                         page_size=args.get('page_size', None),
                                         query=args.get('query', None))
    if len(persons) == 0:
        return CommandResults(readable_output='No persons found')

    headers = ['id', 'name', 'telephone', 'job title', 'department', 'city', 'branch name', 'room']

    readable_persons = []
    for person in persons:
        readable_person = {
            'id': person.get('id', None),
            'name': person.get('dynamicName', None),
            'telephone': person.get('phoneNumber', None),
            'job title': person.get('jobTitle', None),
            'department': person.get('department', None),
            'city': person.get('city', None),
            'branch name': replace_none(person.get('branch', {}), {}).get('name', None),
            'room': None
        }
        if person.get('location', None):
            readable_person['room'] = person.get('location', None).get('room', None)
        readable_persons.append(readable_person)

    readable_output = tableToMarkdown(f'{INTEGRATION_NAME} persons',
                                      readable_persons,
                                      headers=headers,
                                      removeNull=True)
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix=f'{INTEGRATION_NAME}.person',
        outputs_key_field='id',
        outputs=persons
    )
def list_persons_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """Get persons list from TOPdesk"""
    persons = client.get_list_with_query(list_type="persons",
                                         start=args.get('start', None),
                                         page_size=args.get('page_size', None),
                                         query=args.get('query', None))
    if len(persons) == 0:
        return CommandResults(readable_output='No persons found')

    headers = ['id', 'name', 'telephone', 'job title', 'department', 'city', 'branch name', 'room']

    readable_persons = []
    for person in persons:
        readable_person = {
            'id': person.get('id', None),
            'name': person.get('dynamicName', None),
            'telephone': person.get('phoneNumber', None),
            'job title': person.get('jobTitle', None),
            'department': person.get('department', None),
            'city': person.get('city', None),
            'branch name': person.get('branch', {}).get('name', None) if person.get('branch') else None,
            'room': None
        }
        if person.get('location', None):
            readable_person['room'] = person.get('location', None).get('room', None)
        readable_persons.append(readable_person)

    readable_output = tableToMarkdown(f'{INTEGRATION_NAME} persons',
                                      readable_persons,
                                      headers=headers,
                                      removeNull=True)
    return CommandResults(
        readable_output=readable_output,
        outputs_prefix=f'{INTEGRATION_NAME}.person',
        outputs_key_field='id',
        outputs=persons
    )
38,081
def test_earth_relief_holes():
    """
    Check that the @earth_relief_20m_holes.grd dataset loads without errors.
    """
    grid = load_sample_data("earth_relief_holes")
    assert grid.shape == (30, 30)
    npt.assert_allclose(grid.max(), 1878)
    npt.assert_allclose(grid.min(), -4947)
    # Test for the NaN values in the remote file
    assert math.isnan(grid[2, 19])
def test_earth_relief_holes():
    """
    Check that the @earth_relief_20m_holes.grd dataset loads without errors.
    """
    grid = load_sample_data("earth_relief_holes")
    assert grid.shape == (30, 30)
    npt.assert_allclose(grid.max(), 1878)
    npt.assert_allclose(grid.min(), -4947)
    # Test for the NaN values in the remote file
    assert grid[2, 19].isnull()
58,172
def search_incidents(args: Dict):  # pragma: no cover
    if not is_valid_args(args):
        return

    if fromdate := arg_to_datetime(args.get('fromdate')):
        from_date = fromdate.isoformat()
        args['fromdate'] = from_date
    if todate := arg_to_datetime(args.get('todate')):
        to_date = todate.isoformat()
        args['todate'] = to_date
    if args.get('trimevents') == '0':
        args.pop('trimevents')

    # handle list of ids
    if args.get('id'):
        args['id'] = ','.join(argToList(args.get('id'), transform=str))

    res: List = execute_command('getIncidents', args, extract_contents=False)
    incident_found: bool = check_if_found_incident(res)
    if incident_found is False:
        return 'Incidents not found.', {}, {}
    else:
        data = apply_filters(res[0]['Contents']['data'], args)
        data = add_incidents_link(data)
        if len(data) == 0:
            return 'Incidents not found.', {}, {}
        headers: List[str] = ['id', 'name', 'severity', 'status', 'owner', 'created', 'closed', 'incidentLink']
        md: str = tableToMarkdown(name="Incidents found", t=data, headers=headers)
        return md, data, res
def search_incidents(args: Dict):  # pragma: no cover
    if not is_valid_args(args):
        return

    if fromdate := arg_to_datetime(args.get('fromdate')):
        from_date = fromdate.isoformat()
        args['fromdate'] = from_date
    if todate := arg_to_datetime(args.get('todate')):
        to_date = todate.isoformat()
        args['todate'] = to_date
    if args.get('trimevents') == '0':
        args.pop('trimevents')

    # handle list of ids
    if args.get('id'):
        args['id'] = ','.join(argToList(args.get('id'), transform=str))

    res: List = execute_command('getIncidents', args, extract_contents=False)
    incident_found: bool = check_if_found_incident(res)
    if incident_found is False:
        return 'Incidents not found.', {}, {}

    data = apply_filters(res[0]['Contents']['data'], args)
    data = add_incidents_link(data)
    if not data:
        return 'Incidents not found.', {}, {}
    headers: List[str] = ['id', 'name', 'severity', 'status', 'owner', 'created', 'closed', 'incidentLink']
    md: str = tableToMarkdown(name="Incidents found", t=data, headers=headers)
    return md, data, res
46,427
def _get_matching_index(ts_target: TimeSeries, ts_covariate: TimeSeries, idx: int):
    """
    Given two overlapping series `ts_target` and `ts_covariate` and an index point of `idx`, returns the matching
    index point in `ts_covariate`, based on the ending times of the two series.
    The indices are starting from the end of the series.

    This function is used to jointly slice target and covariate series in datasets. It supports both datetime
    and integer indexed series.

    Note: this function does not check if the matching index value is in `ts_covariate` or not.
    """
    raise_if_not(ts_target.freq == ts_covariate.freq,
                 'The dataset contains some target/covariates series pair that have incompatible '
                 'time axes (not the same "freq") and thus cannot be matched')

    freq = ts_target.freq

    if isinstance(freq, int):
        return idx + int(ts_covariate.end_time() - ts_target.end_time())

    elif ts_target.freq.freqstr in DIVISIBLE_FREQS:
        return idx + int((ts_covariate.end_time() - ts_target.end_time()) / freq)

    # /!\ THIS IS TAKING LINEAR TIME IN THE LENGTH OF THE SERIES
    # it won't scale if the end of target and covariates are far apart and the freq is not in DIVISIBLE_FREQS
    # (Not sure there's a way around it for exotic freqs)
    if ts_covariate.end_time() >= ts_target.end_time():
        return idx - 1 + len(
            pd.date_range(start=ts_target.end_time(), end=ts_covariate.end_time(), freq=ts_target.freq))
    else:
        return idx + 1 - len(
            pd.date_range(start=ts_covariate.end_time(), end=ts_target.end_time(), freq=ts_target.freq))
def _get_matching_index(ts_target: TimeSeries, ts_covariate: TimeSeries, idx: int):
    """
    Given two overlapping series `ts_target` and `ts_covariate` and an index point `idx` of `ts_target`, returns
    the matching index point in `ts_covariate`, based on the ending times of the two series.
    The indices are starting from the end of the series.

    This function is used to jointly slice target and covariate series in datasets. It supports both datetime
    and integer indexed series.

    Note: this function does not check if the matching index value is in `ts_covariate` or not.
    """
    raise_if_not(ts_target.freq == ts_covariate.freq,
                 'The dataset contains some target/covariates series pair that have incompatible '
                 'time axes (not the same "freq") and thus cannot be matched')

    freq = ts_target.freq

    if isinstance(freq, int):
        return idx + int(ts_covariate.end_time() - ts_target.end_time())

    elif ts_target.freq.freqstr in DIVISIBLE_FREQS:
        return idx + int((ts_covariate.end_time() - ts_target.end_time()) / freq)

    # /!\ THIS IS TAKING LINEAR TIME IN THE LENGTH OF THE SERIES
    # it won't scale if the end of target and covariates are far apart and the freq is not in DIVISIBLE_FREQS
    # (Not sure there's a way around it for exotic freqs)
    if ts_covariate.end_time() >= ts_target.end_time():
        return idx - 1 + len(
            pd.date_range(start=ts_target.end_time(), end=ts_covariate.end_time(), freq=ts_target.freq))
    else:
        return idx + 1 - len(
            pd.date_range(start=ts_covariate.end_time(), end=ts_target.end_time(), freq=ts_target.freq))
26,041
def subnet_list_available_ips(cmd, resource_group_name, virtual_network_name, subnet_name):
    from .aaz.latest.network.vnet import CheckIpAddress
    from .aaz.latest.network.vnet.subnet import Show

    Show_Subnet = Show(cmd.loader)
    args = {
        "resource_group": resource_group_name,
        "name": subnet_name,
        "vnet_name": virtual_network_name,
    }
    subnet = Show_Subnet(args)

    if subnet["addressPrefix"]:
        start_ip = subnet["addressPrefix"].split("/")[0]

    Check_IP_Address = CheckIpAddress(cmd.loader)
    args = {
        "resource_group": resource_group_name,
        "name": virtual_network_name,
        "ip_address": start_ip,
    }
    available_ips = Check_IP_Address(args)
    return available_ips["availableIPAddresses"]
# endregion
def subnet_list_available_ips(cmd, resource_group_name, virtual_network_name, subnet_name):
    from .aaz.latest.network.vnet import CheckIpAddress
    from .aaz.latest.network.vnet.subnet import Show

    Show_Subnet = Show(cli_ctx=cmd.cli_ctx)
    args = {
        "resource_group": resource_group_name,
        "name": subnet_name,
        "vnet_name": virtual_network_name,
    }
    subnet = Show_Subnet(args)

    if subnet["addressPrefix"]:
        start_ip = subnet["addressPrefix"].split("/")[0]

    Check_IP_Address = CheckIpAddress(cmd.loader)
    args = {
        "resource_group": resource_group_name,
        "name": virtual_network_name,
        "ip_address": start_ip,
    }
    available_ips = Check_IP_Address(args)
    return available_ips["availableIPAddresses"]
# endregion
11,643
def getSplashScreen():
    splash_path = os.path.join(os.path.split(ilastik.__file__)[0], 'ilastik-splash.png')
    splashImage = QPixmap(splash_path)
    splashScreen = QSplashScreen(splashImage)
    return splashScreen
def getSplashScreen():
    splash_path = os.path.join(os.path.dirname(ilastik.__file__), 'ilastik-splash.png')
    splashImage = QPixmap(splash_path)
    splashScreen = QSplashScreen(splashImage)
    return splashScreen
32,152
def results(results):
    """Outputs entries to the war-room

    Args:
        results (Union[list, dict]): The entry object or array of entry objects to output
            For example: results = {
                'Type' : entryTypes['note'],
                'Contents': data,
                'ContentsFormat' : formats['json'],
                'HumanReadable': md,
                'ReadableContentsFormat' : formats['markdown'],
                'EntryContext' : context,
                'Tags' : ['tag1', 'tag2']
            }

    Returns:
        None: No data returned
    """
    if isinstance(results, dict) and results.get("contents"):
        results = results.get("contents")
    log("demisto results: {}".format(json.dumps(results, indent=4, sort_keys=True)))
def results(results):
    """Outputs entries to the war-room

    Args:
        results (Union[list, dict]): The entry object or array of entry objects to output
            For example: results = {
                'Type' : entryTypes['note'],
                'Contents': data,
                'ContentsFormat' : formats['json'],
                'HumanReadable': md,
                'ReadableContentsFormat' : EntryFormat.MARKDOWN,
                'EntryContext' : context,
                'Tags' : ['tag1', 'tag2']
            }

    Returns:
        None: No data returned
    """
    if isinstance(results, dict) and results.get("contents"):
        results = results.get("contents")
    log("demisto results: {}".format(json.dumps(results, indent=4, sort_keys=True)))
53,392
def test_numpy_regression_crash(finalize_linter: PyLinter) -> None:
    # Test for a crash on packages whose name start with `numpy`
    # See: https://github.com/PyCQA/pylint/issues/5244
    finalize_linter.check([join(REGR_DATA, "numpy_crash_tester/test_file.py")])
    # Check that we do not issue any (fatal) messages
    assert finalize_linter.msg_status == 0
def test_numpy_regression_crash(finalize_linter: PyLinter) -> None:
    """
    Test for a crash on packages whose name start with `numpy`

    See: https://github.com/PyCQA/pylint/issues/5244
    """
    finalize_linter.check([join(REGR_DATA, "numpy_crash_tester/test_file.py")])
    # Check that we do not issue any (fatal) messages
    assert finalize_linter.msg_status == 0
5,523
def is_subscriber(func):
    @functools.wraps(func)
    def inner(request, *args, **kwargs):
        user = request.user
        if not user.is_authenticated:
            return HttpResponseForbidden("not signed in")

        # TEMPORARY until all things auth + subscription come together.
        if not settings.FAKE_USER_SUBSCRIBER_NUMBER:
            return HttpResponseForbidden("not a subscriber")

        # Note: Deliberately commented out until we have figured out OIDC!
        # if not UserSubscription.objects.filter(
        #     user=user, canceled__isnull=True
        # ).exists():
        #     return HttpResponseForbidden("not a subscriber")

        return func(request, *args, **kwargs)

    return inner
def require_subscriber(func):
    @functools.wraps(func)
    def inner(request, *args, **kwargs):
        user = request.user
        if not user.is_authenticated:
            return HttpResponseForbidden("not signed in")

        # TEMPORARY until all things auth + subscription come together.
        if not settings.FAKE_USER_SUBSCRIBER_NUMBER:
            return HttpResponseForbidden("not a subscriber")

        # Note: Deliberately commented out until we have figured out OIDC!
        # if not UserSubscription.objects.filter(
        #     user=user, canceled__isnull=True
        # ).exists():
        #     return HttpResponseForbidden("not a subscriber")

        return func(request, *args, **kwargs)

    return inner
3,185
def generate_culprit(data):
    platform = data.get('platform')
    exceptions = get_path(data, 'exception', 'values')
    if exceptions:
        # Synthetic events no longer get a culprit
        last_exception = get_path(exceptions, -1)
        if last_exception and (last_exception.get('mechanism') or {}).get('synthetic'):
            return ''

        stacktraces = [e['stacktrace'] for e in exceptions if get_path(e, 'stacktrace', 'frames')]
    else:
        stacktrace = data.get('stacktrace')
        if stacktrace and stacktrace.get('frames'):
            stacktraces = [stacktrace]
        else:
            stacktraces = None

    culprit = None

    if not culprit and stacktraces:
        culprit = get_stacktrace_culprit(get_path(stacktraces, -1), platform=platform)

    if not culprit and data.get('request'):
        culprit = get_path(data, 'request', 'url')

    return truncatechars(culprit or '', MAX_CULPRIT_LENGTH)
def generate_culprit(data):
    platform = data.get('platform')
    exceptions = get_path(data, 'exception', 'values', filter=True)
    if exceptions:
        # Synthetic events no longer get a culprit
        last_exception = get_path(exceptions, -1)
        if last_exception and (last_exception.get('mechanism') or {}).get('synthetic'):
            return ''

        stacktraces = [e['stacktrace'] for e in exceptions if get_path(e, 'stacktrace', 'frames')]
    else:
        stacktrace = data.get('stacktrace')
        if stacktrace and stacktrace.get('frames'):
            stacktraces = [stacktrace]
        else:
            stacktraces = None

    culprit = None

    if not culprit and stacktraces:
        culprit = get_stacktrace_culprit(get_path(stacktraces, -1), platform=platform)

    if not culprit and data.get('request'):
        culprit = get_path(data, 'request', 'url')

    return truncatechars(culprit or '', MAX_CULPRIT_LENGTH)
8,390
def template_match(observed_spectrum, spectral_templates,
                   resample_method="flux_conserving",
                   min_redshift=None, max_redshift=None, delta_redshift=None):
    """
    Find which spectral templates is the best fit to an observed spectrum by
    computing the chi-squared. If two template_spectra have the same chi2, the
    first template is returned.

    Parameters
    ----------
    observed_spectrum : :class:`~specutils.Spectrum1D`
        The observed spectrum.
    spectral_templates : :class:`~specutils.Spectrum1D` or :class:`~specutils.SpectrumCollection` or `list`
        That will give a single :class:`~specutils.Spectrum1D` when iterated
        over. The template spectra, which will be resampled, normalized, and
        compared to the observed spectrum, where the smallest chi2 and
        normalized template spectrum will be returned.
    resample_method : `string`
        Three resample options: flux_conserving, linear_interpolated, and
        spline_interpolated. Anything else does not resample the spectrum.
    min_redshift : `float`
        The minimum redshift allowed
    max_redshift : `float`
        The maximum redshift allowed
    delta_redshift : `float`
        The amount the redshift will change between loops

    Returns
    -------
    normalized_template_spectrum : :class:`~specutils.Spectrum1D`
        The template spectrum that has been normalized.
    chi2 : `float`
        The chi2 of the flux of the observed_spectrum and the flux of the
        normalized template spectrum.
    smallest_chi_index : `int`
        The index of the spectrum with the smallest chi2 in spectral templates.
    """
    if hasattr(spectral_templates, 'flux') and len(spectral_templates.flux.shape) == 1:
        # Account for redshift if provided
        if min_redshift and max_redshift and delta_redshift:
            redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectral_templates,
                                                              min_redshift, max_redshift, delta_redshift)
            spectral_templates = redshifted_spectrum

        normalized_spectral_template, chi2 = _chi_sqaure_for_templates(
            observed_spectrum, spectral_templates, resample_method)

        return normalized_spectral_template, chi2

    # At this point, the template spectrum is either a ``SpectrumCollection``
    # or a multi-dimensional``Spectrum1D``. Loop through the object and return
    # the template spectrum with the lowest chi square and its corresponding
    # chi square.
    chi2_min = None
    smallest_chi_spec = None

    for index, spectrum in enumerate(spectral_templates):
        # Account for redshift if provided
        if min_redshift and max_redshift and delta_redshift:
            redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectrum,
                                                              min_redshift, max_redshift, delta_redshift)
            spectrum = redshifted_spectrum

        normalized_spectral_template, chi2 = _chi_sqaure_for_templates(
            observed_spectrum, spectrum, resample_method)

        if chi2_min is None or chi2 < chi2_min:
            chi2_min = chi2
            smallest_chi_spec = normalized_spectral_template
            smallest_chi_index = index

    return smallest_chi_spec, chi2_min, smallest_chi_index
def template_match(observed_spectrum, spectral_templates,
                   resample_method="flux_conserving",
                   min_redshift=None, max_redshift=None, delta_redshift=None):
    """
    Find which spectral templates is the best fit to an observed spectrum by
    computing the chi-squared. If two template_spectra have the same chi2, the
    first template is returned.

    Parameters
    ----------
    observed_spectrum : :class:`~specutils.Spectrum1D`
        The observed spectrum.
    spectral_templates : :class:`~specutils.Spectrum1D` or :class:`~specutils.SpectrumCollection` or `list`
        That will give a single :class:`~specutils.Spectrum1D` when iterated
        over. The template spectra, which will be resampled, normalized, and
        compared to the observed spectrum, where the smallest chi2 and
        normalized template spectrum will be returned.
    resample_method : `string`
        Three resample options: flux_conserving, linear_interpolated, and
        spline_interpolated. Anything else does not resample the spectrum.
    min_redshift : `float`
        The minimum redshift allowed
    max_redshift : `float`
        The maximum redshift allowed
    delta_redshift : `float`
        The amount the redshift will change between loops

    Returns
    -------
    normalized_template_spectrum : :class:`~specutils.Spectrum1D`
        The template spectrum that has been normalized.
    chi2 : `float`
        The chi2 of the flux of the observed_spectrum and the flux of the
        normalized template spectrum.
    smallest_chi_index : `int`
        The index of the spectrum with the smallest chi2 in spectral templates.
    """
    if hasattr(spectral_templates, 'flux') and len(spectral_templates.flux.shape) == 1:
        # Account for redshift if provided
        if min_redshift and max_redshift and delta_redshift:
            redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectral_templates,
                                                              min_redshift, max_redshift, delta_redshift)
            spectral_templates = redshifted_spectrum

        normalized_spectral_template, chi2 = _chi_square_for_templates(
            observed_spectrum, spectral_templates, resample_method)

        return normalized_spectral_template, chi2

    # At this point, the template spectrum is either a ``SpectrumCollection``
    # or a multi-dimensional``Spectrum1D``. Loop through the object and return
    # the template spectrum with the lowest chi square and its corresponding
    # chi square.
    chi2_min = None
    smallest_chi_spec = None

    for index, spectrum in enumerate(spectral_templates):
        # Account for redshift if provided
        if min_redshift and max_redshift and delta_redshift:
            redshift, redshifted_spectrum = template_redshift(observed_spectrum, spectrum,
                                                              min_redshift, max_redshift, delta_redshift)
            spectrum = redshifted_spectrum

        normalized_spectral_template, chi2 = _chi_sqaure_for_templates(
            observed_spectrum, spectrum, resample_method)

        if chi2_min is None or chi2 < chi2_min:
            chi2_min = chi2
            smallest_chi_spec = normalized_spectral_template
            smallest_chi_index = index

    return smallest_chi_spec, chi2_min, smallest_chi_index
10,583
def run_id_arg(arg):
    m = re.fullmatch(r"(?:https:\/\/dev\.azure\.com\/ansible\/ansible\/_build\/results\?buildId=)?(\d{4})", arg)
    if not m:
        raise ValueError("run does not seems to be a URI or an ID")
    return m.group(1)
def run_id_arg(arg):
    m = re.fullmatch(r"(?:https:\/\/dev\.azure\.com\/ansible\/ansible\/_build\/results\?buildId=)?(\d+)", arg)
    if not m:
        raise ValueError("run does not seems to be a URI or an ID")
    return m.group(1)
28,593
def plot_trace( data: InferenceData, var_names: Optional[Sequence[str]] = None, filter_vars: Optional[str] = None, transform: Optional[Callable] = None, coords: Optional[CoordSpec] = None, divergences: Optional[str] = "auto", kind: Optional[str] = "trace", figsize: Optional[Tuple[float, float]] = None, rug: bool = False, lines: Optional[List[Tuple[str, CoordSpec, Any]]] = None, circ_var_names: Optional[List[str]] = None, circ_var_units: str = "radians", compact: bool = True, compact_prop: Optional[Union[str, Mapping[str, Any]]] = None, combined: bool = False, chain_prop: Optional[Union[str, Mapping[str, Any]]] = None, legend: bool = False, plot_kwargs: Optional[KwargSpec] = None, fill_kwargs: Optional[KwargSpec] = None, rug_kwargs: Optional[KwargSpec] = None, hist_kwargs: Optional[KwargSpec] = None, trace_kwargs: Optional[KwargSpec] = None, rank_kwargs: Optional[KwargSpec] = None, labeller=None, axes=None, backend: Optional[str] = None, backend_config: Optional[KwargSpec] = None, backend_kwargs: Optional[KwargSpec] = None, show: Optional[bool] = None, ): """Plot distribution (histogram or kernel density estimates) and sampled values or rank plot. If `divergences` data is available in `sample_stats`, will plot the location of divergences as dashed vertical lines. Parameters ---------- data: obj Any object that can be converted to an :class:`arviz.InferenceData` object Refer to documentation of :func:`arviz.convert_to_dataset` for details var_names: str or list of str, optional One or more variables to be plotted. Prefix the variables by `~` when you want to exclude them from the plot. filter_vars: {None, "like", "regex"}, optional, default=None If `None` (default), interpret var_names as the real variables names. If "like", interpret var_names as substrings of the real variables names. If "regex", interpret var_names as regular expressions on the real variables names. A la `pandas.filter`. coords: dict of {str: slice or array_like}, optional Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel` divergences: {"bottom", "top", None}, optional Plot location of divergences on the traceplots. kind: {"trace", "rank_bar", "rank_vlines"}, optional Choose between plotting sampled values per iteration and rank plots. transform: callable, optional Function to transform data (defaults to None i.e.the identity function) figsize: tuple of (float, float), optional If None, size is (12, variables * 2) rug: bool, optional If True adds a rugplot of samples. Defaults to False. Ignored for 2D KDE. Only affects continuous variables. lines: list of tuple of (str, dict, array_like), optional List of (var_name, {'coord': selection}, [line, positions]) to be overplotted as vertical lines on the density and horizontal lines on the trace. circ_var_names : str or list of str, optional List of circular variables to account for when plotting KDE. circ_var_units : str Whether the variables in `circ_var_names` are in "degrees" or "radians". compact: bool, optional Plot multidimensional variables in a single plot. compact_prop: str or dict {str: array_like}, optional Tuple containing the property name and the property values to distinguish different dimensions with compact=True combined: bool, optional Flag for combining multiple chains into a single line. If False (default), chains will be plotted separately. 
chain_prop: str or dict {str: array_like}, optional Tuple containing the property name and the property values to distinguish different chains legend: bool, optional Add a legend to the figure with the chain color code. plot_kwargs, fill_kwargs, rug_kwargs, hist_kwargs: dict, optional Extra keyword arguments passed to :func:`arviz.plot_dist`. Only affects continuous variables. trace_kwargs: dict, optional Extra keyword arguments passed to :meth:`matplotlib.axes.Axes.plot` labeller : labeller instance, optional Class providing the method `make_label_vert` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. rank_kwargs : dict, optional Extra keyword arguments passed to :func:`arviz.plot_rank` axes: axes, optional Matplotlib axes or bokeh figures. backend: {"matplotlib", "bokeh"}, optional Select plotting backend. backend_config: dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams. backend_kwargs: dict, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_rank : Plot rank order statistics of chains. Examples -------- Plot a subset variables and select them with partial naming .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('non_centered_eight') >>> coords = {'school': ['Choate', 'Lawrenceville']} >>> az.plot_trace(data, var_names=('theta'), filter_vars="like", coords=coords) Show all dimensions of multidimensional variables in the same plot .. plot:: :context: close-figs >>> az.plot_trace(data, compact=True) Display a rank plot instead of trace .. plot:: :context: close-figs >>> az.plot_trace(data, var_names=["mu", "tau"], kind="rank_bars") Combine all chains into one distribution and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_trace( >>> data, var_names=('^theta'), filter_vars="regex", coords=coords, combined=True >>> ) Plot reference lines against distribution and trace .. 
plot:: :context: close-figs >>> lines = (('theta_t',{'school': "Choate"}, [-1]),) >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, lines=lines) """ if kind not in {"trace", "rank_vlines", "rank_bars"}: raise ValueError("The value of kind must be either trace, rank_vlines or rank_bars.") if divergences == "auto": divergences = "top" if rug else "bottom" if divergences: try: divergence_data = convert_to_dataset(data, group="sample_stats").diverging except (ValueError, AttributeError): # No sample_stats, or no `.diverging` divergences = None if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() if divergences: divergence_data = get_coords( divergence_data, {k: v for k, v in coords.items() if k in ("chain", "draw")} ) else: divergence_data = False coords_data = get_coords(convert_to_dataset(data, group="posterior"), coords) if transform is not None: coords_data = transform(coords_data) var_names = _var_names(var_names, coords_data, filter_vars) if compact: skip_dims = set(coords_data.dims) - {"chain", "draw"} else: skip_dims = set() plotters = list( xarray_var_iter(coords_data, var_names=var_names, combined=True, skip_dims=skip_dims) ) max_plots = rcParams["plot.max_subplots"] max_plots = len(plotters) if max_plots is None else max(max_plots // 2, 1) if len(plotters) > max_plots: warnings.warn( "rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number " "of variables to plot ({len_plotters}), generating only {max_plots} " "plots".format(max_plots=max_plots, len_plotters=len(plotters)), UserWarning, ) plotters = plotters[:max_plots] # TODO: Check if this can be further simplified trace_plot_args = dict( # User Kwargs data=coords_data, var_names=var_names, # coords = coords, divergences=divergences, kind=kind, figsize=figsize, rug=rug, lines=lines, circ_var_names=circ_var_names, circ_var_units=circ_var_units, plot_kwargs=plot_kwargs, fill_kwargs=fill_kwargs, rug_kwargs=rug_kwargs, hist_kwargs=hist_kwargs, trace_kwargs=trace_kwargs, rank_kwargs=rank_kwargs, compact=compact, compact_prop=compact_prop, combined=combined, chain_prop=chain_prop, legend=legend, labeller=labeller, # Generated kwargs divergence_data=divergence_data, # skip_dims=skip_dims, plotters=plotters, axes=axes, backend_config=backend_config, backend_kwargs=backend_kwargs, show=show, ) if backend is None: backend = rcParams["plot.backend"] backend = backend.lower() plot = get_plotting_function("plot_trace", "traceplot", backend) axes = plot(**trace_plot_args) return axes
def plot_trace( data: InferenceData, var_names: Optional[Sequence[str]] = None, filter_vars: Optional[str] = None, transform: Optional[Callable] = None, coords: Optional[CoordSpec] = None, divergences: Optional[str] = "auto", kind: Optional[str] = "trace", figsize: Optional[Tuple[float, float]] = None, rug: bool = False, lines: Optional[List[Tuple[str, CoordSpec, Any]]] = None, circ_var_names: Optional[List[str]] = None, circ_var_units: str = "radians", compact: bool = True, compact_prop: Optional[Union[str, Mapping[str, Any]]] = None, combined: bool = False, chain_prop: Optional[Union[str, Mapping[str, Any]]] = None, legend: bool = False, plot_kwargs: Optional[KwargSpec] = None, fill_kwargs: Optional[KwargSpec] = None, rug_kwargs: Optional[KwargSpec] = None, hist_kwargs: Optional[KwargSpec] = None, trace_kwargs: Optional[KwargSpec] = None, rank_kwargs: Optional[KwargSpec] = None, labeller=None, axes=None, backend: Optional[str] = None, backend_config: Optional[KwargSpec] = None, backend_kwargs: Optional[KwargSpec] = None, show: Optional[bool] = None, ): """Plot distribution (histogram or kernel density estimates) and sampled values or rank plot. If `divergences` data is available in `sample_stats`, will plot the location of divergences as dashed vertical lines. Parameters ---------- data: obj Any object that can be converted to an :class:`arviz.InferenceData` object Refer to documentation of :func:`arviz.convert_to_dataset` for details var_names: str or list of str, optional One or more variables to be plotted. Prefix the variables by `~` when you want to exclude them from the plot. filter_vars: {None, "like", "regex"}, optional, default=None If `None` (default), interpret var_names as the real variables names. If "like", interpret var_names as substrings of the real variables names. If "regex", interpret var_names as regular expressions on the real variables names. A la `pandas.filter`. coords: dict of {str: slice or array_like}, optional Coordinates of var_names to be plotted. Passed to :meth:`xarray.Dataset.sel` divergences: {"bottom", "top", None}, optional Plot location of divergences on the traceplots. kind: {"trace", "rank_bar", "rank_vlines"}, optional Choose between plotting sampled values per iteration and rank plots. transform: callable, optional Function to transform data (defaults to None i.e.the identity function) figsize: tuple of (float, float), optional If None, size is (12, variables * 2) rug: bool, optional If True adds a rugplot of samples. Defaults to False. Ignored for 2D KDE. Only affects continuous variables. lines: list of tuple of (str, dict, array_like), optional List of (var_name, {'coord': selection}, [line, positions]) to be overplotted as vertical lines on the density and horizontal lines on the trace. circ_var_names : str or list of str, optional List of circular variables to account for when plotting KDE. circ_var_units : str Whether the variables in `circ_var_names` are in "degrees" or "radians". compact: bool, optional Plot multidimensional variables in a single plot. compact_prop: str or dict {str: array_like}, optional Tuple containing the property name and the property values to distinguish different dimensions with compact=True combined: bool, optional Flag for combining multiple chains into a single line. If False (default), chains will be plotted separately. 
chain_prop: str or dict {str: array_like}, optional Tuple containing the property name and the property values to distinguish different chains legend: bool, optional Add a legend to the figure with the chain color code. plot_kwargs, fill_kwargs, rug_kwargs, hist_kwargs: dict, optional Extra keyword arguments passed to :func:`arviz.plot_dist`. Only affects continuous variables. trace_kwargs: dict, optional Extra keyword arguments passed to :meth:`matplotlib.axes.Axes.plot` labeller : labeller instance, optional Class providing the method ``make_label_vert`` to generate the labels in the plot titles. Read the :ref:`label_guide` for more details and usage examples. rank_kwargs : dict, optional Extra keyword arguments passed to :func:`arviz.plot_rank` axes: axes, optional Matplotlib axes or bokeh figures. backend: {"matplotlib", "bokeh"}, optional Select plotting backend. backend_config: dict, optional Currently specifies the bounds to use for bokeh axes. Defaults to value set in rcParams. backend_kwargs: dict, optional These are kwargs specific to the backend being used, passed to :func:`matplotlib.pyplot.subplots` or :func:`bokeh.plotting.figure`. show: bool, optional Call backend show function. Returns ------- axes: matplotlib axes or bokeh figures See Also -------- plot_rank : Plot rank order statistics of chains. Examples -------- Plot a subset variables and select them with partial naming .. plot:: :context: close-figs >>> import arviz as az >>> data = az.load_arviz_data('non_centered_eight') >>> coords = {'school': ['Choate', 'Lawrenceville']} >>> az.plot_trace(data, var_names=('theta'), filter_vars="like", coords=coords) Show all dimensions of multidimensional variables in the same plot .. plot:: :context: close-figs >>> az.plot_trace(data, compact=True) Display a rank plot instead of trace .. plot:: :context: close-figs >>> az.plot_trace(data, var_names=["mu", "tau"], kind="rank_bars") Combine all chains into one distribution and select variables with regular expressions .. plot:: :context: close-figs >>> az.plot_trace( >>> data, var_names=('^theta'), filter_vars="regex", coords=coords, combined=True >>> ) Plot reference lines against distribution and trace .. 
plot:: :context: close-figs >>> lines = (('theta_t',{'school': "Choate"}, [-1]),) >>> az.plot_trace(data, var_names=('theta_t', 'theta'), coords=coords, lines=lines) """ if kind not in {"trace", "rank_vlines", "rank_bars"}: raise ValueError("The value of kind must be either trace, rank_vlines or rank_bars.") if divergences == "auto": divergences = "top" if rug else "bottom" if divergences: try: divergence_data = convert_to_dataset(data, group="sample_stats").diverging except (ValueError, AttributeError): # No sample_stats, or no `.diverging` divergences = None if coords is None: coords = {} if labeller is None: labeller = BaseLabeller() if divergences: divergence_data = get_coords( divergence_data, {k: v for k, v in coords.items() if k in ("chain", "draw")} ) else: divergence_data = False coords_data = get_coords(convert_to_dataset(data, group="posterior"), coords) if transform is not None: coords_data = transform(coords_data) var_names = _var_names(var_names, coords_data, filter_vars) if compact: skip_dims = set(coords_data.dims) - {"chain", "draw"} else: skip_dims = set() plotters = list( xarray_var_iter(coords_data, var_names=var_names, combined=True, skip_dims=skip_dims) ) max_plots = rcParams["plot.max_subplots"] max_plots = len(plotters) if max_plots is None else max(max_plots // 2, 1) if len(plotters) > max_plots: warnings.warn( "rcParams['plot.max_subplots'] ({max_plots}) is smaller than the number " "of variables to plot ({len_plotters}), generating only {max_plots} " "plots".format(max_plots=max_plots, len_plotters=len(plotters)), UserWarning, ) plotters = plotters[:max_plots] # TODO: Check if this can be further simplified trace_plot_args = dict( # User Kwargs data=coords_data, var_names=var_names, # coords = coords, divergences=divergences, kind=kind, figsize=figsize, rug=rug, lines=lines, circ_var_names=circ_var_names, circ_var_units=circ_var_units, plot_kwargs=plot_kwargs, fill_kwargs=fill_kwargs, rug_kwargs=rug_kwargs, hist_kwargs=hist_kwargs, trace_kwargs=trace_kwargs, rank_kwargs=rank_kwargs, compact=compact, compact_prop=compact_prop, combined=combined, chain_prop=chain_prop, legend=legend, labeller=labeller, # Generated kwargs divergence_data=divergence_data, # skip_dims=skip_dims, plotters=plotters, axes=axes, backend_config=backend_config, backend_kwargs=backend_kwargs, show=show, ) if backend is None: backend = rcParams["plot.backend"] backend = backend.lower() plot = get_plotting_function("plot_trace", "traceplot", backend) axes = plot(**trace_plot_args) return axes
7,880
def dose_coefficients(particle, geometry='AP'):
    """Return effective dose conversion coefficients from ICRP-116

    This function provides fluence to dose conversion coefficients for
    effective dose for various types of external exposures based on values in
    `ICRP Publication 116 <https://doi.org/10.1016/j.icrp.2011.10.001>`_.

    Parameters
    ----------
    particle : {'neutron', 'photon', 'electron', 'positron'}
        Incident particle
    geometry : {'AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO'}
        Irradiation geometry assumed. Refer to ICRP-116 for the meaning of the
        options here.

    Returns
    -------
    energy : numpy.ndarray
        Energies at which dose conversion coefficients are given
    dose : numpy.ndarray
        Effective dose in [pSv cm^2] at provided energies

    """
    if not _DOSE_ICRP116:
        _load_dose_icrp116()

    # Get all data for selected particle
    data = _DOSE_ICRP116.get(particle)
    if data is None:
        raise ValueError("{} has no effective dose data".format(particle))

    # Determine index for selected geometry
    if particle in ('neutron', 'photon', 'proton'):
        index = ('AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO').index(geometry)
    else:
        index = ('AP', 'PA', 'ISO').index(geometry)

    # Pull out energy and dose from table
    energy = data[:, 0].copy()
    dose = data[:, index + 1].copy()
    return energy, dose
def dose_coefficients(particle, geometry='AP'):
    """Return effective dose conversion coefficients from ICRP-116

    This function provides fluence to dose conversion coefficients for
    effective dose for various types of external exposures based on values in
    `ICRP Publication 116 <https://doi.org/10.1016/j.icrp.2011.10.001>`_.

    Parameters
    ----------
    particle : {'neutron', 'photon', 'electron', 'positron'}
        Incident particle
    geometry : {'AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO'}
        Irradiation geometry assumed. Refer to ICRP-116 for the meaning of the
        options here.

    Returns
    -------
    energy : numpy.ndarray
        Energies at which dose conversion coefficients are given
    dose : numpy.ndarray
        Effective dose coefficients in [pSv cm^2] at provided energies

    """
    if not _DOSE_ICRP116:
        _load_dose_icrp116()

    # Get all data for selected particle
    data = _DOSE_ICRP116.get(particle)
    if data is None:
        raise ValueError("{} has no effective dose data".format(particle))

    # Determine index for selected geometry
    if particle in ('neutron', 'photon', 'proton'):
        index = ('AP', 'PA', 'LLAT', 'RLAT', 'ROT', 'ISO').index(geometry)
    else:
        index = ('AP', 'PA', 'ISO').index(geometry)

    # Pull out energy and dose from table
    energy = data[:, 0].copy()
    dose = data[:, index + 1].copy()
    return energy, dose
55,854
def _select_one(
    c: Container, key: str, throw_on_missing: bool, throw_on_type_error: bool = True
) -> Tuple[Optional[Node], Union[str, int]]:
    from .dictconfig import DictConfig
    from .listconfig import ListConfig

    ret_key: Union[str, int] = key
    assert isinstance(c, (DictConfig, ListConfig)), f"Unexpected type : {c}"

    if isinstance(c, DictConfig):
        assert isinstance(ret_key, str)
        val = c._get_node(ret_key, validate_access=False)
        assert val is None or isinstance(val, Node)
        if val is not None:
            assert isinstance(val, Node)
            if val._is_missing():
                if throw_on_missing:
                    raise MissingMandatoryValue(
                        f"Missing mandatory value : {c._get_full_key(ret_key)}"
                    )
                else:
                    return val, ret_key
        else:
            val = None
    elif isinstance(c, ListConfig):
        assert isinstance(ret_key, str)
        if not is_int(ret_key):
            if throw_on_type_error:
                raise TypeError(
                    f"Index '{ret_key}' ({type(ret_key).__name__}) is not an int"
                )
            else:
                val = None
        else:
            ret_key = int(ret_key)
            if ret_key < 0 or ret_key + 1 > len(c):
                val = None
            else:
                val = c._get_node(ret_key)
    else:
        assert False

    assert val is None or isinstance(val, Node)
    return val, ret_key
def _select_one(
    c: Container, key: str, throw_on_missing: bool, throw_on_type_error: bool = True
) -> Tuple[Optional[Node], Union[str, int]]:
    from .dictconfig import DictConfig
    from .listconfig import ListConfig

    ret_key: Union[str, int] = key
    assert isinstance(c, (DictConfig, ListConfig)), f"Unexpected type : {c}"

    if isinstance(c, DictConfig):
        assert isinstance(ret_key, str)
        val = c._get_node(ret_key, validate_access=False)
        assert val is None or isinstance(val, Node)
        if val is not None:
            if val._is_missing():
                if throw_on_missing:
                    raise MissingMandatoryValue(
                        f"Missing mandatory value : {c._get_full_key(ret_key)}"
                    )
                else:
                    return val, ret_key
        else:
            val = None
    elif isinstance(c, ListConfig):
        assert isinstance(ret_key, str)
        if not is_int(ret_key):
            if throw_on_type_error:
                raise TypeError(
                    f"Index '{ret_key}' ({type(ret_key).__name__}) is not an int"
                )
            else:
                val = None
        else:
            ret_key = int(ret_key)
            if ret_key < 0 or ret_key + 1 > len(c):
                val = None
            else:
                val = c._get_node(ret_key)
    else:
        assert False

    assert val is None or isinstance(val, Node)
    return val, ret_key
27,459
def run_conda_forge_specific(meta, recipe_dir, lints, hints): gh = github.Github(os.environ["GH_TOKEN"]) package_section = get_section(meta, "package", lints) extra_section = get_section(meta, "extra", lints) sources_section = get_section(meta, "source", lints) requirements_section = get_section(meta, "requirements", lints) outputs_section = get_section(meta, "outputs", lints) recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe" recipe_name = package_section.get("name", "").strip() is_staged_recipes = recipe_dirname != "recipe" # 1: Check that the recipe does not exist in conda-forge or bioconda if is_staged_recipes and recipe_name: cf = gh.get_user(os.getenv("GH_ORG", "conda-forge")) for name in set( [ recipe_name, recipe_name.replace("-", "_"), recipe_name.replace("_", "-"), ] ): try: if cf.get_repo("{}-feedstock".format(name)): existing_recipe_name = name feedstock_exists = True break else: feedstock_exists = False except github.UnknownObjectException as e: feedstock_exists = False if feedstock_exists and existing_recipe_name == recipe_name: lints.append("Feedstock with the same name exists in conda-forge.") elif feedstock_exists: hints.append( "Feedstock with the name {} exists in conda-forge. Is it the same as this package ({})?".format( existing_recipe_name, recipe_name, ) ) bio = gh.get_user("bioconda").get_repo("bioconda-recipes") try: bio.get_dir_contents("recipes/{}".format(recipe_name)) except github.UnknownObjectException as e: pass else: hints.append( "Recipe with the same name exists in bioconda: " "please discuss with @conda-forge/bioconda-recipes." ) url = None for source_section in sources_section: if str(source_section.get("url")).startswith( "https://pypi.io/packages/source/" ): url = source_section["url"] if url: # get pypi name from urls like "https://pypi.io/packages/source/b/build/build-0.4.0.tar.gz" pypi_name = url.split("/")[6] mapping_request = requests.get( "https://raw.githubusercontent.com/regro/cf-graph-countyfair/master/mappings/pypi/name_mapping.yaml" ) if mapping_request.status_code == 200: mapping_raw_yaml = mapping_request.content mapping = get_yaml().load(mapping_raw_yaml) for pkg in mapping: if pkg.get("pypi_name", "") == pypi_name: conda_name = pkg["conda_name"] hints.append( f"A conda package with same name ({conda_name}) already exists." ) # 2: Check that the recipe maintainers exists: maintainers = extra_section.get("recipe-maintainers", []) for maintainer in maintainers: if "/" in maintainer: # It's a team. Checking for existence is expensive. Skip for now continue try: gh.get_user(maintainer) except github.UnknownObjectException as e: lints.append( 'Recipe maintainer "{}" does not exist'.format(maintainer) ) # 3: if the recipe dir is inside the example dir if recipe_dir is not None and "recipes/example/" in recipe_dir: lints.append( "Please move the recipe out of the example dir and " "into its own dir." ) # 4: Do not delete example recipe if is_staged_recipes and recipe_dir is not None: example_meta_fname = os.path.abspath( os.path.join(recipe_dir, "..", "example", "meta.yaml") ) if not os.path.exists(example_meta_fname): msg = ( "Please do not delete the example recipe found in " "`recipes/example/meta.yaml`." 
) if msg not in lints: lints.append(msg) # 5: Do not depend on matplotlib, only matplotlib-base run_reqs = requirements_section.get("run") or [] for out in outputs_section: _req = out.get("requirements") or {} if isinstance(_req, Mapping): run_reqs += (_req.get("run") or []) else: run_reqs += _req for rq in run_reqs: nm = rq.split(" ")[0].strip() if nm == "matplotlib": msg = ( "Recipes should usually depend on `matplotlib-base` as opposed to " "`matplotlib` so that runtime environments do not require large " "packages like `qt`." ) if msg not in hints: hints.append(msg)
def run_conda_forge_specific(meta, recipe_dir, lints, hints): gh = github.Github(os.environ["GH_TOKEN"]) package_section = get_section(meta, "package", lints) extra_section = get_section(meta, "extra", lints) sources_section = get_section(meta, "source", lints) requirements_section = get_section(meta, "requirements", lints) outputs_section = get_section(meta, "outputs", lints) recipe_dirname = os.path.basename(recipe_dir) if recipe_dir else "recipe" recipe_name = package_section.get("name", "").strip() is_staged_recipes = recipe_dirname != "recipe" # 1: Check that the recipe does not exist in conda-forge or bioconda if is_staged_recipes and recipe_name: cf = gh.get_user(os.getenv("GH_ORG", "conda-forge")) for name in set( [ recipe_name, recipe_name.replace("-", "_"), recipe_name.replace("_", "-"), ] ): try: if cf.get_repo("{}-feedstock".format(name)): existing_recipe_name = name feedstock_exists = True break else: feedstock_exists = False except github.UnknownObjectException as e: feedstock_exists = False if feedstock_exists and existing_recipe_name == recipe_name: lints.append("Feedstock with the same name exists in conda-forge.") elif feedstock_exists: hints.append( "Feedstock with the name {} exists in conda-forge. Is it the same as this package ({})?".format( existing_recipe_name, recipe_name, ) ) bio = gh.get_user("bioconda").get_repo("bioconda-recipes") try: bio.get_dir_contents("recipes/{}".format(recipe_name)) except github.UnknownObjectException as e: pass else: hints.append( "Recipe with the same name exists in bioconda: " "please discuss with @conda-forge/bioconda-recipes." ) url = None for source_section in sources_section: if str(source_section.get("url")).startswith( "https://pypi.io/packages/source/" ): url = source_section["url"] if url: # get pypi name from urls like "https://pypi.io/packages/source/b/build/build-0.4.0.tar.gz" pypi_name = url.split("/")[6] mapping_request = requests.get( "https://raw.githubusercontent.com/regro/cf-graph-countyfair/master/mappings/pypi/name_mapping.yaml" ) if mapping_request.status_code == 200: mapping_raw_yaml = mapping_request.content mapping = get_yaml().load(mapping_raw_yaml) for pkg in mapping: if pkg.get("pypi_name", "") == pypi_name: conda_name = pkg["conda_name"] hints.append( f"A conda package with same name ({conda_name}) already exists." ) # 2: Check that the recipe maintainers exists: maintainers = extra_section.get("recipe-maintainers", []) for maintainer in maintainers: if "/" in maintainer: # It's a team. Checking for existence is expensive. Skip for now continue try: gh.get_user(maintainer) except github.UnknownObjectException as e: lints.append( 'Recipe maintainer "{}" does not exist'.format(maintainer) ) # 3: if the recipe dir is inside the example dir if recipe_dir is not None and "recipes/example/" in recipe_dir: lints.append( "Please move the recipe out of the example dir and " "into its own dir." ) # 4: Do not delete example recipe if is_staged_recipes and recipe_dir is not None: example_meta_fname = os.path.abspath( os.path.join(recipe_dir, "..", "example", "meta.yaml") ) if not os.path.exists(example_meta_fname): msg = ( "Please do not delete the example recipe found in " "`recipes/example/meta.yaml`." 
) if msg not in lints: lints.append(msg) # 5: Do not depend on matplotlib, only matplotlib-base run_reqs = requirements_section.get("run") or [] for out in outputs_section: _req = out.get("requirements") or {} if isinstance(_req, Mapping): run_reqs += _req.get("run") or [] else: run_reqs += _req for rq in run_reqs: nm = rq.split(" ")[0].strip() if nm == "matplotlib": msg = ( "Recipes should usually depend on `matplotlib-base` as opposed to " "`matplotlib` so that runtime environments do not require large " "packages like `qt`." ) if msg not in hints: hints.append(msg)
56,826
def do_reindex(hq_index_name, reset):
    print("Starting pillow preindex %s" % hq_index_name)
    reindex_commands = get_reindex_commands(hq_index_name)
    for factory_or_func in reindex_commands:
        try:
            is_factory = issubclass(factory_or_func, ReindexerFactory)
        except TypeError:
            # TypeError: issubclass() arg 1 must be a class
            factory_or_func()
        else:
            if is_factory:
                kwargs = {}
                reindex_args = ReindexerFactory.resumable_reindexer_args
                if reset \
                        and factory_or_func.arg_contributors \
                        and reindex_args in factory_or_func.arg_contributors:
                    kwargs["reset"] = True
                factory_or_func(**kwargs).build().reindex()
            else:
                raise ValueError(f"expected ReindexerFactory, got: {factory_or_func!r}")
    print("Pillow preindex finished %s" % hq_index_name)
def do_reindex(hq_index_name, reset):
    print("Starting pillow preindex %s" % hq_index_name)
    reindex_commands = get_reindex_commands(hq_index_name)
    for factory_or_func in reindex_commands:
        if not isinstance(factory_or_func, type):
            factory_or_func()
        elif not issubclass(factory_or_func, ReindexerFactory):
            raise ValueError(f"expected ReindexerFactory, got: {factory_or_func!r}")
        else:
            if is_factory:
                kwargs = {}
                reindex_args = ReindexerFactory.resumable_reindexer_args
                if reset \
                        and factory_or_func.arg_contributors \
                        and reindex_args in factory_or_func.arg_contributors:
                    kwargs["reset"] = True
                factory_or_func(**kwargs).build().reindex()
            else:
                raise ValueError(f"expected ReindexerFactory, got: {factory_or_func!r}")
    print("Pillow preindex finished %s" % hq_index_name)
25,765
def plot(n, margin=0.05, ax=None, geomap=True, projection=None, bus_colors='b', bus_alpha = 1, line_colors={'Line':'g', 'Link':'cyan'}, bus_sizes=1e-2, line_widths={'Line':2, 'Link':2}, flow=None, layouter=None, title="", line_cmap=None, bus_cmap=None, boundaries=None, geometry=False, branch_components=['Line', 'Link'], jitter=None, color_geomap=None): """ Plot the network buses and lines using matplotlib and cartopy. Parameters ---------- margin : float Margin at the sides as proportion of distance between max/min x,y ax : matplotlib ax, defaults to plt.gca() Axis to which to plot the network geomap: bool/str, default True Switch to use Cartopy and draw geographical features. If string is passed, it will be used as a resolution argument, valid options are '10m', '50m' and '110m'. projection: cartopy.crs.Projection, defaults to None Define the projection of your geomap, only valid if cartopy is installed. If None (default) is passed the projection for cartopy is set to cartopy.crs.PlateCarree bus_colors : dict/pandas.Series Colors for the buses, defaults to "b". If bus_sizes is a pandas.Series with a Multiindex, bus_colors defaults to the n.carriers['color'] column. bus_sizes : dict/pandas.Series Sizes of bus points, defaults to 1e-2. If a multiindexed Series is passed, the function will draw pies for each bus (first index level) with segments of different color (second index level). Such a Series is ob- tained by e.g. n.generators.groupby(['bus', 'carrier']).p_nom.sum() bus_alpha : float Adds alpha channel to buses, defaults to 1. line_colors : dict/pandas.Series Colors for the lines, defaults to "g" for Lines and "cyan" for Links. Colors for branches other than Lines can be specified using a pandas Series with a MultiIndex. line_widths : dict/pandas.Series Widths of lines, defaults to 2. Widths for branches other than Lines can be specified using a pandas Series with a MultiIndex. flow : snapshot/pandas.Series/function/string Flow to be displayed in the plot, defaults to None. If an element of n.snapshots is given, the flow at this timestamp will be displayed. If an aggregation function is given, is will be applied to the total network flow via pandas.DataFrame.agg (accepts also function names). Otherwise flows can be specified by passing a pandas Series with MultiIndex including all necessary branch components. Use the line_widths argument to additionally adjust the size of the flow arrows. layouter : networkx.drawing.layout function, default None Layouting function from `networkx <https://networkx.github.io/>`_ which overrules coordinates given in ``n.buses[['x','y']]``. See `list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_ of available options. title : string Graph title line_cmap : plt.cm.ColorMap/str|dict If line_colors are floats, this color map will assign the colors. Use a dict to specify colormaps for more than one branch type. bus_cmap : plt.cm.ColorMap/str If bus_colors are floats, this color map will assign the colors boundaries : list of four floats Boundaries of the plot in format [x1,x2,y1,y2] branch_components : list of str Branch components to be plotted, defaults to Line and Link. jitter : None|float Amount of random noise to add to bus positions to distinguish overlapping buses color_geomap : dict or bool Specify colors to paint land and sea areas in. If True, it defaults to `{'ocean': 'lightblue', 'land': 'whitesmoke'}`. If no dictionary is provided, colors are white. 
Returns ------- bus_collection, branch_collection1, ... : tuple of Collections Collections for buses and branches. """ defaults_for_branches = pd.Series( {'Link': dict(color="cyan", width=2), 'Line': dict(color="b", width=2), 'Transformer': dict(color='green', width=2)} ).rename_axis('component') x, y = _get_coordinates(n, layouter=layouter) if geomap: if not cartopy_present: logger.warning("Cartopy needs to be installed to use `geomap=True`.") geomap = False if projection is None: projection = get_projection_from_crs(n.srid) if ax is None: ax = plt.gca(projection=projection) else: assert isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot), ( 'The passed axis is not a GeoAxesSubplot. You can ' 'create one with: \nimport cartopy.crs as ccrs \n' 'fig, ax = plt.subplots(' 'subplot_kw={"projection":ccrs.PlateCarree()})') transform = draw_map_cartopy(n, x, y, ax, boundaries, margin, geomap, color_geomap) x, y, z = ax.projection.transform_points(transform, x.values, y.values).T x, y = pd.Series(x, n.buses.index), pd.Series(y, n.buses.index) elif ax is None: ax = plt.gca() if jitter is not None: x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x)) y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y)) if isinstance(bus_sizes, pd.Series) and isinstance(bus_sizes.index, pd.MultiIndex): # We are drawing pies to show all the different shares assert len(bus_sizes.index.levels[0].difference(n.buses.index)) == 0, \ "The first MultiIndex level of bus_sizes must contain buses" if isinstance(bus_colors, dict): bus_colors = pd.Series(bus_colors) # case bus_colors isn't a series or dict: look in n.carriers for existent colors if not isinstance(bus_colors, pd.Series): bus_colors = n.carriers.color.dropna() assert bus_sizes.index.levels[1].isin(bus_colors.index).all(), ( "Colors not defined for all elements in the second MultiIndex " "level of bus_sizes, please make sure that all the elements are " "included in bus_colors or in n.carriers.color") bus_sizes = bus_sizes.sort_index(level=0, sort_remaining=False) if geomap: bus_sizes *= projected_area_factor(ax, n.srid)**2 patches = [] for b_i in bus_sizes.index.levels[0]: s = bus_sizes.loc[b_i] radius = s.sum()**0.5 if radius == 0.0: ratios = s else: ratios = s/s.sum() start = 0.25 for i, ratio in ratios.iteritems(): patches.append(Wedge((x.at[b_i], y.at[b_i]), radius, 360*start, 360*(start+ratio), facecolor=bus_colors[i], alpha=bus_alpha)) start += ratio bus_collection = PatchCollection(patches, match_original=True) ax.add_collection(bus_collection) else: c = pd.Series(bus_colors, index=n.buses.index) s = pd.Series(bus_sizes, index=n.buses.index, dtype="float") if geomap: s *= projected_area_factor(ax, n.srid)**2 if bus_cmap is not None and c.dtype is np.dtype('float'): if isinstance(bus_cmap, str): bus_cmap = plt.cm.get_cmap(bus_cmap) norm = plt.Normalize(vmin=c.min(), vmax=c.max()) c = c.apply(lambda cval: bus_cmap(norm(cval))) patches = [] for b_i in s.index: radius = s.at[b_i]**0.5 patches.append(Circle((x.at[b_i], y.at[b_i]), radius, facecolor=c.at[b_i], alpha=bus_alpha)) bus_collection = PatchCollection(patches, match_original=True) ax.add_collection(bus_collection) def as_branch_series(ser): # ensure that this function always return a multiindexed series if isinstance(ser, dict) and set(ser).issubset(branch_components): return pd.concat( {c.name: pd.Series(s, index=c.df.index) for c, s in zip(n.iterate_components(ser.keys()), ser.values())}, names=['component', 'name']) elif isinstance(ser, pd.Series) and isinstance(ser.index, 
pd.MultiIndex): return ser.rename_axis(index=['component', 'name']) else: ser = pd.Series(ser, n.lines.index) return pd.concat([ser], axis=0, keys=['Line'], names=['component', 'name']).fillna(0) line_colors = as_branch_series(line_colors) line_widths = as_branch_series(line_widths) if not isinstance(line_cmap, dict): line_cmap = {'Line': line_cmap} branch_collections = [] if flow is not None: flow = (_flow_ds_from_arg(flow, n, branch_components) .pipe(as_branch_series) .div(sum(len(t.df) for t in n.iterate_components(branch_components)) + 100)) flow = flow.mul(line_widths[flow.index], fill_value=1) # update the line width, allows to set line widths separately from flows line_widths.update((5 * flow.abs()).pipe(np.sqrt)) arrows = directed_flow(n, flow, x=x, y=y, ax=ax, geomap=geomap, branch_colors=line_colors, branch_comps=branch_components, cmap=line_cmap['Line']) branch_collections.append(arrows) for c in n.iterate_components(branch_components): l_defaults = defaults_for_branches[c.name] l_widths = line_widths.get(c.name, l_defaults['width']) l_nums = None l_colors = line_colors.get(c.name, l_defaults['color']) if isinstance(l_colors, pd.Series): if issubclass(l_colors.dtype.type, np.number): l_nums = l_colors l_colors = None else: l_colors.fillna(l_defaults['color'], inplace=True) if not geometry: segments = (np.asarray(((c.df.bus0.map(x), c.df.bus0.map(y)), (c.df.bus1.map(x), c.df.bus1.map(y)))) .transpose(2, 0, 1)) else: from shapely.wkt import loads from shapely.geometry import LineString linestrings = c.df.geometry[lambda ds: ds != ''].map(loads) assert all(isinstance(ls, LineString) for ls in linestrings), ( "The WKT-encoded geometry in the 'geometry' column must be " "composed of LineStrings") segments = np.asarray(list(linestrings.map(np.asarray))) l_collection = LineCollection(segments, linewidths=l_widths, antialiaseds=(1,), colors=l_colors, transOffset=ax.transData) if l_nums is not None: l_collection.set_array(np.asarray(l_nums)) l_collection.set_cmap(line_cmap.get(c.name, None)) l_collection.autoscale() ax.add_collection(l_collection) l_collection.set_zorder(3) branch_collections.append(l_collection) bus_collection.set_zorder(4) ax.update_datalim(compute_bbox_with_margins(margin, x, y)) ax.autoscale_view() if geomap: ax.outline_patch.set_visible(False) ax.axis('off') else: ax.set_aspect('equal') ax.set_title(title) return (bus_collection,) + tuple(branch_collections)
def plot(n, margin=0.05, ax=None, geomap=True, projection=None, bus_colors='b', bus_alpha=1, line_colors={'Line':'g', 'Link':'cyan'}, bus_sizes=1e-2, line_widths={'Line':2, 'Link':2}, flow=None, layouter=None, title="", line_cmap=None, bus_cmap=None, boundaries=None, geometry=False, branch_components=['Line', 'Link'], jitter=None, color_geomap=None): """ Plot the network buses and lines using matplotlib and cartopy. Parameters ---------- margin : float Margin at the sides as proportion of distance between max/min x,y ax : matplotlib ax, defaults to plt.gca() Axis to which to plot the network geomap: bool/str, default True Switch to use Cartopy and draw geographical features. If string is passed, it will be used as a resolution argument, valid options are '10m', '50m' and '110m'. projection: cartopy.crs.Projection, defaults to None Define the projection of your geomap, only valid if cartopy is installed. If None (default) is passed the projection for cartopy is set to cartopy.crs.PlateCarree bus_colors : dict/pandas.Series Colors for the buses, defaults to "b". If bus_sizes is a pandas.Series with a Multiindex, bus_colors defaults to the n.carriers['color'] column. bus_sizes : dict/pandas.Series Sizes of bus points, defaults to 1e-2. If a multiindexed Series is passed, the function will draw pies for each bus (first index level) with segments of different color (second index level). Such a Series is ob- tained by e.g. n.generators.groupby(['bus', 'carrier']).p_nom.sum() bus_alpha : float Adds alpha channel to buses, defaults to 1. line_colors : dict/pandas.Series Colors for the lines, defaults to "g" for Lines and "cyan" for Links. Colors for branches other than Lines can be specified using a pandas Series with a MultiIndex. line_widths : dict/pandas.Series Widths of lines, defaults to 2. Widths for branches other than Lines can be specified using a pandas Series with a MultiIndex. flow : snapshot/pandas.Series/function/string Flow to be displayed in the plot, defaults to None. If an element of n.snapshots is given, the flow at this timestamp will be displayed. If an aggregation function is given, is will be applied to the total network flow via pandas.DataFrame.agg (accepts also function names). Otherwise flows can be specified by passing a pandas Series with MultiIndex including all necessary branch components. Use the line_widths argument to additionally adjust the size of the flow arrows. layouter : networkx.drawing.layout function, default None Layouting function from `networkx <https://networkx.github.io/>`_ which overrules coordinates given in ``n.buses[['x','y']]``. See `list <https://networkx.github.io/documentation/stable/reference/drawing.html#module-networkx.drawing.layout>`_ of available options. title : string Graph title line_cmap : plt.cm.ColorMap/str|dict If line_colors are floats, this color map will assign the colors. Use a dict to specify colormaps for more than one branch type. bus_cmap : plt.cm.ColorMap/str If bus_colors are floats, this color map will assign the colors boundaries : list of four floats Boundaries of the plot in format [x1,x2,y1,y2] branch_components : list of str Branch components to be plotted, defaults to Line and Link. jitter : None|float Amount of random noise to add to bus positions to distinguish overlapping buses color_geomap : dict or bool Specify colors to paint land and sea areas in. If True, it defaults to `{'ocean': 'lightblue', 'land': 'whitesmoke'}`. If no dictionary is provided, colors are white. 
Returns ------- bus_collection, branch_collection1, ... : tuple of Collections Collections for buses and branches. """ defaults_for_branches = pd.Series( {'Link': dict(color="cyan", width=2), 'Line': dict(color="b", width=2), 'Transformer': dict(color='green', width=2)} ).rename_axis('component') x, y = _get_coordinates(n, layouter=layouter) if geomap: if not cartopy_present: logger.warning("Cartopy needs to be installed to use `geomap=True`.") geomap = False if projection is None: projection = get_projection_from_crs(n.srid) if ax is None: ax = plt.gca(projection=projection) else: assert isinstance(ax, cartopy.mpl.geoaxes.GeoAxesSubplot), ( 'The passed axis is not a GeoAxesSubplot. You can ' 'create one with: \nimport cartopy.crs as ccrs \n' 'fig, ax = plt.subplots(' 'subplot_kw={"projection":ccrs.PlateCarree()})') transform = draw_map_cartopy(n, x, y, ax, boundaries, margin, geomap, color_geomap) x, y, z = ax.projection.transform_points(transform, x.values, y.values).T x, y = pd.Series(x, n.buses.index), pd.Series(y, n.buses.index) elif ax is None: ax = plt.gca() if jitter is not None: x = x + np.random.uniform(low=-jitter, high=jitter, size=len(x)) y = y + np.random.uniform(low=-jitter, high=jitter, size=len(y)) if isinstance(bus_sizes, pd.Series) and isinstance(bus_sizes.index, pd.MultiIndex): # We are drawing pies to show all the different shares assert len(bus_sizes.index.levels[0].difference(n.buses.index)) == 0, \ "The first MultiIndex level of bus_sizes must contain buses" if isinstance(bus_colors, dict): bus_colors = pd.Series(bus_colors) # case bus_colors isn't a series or dict: look in n.carriers for existent colors if not isinstance(bus_colors, pd.Series): bus_colors = n.carriers.color.dropna() assert bus_sizes.index.levels[1].isin(bus_colors.index).all(), ( "Colors not defined for all elements in the second MultiIndex " "level of bus_sizes, please make sure that all the elements are " "included in bus_colors or in n.carriers.color") bus_sizes = bus_sizes.sort_index(level=0, sort_remaining=False) if geomap: bus_sizes *= projected_area_factor(ax, n.srid)**2 patches = [] for b_i in bus_sizes.index.levels[0]: s = bus_sizes.loc[b_i] radius = s.sum()**0.5 if radius == 0.0: ratios = s else: ratios = s/s.sum() start = 0.25 for i, ratio in ratios.iteritems(): patches.append(Wedge((x.at[b_i], y.at[b_i]), radius, 360*start, 360*(start+ratio), facecolor=bus_colors[i], alpha=bus_alpha)) start += ratio bus_collection = PatchCollection(patches, match_original=True) ax.add_collection(bus_collection) else: c = pd.Series(bus_colors, index=n.buses.index) s = pd.Series(bus_sizes, index=n.buses.index, dtype="float") if geomap: s *= projected_area_factor(ax, n.srid)**2 if bus_cmap is not None and c.dtype is np.dtype('float'): if isinstance(bus_cmap, str): bus_cmap = plt.cm.get_cmap(bus_cmap) norm = plt.Normalize(vmin=c.min(), vmax=c.max()) c = c.apply(lambda cval: bus_cmap(norm(cval))) patches = [] for b_i in s.index: radius = s.at[b_i]**0.5 patches.append(Circle((x.at[b_i], y.at[b_i]), radius, facecolor=c.at[b_i], alpha=bus_alpha)) bus_collection = PatchCollection(patches, match_original=True) ax.add_collection(bus_collection) def as_branch_series(ser): # ensure that this function always return a multiindexed series if isinstance(ser, dict) and set(ser).issubset(branch_components): return pd.concat( {c.name: pd.Series(s, index=c.df.index) for c, s in zip(n.iterate_components(ser.keys()), ser.values())}, names=['component', 'name']) elif isinstance(ser, pd.Series) and isinstance(ser.index, 
pd.MultiIndex): return ser.rename_axis(index=['component', 'name']) else: ser = pd.Series(ser, n.lines.index) return pd.concat([ser], axis=0, keys=['Line'], names=['component', 'name']).fillna(0) line_colors = as_branch_series(line_colors) line_widths = as_branch_series(line_widths) if not isinstance(line_cmap, dict): line_cmap = {'Line': line_cmap} branch_collections = [] if flow is not None: flow = (_flow_ds_from_arg(flow, n, branch_components) .pipe(as_branch_series) .div(sum(len(t.df) for t in n.iterate_components(branch_components)) + 100)) flow = flow.mul(line_widths[flow.index], fill_value=1) # update the line width, allows to set line widths separately from flows line_widths.update((5 * flow.abs()).pipe(np.sqrt)) arrows = directed_flow(n, flow, x=x, y=y, ax=ax, geomap=geomap, branch_colors=line_colors, branch_comps=branch_components, cmap=line_cmap['Line']) branch_collections.append(arrows) for c in n.iterate_components(branch_components): l_defaults = defaults_for_branches[c.name] l_widths = line_widths.get(c.name, l_defaults['width']) l_nums = None l_colors = line_colors.get(c.name, l_defaults['color']) if isinstance(l_colors, pd.Series): if issubclass(l_colors.dtype.type, np.number): l_nums = l_colors l_colors = None else: l_colors.fillna(l_defaults['color'], inplace=True) if not geometry: segments = (np.asarray(((c.df.bus0.map(x), c.df.bus0.map(y)), (c.df.bus1.map(x), c.df.bus1.map(y)))) .transpose(2, 0, 1)) else: from shapely.wkt import loads from shapely.geometry import LineString linestrings = c.df.geometry[lambda ds: ds != ''].map(loads) assert all(isinstance(ls, LineString) for ls in linestrings), ( "The WKT-encoded geometry in the 'geometry' column must be " "composed of LineStrings") segments = np.asarray(list(linestrings.map(np.asarray))) l_collection = LineCollection(segments, linewidths=l_widths, antialiaseds=(1,), colors=l_colors, transOffset=ax.transData) if l_nums is not None: l_collection.set_array(np.asarray(l_nums)) l_collection.set_cmap(line_cmap.get(c.name, None)) l_collection.autoscale() ax.add_collection(l_collection) l_collection.set_zorder(3) branch_collections.append(l_collection) bus_collection.set_zorder(4) ax.update_datalim(compute_bbox_with_margins(margin, x, y)) ax.autoscale_view() if geomap: ax.outline_patch.set_visible(False) ax.axis('off') else: ax.set_aspect('equal') ax.set_title(title) return (bus_collection,) + tuple(branch_collections)
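A minimal, pandas-only sketch of the bus_sizes layout the docstring above describes; the generators table here is an illustrative stand-in for n.generators, with made-up names and values.

import pandas as pd

# Stand-in for n.generators: one row per generator with its bus and carrier.
generators = pd.DataFrame({
    'bus': ['bus0', 'bus0', 'bus1'],
    'carrier': ['wind', 'solar', 'wind'],
    'p_nom': [100.0, 50.0, 80.0],
})

# First index level = bus, second = carrier: the shape bus_sizes expects when
# each bus should be drawn as a pie with one wedge per carrier.
bus_sizes = generators.groupby(['bus', 'carrier']).p_nom.sum()
print(bus_sizes)

Passing such a Series as bus_sizes (with matching entries in bus_colors or n.carriers.color) triggers the pie-drawing branch of the function above.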
36,196
def generate(seed): reseed(seed) home_page = get_homepage() """ reate a couple scenarios that will be best for testing: * A PublicationPage with several child ArticlePages * A PublicationPage with child PublicationPages, each of which has their own ArticlePages * perhaps nested at random levels of depth? """ PublicationPageFactory.create_batch(parent=home_page, size=3) # single PublicationPage with several child Articles ArticlePageFactory.create_batch(parent=PublicationPage.objects.all()[0], size=8) # PublicationPage with child Publicationpages(Chapters) PublicationPageFactory.create_batch(parent=PublicationPage.objects.all()[1], size=3) # probably better/faster ways to do this for chapter in PublicationPage.objects.all()[1].get_children(): ArticlePageFactory.create_batch(parent=chapter, size=8)
def generate(seed): reseed(seed) home_page = get_homepage() """ Ceate a couple scenarios that will be best for testing: * A PublicationPage with several child ArticlePages * A PublicationPage with child PublicationPages, each of which has their own ArticlePages * perhaps nested at random levels of depth? """ PublicationPageFactory.create_batch(parent=home_page, size=3) # single PublicationPage with several child Articles ArticlePageFactory.create_batch(parent=PublicationPage.objects.all()[0], size=8) # PublicationPage with child Publicationpages(Chapters) PublicationPageFactory.create_batch(parent=PublicationPage.objects.all()[1], size=3) # probably better/faster ways to do this for chapter in PublicationPage.objects.all()[1].get_children(): ArticlePageFactory.create_batch(parent=chapter, size=8)
27,812
def pytest_addoption(parser: Parser) -> None: """Add options to control log capturing.""" group = parser.getgroup("logging") def add_option_ini(option, dest, default=None, type=None, **kwargs): parser.addini( dest, default=default, type=type, help="Default value for " + option ) group.addoption(option, dest=dest, **kwargs) add_option_ini( "--log-level", dest="log_level", default=None, metavar="LEVEL", help=( "Level of messages to catch/display.\n" "Not set by default, so it depends on the root/parent log handler's" ' effective level, where it is "WARNING" by default.' ), ) add_option_ini( "--log-format", dest="log_format", default=DEFAULT_LOG_FORMAT, help="Log format used by the logging module", ) add_option_ini( "--log-date-format", dest="log_date_format", default=DEFAULT_LOG_DATE_FORMAT, help="Log date format used by the logging module", ) parser.addini( "log_cli", default=False, type="bool", help='Enable log display during test run (also known as "live logging")', ) add_option_ini( "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level" ) add_option_ini( "--log-cli-format", dest="log_cli_format", default=None, help="Log format used by the logging module", ) add_option_ini( "--log-cli-date-format", dest="log_cli_date_format", default=None, help="Log date format used by the logging module", ) add_option_ini( "--log-file", dest="log_file", default=None, help="Path to a file when logging will be written to", ) add_option_ini( "--log-file-level", dest="log_file_level", default=None, help="Log file logging level", ) add_option_ini( "--log-file-format", dest="log_file_format", default=DEFAULT_LOG_FORMAT, help="Log format used by the logging module", ) add_option_ini( "--log-file-date-format", dest="log_file_date_format", default=DEFAULT_LOG_DATE_FORMAT, help="Log date format used by the logging module", ) add_option_ini( "--log-auto-indent", dest="log_auto_indent", default=None, help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", )
def pytest_addoption(parser: Parser) -> None: """Add options to control log capturing.""" group = parser.getgroup("logging") def add_option_ini(option, dest, default=None, type=None, **kwargs): parser.addini( dest, default=default, type=type, help="Default value for " + option ) group.addoption(option, dest=dest, **kwargs) add_option_ini( "--log-level", dest="log_level", default=None, metavar="LEVEL", help=( "Level of messages to catch/display." "Not set by default, so it depends on the root/parent log handler's" ' effective level, where it is "WARNING" by default.' ), ) add_option_ini( "--log-format", dest="log_format", default=DEFAULT_LOG_FORMAT, help="Log format used by the logging module", ) add_option_ini( "--log-date-format", dest="log_date_format", default=DEFAULT_LOG_DATE_FORMAT, help="Log date format used by the logging module", ) parser.addini( "log_cli", default=False, type="bool", help='Enable log display during test run (also known as "live logging")', ) add_option_ini( "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level" ) add_option_ini( "--log-cli-format", dest="log_cli_format", default=None, help="Log format used by the logging module", ) add_option_ini( "--log-cli-date-format", dest="log_cli_date_format", default=None, help="Log date format used by the logging module", ) add_option_ini( "--log-file", dest="log_file", default=None, help="Path to a file when logging will be written to", ) add_option_ini( "--log-file-level", dest="log_file_level", default=None, help="Log file logging level", ) add_option_ini( "--log-file-format", dest="log_file_format", default=DEFAULT_LOG_FORMAT, help="Log format used by the logging module", ) add_option_ini( "--log-file-date-format", dest="log_file_date_format", default=DEFAULT_LOG_DATE_FORMAT, help="Log date format used by the logging module", ) add_option_ini( "--log-auto-indent", dest="log_auto_indent", default=None, help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", )
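For illustration only (values are arbitrary, not defaults), each option registered above can be set persistently through its ini key:

[pytest]
log_cli = true
log_cli_level = INFO
log_file = pytest.log
log_file_level = DEBUG

or overridden for a single run on the command line, e.g. pytest --log-cli-level=DEBUG --log-auto-indent=on.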
58,410
def benchmark(n): a={'list': [1,2,3,43], 't': (1,2,3), 'str': 'hello', 'subdict': {'a': True}} dc=A('hello', [1,2,3], True) for ii in range(n): for jj in range(60): _ = copy.deepcopy(a) for s in ['red', 'blue', 'green']: dc.string = s for ii in range(10): dc.lst[0] = ii for b in [True, False]: dc.boolean=b _ = copy.deepcopy(dc)
def benchmark(n): a = {'list': [1,2,3,43], 't': (1,2,3), 'str': 'hello', 'subdict': {'a': True}} dc = A('hello', [1,2,3], True) for ii in range(n): for jj in range(60): _ = copy.deepcopy(a) for s in ['red', 'blue', 'green']: dc.string = s for ii in range(10): dc.lst[0] = ii for b in [True, False]: dc.boolean = b _ = copy.deepcopy(dc)
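The class A exercised above is not part of the snippet; a hypothetical stand-in with the three attributes the loop touches (string, lst, boolean) is enough to run it.

import copy  # benchmark() above relies on copy.deepcopy
from dataclasses import dataclass
from typing import List

@dataclass
class A:
    string: str
    lst: List[int]
    boolean: bool

if __name__ == '__main__':
    benchmark(5)  # small n, just to exercise the deepcopy loops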
45,923
def _draw_pixel( image: torch.Tensor, x: int, y: int, color: torch.Tensor, ): r"""Draws a pixel into an image. Args: image: the input image to where to draw the lines with shape (C,H,W). x: the x coordinate of the pixel. y: the y coordinate of the pixel. color: the color of the pixel with shape (3). Return: Nothing is returned """ image[:, y, x] = color
def _draw_pixel( image: torch.Tensor, x: int, y: int, color: torch.Tensor, ) -> None: r"""Draws a pixel into an image. Args: image: the input image to where to draw the lines with shape (C,H,W). x: the x coordinate of the pixel. y: the y coordinate of the pixel. color: the color of the pixel with shape (3). Return: Nothing is returned """ image[:, y, x] = color
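A minimal usage sketch, assuming the helper above is in scope: paint one red pixel into a small RGB tensor and check that the write landed at (x, y).

import torch

image = torch.zeros(3, 4, 4)            # (C, H, W)
red = torch.tensor([1.0, 0.0, 0.0])     # color with shape (3,)
_draw_pixel(image, x=2, y=1, color=red)
assert torch.equal(image[:, 1, 2], red)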
27,750
def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None: durations = terminalreporter.config.option.durations durations_min = terminalreporter.config.option.durations_min verbose = terminalreporter.config.getvalue("verbose") if durations is None: return tr = terminalreporter dlist = [] for replist in tr.stats.values(): for rep in replist: if hasattr(rep, "duration"): dlist.append(rep) if not dlist: return dlist.sort(key=lambda x: x.duration) # type: ignore[no-any-return] dlist.reverse() if not durations: tr.write_sep("=", "slowest durations") else: tr.write_sep("=", "slowest %s durations" % durations) dlist = dlist[:durations] for i, rep in enumerate(dlist): if verbose < 2 and rep.duration < durations_min: tr.write_line("") tr.write_line( "(%s durations < %gs hidden. Use -vv to show these durations.)" % (len(dlist) - i, durations_min) ) break tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")
def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None: durations = terminalreporter.config.option.durations durations_min = terminalreporter.config.option.durations_min verbose = terminalreporter.config.getvalue("verbose") if durations is None: return tr = terminalreporter dlist = [] for replist in tr.stats.values(): for rep in replist: if hasattr(rep, "duration"): dlist.append(rep) if not dlist: return dlist.sort(key=lambda x: x.duration, reverse=True) # type: ignore[no-any-return] if not durations: tr.write_sep("=", "slowest durations") else: tr.write_sep("=", "slowest %s durations" % durations) dlist = dlist[:durations] for i, rep in enumerate(dlist): if verbose < 2 and rep.duration < durations_min: tr.write_line("") tr.write_line( "(%s durations < %gs hidden. Use -vv to show these durations.)" % (len(dlist) - i, durations_min) ) break tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")
32,872
def _get_path(request): """Get path and replace path parameter values with names if route exists.""" path = request.path try: match_info = request.match_info except sanic.exceptions.SanicException: return path for key, value in match_info.items(): try: value = str(value) except Exception: # Best effort continue path = path.replace(str(value), f"<{key}>") return path
def _get_path(request): """Get path and replace path parameter values with names if route exists.""" path = request.path try: match_info = request.match_info except sanic.exceptions.SanicException: return path for key, value in match_info.items(): try: value = str(value) except Exception: # Best effort continue path = path.replace(value, f"<{key}>") return path
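An illustrative call with a hypothetical stand-in for the Sanic request object (only the two attributes the helper reads), showing concrete parameter values being replaced by <name> placeholders.

from types import SimpleNamespace

fake_request = SimpleNamespace(
    path='/users/42/orders/abc',
    match_info={'user_id': 42, 'order_id': 'abc'},
)
print(_get_path(fake_request))  # -> /users/<user_id>/orders/<order_id>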
13,085
def test_grant_refund_update_by_user_missing_input( staff_api_client, staff_user, order, permission_manage_orders ): # given current_reason = "Granted refund reason." current_amount = Decimal("10.00") granted_refund = order.granted_refunds.create( amount_value=current_amount, currency=order.currency, reason=current_reason, user=staff_user, ) variables = { "id": to_global_id_or_none(granted_refund), "input": {}, } staff_api_client.user.user_permissions.add(permission_manage_orders) # when response = staff_api_client.post_graphql(ORDER_GRANT_REFUND_UPDATE, variables) # then content = get_graphql_content_from_response(response) errors = content["data"]["orderGrantRefundUpdate"]["errors"] assert len(errors) == 1 assert errors[0]["code"] == "REQUIRED" assert errors[0]["field"] == "input"
def test_grant_refund_update_by_user_missing_input( staff_api_client, staff_user, order, permission_manage_orders ): # given current_reason = "Granted refund reason." current_amount = Decimal("10.00") granted_refund = order.granted_refunds.create( amount_value=current_amount, currency=order.currency, reason=current_reason, user=staff_user, ) variables = { "id": to_global_id_or_none(granted_refund), "input": {}, } staff_api_client.user.user_permissions.add(permission_manage_orders) # when response = staff_api_client.post_graphql(ORDER_GRANT_REFUND_UPDATE, variables) # then content = get_graphql_content_from_response(response) errors = content["data"]["orderGrantRefundUpdate"]["errors"] assert len(errors) == 1 assert errors[0]["code"] == OrderGrandRefundUpdateErrorCode.REQUIRED.name assert errors[0]["field"] == "input"
31,676
def main(): args = demisto.args() demisto.info('hi1') query = args.get('query') size = int(args.get('size')) demisto.info('hi') raw_result = demisto.executeCommand("SearchIncidentsV2", {"query": query, "size": size}) incidents_len = len(raw_result[0].get("Contents", [""])[0].get("Contents", {}).get("data")) outputs = { 'Query': query, 'Size': incidents_len, 'ConditionMet': incidents_len >= size } return_results(CommandResults(outputs=outputs, outputs_key_field='Query', outputs_prefix='IncidentsCheck'))
def main(): args = demisto.args() demisto.info('hi1') query = args.get('query') size = int(args.get('size')) demisto.info('hi') raw_result = demisto.executeCommand("SearchIncidentsV2", {"query": query, "size": size}) incidents_len = len(raw_result[0].get("Contents", [{}])[0].get("Contents", {}).get("data")) outputs = { 'Query': query, 'Size': incidents_len, 'ConditionMet': incidents_len >= size } return_results(CommandResults(outputs=outputs, outputs_key_field='Query', outputs_prefix='IncidentsCheck'))
55,384
def _log_accuracy_score_classifier(trained_model, fit_args, fit_kwargs): """ compute and log accuracy_score for classifier https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html By default, we choose the parameter `normalize` to be `True` to output the percentage of accuracy as opposed to `False` that outputs the absolute correct number of sample prediction. 1. Extract X and y_true from fit_args and fit_kwargs. 2. If the sample_weight argument exists in fit_func (accuracy_score by default has sample_weight), extract it from fit_args or fit_kwargs as (y_true, y_pred, normalize, sample_weight), otherwise as (y_true, y_pred, normalize) 3. Compute and log accuracy_score :param trained_model: the already trained classifier :param fit_args: Positional arguments given to fit_func. :param fit_kwargs: Keyword arguments given to fit_func. :returns: NULL """ try: fit_arg_names = _get_arg_names(trained_model.fit) # In most cases, X_var_name and y_var_name become "X" and "y", respectively. # However, certain sklearn models use different variable names for X and y. # See: https://scikit-learn.org/stable/modules/generated/sklearn.covariance.GraphicalLasso.html#sklearn.covariance.GraphicalLasso.score # noqa: E501 X_var_name, y_var_name = fit_arg_names[:2] X, y_true = _get_Xy(fit_args, fit_kwargs, X_var_name, y_var_name) y_pred = trained_model.predict(X) acc_score_args = [] if _SAMPLE_WEIGHT in fit_arg_names: sample_weight = _get_sample_weight(fit_arg_names, fit_args, fit_kwargs) acc_score_args = y_true, y_pred, True, sample_weight else: acc_score_args = y_true, y_pred, True acc_score = sklearn.metrics.accuracy_score(*acc_score_args) except Exception as e: # pylint: disable=broad-except msg = ( sklearn.metrics.accuracy_score.__qualname__ + " failed. The 'accuracy_score' metric will not be recorded. Scoring error: " + str(e) ) _logger.warning(msg) else: try_mlflow_log(mlflow.log_metric, "accuracy_score", acc_score)
def _log_accuracy_score_classifier(trained_model, fit_args, fit_kwargs): """ compute and log accuracy_score for classifier https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html By default, we choose the parameter `normalize` to be `True` to output the percentage of accuracy as opposed to `False` that outputs the absolute correct number of sample prediction. 1. Extract X and y_true from fit_args and fit_kwargs. 2. If the sample_weight argument exists in fit_func (accuracy_score by default has sample_weight), extract it from fit_args or fit_kwargs as (y_true, y_pred, normalize, sample_weight), otherwise as (y_true, y_pred, normalize) 3. Compute and log accuracy_score :param trained_model: the already trained classifier :param fit_args: Positional arguments given to fit_func. :param fit_kwargs: Keyword arguments given to fit_func. :returns: NULL """ try: fit_arg_names = _get_arg_names(trained_model.fit) # In most cases, X_var_name and y_var_name become "X" and "y", respectively. # However, certain sklearn models use different variable names for X and y. # See: https://scikit-learn.org/stable/modules/generated/sklearn.covariance.GraphicalLasso.html#sklearn.covariance.GraphicalLasso.score # noqa: E501 X_var_name, y_var_name = fit_arg_names[:2] X, y_true = _get_Xy(fit_args, fit_kwargs, X_var_name, y_var_name) y_pred = trained_model.predict(X) sample_weight = _get_sample_weight(fit_arg_names, fit_args, fit_kwargs) if _SAMPLE_WEIGHT in fit_arg_names else None acc_score = sklearn.metrics.accuracy_score(y_true, y_pred, normalize=True, sample_weight=sample_weight) except Exception as e: # pylint: disable=broad-except msg = ( sklearn.metrics.accuracy_score.__qualname__ + " failed. The 'accuracy_score' metric will not be recorded. Scoring error: " + str(e) ) _logger.warning(msg) else: try_mlflow_log(mlflow.log_metric, "accuracy_score", acc_score)
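A self-contained illustration of the metric call the wrapper above ends up making: normalize=True yields a fraction, normalize=False a raw count, and sample_weight (when the estimator's fit received one) reweights each sample.

import sklearn.metrics

y_true = [0, 1, 1, 0]
y_pred = [0, 1, 0, 0]
print(sklearn.metrics.accuracy_score(y_true, y_pred, normalize=True))    # 0.75
print(sklearn.metrics.accuracy_score(y_true, y_pred, normalize=False))   # 3
print(sklearn.metrics.accuracy_score(y_true, y_pred,
                                     sample_weight=[1, 1, 5, 1]))        # 0.375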
52,484
def get_app_name(bench_path, repo_name): app_name = None apps_path = os.path.join(os.path.abspath(bench_path), 'apps') config_path = os.path.join(apps_path, repo_name, 'setup.cfg') if os.path.exists(config_path): config = read_configuration(config_path) app_name = config.get('metadata', {}).get('name') if not app_name: # retrieve app name from setup.py as fallback app_path = os.path.join(apps_path, repo_name, 'setup.py') with open(app_path, 'rb') as f: app_name = re.search(r'name\s*=\s*[\'"](.*)[\'"]', f.read().decode('utf-8')).group(1) if app_name and repo_name != app_name: os.rename(os.path.join(apps_path, repo_name), os.path.join(apps_path, app_name)) return app_name
def get_app_name(bench_path, repo_name): app_name = None apps_path = os.path.join(os.path.abspath(bench_path), 'apps') config_path = os.path.join(apps_path, repo_name, 'setup.cfg') try: config = read_configuration(config_path) app_name = config.get('metadata', {}).get('name') except distutils.errors.DistutilsFileError: # retrieve app name from setup.py as fallback app_path = os.path.join(apps_path, repo_name, 'setup.py') with open(app_path, 'rb') as f: app_name = re.search(r'name\s*=\s*[\'"](.*)[\'"]', f.read().decode('utf-8')).group(1) if app_name and repo_name != app_name: os.rename(os.path.join(apps_path, repo_name), os.path.join(apps_path, app_name)) return app_name
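A quick, self-contained check of the setup.py fallback regex used above, run against a made-up file body.

import re

setup_py = b"from setuptools import setup\nsetup(\n    name='frappe_todo',\n    version='0.0.1',\n)\n"
match = re.search(r'name\s*=\s*[\'"](.*)[\'"]', setup_py.decode('utf-8'))
print(match.group(1))  # frappe_todo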
37,604
def call( target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None, name: Optional[str] = None, channels: Optional[List[chans.Channel]] = None, value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None, **kw_params: ParameterValueType, ): """Call the subroutine within the currently active builder context with arbitrary parameters which will be assigned to the target program. .. note:: If the ``target`` program is instance of schedule or quantum cirucit, it will be assigned as :class:`~qiskit.pulse.instructions.Call` instruction. Otherwise :class:`~qiskit.pulse.instructions.Reference` instruction is added and ``target`` is separately registered to the references. Examples: 1. Call with substantial program. .. code-block:: python from qiskit import circuit, pulse, schedule, transpile from qiskit.test.mock import FakeOpenPulse2Q backend = FakeOpenPulse2Q() qc = circuit.QuantumCircuit(2) qc.cx(0, 1) qc_transpiled = transpile(qc, optimization_level=3) sched = schedule(qc_transpiled, backend) with pulse.build(backend) as pulse_prog: pulse.call(sched) pulse.call(qc) This function can optionally take parameter dictionary with the parameterized target program. .. code-block:: python from qiskit import circuit, pulse amp = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0)) with pulse.build() as main_prog: pulse.call(subroutine, amp=0.1) pulse.call(subroutine, amp=0.3) If there is any parameter name collision, you can distinguish them by specifying each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be updated with the same value. .. code-block:: python from qiskit import circuit, pulse amp1 = circuit.Parameter('amp') amp2 = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0)) pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1)) with pulse.build() as main_prog: pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2}) 2. Call with unassigned program. .. code-block:: python qiskit import pulse with pulse.build() as main_prog: ref_key = "my_subroutine" pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)]) with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0)) main_prog.assign_reference(ref_key=ref_key, schedule=subroutine) When you call without actual program, you can assign the program afterwards through the :meth:`ScheduleBlock.assign_reference` method. Args: target: Target circuit or pulse schedule to call. If this program is not provided, both ``name`` and ``channels`` should be provided instead. name: Name of subroutine if defined. channels: Optional. Channels associated to the subroutine. value_dict: Optional. Local scoped parameters assigned to the subroutine. If this dictionary is provided, the ``target`` program is copied and then stored in the main built schedule with having parameters assigned. This dictionary is keyed on the :class:`~.Parameter` object, thus parameter name collision can be avoided. This option is valid only when the subroutine is called with ``target``. kw_params: Alternative way to provide local scoped parameters. Since this is keyed on the string parameter name, the parameters having the same name are all updated together. If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter` object instead. Raises: exceptions.PulseError: If the input ``target`` type is not supported. 
exceptions.PulseError: Target program is empty and name and channels are not both provided. exceptions.PulseError: Subroutine is called by name and channels but local scoped parameters are also provided. """ if target is None: if value_dict is not None or any(kw_params): raise exceptions.PulseError( "Parameters are provided without target program. " "These parameters cannot be assigned." ) if name is None or channels is None: raise exceptions.PulseError( "Subroutine name and channels are not both provided. " "Please call subroutine with target program, or both name and channels." ) _active_builder().append_reference(reference_key=name, channels=channels) else: if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)): raise exceptions.PulseError( f'Target of type "{target.__class__.__name__}" is not supported.' ) _active_builder().call_subroutine( subroutine=target, name=name, value_dict=value_dict, **kw_params )
def call( target: Optional[Union[circuit.QuantumCircuit, Schedule, ScheduleBlock]] = None, name: Optional[str] = None, channels: Optional[List[chans.Channel]] = None, value_dict: Optional[Dict[ParameterValueType, ParameterValueType]] = None, **kw_params: ParameterValueType, ): """Call the subroutine within the currently active builder context with arbitrary parameters which will be assigned to the target program. .. note:: If the ``target`` program is instance of schedule or quantum cirucit, it will be assigned as :class:`~qiskit.pulse.instructions.Call` instruction. Otherwise :class:`~qiskit.pulse.instructions.Reference` instruction is added and ``target`` is separately registered to the references. Examples: 1. Call with a target program. .. code-block:: python from qiskit import circuit, pulse, schedule, transpile from qiskit.test.mock import FakeOpenPulse2Q backend = FakeOpenPulse2Q() qc = circuit.QuantumCircuit(2) qc.cx(0, 1) qc_transpiled = transpile(qc, optimization_level=3) sched = schedule(qc_transpiled, backend) with pulse.build(backend) as pulse_prog: pulse.call(sched) pulse.call(qc) This function can optionally take parameter dictionary with the parameterized target program. .. code-block:: python from qiskit import circuit, pulse amp = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp, 40), pulse.DriveChannel(0)) with pulse.build() as main_prog: pulse.call(subroutine, amp=0.1) pulse.call(subroutine, amp=0.3) If there is any parameter name collision, you can distinguish them by specifying each parameter object as a python dictionary. Otherwise ``amp1`` and ``amp2`` will be updated with the same value. .. code-block:: python from qiskit import circuit, pulse amp1 = circuit.Parameter('amp') amp2 = circuit.Parameter('amp') with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, amp1, 40), pulse.DriveChannel(0)) pulse.play(pulse.Gaussian(160, amp2, 40), pulse.DriveChannel(1)) with pulse.build() as main_prog: pulse.call(subroutine, value_dict={amp1: 0.1, amp2: 0.2}) 2. Call with unassigned program. .. code-block:: python qiskit import pulse with pulse.build() as main_prog: ref_key = "my_subroutine" pulse.call(name=ref_key, channels=[pulse.DriveChannel(0)]) with pulse.build() as subroutine: pulse.play(pulse.Gaussian(160, 0.1, 40), pulse.DriveChannel(0)) main_prog.assign_reference(ref_key=ref_key, schedule=subroutine) When you call without actual program, you can assign the program afterwards through the :meth:`ScheduleBlock.assign_reference` method. Args: target: Target circuit or pulse schedule to call. If this program is not provided, both ``name`` and ``channels`` should be provided instead. name: Name of subroutine if defined. channels: Optional. Channels associated to the subroutine. value_dict: Optional. Local scoped parameters assigned to the subroutine. If this dictionary is provided, the ``target`` program is copied and then stored in the main built schedule with having parameters assigned. This dictionary is keyed on the :class:`~.Parameter` object, thus parameter name collision can be avoided. This option is valid only when the subroutine is called with ``target``. kw_params: Alternative way to provide local scoped parameters. Since this is keyed on the string parameter name, the parameters having the same name are all updated together. If you want to avoid name collision, use ``value_dict`` with :class:`~.Parameter` object instead. Raises: exceptions.PulseError: If the input ``target`` type is not supported. 
exceptions.PulseError: Target program is empty and name and channels are not both provided. exceptions.PulseError: Subroutine is called by name and channels but local scoped parameters are also provided. """ if target is None: if value_dict is not None or any(kw_params): raise exceptions.PulseError( "Parameters are provided without target program. " "These parameters cannot be assigned." ) if name is None or channels is None: raise exceptions.PulseError( "Subroutine name and channels are not both provided. " "Please call subroutine with target program, or both name and channels." ) _active_builder().append_reference(reference_key=name, channels=channels) else: if not isinstance(target, (circuit.QuantumCircuit, Schedule, ScheduleBlock)): raise exceptions.PulseError( f'Target of type "{target.__class__.__name__}" is not supported.' ) _active_builder().call_subroutine( subroutine=target, name=name, value_dict=value_dict, **kw_params )
31,824
def main() -> None: params = {k: v for k, v in demisto.params().items() if v is not None} params['indicator_type'] = FeedIndicatorType.File params['feed_name_to_config'] = { 'File': { 'url': params.get("url") + "/api/v1/", 'extractor': "data", 'indicator': 'sha256_hash', 'indicator_type': FeedIndicatorType.File, 'relation_name': EntityRelationship.Relationships.INDICATOR_OF, 'reverse_relationship_name': EntityRelationship.Relationships.INDICATED_BY, 'relation_entity_b': 'signature', 'relation_entity_b_type': 'Malware', 'create_relations_function': custom_build_relationships, 'mapping': { 'sha256_hash': 'sha256', 'sha1_hash': 'sha1', 'md5_hash': 'md5', 'first_seen': 'firstseenbyfeed', 'last_seen': 'lastseenbyfeed', 'file_name': 'Associated File Names', 'file_size': 'size', 'file_type': 'filetype', 'reporter': 'reported_by', 'signature': 'malware_family', 'imphash': 'imphash', 'ssdeep': 'ssdeep', 'tags': 'tags' } } } params['data'] = {'query': 'get_recent', 'selector': 'time'} feed_main(params, 'MalwareBazaar Feed', 'malwarebazzar')
def main() -> None: params = {k: v for k, v in demisto.params().items() if v is not None} params['indicator_type'] = FeedIndicatorType.File params['feed_name_to_config'] = { 'File': { 'url': f'{params.get("url")}/api/v1/', 'extractor': "data", 'indicator': 'sha256_hash', 'indicator_type': FeedIndicatorType.File, 'relation_name': EntityRelationship.Relationships.INDICATOR_OF, 'reverse_relationship_name': EntityRelationship.Relationships.INDICATED_BY, 'relation_entity_b': 'signature', 'relation_entity_b_type': 'Malware', 'create_relations_function': custom_build_relationships, 'mapping': { 'sha256_hash': 'sha256', 'sha1_hash': 'sha1', 'md5_hash': 'md5', 'first_seen': 'firstseenbyfeed', 'last_seen': 'lastseenbyfeed', 'file_name': 'Associated File Names', 'file_size': 'size', 'file_type': 'filetype', 'reporter': 'reported_by', 'signature': 'malware_family', 'imphash': 'imphash', 'ssdeep': 'ssdeep', 'tags': 'tags' } } } params['data'] = {'query': 'get_recent', 'selector': 'time'} feed_main(params, 'MalwareBazaar Feed', 'malwarebazzar')
4,878
def test_normalize_kwarg_pie(): fig,ax = plt.subplots() x=[0.3,0.3,0.1] t1 = ax.pie(x=x, normalize=True) assert abs(t1[0][-1].theta2 - 360.) < 1e-3 t2 = ax.pie(x=x, normalize=False) assert abs(t2[0][-1].theta2 - 360.) > 1e-3
def test_normalize_kwarg_pie(): fig,ax = plt.subplots() x=[0.3, 0.3, 0.1] t1 = ax.pie(x=x, normalize=True) assert abs(t1[0][-1].theta2 - 360.) < 1e-3 t2 = ax.pie(x=x, normalize=False) assert abs(t2[0][-1].theta2 - 360.) > 1e-3
24,797
def main_two(): """ do nothing """ BadClass() # [abstract-class-instantiated]
def main_two(): """ do nothing """ BadClassTwo() # [abstract-class-instantiated]
55,351
def PanopticNet(backbone, input_shape, inputs=None, backbone_levels=['C3', 'C4', 'C5'], pyramid_levels=['P3', 'P4', 'P5', 'P6', 'P7'], create_pyramid_features=__create_pyramid_features, create_semantic_head=__create_semantic_head, frames_per_batch=1, temporal_mode=None, num_semantic_heads=1, num_semantic_classes=[3], required_channels=3, norm_method='whole_image', pooling=None, location=True, use_imagenet=True, lite=False, upsample_type='upsampling2d', interpolation='bilinear', name='panopticnet', **kwargs): """Constructs a mrcnn model using a backbone from keras-applications. Args: backbone (str): Name of backbone to use. input_shape (tuple): The shape of the input data. backbone_levels (list): The backbone levels to be used. to create the feature pyramid. Defaults to ['C3', 'C4', 'C5']. pyramid_levels (list): Pyramid levels to use. Defaults to ['P3','P4','P5','P6','P7'] create_pyramid_features (function): Function to get the pyramid features from the backbone. create_semantic_head (function): Function to build a semantic head submodel. frames_per_batch (int): Defaults to 1. temporal_mode: Mode of temporal convolution. Choose from {'conv','lstm','gru', None}. Defaults to None. num_semantic_heads (int): Defaults to 1. num_semantic_classes (list): Defaults to [3]. norm_method (str): ImageNormalization mode to use. Defaults to 'whole_image'. location (bool): Whether to include location data. Defaults to True use_imagenet (bool): Whether to load imagenet-based pretrained weights. lite (bool): Whether to use a depthwise conv in the feature pyramid rather than regular conv. Defaults to False. upsample_type (str): Choice of upsampling layer to use from ['upsamplelike', 'upsampling2d', 'upsampling3d']. Defaults to 'upsampling2d'. interpolation (str): Choice of interpolation mode for upsampling layers from ['bilinear', 'nearest']. Defaults to bilinear. pooling (str): optional pooling mode for feature extraction when include_top is False. - None means that the output of the model will be the 4D tensor output of the last convolutional layer. - 'avg' means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - 'max' means that global max pooling will be applied. required_channels (int): The required number of channels of the backbone. 3 is the default for all current backbones. kwargs (dict): Other standard inputs for retinanet_mask. Raises: ValueError: temporal_mode not 'conv', 'lstm', 'gru' or None Returns: tensorflow.keras.Model: Panoptic model with a backbone. """ channel_axis = 1 if K.image_data_format() == 'channels_first' else -1 conv = Conv3D if frames_per_batch > 1 else Conv2D conv_kernel = (1, 1, 1) if frames_per_batch > 1 else (1, 1) # Check input to __merge_temporal_features acceptable_modes = {'conv', 'lstm', 'gru', None} if temporal_mode is not None: temporal_mode = str(temporal_mode).lower() if temporal_mode not in acceptable_modes: raise ValueError('temporal_mode {} not supported. Please choose ' 'from {}.'.format(temporal_mode, acceptable_modes)) # TODO only works for 2D: do we check for 3D as well? # What are the requirements for 3D data? 
img_shape = input_shape[1:] if channel_axis == 1 else input_shape[:-1] if img_shape[0] != img_shape[1]: raise ValueError('Input data must be square, got dimensions {}'.format( img_shape)) if not math.log(img_shape[0], 2).is_integer(): raise ValueError('Input data dimensions must be a power of 2, ' 'got {}'.format(img_shape[0])) # Check input to interpolation acceptable_interpolation = {'bilinear', 'nearest'} if interpolation not in acceptable_interpolation: raise ValueError('Interpolation mode "{}" not supported. ' 'Choose from {}.'.format( interpolation, list(acceptable_interpolation))) if inputs is None: if frames_per_batch > 1: if channel_axis == 1: input_shape_with_time = tuple( [input_shape[0], frames_per_batch] + list(input_shape)[1:]) else: input_shape_with_time = tuple( [frames_per_batch] + list(input_shape)) inputs = Input(shape=input_shape_with_time, name='input_0') else: inputs = Input(shape=input_shape, name='input_0') # Normalize input images if norm_method is None: norm = inputs else: if frames_per_batch > 1: norm = TimeDistributed(ImageNormalization2D( norm_method=norm_method, name='norm'), name='td_norm')(inputs) else: norm = ImageNormalization2D(norm_method=norm_method, name='norm')(inputs) # Add location layer if location: if frames_per_batch > 1: # TODO: TimeDistributed is incompatible with channels_first loc = TimeDistributed(Location2D(in_shape=input_shape, name='location'), name='td_location')(norm) else: loc = Location2D(in_shape=input_shape, name='location')(norm) concat = Concatenate(axis=channel_axis, name='concatenate_location')([norm, loc]) else: concat = norm # Force the channel size for backbone input to be `required_channels` fixed_inputs = conv(required_channels, conv_kernel, strides=1, padding='same', name='conv_channels')(concat) # Force the input shape axis = 0 if K.image_data_format() == 'channels_first' else -1 fixed_input_shape = list(input_shape) fixed_input_shape[axis] = required_channels fixed_input_shape = tuple(fixed_input_shape) model_kwargs = { 'include_top': False, 'weights': None, 'input_shape': fixed_input_shape, 'pooling': pooling } _, backbone_dict = get_backbone(backbone, fixed_inputs, use_imagenet=use_imagenet, frames_per_batch=frames_per_batch, return_dict=True, **model_kwargs) backbone_dict_reduced = {k: backbone_dict[k] for k in backbone_dict if k in backbone_levels} ndim = 2 if frames_per_batch == 1 else 3 pyramid_dict = create_pyramid_features(backbone_dict_reduced, ndim=ndim, lite=lite, interpolation=interpolation, upsample_type=upsample_type) features = [pyramid_dict[key] for key in pyramid_levels] if frames_per_batch > 1: temporal_features = [__merge_temporal_features(f, mode=temporal_mode, frames_per_batch=frames_per_batch) for f in features] for f, k in zip(temporal_features, pyramid_levels): pyramid_dict[k] = f semantic_levels = [int(re.findall(r'\d+', k)[0]) for k in pyramid_dict] target_level = min(semantic_levels) semantic_head_list = [] for i in range(num_semantic_heads): semantic_head_list.append(create_semantic_head( pyramid_dict, n_classes=num_semantic_classes[i], input_target=inputs, target_level=target_level, semantic_id=i, ndim=ndim, upsample_type=upsample_type, interpolation=interpolation, **kwargs)) outputs = semantic_head_list model = Model(inputs=inputs, outputs=outputs, name=name) return model
def PanopticNet(backbone, input_shape, inputs=None, backbone_levels=['C3', 'C4', 'C5'], pyramid_levels=['P3', 'P4', 'P5', 'P6', 'P7'], create_pyramid_features=__create_pyramid_features, create_semantic_head=__create_semantic_head, frames_per_batch=1, temporal_mode=None, num_semantic_heads=1, num_semantic_classes=[3], required_channels=3, norm_method='whole_image', pooling=None, location=True, use_imagenet=True, lite=False, upsample_type='upsampling2d', interpolation='bilinear', name='panopticnet', **kwargs): """Constructs a mrcnn model using a backbone from keras-applications. Args: backbone (str): Name of backbone to use. input_shape (tuple): The shape of the input data. backbone_levels (list): The backbone levels to be used. to create the feature pyramid. Defaults to ['C3', 'C4', 'C5']. pyramid_levels (list): Pyramid levels to use. Defaults to ['P3','P4','P5','P6','P7'] create_pyramid_features (function): Function to get the pyramid temporal_features = [__merge_temporal_features(f, mode=temporal_mode, frames_per_batch=frames_per_batch) create_semantic_head (function): Function to build a semantic head submodel. frames_per_batch (int): Defaults to 1. temporal_mode: Mode of temporal convolution. Choose from {'conv','lstm','gru', None}. Defaults to None. num_semantic_heads (int): Defaults to 1. num_semantic_classes (list): Defaults to [3]. norm_method (str): ImageNormalization mode to use. Defaults to 'whole_image'. location (bool): Whether to include location data. Defaults to True use_imagenet (bool): Whether to load imagenet-based pretrained weights. lite (bool): Whether to use a depthwise conv in the feature pyramid rather than regular conv. Defaults to False. upsample_type (str): Choice of upsampling layer to use from ['upsamplelike', 'upsampling2d', 'upsampling3d']. Defaults to 'upsampling2d'. interpolation (str): Choice of interpolation mode for upsampling layers from ['bilinear', 'nearest']. Defaults to bilinear. pooling (str): optional pooling mode for feature extraction when include_top is False. - None means that the output of the model will be the 4D tensor output of the last convolutional layer. - 'avg' means that global average pooling will be applied to the output of the last convolutional layer, and thus the output of the model will be a 2D tensor. - 'max' means that global max pooling will be applied. required_channels (int): The required number of channels of the backbone. 3 is the default for all current backbones. kwargs (dict): Other standard inputs for retinanet_mask. Raises: ValueError: temporal_mode not 'conv', 'lstm', 'gru' or None Returns: tensorflow.keras.Model: Panoptic model with a backbone. """ channel_axis = 1 if K.image_data_format() == 'channels_first' else -1 conv = Conv3D if frames_per_batch > 1 else Conv2D conv_kernel = (1, 1, 1) if frames_per_batch > 1 else (1, 1) # Check input to __merge_temporal_features acceptable_modes = {'conv', 'lstm', 'gru', None} if temporal_mode is not None: temporal_mode = str(temporal_mode).lower() if temporal_mode not in acceptable_modes: raise ValueError('temporal_mode {} not supported. Please choose ' 'from {}.'.format(temporal_mode, acceptable_modes)) # TODO only works for 2D: do we check for 3D as well? # What are the requirements for 3D data? 
img_shape = input_shape[1:] if channel_axis == 1 else input_shape[:-1] if img_shape[0] != img_shape[1]: raise ValueError('Input data must be square, got dimensions {}'.format( img_shape)) if not math.log(img_shape[0], 2).is_integer(): raise ValueError('Input data dimensions must be a power of 2, ' 'got {}'.format(img_shape[0])) # Check input to interpolation acceptable_interpolation = {'bilinear', 'nearest'} if interpolation not in acceptable_interpolation: raise ValueError('Interpolation mode "{}" not supported. ' 'Choose from {}.'.format( interpolation, list(acceptable_interpolation))) if inputs is None: if frames_per_batch > 1: if channel_axis == 1: input_shape_with_time = tuple( [input_shape[0], frames_per_batch] + list(input_shape)[1:]) else: input_shape_with_time = tuple( [frames_per_batch] + list(input_shape)) inputs = Input(shape=input_shape_with_time, name='input_0') else: inputs = Input(shape=input_shape, name='input_0') # Normalize input images if norm_method is None: norm = inputs else: if frames_per_batch > 1: norm = TimeDistributed(ImageNormalization2D( norm_method=norm_method, name='norm'), name='td_norm')(inputs) else: norm = ImageNormalization2D(norm_method=norm_method, name='norm')(inputs) # Add location layer if location: if frames_per_batch > 1: # TODO: TimeDistributed is incompatible with channels_first loc = TimeDistributed(Location2D(in_shape=input_shape, name='location'), name='td_location')(norm) else: loc = Location2D(in_shape=input_shape, name='location')(norm) concat = Concatenate(axis=channel_axis, name='concatenate_location')([norm, loc]) else: concat = norm # Force the channel size for backbone input to be `required_channels` fixed_inputs = conv(required_channels, conv_kernel, strides=1, padding='same', name='conv_channels')(concat) # Force the input shape axis = 0 if K.image_data_format() == 'channels_first' else -1 fixed_input_shape = list(input_shape) fixed_input_shape[axis] = required_channels fixed_input_shape = tuple(fixed_input_shape) model_kwargs = { 'include_top': False, 'weights': None, 'input_shape': fixed_input_shape, 'pooling': pooling } _, backbone_dict = get_backbone(backbone, fixed_inputs, use_imagenet=use_imagenet, frames_per_batch=frames_per_batch, return_dict=True, **model_kwargs) backbone_dict_reduced = {k: backbone_dict[k] for k in backbone_dict if k in backbone_levels} ndim = 2 if frames_per_batch == 1 else 3 pyramid_dict = create_pyramid_features(backbone_dict_reduced, ndim=ndim, lite=lite, interpolation=interpolation, upsample_type=upsample_type) features = [pyramid_dict[key] for key in pyramid_levels] if frames_per_batch > 1: temporal_features = [__merge_temporal_features(f, mode=temporal_mode, frames_per_batch=frames_per_batch) for f in features] for f, k in zip(temporal_features, pyramid_levels): pyramid_dict[k] = f semantic_levels = [int(re.findall(r'\d+', k)[0]) for k in pyramid_dict] target_level = min(semantic_levels) semantic_head_list = [] for i in range(num_semantic_heads): semantic_head_list.append(create_semantic_head( pyramid_dict, n_classes=num_semantic_classes[i], input_target=inputs, target_level=target_level, semantic_id=i, ndim=ndim, upsample_type=upsample_type, interpolation=interpolation, **kwargs)) outputs = semantic_head_list model = Model(inputs=inputs, outputs=outputs, name=name) return model
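A hypothetical construction call, assuming PanopticNet and its deepcell-style dependencies are importable as in the code above and that 'resnet50' is a valid backbone name; the argument values are assumptions chosen to satisfy the square, power-of-two input checks performed in the function.

model = PanopticNet(
    backbone='resnet50',
    input_shape=(128, 128, 1),      # square and a power of 2, as required
    num_semantic_heads=2,
    num_semantic_classes=[1, 3],    # one class count per semantic head
)
model.summary()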
36,023
def test_get_by_fullname(setup_codes): """Verify that using the LABEL@machinename will retrieve the correct entity.""" entity_01, entity_02, entity_03 = setup_codes param = CodeParamType() identifier = '{}@{}'.format(entity_01.label, entity_01.computer.name) result = param.convert(identifier, None, None) assert result.uuid == entity_01.uuid
def test_get_by_fullname(setup_codes):
    """Verify that using the LABEL@machinename will retrieve the correct entity."""
    entity_01, entity_02, entity_03 = setup_codes
    param = CodeParamType()
    identifier = '{}@{}'.format(entity_01.label, entity_01.computer.name)
    result = param.convert(identifier, None, None)
    assert result.uuid == entity_01.uuid
35,967
def delete_nodes( pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False ): """ Delete nodes by a list of pks. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the concepts section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. :param pks: a list of the PKs of the nodes to delete :param bool force: do not ask for confirmation to delete nodes. :param int verbosity: 0 prints nothing, 1 prints just sums and total, 2 prints individual nodes. :param bool create_forward: This will delete all output data created by any deleted calculation. :param bool call_calc_forward: This will also delete all calculations called by any workflow that is going to be deleted. Note that when you delete a workflow, also all parent workflows are deleted (recursively). Therefore, setting this flag to True may delete calculations that are 'unrelated' to what has been chosen to be deleted, just because they are connected at some point in the upwards provenance. Use with care, and it is advisable to never combine it with force. :param bool call_work_forward: This will also delete all calculations called by any workflow that is going to be deleted. The same disclaimer as forward_calcs applies here as well. :param bool dry_run: Do not delete, a dry run, with statistics printed according to verbosity levels. :param bool force: Do not ask for confirmation to delete nodes. """ # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements from aiida.backends.utils import delete_nodes_and_connections from aiida.common import exceptions from aiida.common.links import LinkType from aiida.orm import Node, QueryBuilder, load_node starting_pks = [] for pk in pks: try: load_node(pk) except exceptions.NotExistent: echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk)) else: starting_pks.append(pk) # An empty set might be problematic for the queries done below. 
if not starting_pks: if verbosity: echo.echo('Nothing to delete') return follow_upwards = [] follow_upwards.append(LinkType.CREATE.value) follow_upwards.append(LinkType.RETURN.value) follow_upwards.append(LinkType.CALL_CALC.value) follow_upwards.append(LinkType.CALL_WORK.value) follow_downwards = [] follow_downwards.append(LinkType.INPUT_CALC.value) follow_downwards.append(LinkType.INPUT_WORK.value) if create_forward: follow_downwards.append(LinkType.CREATE.value) if call_calc_forward: follow_downwards.append(LinkType.CALL_CALC.value) if call_work_forward: follow_downwards.append(LinkType.CALL_WORK.value) links_upwards = {'type': {'in': follow_upwards}} links_downwards = {'type': {'in': follow_downwards}} operational_set = set().union(set(starting_pks)) accumulator_set = set().union(set(starting_pks)) while operational_set: new_pks_set = set() query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_downwards, with_incoming='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_upwards, with_outgoing='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) operational_set = new_pks_set.difference(accumulator_set) accumulator_set = new_pks_set.union(accumulator_set) pks_set_to_delete = accumulator_set if verbosity > 0: echo.echo( 'I {} delete {} node{}'.format( 'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else '' ) ) if verbosity > 1: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will')) for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label)) if dry_run: if verbosity > 0: echo.echo('\nThis was a dry run, exiting without deleting anything') return # Asking for user confirmation here if force: pass else: echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete))) if not click.confirm('Shall I continue?'): echo.echo('Exiting without deleting') return # Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access if verbosity > 0: echo.echo('I am starting node deletion.') delete_nodes_and_connections(pks_set_to_delete) if verbosity > 0: echo.echo('I have finished node deletion and I am starting folder deletion.') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) if verbosity > 0: echo.echo('I have finished folder deletion. Deletion completed.')
def delete_nodes( pks, verbosity=0, dry_run=False, force=False, create_forward=True, call_calc_forward=False, call_work_forward=False ): """ Delete nodes by a list of pks. This command will delete not only the specified nodes, but also the ones that are linked to these and should be also deleted in order to keep a consistent provenance according to the rules explained in the concepts section of the documentation. In summary: 1. If a DATA node is deleted, any process nodes linked to it will also be deleted. 2. If a CALC node is deleted, any incoming WORK node (callers) will be deleted as well whereas any incoming DATA node (inputs) will be kept. Outgoing DATA nodes (outputs) will be deleted by default but this can be disabled. 3. If a WORK node is deleted, any incoming WORK node (callers) will be deleted as well, but all DATA nodes will be kept. Outgoing WORK or CALC nodes will be kept by default, but deletion of either of both kind of connected nodes can be enabled. These rules are 'recursive', so if a CALC node is deleted, then its output DATA nodes will be deleted as well, and then any CALC node that may have those as inputs, and so on. :param pks: a list of the PKs of the nodes to delete :param bool force: do not ask for confirmation to delete nodes. :param int verbosity: 0 prints nothing, 1 prints just sums and total, 2 prints individual nodes. :param bool create_forward: This will delete all output data created by any deleted calculation. :param bool call_calc_forward: This will also delete all calculations called by any workflow that is going to be deleted. Note that when you delete a workflow, also all parent workflows are deleted (recursively). Therefore, setting this flag to True may delete calculations that are 'unrelated' to what has been chosen to be deleted, just because they are connected at some point in the upwards provenance. Use with care, and it is advisable to never combine it with force. :param bool call_work_forward: This will also delete all calculations called by any workflow that is going to be deleted. The same disclaimer as forward_calcs applies here as well. :param bool dry_run: Do not delete, a dry run, with statistics printed according to verbosity levels. :param bool force: Do not ask for confirmation to delete nodes. """ # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements from aiida.backends.utils import delete_nodes_and_connections from aiida.common import exceptions from aiida.common.links import LinkType from aiida.orm import Node, QueryBuilder, load_node starting_pks = [] for pk in pks: try: load_node(pk) except exceptions.NotExistent: echo.echo_warning('warning: node with pk<{}> does not exist, skipping'.format(pk)) else: starting_pks.append(pk) # An empty set might be problematic for the queries done below. 
if not starting_pks: if verbosity: echo.echo('Nothing to delete') return follow_upwards = [] follow_upwards.append(LinkType.CREATE.value) follow_upwards.append(LinkType.RETURN.value) follow_upwards.append(LinkType.CALL_CALC.value) follow_upwards.append(LinkType.CALL_WORK.value) follow_downwards = [] follow_downwards.append(LinkType.INPUT_CALC.value) follow_downwards.append(LinkType.INPUT_WORK.value) if create_forward: follow_downwards.append(LinkType.CREATE.value) if call_calc_forward: follow_downwards.append(LinkType.CALL_CALC.value) if call_work_forward: follow_downwards.append(LinkType.CALL_WORK.value) links_upwards = {'type': {'in': follow_upwards}} links_downwards = {'type': {'in': follow_downwards}} operational_set = set().union(set(starting_pks)) accumulator_set = set().union(set(starting_pks)) while operational_set: new_pks_set = set() query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_downwards, with_incoming='sources', project='id' ) new_pks_set.update(i for i, in query_nodes.iterall()) query_nodes = QueryBuilder() query_nodes.append(Node, filters={'id': {'in': operational_set}}, tag='sources') query_nodes.append( Node, filters={'id': { '!in': accumulator_set }}, edge_filters=links_upwards, with_outgoing='sources', project='id' ) new_pks_set = new_pks_set.union(set(i for i, in query_nodes.iterall())) operational_set = new_pks_set.difference(accumulator_set) accumulator_set = new_pks_set.union(accumulator_set) pks_set_to_delete = accumulator_set if verbosity > 0: echo.echo( 'I {} delete {} node{}'.format( 'would' if dry_run else 'will', len(pks_set_to_delete), 's' if len(pks_set_to_delete) > 1 else '' ) ) if verbosity > 1: builder = QueryBuilder().append( Node, filters={'id': { 'in': pks_set_to_delete }}, project=('uuid', 'id', 'node_type', 'label') ) echo.echo('The nodes I {} delete:'.format('would' if dry_run else 'will')) for uuid, pk, type_string, label in builder.iterall(): try: short_type_string = type_string.split('.')[-2] except IndexError: short_type_string = type_string echo.echo(' {} {} {} {}'.format(uuid, pk, short_type_string, label)) if dry_run: if verbosity > 0: echo.echo('\nThis was a dry run, exiting without deleting anything') return # Asking for user confirmation here if force: pass else: echo.echo_warning('YOU ARE ABOUT TO DELETE {} NODES! THIS CANNOT BE UNDONE!'.format(len(pks_set_to_delete))) if not click.confirm('Shall I continue?'): echo.echo('Exiting without deleting') return # Recover the list of folders to delete before actually deleting the nodes. I will delete the folders only later, # so that if there is a problem during the deletion of the nodes in the DB, I don't delete the folders repositories = [load_node(pk)._repository for pk in pks_set_to_delete] # pylint: disable=protected-access if verbosity > 0: echo.echo('I am starting node deletion.') delete_nodes_and_connections(pks_set_to_delete) if verbosity > 0: echo.echo('I have finished node deletion and I am starting folder deletion.') # If we are here, we managed to delete the entries from the DB. # I can now delete the folders for repository in repositories: repository.erase(force=True) if verbosity > 0: echo.echo('I have finished folder deletion. Deletion completed.')
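Both variants above walk the provenance graph before deleting anything, so a dry run is the safest way to preview their effect. A short sketch, assuming an AiiDA profile is loaded and that the primary keys below (which are made up) exist:

# Report what would be removed, following CREATE links downwards, without deleting.
delete_nodes(
    pks=[1234, 1235],     # illustrative node PKs
    dry_run=True,
    verbosity=2,          # print every node that would be affected
    create_forward=True,
)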
57,108
def send_mail_to_notify_contributor_dashboard_reviewers( reviewer_ids, reviewers_suggestion_email_infos): """Sends an email to each reviewer notifying them of the suggestions on the Contributor Dashboard that have been waiting the longest for review, and that the reviewer has permission to review. Args: reviewer_ids: list(str). A list of the Contributor Dashboard reviewer user ids to notify. reviewers_suggestion_email_infos: list(list(ReviewableSuggestionEmailInfo)). A list of suggestion email content info objects for each reviewer. These suggestion email content info objects contain the key information about the suggestions we're notifying reviewers about and will be used to compose the email body for each reviewer. """ email_subject = CONTRIBUTOR_DASHBOARD_REVIEWER_NOTIFICATION_EMAIL_DATA[ 'email_subject'] email_body_template = ( CONTRIBUTOR_DASHBOARD_REVIEWER_NOTIFICATION_EMAIL_DATA[ 'email_body_template']) if not feconf.CAN_SEND_EMAILS: log_new_error('This app cannot send emails to users.') return if not ( config_domain .CONTRIBUTOR_DASHBOARD_REVIEWER_EMAILS_IS_ENABLED.value): log_new_error( 'The "contributor_dashboard_reviewer_emails_is_enabled" property ' 'must be enabled on the admin config page in order to send ' 'reviewers the emails.' ) return if not reviewer_ids: log_new_error('No Contributor Dashboard reviewers to notify.') return reviewer_user_settings = user_services.get_users_settings(reviewer_ids) reviewer_usernames, reviewer_emails = list(python_utils.ZIP(*[ (reviewer_user_setting.username, reviewer_user_setting.email) if reviewer_user_setting is not None else (None, None) for reviewer_user_setting in reviewer_user_settings ])) for index, reviewer_id in enumerate(reviewer_ids): if not reviewers_suggestion_email_infos[index]: logging.info( 'There were no suggestions to recommend to the reviewer with ' 'user id: %s.' % reviewer_id) continue if not reviewer_emails[index]: log_new_error( 'There was no email for the given reviewer id: %s.' % ( reviewer_id)) continue suggestion_descriptions = [] for reviewer_suggestion_email_info in ( reviewers_suggestion_email_infos[index]): suggestion_descriptions.append( _create_html_for_reviewable_suggestion_email_info( reviewer_suggestion_email_info)) email_body = email_body_template % ( reviewer_usernames[index], feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL, ''.join( suggestion_descriptions), EMAIL_FOOTER.value) _send_email( reviewer_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_REVIEW_CONTRIBUTOR_DASHBOARD_SUGGESTIONS, email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS, recipient_email=reviewer_emails[index])
def send_mail_to_notify_contributor_dashboard_reviewers( reviewer_ids, reviewers_suggestion_email_infos): """Sends an email to each reviewer notifying them of the suggestions on the Contributor Dashboard that have been waiting the longest for review, and that the reviewer has permission to review. Args: reviewer_ids: list(str). A list of the Contributor Dashboard reviewer user ids to notify. reviewers_suggestion_email_infos: list(list(ReviewableSuggestionEmailInfo)). A list of suggestion email content info objects for each reviewer. These suggestion email content info objects contain the key information about the suggestions we're notifying reviewers about and will be used to compose the email body for each reviewer. """ email_subject = CONTRIBUTOR_DASHBOARD_REVIEWER_NOTIFICATION_EMAIL_DATA[ 'email_subject'] email_body_template = ( CONTRIBUTOR_DASHBOARD_REVIEWER_NOTIFICATION_EMAIL_DATA[ 'email_body_template']) if not feconf.CAN_SEND_EMAILS: log_new_error('This app cannot send emails to users.') return if not ( config_domain .CONTRIBUTOR_DASHBOARD_REVIEWER_EMAILS_IS_ENABLED.value): log_new_error( 'The "contributor_dashboard_reviewer_emails_is_enabled" property ' 'must be enabled on the admin config page in order to send ' 'reviewers the emails.' ) return if not reviewer_ids: log_new_error('No Contributor Dashboard reviewers to notify.') return reviewer_user_settings = user_services.get_users_settings(reviewer_ids) reviewer_usernames, reviewer_emails = list(python_utils.ZIP(*[ (reviewer_user_setting.username, reviewer_user_setting.email) if reviewer_user_setting is not None else (None, None) for reviewer_user_setting in reviewer_user_settings ])) for index, reviewer_id in enumerate(reviewer_ids): if not reviewers_suggestion_email_infos[index]: logging.info( 'There were no suggestions to recommend to the reviewer with ' 'user id: %s.' % reviewer_id) continue if not reviewer_emails[index]: log_new_error( 'There was no email for the given reviewer id: %s.' % ( reviewer_id)) continue suggestion_descriptions = [] for reviewer_suggestion_email_info in ( reviewers_suggestion_email_infos[index]): suggestion_descriptions.append( _create_html_for_reviewable_suggestion_email_info( reviewer_suggestion_email_info)) email_body = email_body_template % ( reviewer_usernames[index], feconf.OPPIA_SITE_URL, feconf.CONTRIBUTOR_DASHBOARD_URL, ''.join(suggestion_descriptions), EMAIL_FOOTER.value ) _send_email( reviewer_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_REVIEW_CONTRIBUTOR_DASHBOARD_SUGGESTIONS, email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS, recipient_email=reviewer_emails[index])
12,280
def gate_sequence_product(U_list, left_to_right=True, inds_list=None, expand=False): """ Calculate the overall unitary matrix for a given list of unitary operations. Parameters ---------- U_list: list List of gates implementing the quantum circuit. left_to_right: Boolean, optional Check if multiplication is to be done from left to right. inds_list: list of list of int, optional If expand=True, list of qubit indices corresponding to U_list to which each unitary is applied. expand: Boolean, optional Check if list of unitaries to be expanded to full dimension. Returns ------- U_overall : qobj Unitary matrix corresponding to U_list. overall_inds : list of int, optional List of qubit indices on which U_overall applies. """ if expand: return _gate_sequence_product(U_list, inds_list) else: return _gate_sequence_product_expanded(U_list, left_to_right)
def gate_sequence_product(U_list, left_to_right=True, inds_list=None, expand=False): """ Calculate the overall unitary matrix for a given list of unitary operations. Parameters ---------- U_list: list List of gates implementing the quantum circuit. left_to_right: Boolean, optional Check if multiplication is to be done from left to right. inds_list: list of list of int, optional If expand=True, list of qubit indices corresponding to U_list to which each unitary is applied. expand: Boolean, optional Check if list of unitaries needs to be expanded to full dimension. Returns ------- U_overall : qobj Unitary matrix corresponding to U_list. overall_inds : list of int, optional List of qubit indices on which U_overall applies. """ if expand: return _gate_sequence_product(U_list, inds_list) else: return _gate_sequence_product_expanded(U_list, left_to_right)
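With expand=False (the default) the wrapper above simply reduces the gate list to a single unitary. A small sketch, assuming qutip is installed and gate_sequence_product is in scope:

from qutip import sigmax, sigmaz

# Combined unitary for the two-gate sequence [X, Z] on a single qubit.
U = gate_sequence_product([sigmax(), sigmaz()], left_to_right=True)
print(U)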
1,315
def adjusted_mutual_info_score(labels_true, labels_pred, average_method='arithmetic'): """Adjusted Mutual Information between two clusterings. Adjusted Mutual Information (AMI) is an adjustment of the Mutual Information (MI) score to account for chance. It accounts for the fact that the MI is generally higher for two clusterings with a larger number of clusters, regardless of whether there is actually more information shared. For two clusterings :math:`U` and :math:`V`, the AMI is given as:: AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))] This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is furthermore symmetric: switching ``label_true`` with ``label_pred`` will return the same score value. This can be useful to measure the agreement of two independent label assignments strategies on the same dataset when the real ground truth is not known. Be mindful that this function is an order of magnitude slower than other metrics, such as the Adjusted Rand Index. Read more in the :ref:`User Guide <mutual_info_score>`. Parameters ---------- labels_true : int array, shape = [n_samples] A clustering of the data into disjoint subsets. labels_pred : array, shape = [n_samples] A clustering of the data into disjoint subsets. average_method : string, optional (default: 'arithmetic') How to compute the normalizer in the denominator. Possible options are 'min', 'geometric', 'arithmetic', and 'max'. .. versionadded:: 0.22 The default value of ``average_method`` changed from 'max' to 'arithmetic'. Returns ------- ami: float (upperlimited by 1.0) The AMI returns a value of 1 when the two partitions are identical (ie perfectly matched). Random partitions (independent labellings) have an expected AMI around 0 on average hence can be negative. See also -------- adjusted_rand_score: Adjusted Rand Index mutual_info_score: Mutual Information (not adjusted for chance) Examples -------- Perfect labelings are both homogeneous and complete, hence have score 1.0:: >>> from sklearn.metrics.cluster import adjusted_mutual_info_score >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) ... # doctest: +SKIP 1.0 >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0]) ... # doctest: +SKIP 1.0 If classes members are completely split across different clusters, the assignment is totally in-complete, hence the AMI is null:: >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]) ... # doctest: +SKIP 0.0 References ---------- .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for Clusterings Comparison: Variants, Properties, Normalization and Correction for Chance, JMLR <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_ .. [2] `Wikipedia entry for the Adjusted Mutual Information <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_ """ labels_true, labels_pred = check_clusterings(labels_true, labels_pred) n_samples = labels_true.shape[0] classes = np.unique(labels_true) clusters = np.unique(labels_pred) # Special limit cases: no clustering since the data is not split. # This is a perfect match hence return 1.0. 
if (classes.shape[0] == clusters.shape[0] == 1 or classes.shape[0] == clusters.shape[0] == 0): return 1.0 contingency = contingency_matrix(labels_true, labels_pred, sparse=True) contingency = contingency.astype(np.float64, **_astype_copy_false(contingency)) # Calculate the MI for the two clusterings mi = mutual_info_score(labels_true, labels_pred, contingency=contingency) # Calculate the expected value for the mutual information emi = expected_mutual_information(contingency, n_samples) # Calculate entropy for each labeling h_true, h_pred = entropy(labels_true), entropy(labels_pred) normalizer = _generalized_average(h_true, h_pred, average_method) denominator = normalizer - emi # Avoid 0.0 / 0.0 when expectation equals maximum, i.e a perfect match. # normalizer should always be >= emi, but because of floating-point # representation, sometimes emi is slightly larger. Correct this # by preserving the sign. if denominator < 0: denominator = min(denominator, -np.finfo('float64').eps) else: denominator = max(denominator, np.finfo('float64').eps) ami = (mi - emi) / denominator return ami
def adjusted_mutual_info_score(labels_true, labels_pred, average_method='arithmetic'): """Adjusted Mutual Information between two clusterings. Adjusted Mutual Information (AMI) is an adjustment of the Mutual Information (MI) score to account for chance. It accounts for the fact that the MI is generally higher for two clusterings with a larger number of clusters, regardless of whether there is actually more information shared. For two clusterings :math:`U` and :math:`V`, the AMI is given as:: AMI(U, V) = [MI(U, V) - E(MI(U, V))] / [avg(H(U), H(V)) - E(MI(U, V))] This metric is independent of the absolute values of the labels: a permutation of the class or cluster label values won't change the score value in any way. This metric is furthermore symmetric: switching ``label_true`` with ``label_pred`` will return the same score value. This can be useful to measure the agreement of two independent label assignments strategies on the same dataset when the real ground truth is not known. Be mindful that this function is an order of magnitude slower than other metrics, such as the Adjusted Rand Index. Read more in the :ref:`User Guide <mutual_info_score>`. Parameters ---------- labels_true : int array, shape = [n_samples] A clustering of the data into disjoint subsets. labels_pred : array, shape = [n_samples] A clustering of the data into disjoint subsets. average_method : string, optional (default: 'arithmetic') How to compute the normalizer in the denominator. Possible options are 'min', 'geometric', 'arithmetic', and 'max'. .. versionchanged:: 0.22 The default value of ``average_method`` changed from 'max' to 'arithmetic'. Returns ------- ami: float (upperlimited by 1.0) The AMI returns a value of 1 when the two partitions are identical (ie perfectly matched). Random partitions (independent labellings) have an expected AMI around 0 on average hence can be negative. See also -------- adjusted_rand_score: Adjusted Rand Index mutual_info_score: Mutual Information (not adjusted for chance) Examples -------- Perfect labelings are both homogeneous and complete, hence have score 1.0:: >>> from sklearn.metrics.cluster import adjusted_mutual_info_score >>> adjusted_mutual_info_score([0, 0, 1, 1], [0, 0, 1, 1]) ... # doctest: +SKIP 1.0 >>> adjusted_mutual_info_score([0, 0, 1, 1], [1, 1, 0, 0]) ... # doctest: +SKIP 1.0 If classes members are completely split across different clusters, the assignment is totally in-complete, hence the AMI is null:: >>> adjusted_mutual_info_score([0, 0, 0, 0], [0, 1, 2, 3]) ... # doctest: +SKIP 0.0 References ---------- .. [1] `Vinh, Epps, and Bailey, (2010). Information Theoretic Measures for Clusterings Comparison: Variants, Properties, Normalization and Correction for Chance, JMLR <http://jmlr.csail.mit.edu/papers/volume11/vinh10a/vinh10a.pdf>`_ .. [2] `Wikipedia entry for the Adjusted Mutual Information <https://en.wikipedia.org/wiki/Adjusted_Mutual_Information>`_ """ labels_true, labels_pred = check_clusterings(labels_true, labels_pred) n_samples = labels_true.shape[0] classes = np.unique(labels_true) clusters = np.unique(labels_pred) # Special limit cases: no clustering since the data is not split. # This is a perfect match hence return 1.0. 
if (classes.shape[0] == clusters.shape[0] == 1 or classes.shape[0] == clusters.shape[0] == 0): return 1.0 contingency = contingency_matrix(labels_true, labels_pred, sparse=True) contingency = contingency.astype(np.float64, **_astype_copy_false(contingency)) # Calculate the MI for the two clusterings mi = mutual_info_score(labels_true, labels_pred, contingency=contingency) # Calculate the expected value for the mutual information emi = expected_mutual_information(contingency, n_samples) # Calculate entropy for each labeling h_true, h_pred = entropy(labels_true), entropy(labels_pred) normalizer = _generalized_average(h_true, h_pred, average_method) denominator = normalizer - emi # Avoid 0.0 / 0.0 when expectation equals maximum, i.e a perfect match. # normalizer should always be >= emi, but because of floating-point # representation, sometimes emi is slightly larger. Correct this # by preserving the sign. if denominator < 0: denominator = min(denominator, -np.finfo('float64').eps) else: denominator = max(denominator, np.finfo('float64').eps) ami = (mi - emi) / denominator return ami
34,915
def _test_forward_where(input_shape): with tf.Graph().as_default(): dtype = tf.float32 t = tf.constant(np.random.choice([0, 1, 2, 3], size=input_shape).astype(dtype.name)) out = tf.where(t) compare_tf_with_tvm([], [], out.name, mode='interp')
def _test_forward_where(input_shape): with tf.Graph().as_default(): dtype = tf.float32 t = tf.constant(np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=input_shape).astype(dtype.name)) out = tf.where(t) compare_tf_with_tvm([], [], out.name, mode='interp')
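The helper above exercises single-argument tf.where, which returns the coordinates of non-zero elements. A rough numpy analogue of what the test feeds in and expects back, with an illustrative shape:

import numpy as np

t = np.random.choice([0, 1, -2, 3, -1, 0.1, -0.2], size=(2, 3)).astype(np.float32)
coords = np.argwhere(t != 0)  # plays the role of tf.where(t): indices of non-zero entries
print(coords)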
25,984
def validate_private_endpoint_connection_id(namespace): if namespace.connection_id: from azure.cli.core.util import parse_proxy_resource_id result = parse_proxy_resource_id(namespace.connection_id) namespace.resource_group_name = result['resource_group'] namespace.namespace_name = result['name'] namespace.private_endpoint_connection_name = result['child_name_1'] # if namespace.account_name and not namespace.resource_group_name: # namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0] if not all([namespace.namespace_name, namespace.resource_group_name, namespace.private_endpoint_connection_name]): raise CLIError('incorrect usage: [--id ID | --name NAME --account-name NAME]') del namespace.connection_id
def validate_private_endpoint_connection_id(namespace):
    if namespace.connection_id:
        from azure.cli.core.util import parse_proxy_resource_id
        result = parse_proxy_resource_id(namespace.connection_id)
        namespace.resource_group_name = result['resource_group']
        namespace.namespace_name = result['name']
        namespace.private_endpoint_connection_name = result['child_name_1']

    # if namespace.account_name and not namespace.resource_group_name:
    #     namespace.resource_group_name = _query_account_rg(cmd.cli_ctx, namespace.account_name)[0]
    if not all([namespace.namespace_name, namespace.resource_group_name,
                namespace.private_endpoint_connection_name]):
        raise CLIError('incorrect usage: [--id ID | --name NAME --account-name NAME]')

    del namespace.connection_id
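A hedged end-to-end sketch of how the validator above fills in the namespace; it assumes azure-cli is installed, that the function is importable, and that the resource id below is entirely made up:

from argparse import Namespace

ns = Namespace(
    connection_id=(
        '/subscriptions/0000/resourceGroups/rg1/providers/Microsoft.EventHub'
        '/namespaces/ns1/privateEndpointConnections/pec1'
    ),
    resource_group_name=None,
    namespace_name=None,
    private_endpoint_connection_name=None,
)

validate_private_endpoint_connection_id(ns)
print(ns.resource_group_name, ns.namespace_name, ns.private_endpoint_connection_name)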
25,938
def export_template_spec(cmd, output_folder, resource_group_name=None, name=None, version=None, template_spec=None): rcf = _resource_templatespecs_client_factory(cmd.cli_ctx) if template_spec: id_parts = parse_resource_id(template_spec) resource_group_name = id_parts.get('resource_group') name = id_parts.get('name') version = id_parts.get('resource_name') if version == name: version = None if not version: raise IncorrectUsageError('a template spec version must be specified for export') exported_template = rcf.template_spec_versions.get(resource_group_name, name, version) from azure.cli.command_modules.resource._packing_engine import (unpack) return unpack(cmd, exported_template, output_folder, (str(name) + '.JSON'))
def export_template_spec(cmd, output_folder, resource_group_name=None, name=None, version=None, template_spec=None): rcf = _resource_templatespecs_client_factory(cmd.cli_ctx) if template_spec: id_parts = parse_resource_id(template_spec) resource_group_name = id_parts.get('resource_group') name = id_parts.get('name') version = id_parts.get('resource_name') if version == name: version = None if not version: raise IncorrectUsageError('Please specify the template spec version for export') exported_template = rcf.template_spec_versions.get(resource_group_name, name, version) from azure.cli.command_modules.resource._packing_engine import (unpack) return unpack(cmd, exported_template, output_folder, (str(name) + '.JSON'))
30,876
def main(): params = demisto.params() client = McAfeeESMClient(params) command = demisto.command() commands: Dict[str, Callable] = { 'test-module': client.test_module, 'esm-fetch-fields': client.fetch_fields, 'esm-get-organization-list': client.get_organization_list, 'esm-fetch-alarms': client.fetch_alarms, 'esm-add-case': client.add_case, 'esm-get-case-detail': client.get_case_detail, 'esm-edit-case': client.edit_case, 'esm-get-case-statuses': client.get_case_statuses, 'esm-edit-case-status': client.edit_case_status, 'esm-get-case-event-list': client.get_case_event_list, 'esm-add-case-status': client.add_case_status, 'esm-delete-case-status': client.delete_case_status, 'esm-get-case-list': client.get_case_list, 'esm-get-user-list': client.get_user_list, 'esm-acknowledge-alarms': client.acknowledge_alarms, 'esm-unacknowledge-alarms': client.unacknowledge_alarms, 'esm-delete-alarms': client.delete_alarm, 'esm-get-alarm-event-details': client.get_alarm_event_details, 'esm-list-alarm-events': client.list_alarm_events, 'esm-search': client.complete_search, 'esm-get-watchlists': client.get_watchlists_names_and_ids, 'esm-create-watchlist': client.add_watchlist, 'esm-delete-watchlist': client.delete_watchlist, 'esm-watchlist-add-entry': client.watchlist_add_entry, 'esm-watchlist-delete-entry': client.watchlist_delete_entry, 'esm-watchlist-data-list': client.watchlist_data_list, } try: if command == 'fetch-incidents': client.fetch_incidents(params) elif command in commands: human_readable, context_entry, raw_response = commands[command]() return_outputs(human_readable, context_entry, raw_response) else: raise DemistoException(f'{command} is not a command.') except Exception as error: return_error(str(error), error)
def main(): params = demisto.params() client = McAfeeESMClient(params) command = demisto.command() commands: Dict[str, Callable] = { 'test-module': client.test_module, 'esm-fetch-fields': client.fetch_fields, 'esm-get-organization-list': client.get_organization_list, 'esm-fetch-alarms': client.fetch_alarms, 'esm-add-case': client.add_case, 'esm-get-case-detail': client.get_case_detail, 'esm-edit-case': client.edit_case, 'esm-get-case-statuses': client.get_case_statuses, 'esm-edit-case-status': client.edit_case_status, 'esm-get-case-event-list': client.get_case_event_list, 'esm-add-case-status': client.add_case_status, 'esm-delete-case-status': client.delete_case_status, 'esm-get-case-list': client.get_case_list, 'esm-get-user-list': client.get_user_list, 'esm-acknowledge-alarms': client.acknowledge_alarms, 'esm-unacknowledge-alarms': client.unacknowledge_alarms, 'esm-delete-alarms': client.delete_alarm, 'esm-get-alarm-event-details': client.get_alarm_event_details, 'esm-list-alarm-events': client.list_alarm_events, 'esm-search': client.complete_search, 'esm-get-watchlists': client.get_watchlists_names_and_ids, 'esm-create-watchlist': client.add_watchlist, 'esm-delete-watchlist': client.delete_watchlist, 'esm-watchlist-add-entry': client.watchlist_add_entry, 'esm-watchlist-delete-entry': client.watchlist_delete_entry, 'esm-watchlist-list-entries': client.watchlist_data_list, } try: if command == 'fetch-incidents': client.fetch_incidents(params) elif command in commands: human_readable, context_entry, raw_response = commands[command]() return_outputs(human_readable, context_entry, raw_response) else: raise DemistoException(f'{command} is not a command.') except Exception as error: return_error(str(error), error)
61
def write_image(data, prefix): # type: (bytes, str) -> Optional[Image] path_prefix = find_image_path(prefix) dirname = os.path.dirname(path_prefix) if not os.path.exists(dirname): os.makedirs(dirname) try: # save original image with open(path_prefix + '.jpg', 'wb') as f: f.write(data) img = Image.open(BytesIO(data)) if img.mode != 'RGB': img = img.convert('RGB') for name, size in config.image_sizes.items(): path = "%s-%s.jpg" % (path_prefix, name) resize_image(img, size).save(path, quality=90) return img except IOError as e: print('ERROR:', str(e)) # cleanup rm_f(prefix + '.jpg') rm_f(prefix + '-S.jpg') rm_f(prefix + '-M.jpg') rm_f(prefix + '-L.jpg') return None
def write_image(data, prefix):
    # type: (bytes, str) -> Optional[Image]
    path_prefix = find_image_path(prefix)
    dirname = os.path.dirname(path_prefix)
    if not os.path.exists(dirname):
        os.makedirs(dirname)

    try:
        # save original image
        with open(path_prefix + '.jpg', 'wb') as f:
            f.write(data)

        img = Image.open(BytesIO(data))
        if img.mode != 'RGB':
            img = img.convert('RGB')

        for name, size in config.image_sizes.items():
            path = "%s-%s.jpg" % (path_prefix, name)
            resize_image(img, size).save(path, quality=90)
        return img
    except IOError as e:
        print('ERROR:', str(e))

        # cleanup
        rm_f(prefix + '.jpg')
        rm_f(prefix + '-S.jpg')
        rm_f(prefix + '-M.jpg')
        rm_f(prefix + '-L.jpg')
        return None
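A small usage sketch for the helper above; it assumes the surrounding module's config, find_image_path and resize_image are already set up, and the file path and prefix below are hypothetical:

# Persist the original JPEG plus the configured resized variants.
with open('sample.jpg', 'rb') as f:   # hypothetical input file
    data = f.read()

img = write_image(data, prefix='covers/abc')  # hypothetical storage prefix
if img is None:
    print('image could not be processed')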
48,072
def from_frictionless_schema(schema): """Create a :class:`~pandera.schemas.DataFrameSchema` from a frictionless json/yaml schema file on disk, or a frictionless schema already loaded into memory. Each field from the frictionless schema will be converted to a pandera column specification using :class:`~pandera.io.FrictionlessFieldParser` to map field characteristics to pandera column specifications. :param frictionless_schema: the frictionless schema object (or a string/Path to the location on disk of a schema specification) to parse. :returns: dataframe schema with frictionless field specs converted to pandera column checks and constraints for use as normal. :example: >>> from pandera.io import from_frictionless_schema >>> >>> FRICTIONLESS_SCHEMA = { ... "fields": [ ... { ... "name": "column_1", ... "type": "integer", ... "constraints": {"minimum": 10, "maximum": 99} ... } ... ], ... "primaryKey": "column_1" ... } >>> schema = from_frictionless_schema(FRICTIONLESS_SCHEMA) >>> schema.columns["column_1"].checks [<Check in_range: in_range(10, 99)>] >>> schema.columns["column_1"].required True >>> schema.columns["column_1"].allow_duplicates False """ if not isinstance(schema, FrictionlessSchema): schema = FrictionlessSchema(schema) assembled_schema = { "columns": { field.name: FrictionlessFieldParser( field, schema.primary_key ).to_pandera_column() for field in schema.fields }, "index": None, "checks": None, "coerce": True, "strict": True, } return _deserialize_schema(assembled_schema)
def from_frictionless_schema(schema): """Create a :class:`~pandera.schemas.DataFrameSchema` from a frictionless json/yaml schema file on disk, or a frictionless schema already loaded into memory. Each field from the frictionless schema will be converted to a pandera column specification using :class:`~pandera.io.FrictionlessFieldParser` to map field characteristics to pandera column specifications. :param schema: the frictionless schema object (or a string/Path to the location on disk of a schema specification) to parse. :returns: dataframe schema with frictionless field specs converted to pandera column checks and constraints for use as normal. :example: >>> from pandera.io import from_frictionless_schema >>> >>> FRICTIONLESS_SCHEMA = { ... "fields": [ ... { ... "name": "column_1", ... "type": "integer", ... "constraints": {"minimum": 10, "maximum": 99} ... } ... ], ... "primaryKey": "column_1" ... } >>> schema = from_frictionless_schema(FRICTIONLESS_SCHEMA) >>> schema.columns["column_1"].checks [<Check in_range: in_range(10, 99)>] >>> schema.columns["column_1"].required True >>> schema.columns["column_1"].allow_duplicates False """ if not isinstance(schema, FrictionlessSchema): schema = FrictionlessSchema(schema) assembled_schema = { "columns": { field.name: FrictionlessFieldParser( field, schema.primary_key ).to_pandera_column() for field in schema.fields }, "index": None, "checks": None, "coerce": True, "strict": True, } return _deserialize_schema(assembled_schema)
7,194
def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=None, exclude_border=True, indices=True, num_peaks=np.inf, footprint=None, labels=None, *, num_peaks_per_label=np.inf, p=np.inf): """Find corners in corner measure response image. This differs from `skimage.feature.peak_local_max` in that it suppresses multiple connected peaks with the same accumulator value. Parameters ---------- image : ndarray Input image. min_distance : int, optional The minimum distance seperating peaks. Use the ``p`` argument to set the Minkowski p-norm defining the distance. * : * See :py:meth:`skimage.feature.peak_local_max`. p : float Which Minkowski p-norm to use. Should be in the range [1, inf]. A finite large p may cause a ValueError if overflow can occur. inf corresponds to the chebychev distance and 2 to the euclidean distance. Returns ------- output : ndarray or ndarray of bools * If `indices = True` : (row, column, ...) coordinates of peaks. * If `indices = False` : Boolean array shaped like `image`, with peaks represented by True values. See also -------- skimage.feature.peak_local_max Notes ----- The `num_peaks` limit is applied before suppression of connected peaks. If you want to limit the number of peaks after suppression, you should set `num_peaks=np.inf` and post-process the output of this function. Examples -------- >>> from skimage.feature import peak_local_max >>> response = np.zeros((5, 5)) >>> response[2:4, 2:4] = 1 >>> response array([[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 1., 0.], [0., 0., 1., 1., 0.], [0., 0., 0., 0., 0.]]) >>> peak_local_max(response) array([[3, 3], [3, 2], [2, 3], [2, 2]]) >>> corner_peaks(response) array([[3, 3]]) """ if threshold_rel is None: threshold_rel = 0.1 warn("Until the version 0.16, threshold_rel was set to 0.1 by default." "Starting from version 0.16, the default value is set to None." "Until version 0.18, a None value corresponds to a threshold " "value of 0.1. The default behavior will match " "skimage.feature.peak_local_max.", category=FutureWarning, stacklevel=2) # Get the coordinates of the detected peaks coords = peak_local_max(image, min_distance=min_distance, threshold_abs=threshold_abs, threshold_rel=threshold_rel, exclude_border=exclude_border, indices=True, num_peaks=num_peaks, footprint=footprint, labels=labels, num_peaks_per_label=num_peaks_per_label) if len(coords): # Use KDtree to find the peaks that are too close to each others tree = spatial.cKDTree(coords) rejected_peaks = set() for idx, point in enumerate(coords): if idx not in rejected_peaks: candidates = tree.query_ball_point(point, r=min_distance, p=p) candidates.remove(idx) rejected_peaks.update(candidates) # Remove the peaks that are too close to each others coords = np.delete(coords, tuple(rejected_peaks), axis=0)[::-1] if indices is True: return coords peaks = np.zeros_like(image, dtype=bool) peaks[tuple(coords.T)] = True return peaks
def corner_peaks(image, min_distance=1, threshold_abs=None, threshold_rel=None, exclude_border=True, indices=True, num_peaks=np.inf, footprint=None, labels=None, *, num_peaks_per_label=np.inf, p=np.inf): """Find corners in corner measure response image. This differs from `skimage.feature.peak_local_max` in that it suppresses multiple connected peaks with the same accumulator value. Parameters ---------- image : ndarray Input image. min_distance : int, optional The minimum distance seperating peaks. Use the ``p`` argument to set the Minkowski p-norm defining the distance. * : * See :py:meth:`skimage.feature.peak_local_max`. p : float Which Minkowski p-norm to use. Should be in the range [1, inf]. A finite large p may cause a ValueError if overflow can occur. inf corresponds to the chebychev distance and 2 to the euclidean distance. Returns ------- output : ndarray or ndarray of bools * If `indices = True` : (row, column, ...) coordinates of peaks. * If `indices = False` : Boolean array shaped like `image`, with peaks represented by True values. See also -------- skimage.feature.peak_local_max Notes ----- The `num_peaks` limit is applied before suppression of connected peaks. If you want to limit the number of peaks after suppression, you should set `num_peaks=np.inf` and post-process the output of this function. Examples -------- >>> from skimage.feature import peak_local_max >>> response = np.zeros((5, 5)) >>> response[2:4, 2:4] = 1 >>> response array([[0., 0., 0., 0., 0.], [0., 0., 0., 0., 0.], [0., 0., 1., 1., 0.], [0., 0., 1., 1., 0.], [0., 0., 0., 0., 0.]]) >>> peak_local_max(response) array([[3, 3], [3, 2], [2, 3], [2, 2]]) >>> corner_peaks(response) array([[3, 3]]) """ if threshold_rel is None: threshold_rel = 0.1 warn("Until the version 0.16, threshold_rel was set to 0.1 by default." "Starting from version 0.16, the default value is set to None." "Until version 0.18, a None value corresponds to a threshold " "value of 0.1. The default behavior will match " "skimage.feature.peak_local_max.", category=FutureWarning, stacklevel=2) # Get the coordinates of the detected peaks coords = peak_local_max(image, min_distance=min_distance, threshold_abs=threshold_abs, threshold_rel=threshold_rel, exclude_border=exclude_border, indices=True, num_peaks=num_peaks, footprint=footprint, labels=labels, num_peaks_per_label=num_peaks_per_label) if len(coords): # Use KDtree to find the peaks that are too close to each others tree = spatial.cKDTree(coords) rejected_peaks = set() for idx, point in enumerate(coords): if idx not in rejected_peaks: candidates = tree.query_ball_point(point, r=min_distance, p=p) candidates.remove(idx) rejected_peaks.update(candidates) # Remove the peaks that are too close to each other coords = np.delete(coords, tuple(rejected_peaks), axis=0)[::-1] if indices is True: return coords peaks = np.zeros_like(image, dtype=bool) peaks[tuple(coords.T)] = True return peaks
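The suppression loop above hinges on scipy's cKDTree.query_ball_point with a Chebyshev radius. A standalone sketch of that idea, using made-up peak coordinates:

import numpy as np
from scipy import spatial

coords = np.array([[2, 2], [2, 3], [3, 2], [3, 3], [10, 10]])
tree = spatial.cKDTree(coords)
rejected = set()
for idx, point in enumerate(coords):
    if idx not in rejected:
        close = tree.query_ball_point(point, r=1, p=np.inf)  # Chebyshev ball of radius 1
        close.remove(idx)
        rejected.update(close)

kept = np.delete(coords, tuple(rejected), axis=0)
print(kept)  # one representative of the connected block survives, plus the isolated peak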
17,688
def _search_from_virgin_install(dataset, query): # # this is to be nice to newbies # exc_info = sys.exc_info() if dataset is None: if not ui.is_interactive: raise NoDatasetFound( "No DataLad dataset found. Specify a dataset to be " "searched, or run interactively to get assistance " "installing a queryable superdataset." ) # none was provided so we could ask user whether he possibly wants # to install our beautiful mega-duper-super-dataset? # TODO: following logic could possibly benefit other actions. DEFAULT_DATASET_PATH = cfg.obtain('datalad.locations.default-dataset') if os.path.exists(DEFAULT_DATASET_PATH): default_ds = Dataset(DEFAULT_DATASET_PATH) if default_ds.is_installed(): if ui.yesno( title="No DataLad dataset found at current location", text="Would you like to search the DataLad " "superdataset at %r?" % DEFAULT_DATASET_PATH): pass else: raise exc_info[1] else: raise NoDatasetFound( "No DataLad dataset found at current location. " "The DataLad superdataset location %r exists, " "but does not contain an dataset." % DEFAULT_DATASET_PATH) elif ui.yesno( title="No DataLad dataset found at current location", text="Would you like to install the DataLad " "superdataset at %r?" % DEFAULT_DATASET_PATH): from datalad.api import install default_ds = install( DEFAULT_DATASET_PATH, source='///', result_renderer='disabled', on_failur='continue', result_type='generator') ui.message( "From now on you can refer to this dataset using the " "label '///'" ) else: raise exc_info[1] lgr.info( "Performing search using DataLad superdataset %r", default_ds.path ) for res in default_ds.search(query): yield res return else: raise # this function is called within exception handling block
def _search_from_virgin_install(dataset, query): # # this is to be nice to newbies # exc_info = sys.exc_info() if dataset is None: if not ui.is_interactive: raise NoDatasetFound( "No DataLad dataset found. Specify a dataset to be " "searched, or run interactively to get assistance " "installing a queryable superdataset." ) # none was provided so we could ask user whether he possibly wants # to install our beautiful mega-duper-super-dataset? # TODO: following logic could possibly benefit other actions. DEFAULT_DATASET_PATH = cfg.obtain('datalad.locations.default-dataset') if os.path.exists(DEFAULT_DATASET_PATH): default_ds = Dataset(DEFAULT_DATASET_PATH) if default_ds.is_installed(): if ui.yesno( title="No DataLad dataset found at current location", text="Would you like to search the DataLad " "superdataset at %r?" % DEFAULT_DATASET_PATH): pass else: raise exc_info[1] else: raise NoDatasetFound( "No DataLad dataset found at current location. " "The DataLad superdataset location %r exists, " "but does not contain an dataset." % DEFAULT_DATASET_PATH) elif ui.yesno( title="No DataLad dataset found at current location", text="Would you like to install the DataLad " "superdataset at %r?" % DEFAULT_DATASET_PATH): from datalad.api import install default_ds = install( DEFAULT_DATASET_PATH, source='///', result_renderer='disabled', on_failure='continue', result_type='generator') ui.message( "From now on you can refer to this dataset using the " "label '///'" ) else: raise exc_info[1] lgr.info( "Performing search using DataLad superdataset %r", default_ds.path ) for res in default_ds.search(query): yield res return else: raise # this function is called within exception handling block
27,457
def _render_ci_provider( provider_name, jinja_env, forge_config, forge_dir, platforms, archs, fast_finish_text, platform_target_path, platform_template_file, platform_specific_setup, keep_noarchs=None, extra_platform_files={}, upload_packages=[], return_metadata=False, ): if keep_noarchs is None: keep_noarchs = [False] * len(platforms) metas_list_of_lists = [] enable_platform = [False] * len(platforms) for i, (platform, arch, keep_noarch) in enumerate( zip(platforms, archs, keep_noarchs) ): os.environ["CONFIG_VERSION"] = forge_config["config_version"] os.environ["BUILD_PLATFORM"] = forge_config["build_platform"][ f"{platform}_{arch}" ].replace("_", "-") # set the environment variable for OS version if platform == "linux": ver = forge_config["os_version"][f"{platform}_{arch}"] if ver: os.environ["DEFAULT_LINUX_VERSION"] = ver config = conda_build.config.get_or_merge_config( None, exclusive_config_file=forge_config["exclusive_config_file"], platform=platform, arch=arch, ) # Get the combined variants from normal variant locations prior to running migrations ( combined_variant_spec, _, ) = conda_build.variants.get_package_combined_spec( os.path.join(forge_dir, forge_config["recipe_dir"]), config=config ) migrated_combined_variant_spec = migrate_combined_spec( combined_variant_spec, forge_dir, config, forge_config, ) for channel_target in migrated_combined_variant_spec.get("channel_targets", []): if channel_target.startswith("conda-forge ") and provider_name == "github_actions": raise RuntimeError("Using github_actions as the CI provider inside " "conda-forge github org is not allowed as github actions " "to avoid a denial of service for other infrastructure.") # AFAIK there is no way to get conda build to ignore the CBC yaml # in the recipe. This one can mess up migrators applied with local # CBC yaml files where variants in the migrators are not in the CBC. # Thus we move it out of the way. # TODO: upstream this as a flag in conda-build try: _recipe_cbc = os.path.join( forge_dir, forge_config["recipe_dir"], "conda_build_config.yaml", ) if os.path.exists(_recipe_cbc): os.rename(_recipe_cbc, _recipe_cbc + ".conda.smithy.bak") metas = conda_build.api.render( os.path.join(forge_dir, forge_config["recipe_dir"]), platform=platform, arch=arch, ignore_system_variants=True, variants=migrated_combined_variant_spec, permit_undefined_jinja=True, finalize=False, bypass_env_check=True, channel_urls=forge_config.get("channels", {}).get( "sources", [] ), ) finally: if os.path.exists(_recipe_cbc + ".conda.smithy.bak"): os.rename(_recipe_cbc + ".conda.smithy.bak", _recipe_cbc) # render returns some download & reparsing info that we don't care about metas = [m for m, _, _ in metas] if not keep_noarch: to_delete = [] for idx, meta in enumerate(metas): if meta.noarch: # do not build noarch, including noarch: python, packages on Travis CI. to_delete.append(idx) for idx in reversed(to_delete): del metas[idx] for meta in metas: if not meta.skip(): enable_platform[i] = True metas_list_of_lists.append(metas) if not any(enable_platform): # There are no cases to build (not even a case without any special # dependencies), so remove the run_docker_build.sh if it exists. 
forge_config[provider_name]["enabled"] = False target_fnames = [platform_target_path] if extra_platform_files: for val in extra_platform_files.values(): target_fnames.extend(val) for each_target_fname in target_fnames: remove_file(each_target_fname) else: forge_config[provider_name]["enabled"] = True fancy_name = { "linux_64": "Linux", "osx_64": "OSX", "win_64": "Windows", "linux_aarch64": "Arm64", "linux_ppc64le": "PowerPC64", } fancy_platforms = [] unfancy_platforms = set() configs = [] for metas, platform, arch, enable, upload in zip( metas_list_of_lists, platforms, archs, enable_platform, upload_packages, ): if enable: configs.extend( dump_subspace_config_files( metas, forge_dir, platform, arch, upload, forge_config ) ) plat_arch = f"{platform}_{arch}" forge_config[plat_arch]["enabled"] = True fancy_platforms.append(fancy_name.get(plat_arch, plat_arch)) unfancy_platforms.add(plat_arch) elif platform in extra_platform_files: for each_target_fname in extra_platform_files[platform]: remove_file(each_target_fname) for key in extra_platform_files.keys(): if key != "common" and key not in platforms: for each_target_fname in extra_platform_files[key]: remove_file(each_target_fname) forge_config[provider_name]["platforms"] = ",".join(fancy_platforms) forge_config[provider_name]["all_platforms"] = list(unfancy_platforms) # Copy the config now. Changes below shouldn't persist across CI. forge_config = deepcopy(forge_config) forge_config["configs"] = configs forge_config["fast_finish"] = _get_fast_finish_script( provider_name, forge_dir=forge_dir, forge_config=forge_config, fast_finish_text=fast_finish_text, ) # If the recipe has its own conda_forge_ci_setup package, then # install that if os.path.exists( os.path.join( forge_dir, forge_config["recipe_dir"], "conda_forge_ci_setup", "__init__.py", ) ) and os.path.exists( os.path.join( forge_dir, forge_config["recipe_dir"], "setup.py", ) ): forge_config["local_ci_setup"] = True else: forge_config["local_ci_setup"] = False # hook for extending with whatever platform specific junk we need. # Function passed in as argument build_platforms = OrderedDict() for platform, arch, enable in zip(platforms, archs, enable_platform): if enable: build_platform = forge_config["build_platform"][ f"{platform}_{arch}" ].split("_")[0] build_platforms[build_platform] = True for platform in build_platforms.keys(): platform_specific_setup( jinja_env=jinja_env, forge_dir=forge_dir, forge_config=forge_config, platform=platform, ) template = jinja_env.get_template(platform_template_file) with write_file(platform_target_path) as fh: fh.write(template.render(**forge_config)) # circleci needs a placeholder file of sorts - always write the output, even if no metas if provider_name == "circle": template = jinja_env.get_template(platform_template_file) with write_file(platform_target_path) as fh: fh.write(template.render(**forge_config)) # TODO: azure-pipelines might need the same as circle if return_metadata: return dict( forge_config=forge_config, metas_list_of_lists=metas_list_of_lists, platforms=platforms, archs=archs, enable_platform=enable_platform, ) else: return forge_config
def _render_ci_provider( provider_name, jinja_env, forge_config, forge_dir, platforms, archs, fast_finish_text, platform_target_path, platform_template_file, platform_specific_setup, keep_noarchs=None, extra_platform_files={}, upload_packages=[], return_metadata=False, ): if keep_noarchs is None: keep_noarchs = [False] * len(platforms) metas_list_of_lists = [] enable_platform = [False] * len(platforms) for i, (platform, arch, keep_noarch) in enumerate( zip(platforms, archs, keep_noarchs) ): os.environ["CONFIG_VERSION"] = forge_config["config_version"] os.environ["BUILD_PLATFORM"] = forge_config["build_platform"][ f"{platform}_{arch}" ].replace("_", "-") # set the environment variable for OS version if platform == "linux": ver = forge_config["os_version"][f"{platform}_{arch}"] if ver: os.environ["DEFAULT_LINUX_VERSION"] = ver config = conda_build.config.get_or_merge_config( None, exclusive_config_file=forge_config["exclusive_config_file"], platform=platform, arch=arch, ) # Get the combined variants from normal variant locations prior to running migrations ( combined_variant_spec, _, ) = conda_build.variants.get_package_combined_spec( os.path.join(forge_dir, forge_config["recipe_dir"]), config=config ) migrated_combined_variant_spec = migrate_combined_spec( combined_variant_spec, forge_dir, config, forge_config, ) for channel_target in migrated_combined_variant_spec.get("channel_targets", []): if channel_target.startswith("conda-forge ") and provider_name == "github_actions": raise RuntimeError("Using github_actions as the CI provider inside " "conda-forge github org is not allowed in order " "to avoid a denial of service for other infrastructure.") # AFAIK there is no way to get conda build to ignore the CBC yaml # in the recipe. This one can mess up migrators applied with local # CBC yaml files where variants in the migrators are not in the CBC. # Thus we move it out of the way. # TODO: upstream this as a flag in conda-build try: _recipe_cbc = os.path.join( forge_dir, forge_config["recipe_dir"], "conda_build_config.yaml", ) if os.path.exists(_recipe_cbc): os.rename(_recipe_cbc, _recipe_cbc + ".conda.smithy.bak") metas = conda_build.api.render( os.path.join(forge_dir, forge_config["recipe_dir"]), platform=platform, arch=arch, ignore_system_variants=True, variants=migrated_combined_variant_spec, permit_undefined_jinja=True, finalize=False, bypass_env_check=True, channel_urls=forge_config.get("channels", {}).get( "sources", [] ), ) finally: if os.path.exists(_recipe_cbc + ".conda.smithy.bak"): os.rename(_recipe_cbc + ".conda.smithy.bak", _recipe_cbc) # render returns some download & reparsing info that we don't care about metas = [m for m, _, _ in metas] if not keep_noarch: to_delete = [] for idx, meta in enumerate(metas): if meta.noarch: # do not build noarch, including noarch: python, packages on Travis CI. to_delete.append(idx) for idx in reversed(to_delete): del metas[idx] for meta in metas: if not meta.skip(): enable_platform[i] = True metas_list_of_lists.append(metas) if not any(enable_platform): # There are no cases to build (not even a case without any special # dependencies), so remove the run_docker_build.sh if it exists. 
forge_config[provider_name]["enabled"] = False target_fnames = [platform_target_path] if extra_platform_files: for val in extra_platform_files.values(): target_fnames.extend(val) for each_target_fname in target_fnames: remove_file(each_target_fname) else: forge_config[provider_name]["enabled"] = True fancy_name = { "linux_64": "Linux", "osx_64": "OSX", "win_64": "Windows", "linux_aarch64": "Arm64", "linux_ppc64le": "PowerPC64", } fancy_platforms = [] unfancy_platforms = set() configs = [] for metas, platform, arch, enable, upload in zip( metas_list_of_lists, platforms, archs, enable_platform, upload_packages, ): if enable: configs.extend( dump_subspace_config_files( metas, forge_dir, platform, arch, upload, forge_config ) ) plat_arch = f"{platform}_{arch}" forge_config[plat_arch]["enabled"] = True fancy_platforms.append(fancy_name.get(plat_arch, plat_arch)) unfancy_platforms.add(plat_arch) elif platform in extra_platform_files: for each_target_fname in extra_platform_files[platform]: remove_file(each_target_fname) for key in extra_platform_files.keys(): if key != "common" and key not in platforms: for each_target_fname in extra_platform_files[key]: remove_file(each_target_fname) forge_config[provider_name]["platforms"] = ",".join(fancy_platforms) forge_config[provider_name]["all_platforms"] = list(unfancy_platforms) # Copy the config now. Changes below shouldn't persist across CI. forge_config = deepcopy(forge_config) forge_config["configs"] = configs forge_config["fast_finish"] = _get_fast_finish_script( provider_name, forge_dir=forge_dir, forge_config=forge_config, fast_finish_text=fast_finish_text, ) # If the recipe has its own conda_forge_ci_setup package, then # install that if os.path.exists( os.path.join( forge_dir, forge_config["recipe_dir"], "conda_forge_ci_setup", "__init__.py", ) ) and os.path.exists( os.path.join( forge_dir, forge_config["recipe_dir"], "setup.py", ) ): forge_config["local_ci_setup"] = True else: forge_config["local_ci_setup"] = False # hook for extending with whatever platform specific junk we need. # Function passed in as argument build_platforms = OrderedDict() for platform, arch, enable in zip(platforms, archs, enable_platform): if enable: build_platform = forge_config["build_platform"][ f"{platform}_{arch}" ].split("_")[0] build_platforms[build_platform] = True for platform in build_platforms.keys(): platform_specific_setup( jinja_env=jinja_env, forge_dir=forge_dir, forge_config=forge_config, platform=platform, ) template = jinja_env.get_template(platform_template_file) with write_file(platform_target_path) as fh: fh.write(template.render(**forge_config)) # circleci needs a placeholder file of sorts - always write the output, even if no metas if provider_name == "circle": template = jinja_env.get_template(platform_template_file) with write_file(platform_target_path) as fh: fh.write(template.render(**forge_config)) # TODO: azure-pipelines might need the same as circle if return_metadata: return dict( forge_config=forge_config, metas_list_of_lists=metas_list_of_lists, platforms=platforms, archs=archs, enable_platform=enable_platform, ) else: return forge_config
32,071
def enrich_offense_result( client: QRadarClient, response, ip_enrich=False, asset_enrich=False ): """ Enriches the values of a given offense result * epoch timestamps -> ISO time string * closing reason id -> name * Domain id -> name - collect all ids from offenses (if available) - collect all ids from assets (if available) - get id->name map - update all values in offenses and assets * Rule id -> name * IP id -> value * IP value -> Asset * Add offense link """ domain_ids = set() rule_ids = set() if isinstance(response, list): try: type_dict = client.get_offense_types() except Exception as e: demisto.error(f"Encountered an issue while getting offense type: {e}") type_dict = {} try: closing_reason_dict = client.get_closing_reasons( include_deleted=True, include_reserved=True ) except Exception as e: demisto.error(f"Encountered an issue while getting offense closing reasons: {e}") closing_reason_dict = {} for offense in response: offense["LinkToOffense"] = f"{client.server}/console/do/sem/offensesummary?" \ f"appName=Sem&pageId=OffenseSummary&summaryId={offense.get('id')}" enrich_offense_timestamps_and_closing_reason( client, offense, type_dict, closing_reason_dict ) if 'domain_id' in offense: domain_ids.add(offense['domain_id']) if 'rules' in offense and isinstance(offense['rules'], list): for rule in offense['rules']: if 'id' in rule: rule_ids.add(rule['id']) if ip_enrich or asset_enrich: enrich_offenses_with_assets_and_source_destination_addresses( client, response, ip_enrich, asset_enrich ) if asset_enrich: # get assets from offenses that have assets assets_list = list(map(lambda o: o['assets'], filter(lambda o: 'assets' in o, response))) for assets in assets_list: domain_ids.update({asset['domain_id'] for asset in assets}) if domain_ids and DOMAIN_ENRCH_FLG == "True": enrich_offense_res_with_domain_names(client, domain_ids, response) if rule_ids and RULES_ENRCH_FLG == "True": enrich_offense_res_with_rule_names(client, rule_ids, response) else: enrich_offense_timestamps_and_closing_reason(client, response) return response
def enrich_offense_result( client: QRadarClient, response, ip_enrich=False, asset_enrich=False ): """ Enriches the values of a given offense result * epoch timestamps -> ISO time string * closing reason id -> name * Domain id -> name - collect all ids from offenses (if available) - collect all ids from assets (if available) - get id->name map - update all values in offenses and assets * Rule id -> name * IP id -> value * IP value -> Asset * Add offense link """ domain_ids = set() rule_ids = set() if isinstance(response, list): try: type_dict = client.get_offense_types() except Exception as e: demisto.error(f"Encountered an issue while getting offense types: {e}") type_dict = {} try: closing_reason_dict = client.get_closing_reasons( include_deleted=True, include_reserved=True ) except Exception as e: demisto.error(f"Encountered an issue while getting offense closing reasons: {e}") closing_reason_dict = {} for offense in response: offense["LinkToOffense"] = f"{client.server}/console/do/sem/offensesummary?" \ f"appName=Sem&pageId=OffenseSummary&summaryId={offense.get('id')}" enrich_offense_timestamps_and_closing_reason( client, offense, type_dict, closing_reason_dict ) if 'domain_id' in offense: domain_ids.add(offense['domain_id']) if 'rules' in offense and isinstance(offense['rules'], list): for rule in offense['rules']: if 'id' in rule: rule_ids.add(rule['id']) if ip_enrich or asset_enrich: enrich_offenses_with_assets_and_source_destination_addresses( client, response, ip_enrich, asset_enrich ) if asset_enrich: # get assets from offenses that have assets assets_list = list(map(lambda o: o['assets'], filter(lambda o: 'assets' in o, response))) for assets in assets_list: domain_ids.update({asset['domain_id'] for asset in assets}) if domain_ids and DOMAIN_ENRCH_FLG == "True": enrich_offense_res_with_domain_names(client, domain_ids, response) if rule_ids and RULES_ENRCH_FLG == "True": enrich_offense_res_with_rule_names(client, rule_ids, response) else: enrich_offense_timestamps_and_closing_reason(client, response) return response
4,109
def create_pipeline(context, mode, exclude_classes=()): assert mode in ('pyx', 'py', 'pxd') from .Visitor import PrintTree from .ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse from .ParseTreeTransforms import ForwardDeclareTypes, InjectGilHandling, AnalyseDeclarationsTransform from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform from .ParseTreeTransforms import CalculateQualifiedNamesTransform from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic from .ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions from .ParseTreeTransforms import RemoveUnreachableCode, GilCheck from .FlowControl import ControlFlowAnalysis from .AnalysedTreeTransforms import AutoTestDictTransform from .AutoDocTransforms import EmbedSignature from .Optimize import FlattenInListTransform, SwitchTransform, IterationTransform from .Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls from .Optimize import InlineDefNodeCalls from .Optimize import ConstantFolding, FinalOptimizePhase from .Optimize import DropRefcountingTransform from .Optimize import ConsolidateOverflowCheck from .TypeStubGenerator import TypeStubGenerator from .Buffer import IntroduceBufferAuxiliaryVars from .ModuleNode import check_c_declarations, check_c_declarations_pxd if mode == 'pxd': _check_c_declarations = check_c_declarations_pxd _specific_post_parse = PxdPostParse(context) else: _check_c_declarations = check_c_declarations _specific_post_parse = None if mode == 'py': _align_function_definitions = AlignFunctionDefinitions(context) else: _align_function_definitions = None # NOTE: This is the "common" parts of the pipeline, which is also # code in pxd files. So it will be run multiple times in a # compilation stage. stages = [ TypeStubGenerator(), NormalizeTree(context), PostParse(context), _specific_post_parse, TrackNumpyAttributes(), InterpretCompilerDirectives(context, context.compiler_directives), ParallelRangeTransform(context), AdjustDefByDirectives(context), WithTransform(context), MarkClosureVisitor(context), _align_function_definitions, RemoveUnreachableCode(context), ConstantFolding(), FlattenInListTransform(), DecoratorTransform(context), ForwardDeclareTypes(context), InjectGilHandling(), AnalyseDeclarationsTransform(context), AutoTestDictTransform(context), EmbedSignature(context), EarlyReplaceBuiltinCalls(context), ## Necessary? TransformBuiltinMethods(context), MarkParallelAssignments(context), ControlFlowAnalysis(context), RemoveUnreachableCode(context), # MarkParallelAssignments(context), MarkOverflowingArithmetic(context), IntroduceBufferAuxiliaryVars(context), _check_c_declarations, InlineDefNodeCalls(context), AnalyseExpressionsTransform(context), FindInvalidUseOfFusedTypes(context), ExpandInplaceOperators(context), IterationTransform(context), SwitchTransform(context), OptimizeBuiltinCalls(context), ## Necessary? 
CreateClosureClasses(context), ## After all lookups and type inference CalculateQualifiedNamesTransform(context), ConsolidateOverflowCheck(context), DropRefcountingTransform(), FinalOptimizePhase(context), GilCheck() ] filtered_stages = [] for s in stages: if s.__class__ not in exclude_classes: filtered_stages.append(s) return filtered_stages
def create_pipeline(context, mode, exclude_classes=()): assert mode in ('pyx', 'py', 'pxd') from .Visitor import PrintTree from .ParseTreeTransforms import WithTransform, NormalizeTree, PostParse, PxdPostParse from .ParseTreeTransforms import ForwardDeclareTypes, InjectGilHandling, AnalyseDeclarationsTransform from .ParseTreeTransforms import AnalyseExpressionsTransform, FindInvalidUseOfFusedTypes from .ParseTreeTransforms import CreateClosureClasses, MarkClosureVisitor, DecoratorTransform from .ParseTreeTransforms import TrackNumpyAttributes, InterpretCompilerDirectives, TransformBuiltinMethods from .ParseTreeTransforms import ExpandInplaceOperators, ParallelRangeTransform from .ParseTreeTransforms import CalculateQualifiedNamesTransform from .TypeInference import MarkParallelAssignments, MarkOverflowingArithmetic from .ParseTreeTransforms import AdjustDefByDirectives, AlignFunctionDefinitions from .ParseTreeTransforms import RemoveUnreachableCode, GilCheck from .FlowControl import ControlFlowAnalysis from .AnalysedTreeTransforms import AutoTestDictTransform from .AutoDocTransforms import EmbedSignature from .Optimize import FlattenInListTransform, SwitchTransform, IterationTransform from .Optimize import EarlyReplaceBuiltinCalls, OptimizeBuiltinCalls from .Optimize import InlineDefNodeCalls from .Optimize import ConstantFolding, FinalOptimizePhase from .Optimize import DropRefcountingTransform from .Optimize import ConsolidateOverflowCheck from .TypeStubGenerator import TypeStubGenerator from .Buffer import IntroduceBufferAuxiliaryVars from .ModuleNode import check_c_declarations, check_c_declarations_pxd if mode == 'pxd': _check_c_declarations = check_c_declarations_pxd _specific_post_parse = PxdPostParse(context) else: _check_c_declarations = check_c_declarations _specific_post_parse = None if mode == 'py': _align_function_definitions = AlignFunctionDefinitions(context) else: _align_function_definitions = None # NOTE: This is the "common" parts of the pipeline, which is also # code in pxd files. So it will be run multiple times in a # compilation stage. stages = [ TypeStubGenerator(), NormalizeTree(context), PostParse(context), _specific_post_parse, TrackNumpyAttributes(), InterpretCompilerDirectives(context, context.compiler_directives), ParallelRangeTransform(context), AdjustDefByDirectives(context), WithTransform(context), MarkClosureVisitor(context), _align_function_definitions, RemoveUnreachableCode(context), ConstantFolding(), FlattenInListTransform(), DecoratorTransform(context), ForwardDeclareTypes(context), InjectGilHandling(), AnalyseDeclarationsTransform(context), AutoTestDictTransform(context), EmbedSignature(context), EarlyReplaceBuiltinCalls(context), ## Necessary? TransformBuiltinMethods(context), MarkParallelAssignments(context), ControlFlowAnalysis(context), RemoveUnreachableCode(context), # MarkParallelAssignments(context), MarkOverflowingArithmetic(context), IntroduceBufferAuxiliaryVars(context), _check_c_declarations, InlineDefNodeCalls(context), AnalyseExpressionsTransform(context), FindInvalidUseOfFusedTypes(context), ExpandInplaceOperators(context), IterationTransform(context), SwitchTransform(context), OptimizeBuiltinCalls(context), ## Necessary? 
CreateClosureClasses(context), ## After all lookups and type inference CalculateQualifiedNamesTransform(context), ConsolidateOverflowCheck(context), DropRefcountingTransform(), FinalOptimizePhase(context), GilCheck(), ] filtered_stages = [] for s in stages: if s.__class__ not in exclude_classes: filtered_stages.append(s) return filtered_stages
41,773
def objective(trial): # filenames for each trial must be made unique in order to access each checkpoint checkpoint_callback = pl.callbacks.ModelCheckpoint( os.path.join(MODEL_DIR, "trial_{}".format(trial.number), "{epoch}"), monitor="val_acc" ) # The default logger in PyTorch Lightning writes to event files to be consumed by # TensorBoard. We don't use any logger here as it requires us to implement several abstract # methods. Instead we setup a simple callback, that saves metrics from each validation step. metrics_callback = MetricsCallback() trainer = pl.Trainer( logger=False, val_percent_check=PERCENT_VALID_EXAMPLES, checkpoint_callback=checkpoint_callback, max_epochs=EPOCHS, gpus=0 if torch.cuda.is_available() else None, callbacks=[metrics_callback], early_stop_callback=PyTorchLightningPruningCallback(trial, monitor="val_acc"), ) model = LightningNet(trial) trainer.fit(model) return metrics_callback.metrics[-1]["val_acc"]
def objective(trial): # Filenames for each trial must be made unique in order to access each checkpoint. checkpoint_callback = pl.callbacks.ModelCheckpoint( os.path.join(MODEL_DIR, "trial_{}".format(trial.number), "{epoch}"), monitor="val_acc" ) # The default logger in PyTorch Lightning writes to event files to be consumed by # TensorBoard. We don't use any logger here as it requires us to implement several abstract # methods. Instead we setup a simple callback, that saves metrics from each validation step. metrics_callback = MetricsCallback() trainer = pl.Trainer( logger=False, val_percent_check=PERCENT_VALID_EXAMPLES, checkpoint_callback=checkpoint_callback, max_epochs=EPOCHS, gpus=0 if torch.cuda.is_available() else None, callbacks=[metrics_callback], early_stop_callback=PyTorchLightningPruningCallback(trial, monitor="val_acc"), ) model = LightningNet(trial) trainer.fit(model) return metrics_callback.metrics[-1]["val_acc"]
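For context, a minimal sketch of how an objective like the pair above is typically driven; the study settings below (direction, pruner, trial count) are illustrative assumptions and not part of the original snippet.

import optuna

def run_study(objective, n_trials=20):
    # Maximize the validation accuracy returned by the objective and let the
    # pruning callback cut unpromising trials short via MedianPruner.
    study = optuna.create_study(direction="maximize", pruner=optuna.pruners.MedianPruner())
    study.optimize(objective, n_trials=n_trials)
    return study.best_trial

# Usage (assuming the `objective` defined above is importable):
# best = run_study(objective)
# print(best.value, best.params)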
35,532
def get_vin(logcan, sendcan, bus, timeout=0.1, retry=5, debug=False): addrs = [0x7e0, 0x7e2, 0x18da10f1] # engine, VMCU, 29-bit engine for i in range(retry): for request, response in ((UDS_VIN_REQUEST, UDS_VIN_RESPONSE), (OBD_VIN_REQUEST, OBD_VIN_RESPONSE)): try: query = IsoTpParallelQuery(sendcan, logcan, bus, addrs, [request, ], [response, ], debug=debug) for (addr, rx_addr), vin in query.get_data(timeout).items(): # Honda Bosch response starts with a length, trim to correct length if vin.startswith(b'\x11'): vin = vin[1:18] return addr[0], rx_addr, vin.decode() print(f"vin query retry ({i+1}) ...") except Exception: cloudlog.warning(f"VIN query exception: {traceback.format_exc()}") return 0, 0, VIN_UNKNOWN
def get_vin(logcan, sendcan, bus, timeout=0.1, retry=5, debug=False): addrs = [0x7e0, 0x7e2, 0x18da10f1, 0x18da0ef1] # engine, VMCU, 29-bit engine, unknown for i in range(retry): for request, response in ((UDS_VIN_REQUEST, UDS_VIN_RESPONSE), (OBD_VIN_REQUEST, OBD_VIN_RESPONSE)): try: query = IsoTpParallelQuery(sendcan, logcan, bus, addrs, [request, ], [response, ], debug=debug) for (addr, rx_addr), vin in query.get_data(timeout).items(): # Honda Bosch response starts with a length, trim to correct length if vin.startswith(b'\x11'): vin = vin[1:18] return addr[0], rx_addr, vin.decode() print(f"vin query retry ({i+1}) ...") except Exception: cloudlog.warning(f"VIN query exception: {traceback.format_exc()}") return 0, 0, VIN_UNKNOWN
8,296
def check_for_relationchoice(obj, attribute):
    """Raise an exception if the attribute is not a RelationChoice field
    for the object.
    """
    fti = getUtility(IDexterityFTI, name=obj.portal_type)
    field_and_schema = get_field_and_schema_for_fieldname(attribute, fti)
    if field_and_schema is None:
        # No field found
        raise RuntimeError(u'{} is no field on {}.'.format(
            attribute, obj.portal_type))
    field, schema = field_and_schema
    if not isinstance(field, (Relation, RelationChoice)):
        # No RelationChoice field found
        raise RuntimeError(u'{} is no RelationChoice field for {}.'.format(
            attribute, obj.portal_type))
def check_for_relationchoice(obj, attribute):
    """Raise an exception if the attribute is not a RelationChoice field
    for the object.
    """
    field_and_schema = get_field_and_schema_for_fieldname(attribute, obj.portal_type)
    if field_and_schema is None:
        # No field found
        raise RuntimeError(u'{} is no field on {}.'.format(
            attribute, obj.portal_type))
    field, schema = field_and_schema
    if not isinstance(field, (Relation, RelationChoice)):
        # No RelationChoice field found
        raise RuntimeError(u'{} is no RelationChoice field for {}.'.format(
            attribute, obj.portal_type))
40,571
def add_co2limit(n, Nyears=1., factor=None): if factor: annual_emissions = factor*snakemake.config['electricity']['co2base'] else: annual_emissions = snakemake.config['electricity']['co2limit'] n.add("GlobalConstraint", "CO2Limit", carrier_attribute="co2_emissions", sense="<=", constant=annual_emissions * Nyears)
def add_co2limit(n, Nyears=1., factor=None): if factor is not None: annual_emissions = factor*snakemake.config['electricity']['co2base'] else: annual_emissions = snakemake.config['electricity']['co2limit'] n.add("GlobalConstraint", "CO2Limit", carrier_attribute="co2_emissions", sense="<=", constant=annual_emissions * Nyears)
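The change above hinges on Python truthiness: `if factor:` silently treats a legitimate factor of 0.0 as missing, while `if factor is not None:` only falls back when the argument was omitted. A self-contained illustration with hypothetical stand-in names:

def pick_limit(factor=None, co2base=100.0, co2limit=50.0):
    # Correct: fall back to the absolute limit only when no factor was given.
    if factor is not None:
        return factor * co2base
    return co2limit

def pick_limit_buggy(factor=None, co2base=100.0, co2limit=50.0):
    # Buggy: 0.0 is falsy, so a zero-emission factor is silently ignored.
    if factor:
        return factor * co2base
    return co2limit

assert pick_limit(0.0) == 0.0          # zero cap honoured
assert pick_limit_buggy(0.0) == 50.0   # zero cap replaced by the default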
51,798
def _enable_or_disable(args): _config = spack.main.SpackCommand('config') # Set to True if we called "enable", otherwise set to false value = str(args.subcommand == 'enable').lower() scope, config_args = args.scope, [] if scope: config_args.append('--scope={0}'.format(scope)) config_args.extend([ 'add', 'bootstrap:enable:{0}'.format(value) ]) _config(*config_args)
def _enable_or_disable(args): # Set to True if we called "enable", otherwise set to false value = str(args.subcommand == 'enable').lower() spack.config.set('bootstrap:enable', value, scope=args.scope)
59,714
def test_grdhisteq_no_outgrid(grid): """ Test the azimuth and direction parameters for grdhisteq with no set outgrid. """ temp_grid = grdhisteq(grid=grid) assert temp_grid.dims == ("lat", "lon") assert temp_grid.gmt.gtype == 1 # Geographic grid assert temp_grid.gmt.registration == 1 # Pixel registration
def test_grdhisteq_no_outgrid(grid): """ Test the quadratic and region parameters for grdhisteq with no set outgrid. """ temp_grid = grdhisteq(grid=grid, quadratic=True, region=[-3, 1, 2, 5]) assert temp_grid.dims == ("lat", "lon") assert temp_grid.gmt.gtype == 1 # Geographic grid assert temp_grid.gmt.registration == 1 # Pixel registration
39,463
def make_r2d(argv=None): if argv is None: argv = sys.argv[1:] # version must be checked before parse, as repo/cmd are required and # will spit out an error if allowed to be parsed first. if "--version" in argv: print(__version__) sys.exit(0) args = get_argparser().parse_args(argv) r2d = Repo2Docker() if args.debug: r2d.log_level = logging.DEBUG r2d.load_config_file(args.config) if args.appendix: r2d.appendix = args.appendix r2d.repo = args.repo r2d.ref = args.ref # user wants to mount a local directory into the container for # editing if args.editable: # the user has to point at a directory, not just a path for us # to be able to mount it. We might have content providers that can # provide content from a local `something.zip` file, which we # couldn't mount in editable mode if os.path.isdir(args.repo): r2d.volumes[os.path.abspath(args.repo)] = "." else: r2d.log.error( 'Cannot mount "{}" in editable mode ' "as it is not a directory".format(args.repo), extra=dict(phase="failed"), ) sys.exit(1) if args.image_name: r2d.output_image_spec = args.image_name else: # we will pick a name after fetching the repository r2d.output_image_spec = "" r2d.json_logs = args.json_logs r2d.dry_run = not args.build if r2d.dry_run: # Can't push nor run if we aren't building args.run = False args.push = False r2d.gpus = args.gpus r2d.run = args.run r2d.push = args.push # check against r2d.run and not args.run as r2d.run is false on # --no-build. Also r2d.volumes and not args.volumes since --editable # modified r2d.volumes if r2d.volumes and not r2d.run: # Can't mount if we aren't running print("To Mount volumes with -v, you also need to run the " "container") sys.exit(1) for v in args.volumes: src, dest = v.split(":") r2d.volumes[src] = dest r2d.run_cmd = args.cmd if args.all_ports and not r2d.run: print( "To publish user defined port mappings, the container must " "also be run" ) sys.exit(1) if args.ports and not r2d.run: print( "To publish user defined port mappings, the container must " "also be run" ) sys.exit(1) if args.ports and not r2d.run_cmd: print( "To publish user defined port mapping, user must specify " "the command to run in the container" ) sys.exit(1) r2d.ports = validate_and_generate_port_mapping(args.ports) r2d.all_ports = args.all_ports if args.user_id: r2d.user_id = args.user_id if args.user_name: r2d.user_name = args.user_name if r2d.user_id == 0 and not r2d.dry_run: print("Root as the primary user in the image is not permitted.") print( "The uid and the username of the user invoking repo2docker " "is used to create a mirror account in the image by default. " "To override that behavior pass --user-id <numeric_id> and " " --user-name <string> to repo2docker.\n" "Please see repo2docker --help for more details.\n" ) sys.exit(1) if args.build_memory_limit: # if the string only contains numerals we assume it should be an int # and specifies a size in bytes if args.build_memory_limit.isnumeric(): r2d.build_memory_limit = int(args.build_memory_limit) else: r2d.build_memory_limit = args.build_memory_limit if args.environment and not r2d.run: print("To specify environment variables, you also need to run " "the container") sys.exit(1) if args.subdir: r2d.subdir = args.subdir if args.cache_from: r2d.cache_from = args.cache_from if args.engine: r2d.engine = args.engine r2d.environment = args.environment # if the source exists locally we don't want to delete it at the end # FIXME: Find a better way to figure out if repo is 'local'. Push this into ContentProvider? 
if os.path.exists(args.repo): r2d.cleanup_checkout = False else: r2d.cleanup_checkout = args.clean if args.target_repo_dir: r2d.target_repo_dir = args.target_repo_dir return r2d
def make_r2d(argv=None): if argv is None: argv = sys.argv[1:] # version must be checked before parse, as repo/cmd are required and # will spit out an error if allowed to be parsed first. if "--version" in argv: print(__version__) sys.exit(0) args = get_argparser().parse_args(argv) r2d = Repo2Docker() if args.debug: r2d.log_level = logging.DEBUG r2d.load_config_file(args.config) if args.appendix: r2d.appendix = args.appendix r2d.repo = args.repo r2d.ref = args.ref # user wants to mount a local directory into the container for # editing if args.editable: # the user has to point at a directory, not just a path for us # to be able to mount it. We might have content providers that can # provide content from a local `something.zip` file, which we # couldn't mount in editable mode if os.path.isdir(args.repo): r2d.volumes[os.path.abspath(args.repo)] = "." else: r2d.log.error( 'Cannot mount "{}" in editable mode ' "as it is not a directory".format(args.repo), extra=dict(phase="failed"), ) sys.exit(1) if args.image_name: r2d.output_image_spec = args.image_name else: # we will pick a name after fetching the repository r2d.output_image_spec = "" r2d.json_logs = args.json_logs r2d.dry_run = not args.build if r2d.dry_run: # Can't push nor run if we aren't building args.run = False args.push = False r2d.expose_gpus = args.gpus r2d.run = args.run r2d.push = args.push # check against r2d.run and not args.run as r2d.run is false on # --no-build. Also r2d.volumes and not args.volumes since --editable # modified r2d.volumes if r2d.volumes and not r2d.run: # Can't mount if we aren't running print("To Mount volumes with -v, you also need to run the " "container") sys.exit(1) for v in args.volumes: src, dest = v.split(":") r2d.volumes[src] = dest r2d.run_cmd = args.cmd if args.all_ports and not r2d.run: print( "To publish user defined port mappings, the container must " "also be run" ) sys.exit(1) if args.ports and not r2d.run: print( "To publish user defined port mappings, the container must " "also be run" ) sys.exit(1) if args.ports and not r2d.run_cmd: print( "To publish user defined port mapping, user must specify " "the command to run in the container" ) sys.exit(1) r2d.ports = validate_and_generate_port_mapping(args.ports) r2d.all_ports = args.all_ports if args.user_id: r2d.user_id = args.user_id if args.user_name: r2d.user_name = args.user_name if r2d.user_id == 0 and not r2d.dry_run: print("Root as the primary user in the image is not permitted.") print( "The uid and the username of the user invoking repo2docker " "is used to create a mirror account in the image by default. " "To override that behavior pass --user-id <numeric_id> and " " --user-name <string> to repo2docker.\n" "Please see repo2docker --help for more details.\n" ) sys.exit(1) if args.build_memory_limit: # if the string only contains numerals we assume it should be an int # and specifies a size in bytes if args.build_memory_limit.isnumeric(): r2d.build_memory_limit = int(args.build_memory_limit) else: r2d.build_memory_limit = args.build_memory_limit if args.environment and not r2d.run: print("To specify environment variables, you also need to run " "the container") sys.exit(1) if args.subdir: r2d.subdir = args.subdir if args.cache_from: r2d.cache_from = args.cache_from if args.engine: r2d.engine = args.engine r2d.environment = args.environment # if the source exists locally we don't want to delete it at the end # FIXME: Find a better way to figure out if repo is 'local'. Push this into ContentProvider? 
if os.path.exists(args.repo): r2d.cleanup_checkout = False else: r2d.cleanup_checkout = args.clean if args.target_repo_dir: r2d.target_repo_dir = args.target_repo_dir return r2d
29,308
def send_email_to_new_contribution_reviewer( recipient_id, review_category, language_code=None): """Sends an email to user who is assigned as a reviewer. Args: recipient_id: str. The ID of the user. review_category: str. The category in which user can review. language_code: None|str. The language code for a language if the review item is translation or voiceover else None. Raises: Exception. The review_category is not present in NEW_REVIEWER_EMAIL_DATA. """ if review_category not in NEW_REVIEWER_EMAIL_DATA: raise Exception('Invalid review_category: %s' % review_category) review_category_data = NEW_REVIEWER_EMAIL_DATA[review_category] email_subject = 'You have been invited to review Oppia %s' % ( review_category_data['review_category']) if review_category in [ constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION, constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER]: language_description = utils.get_supported_audio_language_description( language_code).capitalize() review_category_description = ( review_category_data['description_template'] % language_description) reviewer_rights_message = ( review_category_data['rights_message_template'] % ( language_description)) else: review_category_description = review_category_data['description'] reviewer_rights_message = review_category_data['rights_message'] to_review = review_category_data['to_check'] email_body_template = ( 'Hi %s,<br><br>' 'This is to let you know that the Oppia team has added you as a ' 'reviewer for %s. This allows you to %s.<br><br>' 'You can check the %s waiting for review in the ' '<a href="https://www.oppia.org/contributor-dashboard">' 'Contributor Dashboard</a>.<br><br>' 'Thanks, and happy contributing!<br><br>' 'Best wishes,<br>' 'The Oppia Community') if not feconf.CAN_SEND_EMAILS: log_new_error('This app cannot send emails to users.') return recipient_username = user_services.get_username(recipient_id) can_user_receive_email = user_services.get_email_preferences( recipient_id).can_receive_email_updates # Send email only if recipient wants to receive. if can_user_receive_email: email_body = email_body_template % ( recipient_username, review_category_description, reviewer_rights_message, to_review) _send_email( recipient_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_ONBOARD_REVIEWER, email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
def send_email_to_new_contribution_reviewer( recipient_id, review_category, language_code=None): """Sends an email to user who is assigned as a reviewer. Args: recipient_id: str. The ID of the user. review_category: str. The category in which user can review. language_code: None|str. The language code for a language if the review item is translation or voiceover else None. Raises: Exception. The review category is not valid. """ if review_category not in NEW_REVIEWER_EMAIL_DATA: raise Exception('Invalid review_category: %s' % review_category) review_category_data = NEW_REVIEWER_EMAIL_DATA[review_category] email_subject = 'You have been invited to review Oppia %s' % ( review_category_data['review_category']) if review_category in [ constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_TRANSLATION, constants.CONTRIBUTION_RIGHT_CATEGORY_REVIEW_VOICEOVER]: language_description = utils.get_supported_audio_language_description( language_code).capitalize() review_category_description = ( review_category_data['description_template'] % language_description) reviewer_rights_message = ( review_category_data['rights_message_template'] % ( language_description)) else: review_category_description = review_category_data['description'] reviewer_rights_message = review_category_data['rights_message'] to_review = review_category_data['to_check'] email_body_template = ( 'Hi %s,<br><br>' 'This is to let you know that the Oppia team has added you as a ' 'reviewer for %s. This allows you to %s.<br><br>' 'You can check the %s waiting for review in the ' '<a href="https://www.oppia.org/contributor-dashboard">' 'Contributor Dashboard</a>.<br><br>' 'Thanks, and happy contributing!<br><br>' 'Best wishes,<br>' 'The Oppia Community') if not feconf.CAN_SEND_EMAILS: log_new_error('This app cannot send emails to users.') return recipient_username = user_services.get_username(recipient_id) can_user_receive_email = user_services.get_email_preferences( recipient_id).can_receive_email_updates # Send email only if recipient wants to receive. if can_user_receive_email: email_body = email_body_template % ( recipient_username, review_category_description, reviewer_rights_message, to_review) _send_email( recipient_id, feconf.SYSTEM_COMMITTER_ID, feconf.EMAIL_INTENT_ONBOARD_REVIEWER, email_subject, email_body, feconf.NOREPLY_EMAIL_ADDRESS)
20,837
def BrowserManager( command_queue, status_queue, browser_params, manager_params, crash_recovery ): """ The BrowserManager function runs in each new browser process. It is responsible for listening to command instructions from the Task Manager and passing them to the command module to execute and interface with Selenium. Command execution status is sent back to the TaskManager. """ logger = logging.getLogger("openwpm") try: # Start Xvfb (if necessary), webdriver, and browser driver, prof_folder, browser_settings = deploy_browser.deploy_browser( status_queue, browser_params, manager_params, crash_recovery ) if prof_folder[-1] != "/": prof_folder += "/" # Read the extension port -- if extension is enabled # TODO: Initial communication from extension to TM should use sockets if ( browser_params["browser"] == "firefox" and browser_params["extension_enabled"] ): logger.debug( "BROWSER %i: Looking for extension port information " "in %s" % (browser_params["browser_id"], prof_folder) ) elapsed = 0 port = None ep_filename = os.path.join(prof_folder, "extension_port.txt") while elapsed < 5: try: with open(ep_filename, "rt") as f: port = int(f.read().strip()) break except IOError as e: if e.errno != errno.ENOENT: raise time.sleep(0.1) elapsed += 0.1 if port is None: # try one last time, allowing all exceptions to propagate with open(ep_filename, "rt") as f: port = int(f.read().strip()) logger.debug( "BROWSER %i: Connecting to extension on port %i" % (browser_params["browser_id"], port) ) extension_socket = clientsocket(serialization="json") extension_socket.connect("127.0.0.1", int(port)) else: extension_socket = None logger.debug("BROWSER %i: BrowserManager ready." % browser_params["browser_id"]) # passes the profile folder back to the # TaskManager to signal a successful startup status_queue.put(("STATUS", "Browser Ready", (prof_folder, "READY"))) browser_params["profile_path"] = prof_folder # starts accepting arguments until told to die while True: # no command for now -> sleep to avoid pegging CPU on blocking get if command_queue.empty(): time.sleep(0.001) continue command = command_queue.get() if type(command) is ShutdownCommand: # Geckodriver creates a copy of the profile (and the original # temp file created by FirefoxProfile() is deleted). # We clear the profile attribute here to prevent prints from: # https://github.com/SeleniumHQ/selenium/blob/4e4160dd3d2f93757cafb87e2a1c20d6266f5554/py/selenium/webdriver/firefox/webdriver.py#L193-L199 if driver.profile and not os.path.isdir(driver.profile.path): driver.profile = None driver.quit() status_queue.put("OK") return logger.info( "BROWSER %i: EXECUTING COMMAND: %s" % (browser_params["browser_id"], str(command)) ) # attempts to perform an action and return an OK signal # if command fails for whatever reason, tell the TaskManager to # kill and restart its worker processes try: command_executor.execute_command( command, driver, browser_settings, browser_params, manager_params, extension_socket, ) status_queue.put("OK") except WebDriverException: status_obj = Status() # We handle WebDriverExceptions separately here because they # are quite common, and we often still have a handle to the # browser, allowing us to run the SHUTDOWN command. 
                string_tb = traceback.format_exception(*sys.exc_info())
                if "about:neterror" in string_tb[-1]:
                    status_obj.set_name("NETERROR")
                    status_queue.put(pickle.dumps(status_obj))
                    continue
                extra = parse_traceback_for_sentry(string_tb)
                extra["exception"] = string_tb[-1]
                logger.error(
                    "BROWSER %i: WebDriverException while executing command"
                    % browser_params["browser_id"],
                    exc_info=True,
                    extra=extra,
                )
                status_obj.set_name("FAILED")
                status_obj.tb = sys.exc_info()
                status_queue.put(pickle.dumps(status_obj))
    except (ProfileLoadError, BrowserConfigError, AssertionError) as e:
        status_obj = Status()
        logger.error(
            "BROWSER %i: %s thrown, informing parent and raising"
            % (browser_params["browser_id"], e.__class__.__name__)
        )
        status_obj.set_name("CRITICAL")
        status_obj.tb = sys.exc_info()
        status_queue.put(pickle.dumps(status_obj))
        return
    except Exception:
        status_obj = Status()
        string_tb = traceback.format_exception(*sys.exc_info())
        extra = parse_traceback_for_sentry(string_tb)
        extra["exception"] = string_tb[-1]
        logger.error(
            "BROWSER %i: Crash in driver, restarting browser manager"
            % browser_params["browser_id"],
            exc_info=True,
            extra=extra,
        )
        status_obj.set_name("FAILED")
        status_obj.tb = sys.exc_info()
        status_queue.put(pickle.dumps(status_obj))
        return
def BrowserManager( command_queue, status_queue, browser_params, manager_params, crash_recovery ): """ The BrowserManager function runs in each new browser process. It is responsible for listening to command instructions from the Task Manager and passing them to the command module to execute and interface with Selenium. Command execution status is sent back to the TaskManager. """ logger = logging.getLogger("openwpm") try: # Start Xvfb (if necessary), webdriver, and browser driver, prof_folder, browser_settings = deploy_browser.deploy_browser( status_queue, browser_params, manager_params, crash_recovery ) if prof_folder[-1] != "/": prof_folder += "/" # Read the extension port -- if extension is enabled # TODO: Initial communication from extension to TM should use sockets if ( browser_params["browser"] == "firefox" and browser_params["extension_enabled"] ): logger.debug( "BROWSER %i: Looking for extension port information " "in %s" % (browser_params["browser_id"], prof_folder) ) elapsed = 0 port = None ep_filename = os.path.join(prof_folder, "extension_port.txt") while elapsed < 5: try: with open(ep_filename, "rt") as f: port = int(f.read().strip()) break except IOError as e: if e.errno != errno.ENOENT: raise time.sleep(0.1) elapsed += 0.1 if port is None: # try one last time, allowing all exceptions to propagate with open(ep_filename, "rt") as f: port = int(f.read().strip()) logger.debug( "BROWSER %i: Connecting to extension on port %i" % (browser_params["browser_id"], port) ) extension_socket = clientsocket(serialization="json") extension_socket.connect("127.0.0.1", int(port)) else: extension_socket = None logger.debug("BROWSER %i: BrowserManager ready." % browser_params["browser_id"]) # passes the profile folder back to the # TaskManager to signal a successful startup status_queue.put(("STATUS", "Browser Ready", (prof_folder, "READY"))) browser_params["profile_path"] = prof_folder # starts accepting arguments until told to die while True: # no command for now -> sleep to avoid pegging CPU on blocking get if command_queue.empty(): time.sleep(0.001) continue command = command_queue.get() if type(command) is ShutdownCommand: # Geckodriver creates a copy of the profile (and the original # temp file created by FirefoxProfile() is deleted). # We clear the profile attribute here to prevent prints from: # https://github.com/SeleniumHQ/selenium/blob/4e4160dd3d2f93757cafb87e2a1c20d6266f5554/py/selenium/webdriver/firefox/webdriver.py#L193-L199 if driver.profile and not os.path.isdir(driver.profile.path): driver.profile = None driver.quit() status_queue.put("OK") return logger.info( "BROWSER %i: EXECUTING COMMAND: %s" % (browser_params["browser_id"], str(command)) ) # attempts to perform an action and return an OK signal # if command fails for whatever reason, tell the TaskManager to # kill and restart its worker processes try: command_executor.execute_command( command, driver, browser_settings, browser_params, manager_params, extension_socket, ) status_queue.put("OK") except WebDriverException: status_obj = Status() # We handle WebDriverExceptions separately here because they # are quite common, and we often still have a handle to the # browser, allowing us to run the SHUTDOWN command. 
                string_tb = traceback.format_exception(*sys.exc_info())
                if "about:neterror" in string_tb[-1]:
                    status_obj.set_name("NETERROR")
                    status_queue.put(status_obj)
                    continue
                extra = parse_traceback_for_sentry(string_tb)
                extra["exception"] = string_tb[-1]
                logger.error(
                    "BROWSER %i: WebDriverException while executing command"
                    % browser_params["browser_id"],
                    exc_info=True,
                    extra=extra,
                )
                status_obj.set_name("FAILED")
                status_obj.tb = sys.exc_info()
                status_queue.put(pickle.dumps(status_obj))
    except (ProfileLoadError, BrowserConfigError, AssertionError) as e:
        status_obj = Status()
        logger.error(
            "BROWSER %i: %s thrown, informing parent and raising"
            % (browser_params["browser_id"], e.__class__.__name__)
        )
        status_obj.set_name("CRITICAL")
        status_obj.tb = sys.exc_info()
        status_queue.put(pickle.dumps(status_obj))
        return
    except Exception:
        status_obj = Status()
        string_tb = traceback.format_exception(*sys.exc_info())
        extra = parse_traceback_for_sentry(string_tb)
        extra["exception"] = string_tb[-1]
        logger.error(
            "BROWSER %i: Crash in driver, restarting browser manager"
            % browser_params["browser_id"],
            exc_info=True,
            extra=extra,
        )
        status_obj.set_name("FAILED")
        status_obj.tb = sys.exc_info()
        status_queue.put(pickle.dumps(status_obj))
        return
53,270
def get_paschen_constants (gas,electrode): r""" Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage Parameters ---------- gas : 'str' electrode : 'str' String representing the gas and electrode material Return ------ dictionary containing the constants A, B and townsend_gamma for calculation of the breakdwn voltage References --------- Paschen_constants contains the coefficents A and B for the estimation of the First Townsend Ionization Coefficent (exponential fit to the First Townsend Ionization coefficient) as adapted from E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics, Wiley-Interscience, New York 1971 format: paschen_constants dir {"gas":[A,B]} units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)] Townsend_gamma is the Second Townsend Ionization coefficient as given by A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application IOP Publishing Ltd 2016 ISBN 978-0-7503-1236-3 (ebook) ISBN 978-0-7503-1237-0 (print) Examples -------- c=def get_paschen_constants ("Ar","Ni): c={'A': 11, 'B': 135, 'gam': 0.058} c=def get_paschen_constants ("Ar","zz"): c={'A': 11, 'B': 135, 'gam': 0.01} If electrode material is not found a default value of 0.01 is taken c=def get_paschen_constants ("Zz","Ni"): c=None If gas is not found, c is set to None """ # Supported gases gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"] paschen_constants={"Air":[11,274], "N2":[9.0, 257], "H2":[3.8,104], "He":[2.3,26], "Ne":[3.0, 75], "Ar":[11,135], "Kr":[13,180], "Xe":[20,263]} # Supported electrode materials materials=["Al","Cu","Ni","Pt","C","W","Fe"] townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02}, "N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059}, "H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061}, "He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015}, "Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022}, "Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058}, "Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}, "Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}} # Check if the asked gas and electrode material is supported resg= gas in gases rese=electrode in materials # If the gas is supported get the constants A and B print(resg,rese) if resg==True : print(gas) A=paschen_constants[gas][0] B=paschen_constants[gas][1] print(A,B) # Get the townsend_gamma coefficient for the the gas/electrode combination if rese==True: gam=townsend_gamma[gas] print(gam) gn=gam[electrode] print (gn) # Test if townsend_gamma exists for the demanded gas/electrode configuration # If not a default townsend_gamma value of 0.01 is taken if gn is None: gn=0.01 print("default") print(gn) else: # If the electrode material is not supportes set townsend_gamma to default = 0.01 gn=0.01 print("default") # Create output dir {const} const={"A":A,"B":B,"gam":gn} print(const) return const # If gas is not supported set const=None else : const=None print("No constants for this gas available",const) return const
def get_paschen_constants (gas,electrode): r""" Function to get the constants A and B and the second Townsend coefficient to calculate the Paschen breakdown voltage Parameters ---------- gas : `str` The gas for which to find the coefficients. electrode : `str` The electrode material. Return ------ dictionary containing the constants A, B and townsend_gamma for calculation of the breakdwn voltage References --------- Paschen_constants contains the coefficents A and B for the estimation of the First Townsend Ionization Coefficent (exponential fit to the First Townsend Ionization coefficient) as adapted from E.Nasser, Fundamentals of Gaseous Ionization and Plasma Electronics, Wiley-Interscience, New York 1971 format: paschen_constants dir {"gas":[A,B]} units: A in [Ionisation/(Pa m)] and B in [V/(Pa m)] Townsend_gamma is the Second Townsend Ionization coefficient as given by A.Beroual and I. Fonfana, Discharge in Long Air Gap Modeling and Application IOP Publishing Ltd 2016 ISBN 978-0-7503-1236-3 (ebook) ISBN 978-0-7503-1237-0 (print) Examples -------- c=def get_paschen_constants ("Ar","Ni): c={'A': 11, 'B': 135, 'gam': 0.058} c=def get_paschen_constants ("Ar","zz"): c={'A': 11, 'B': 135, 'gam': 0.01} If electrode material is not found a default value of 0.01 is taken c=def get_paschen_constants ("Zz","Ni"): c=None If gas is not found, c is set to None """ # Supported gases gases=["Air","N2","H2","He","Ne","Ar","Kr","Xe"] paschen_constants={"Air":[11,274], "N2":[9.0, 257], "H2":[3.8,104], "He":[2.3,26], "Ne":[3.0, 75], "Ar":[11,135], "Kr":[13,180], "Xe":[20,263]} # Supported electrode materials materials=["Al","Cu","Ni","Pt","C","W","Fe"] townsend_gamma={"Air":{"Al":0.035,"Cu":0.025,"Ni":0.036,"Pt":0.017,"C":None,"W":None,"Fe":0.02}, "N2":{"Al":0.1,"Cu":0.066,"Ni":0.077,"Pt":0.59,"C":None,"W":None,"Fe":0.059}, "H2":{"Al":0.095,"Cu":0.05,"Ni":0.053,"Pt":0.02,"C":0.014,"W":None,"Fe":0.061}, "He":{"Al":0.021,"Cu":None,"Ni":0.015,"Pt":0.01,"C":None,"W":None,"Fe":0.015}, "Ne":{"Al":0.053,"Cu":0.02,"Ni":0.031,"Pt":0.023,"C":None,"W":0.045,"Fe":0.022}, "Ar":{"Al":0.12,"Cu":0.058,"Ni":0.058,"Pt":0.058,"C":None,"W":None,"Fe":0.058}, "Kr":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}, "Xe":{"Al":None,"Cu":None,"Ni":None,"Pt":None,"C":None,"W":None,"Fe":None}} # Check if the asked gas and electrode material is supported resg= gas in gases rese=electrode in materials # If the gas is supported get the constants A and B print(resg,rese) if resg==True : print(gas) A=paschen_constants[gas][0] B=paschen_constants[gas][1] print(A,B) # Get the townsend_gamma coefficient for the the gas/electrode combination if rese==True: gam=townsend_gamma[gas] print(gam) gn=gam[electrode] print (gn) # Test if townsend_gamma exists for the demanded gas/electrode configuration # If not a default townsend_gamma value of 0.01 is taken if gn is None: gn=0.01 print("default") print(gn) else: # If the electrode material is not supportes set townsend_gamma to default = 0.01 gn=0.01 print("default") # Create output dir {const} const={"A":A,"B":B,"gam":gn} print(const) return const # If gas is not supported set const=None else : const=None print("No constants for this gas available",const) return const
12,490
def setup_compute_version() -> str: # We allow an environment variable to override version, but we should probably # enforce that it is consistent with the existing version minus additional information. if "MYPY_VERSION" in os.environ: assert os.environ["MYPY_VERSION"].hasprefix(base_version) return os.environ["MYPY_VERSION"] mypy_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) if base_version.endswith('+dev') and git.is_git_repo(mypy_dir) and git.have_git(): version = base_version + '.' + git.git_revision(mypy_dir).decode('utf-8') if git.is_dirty(mypy_dir): return version + ".dirty" return version return base_version
def setup_compute_version() -> str: # We allow an environment variable to override version, but we should probably # enforce that it is consistent with the existing version minus additional information. if "MYPY_VERSION" in os.environ: assert os.environ["MYPY_VERSION"].startswith(base_version) return os.environ["MYPY_VERSION"] mypy_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) if base_version.endswith('+dev') and git.is_git_repo(mypy_dir) and git.have_git(): version = base_version + '.' + git.git_revision(mypy_dir).decode('utf-8') if git.is_dirty(mypy_dir): return version + ".dirty" return version return base_version
55,994
def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = jax.random.permutation(rng, len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: jnp.array(v) for k, v in batch.items()} batch = shard(batch) yield batch
def train_data_collator(rng: PRNGKey, dataset: Dataset, batch_size: int): """Returns shuffled batches of size `batch_size` from truncated `train dataset`, sharded over all local devices.""" steps_per_epoch = len(dataset) // batch_size perms = np.random.permutation(len(dataset)) perms = perms[: steps_per_epoch * batch_size] # Skip incomplete batch. perms = perms.reshape((steps_per_epoch, batch_size)) for perm in perms: batch = dataset[perm] batch = {k: np.array(v) for k, v in batch.items()} batch = shard(batch) yield batch
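A minimal, self-contained sketch of the shuffle-and-truncate batching the collator performs, using plain NumPy on a toy dict-of-arrays dataset (the `toy_batches` helper and its data are assumptions standing in for the real Dataset and device sharding):

import numpy as np

def toy_batches(dataset, batch_size, seed=0):
    # dataset: dict of equal-length arrays, e.g. {"x": ..., "y": ...}
    n = len(next(iter(dataset.values())))
    steps = n // batch_size
    perm = np.random.default_rng(seed).permutation(n)[: steps * batch_size]  # drop the incomplete batch
    for idx in perm.reshape(steps, batch_size):
        yield {k: v[idx] for k, v in dataset.items()}

data = {"x": np.arange(10), "y": np.arange(10) * 2}
for batch in toy_batches(data, batch_size=4):
    print(batch["x"], batch["y"])  # two full batches of 4; the leftover 2 rows are skipped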
33,581
def _parse_results(res_path): res_dict = {} try: with open(res_path) as f: # Get last line in file for line in f: pass res_dict = flatten_dict(json.loads(line.strip())) except Exception: logger.exception("Importing %s failed...Perhaps empty?" % res_path) return res_dict
def _parse_results(res_path): res_dict = {} try: with open(res_path) as f: # Get last line in file for line in f: pass res_dict = flatten_dict(json.loads(line.strip())) except Exception: logger.exception("Importing {} failed...Perhaps empty?".format(res_path)) return res_dict
39,094
def migrate_instance_to_cmek(project, zone, instance, keyRing, keyName, keyVersion, destructive): start = time.time() zone_regexp = r'^(\w\w-\w*\d)-(\w)$' region = re.search(zone_regexp, zone).group(1) compute = googleapiclient.discovery.build('compute', 'v1') # TODO: Consider if we can use compute.disks().list() and do disk checking # before we decide to stop the VM instance. stop_instance(compute, project, zone, instance) disks = get_instance_disks(compute, project, zone, instance) for sourceDisk in disks: disk_regexp = r'^https:\/\/www\.googleapis\.com\/compute\/v1\/projects\/(.*?)\/zones\/(.*?)\/disks\/(.*?)$' disk_url = sourceDisk['source'] existing_disk_name = re.search(disk_regexp,disk_url).group(3) if 'diskEncryptionKey' in sourceDisk: print('Skipping {0}, already encrypyed with {1}', existing_disk_name, sourceDisk['diskEncryptionKey']) continue snapshot_name = existing_disk_name + '-goog-to-cmek' new_disk_name = existing_disk_name + '-cmek' disk_type = get_disk_type(compute, project, zone, existing_disk_name) create_snapshot(compute, project, zone, existing_disk_name, snapshot_name) key_name='projects/{0}/locations/{1}/keyRings/{2}/cryptoKeys/{3}/cryptoKeyVersions/{4}'.format(project, region, keyRing, keyName, keyVersion) create_disk(compute, project, region, zone, snapshot_name, new_disk_name, disk_type, key_name) detach_disk(compute, project, zone, instance, existing_disk_name) boot = sourceDisk['boot'] autoDelete = sourceDisk['autoDelete'] attach_disk(compute, project, zone, instance, new_disk_name, boot, autoDelete) if destructive: delete_disk(compute, project, zone, existing_disk_name) delete_snapshot(compute, project, snapshot_name) start_instance(compute, project, zone, instance) end = time.time() print('Migration took {0} seconds.'.format(end -start))
def migrate_instance_to_cmek(project, zone, instance, keyRing, keyName, keyVersion, destructive): start = time.time() zone_regexp = r'^(\w\w-\w*\d)-(\w)$' region = re.search(zone_regexp, zone).group(1) compute = googleapiclient.discovery.build('compute', 'v1') # TODO: Consider if we can use compute.disks().list() and do disk checking # before we decide to stop the VM instance. stop_instance(compute, project, zone, instance) disks = get_instance_disks(compute, project, zone, instance) for sourceDisk in disks: disk_regexp = r'^https:\/\/www\.googleapis\.com\/compute\/v1\/projects\/(.*?)\/zones\/(.*?)\/disks\/(.*?)$' disk_url = sourceDisk['source'] existing_disk_name = re.search(disk_regexp,disk_url).group(3) if 'diskEncryptionKey' in sourceDisk: print('Skipping {0}, already encrypyed with {1}', existing_disk_name, sourceDisk['diskEncryptionKey']) continue snapshot_name = '{}-goog-to-cmek'.format(existing_disk_name) new_disk_name = existing_disk_name + '-cmek' disk_type = get_disk_type(compute, project, zone, existing_disk_name) create_snapshot(compute, project, zone, existing_disk_name, snapshot_name) key_name='projects/{0}/locations/{1}/keyRings/{2}/cryptoKeys/{3}/cryptoKeyVersions/{4}'.format(project, region, keyRing, keyName, keyVersion) create_disk(compute, project, region, zone, snapshot_name, new_disk_name, disk_type, key_name) detach_disk(compute, project, zone, instance, existing_disk_name) boot = sourceDisk['boot'] autoDelete = sourceDisk['autoDelete'] attach_disk(compute, project, zone, instance, new_disk_name, boot, autoDelete) if destructive: delete_disk(compute, project, zone, existing_disk_name) delete_snapshot(compute, project, snapshot_name) start_instance(compute, project, zone, instance) end = time.time() print('Migration took {0} seconds.'.format(end -start))
26,575
def _integrate_plugins(): """Integrate plugins to the context""" import sys from airflow.plugins_manager import macros_modules for macros_module in macros_modules: sys.modules[macros_module.__name__] = macros_module globals()[macros_module.__name__.split('.')[-1]] = macros_module
def _integrate_plugins(): """Integrate plugins to the context""" import sys from airflow.plugins_manager import macros_modules for macros_module in macros_modules: sys.modules[macros_module.__name__] = macros_module globals()[macros_module.__name__.lower().split('.')[-1]] = macros_module
53,792
def json_file_contents(parser, arg): if not os.path.exists(arg) or not os.path.isfile(arg): return parser.error("The file '{}' does not exist".format(arg)) with open(arg, "r") as f: try: output = json.load(f) except ValueError as e: # Use ValueError rather than JSONDecodeError for Py2 compatibility return parser.error("The file '{}' is not valid JSON:\n{}".format(arg, e)) return output
def json_file_contents(parser, arg): if not os.path.exists(arg) or not os.path.isfile(arg): return parser.error("The file '{}' does not exist".format(arg)) with open(arg, "r") as f: try: return json.load(f) except ValueError as e: # Use ValueError rather than JSONDecodeError for Py2 compatibility return parser.error("The file '{}' is not valid JSON:\n{}".format(arg, e))
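A hedged sketch of how a validator like this is commonly bound to argparse through a `type=` callable; the `--payload` flag and `settings.json` path are made-up examples, not taken from the original CLI:

import argparse

parser = argparse.ArgumentParser()
# Bind the already-created parser so a bad path or invalid JSON surfaces as a
# normal argparse usage error; `json_file_contents` is assumed importable.
parser.add_argument("--payload", type=lambda arg: json_file_contents(parser, arg))

# args = parser.parse_args(["--payload", "settings.json"])  # hypothetical file
# print(args.payload)  # already parsed into a dict/list by the validator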
11,810
def logical_or(image1, image2): """Logical OR between two images. At least one of the images must be "1" mode. .. code-block:: python out = ((image1 or image2) % MAX) :rtype: :py:class:`~PIL.Image.Image` """ image1.load() image2.load() return image1._new(image1.im.chop_or(image2.im))
def logical_or(image1, image2): """Logical OR between two images. At least one of the images must be "1" mode "1". .. code-block:: python out = ((image1 or image2) % MAX) :rtype: :py:class:`~PIL.Image.Image` """ image1.load() image2.load() return image1._new(image1.im.chop_or(image2.im))
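A small usage sketch of this operation as exposed through Pillow's ImageChops module, built on two in-memory mode "1" images (sizes and fill values are arbitrary):

from PIL import Image, ImageChops

# Two 4x4 bilevel ("1") images: one all black, one all white.
black = Image.new("1", (4, 4), 0)
white = Image.new("1", (4, 4), 1)

out = ImageChops.logical_or(black, white)
print(out.mode, set(out.getdata()))  # '1' {255} -- every pixel is set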
43,671
def _proc_wires(wires, n_wires=None): r""" Checks and processes custom user wire mapping into a consistent, direction-free, Wires format. Used for converting between OpenFermion qubit numbering and Pennylane wire labels. Since OpenFermion's quibit numbering is always consecutive int, simple iterable types such as list, tuple, or Wires can be used to specify the qubit<->wire mapping with indices acting as qubits. Dict can also be used as a mapping, but does not provide any advantage over lists other than the ability to do partial mapping/permutation in the qubit->wire direction. It is recommended pass Wires/list/tuple `wires` since it's direction-free, i.e. the same `wires` argument can be used to convert both ways between OpenFermion and Pennylane. Only use dict for partial or unordered mapping. **Example usage:** >>> # consec int wires if no wires mapping provided, ie. identity map: 0<->0, 1<->1, 2<->2 >>> _proc_wires(None, 3) <Wires = [0, 1, 2]> >>> # List as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2 >>> _proc_wires(['w0','w1','w2']) <Wires = ['w0', 'w1', 'w2']> >>> # Wires as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2 >>> _proc_wires(Wires(['w0', 'w1', 'w2'])) <Wires = ['w0', 'w1', 'w2']> >>> # Dict as partial mapping, int qubits keys to wire label values: 0->w0, 1 unchanged, 2->w2 >>> _proc_wires({0:'w0',2:'w2'}) <Wires = ['w0', 1, 'w2']> >>> # Dict as mapping, wires label keys to consec int qubit values: w2->2, w0->0, w1->1 >>> _proc_wires({'w2':2, 'w0':0, 'w1':1}) <Wires = ['w0', 'w1', 'w2']> Args: wires (Wires, list, tuple, dict): User wire labels or mapping for Pennylane ansatz. For types Wires, list, or tuple, each item in the iterable represents a wire label corresponding to the qubit number equal to its index. For type dict, only int-keyed dict (for qubit-to-wire conversion) or consecutive-int-valued dict (for wire-to-qubit conversion) is accepted. If None, will be set to consecutive int based on ``n_wires``. n_wires (int): Number of wires used if known. If None, will infer from ``wires``; if ``wires`` is not available, will be set to 1. Defaults to None. Returns: Wires: Cleaned wire mapping with indices corresponding to qubits and values corresponding to wire labels. """ # infer from wires, or assume 1 if wires is not of accepted types. if n_wires is None: n_wires = len(wires) if isinstance(wires, (Wires, list, tuple, dict)) else 1 # defaults to no mapping. if wires is None: return Wires(range(n_wires)) if isinstance(wires, (Wires, list, tuple)): # does not care about the tail if more wires are provided than n_wires. wires = Wires(wires[:n_wires]) elif isinstance(wires, dict): if all([isinstance(w, int) for w in wires.keys()]): # Assuming keys are taken from consecutive int wires. Allows for partial mapping. n_wires = max(wires) + 1 labels = list(range(n_wires)) # used for completing potential partial mapping. for k, v in wires.items(): if k < n_wires: labels[k] = v wires = Wires(labels) elif set(range(n_wires)).issubset(set(wires.values())): # Assuming values are consecutive int wires (up to n_wires, ignores the rest). # Does NOT allow for partial mapping. 
wires = {v: k for k, v in wires.items()} # flip for easy indexing wires = Wires([wires[i] for i in range(n_wires)]) else: raise ValueError("Expected only int-keyed or consecutive int-valued dict for `wires`") else: raise ValueError( "Expected type Wires, list, tuple, or dict for `wires`, got {}".format(type(wires)) ) if len(wires) != n_wires: # check length consistency when all checking and cleaning are done. raise ValueError( "Length of `wires` ({}) does not match `n_wires` ({})".format(len(wires), n_wires) ) return wires
def _proc_wires(wires, n_wires=None): r""" Checks and processes custom user wire mapping into a consistent, direction-free, Wires format. Used for converting between OpenFermion qubit numbering and Pennylane wire labels. Since OpenFermion's quibit numbering is always consecutive int, simple iterable types such as list, tuple, or Wires can be used to specify the qubit<->wire mapping with indices acting as qubits. Dict can also be used as a mapping, but does not provide any advantage over lists other than the ability to do partial mapping/permutation in the qubit->wire direction. It is recommended pass Wires/list/tuple `wires` since it's direction-free, i.e. the same `wires` argument can be used to convert both ways between OpenFermion and Pennylane. Only use dict for partial or unordered mapping. **Example usage:** >>> # consec int wires if no wires mapping provided, ie. identity map: 0<->0, 1<->1, 2<->2 >>> _proc_wires(None, 3) <Wires = [0, 1, 2]> >>> # List as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2 >>> _proc_wires(['w0','w1','w2']) <Wires = ['w0', 'w1', 'w2']> >>> # Wires as mapping, qubit indices with wire label values: 0<->w0, 1<->w1, 2<->w2 >>> _proc_wires(Wires(['w0', 'w1', 'w2'])) <Wires = ['w0', 'w1', 'w2']> >>> # Dict as partial mapping, int qubits keys to wire label values: 0->w0, 1 unchanged, 2->w2 >>> _proc_wires({0:'w0',2:'w2'}) <Wires = ['w0', 1, 'w2']> >>> # Dict as mapping, wires label keys to consec int qubit values: w2->2, w0->0, w1->1 >>> _proc_wires({'w2':2, 'w0':0, 'w1':1}) <Wires = ['w0', 'w1', 'w2']> Args: wires (Wires, list, tuple, dict): User wire labels or mapping for Pennylane ansatz. For types Wires, list, or tuple, each item in the iterable represents a wire label corresponding to the qubit number equal to its index. For type dict, only int-keyed dict (for qubit-to-wire conversion) or consecutive-int-valued dict (for wire-to-qubit conversion) is accepted. If None, will be set to consecutive int based on ``n_wires``. n_wires (int): Number of wires used if known. If None, will infer from ``wires``; if ``wires`` is not available, will be set to 1. Returns: Wires: Cleaned wire mapping with indices corresponding to qubits and values corresponding to wire labels. """ # infer from wires, or assume 1 if wires is not of accepted types. if n_wires is None: n_wires = len(wires) if isinstance(wires, (Wires, list, tuple, dict)) else 1 # defaults to no mapping. if wires is None: return Wires(range(n_wires)) if isinstance(wires, (Wires, list, tuple)): # does not care about the tail if more wires are provided than n_wires. wires = Wires(wires[:n_wires]) elif isinstance(wires, dict): if all([isinstance(w, int) for w in wires.keys()]): # Assuming keys are taken from consecutive int wires. Allows for partial mapping. n_wires = max(wires) + 1 labels = list(range(n_wires)) # used for completing potential partial mapping. for k, v in wires.items(): if k < n_wires: labels[k] = v wires = Wires(labels) elif set(range(n_wires)).issubset(set(wires.values())): # Assuming values are consecutive int wires (up to n_wires, ignores the rest). # Does NOT allow for partial mapping. 
wires = {v: k for k, v in wires.items()} # flip for easy indexing wires = Wires([wires[i] for i in range(n_wires)]) else: raise ValueError("Expected only int-keyed or consecutive int-valued dict for `wires`") else: raise ValueError( "Expected type Wires, list, tuple, or dict for `wires`, got {}".format(type(wires)) ) if len(wires) != n_wires: # check length consistency when all checking and cleaning are done. raise ValueError( "Length of `wires` ({}) does not match `n_wires` ({})".format(len(wires), n_wires) ) return wires
13,502
def xor_pair(data, avoid = b'\x00\n'): """xor_pair(data, avoid = '\\x00\\n') -> None or (str, str) Finds two strings that will xor into a given string, while only using a given alphabet. Arguments: data (str): The desired string. avoid: The list of disallowed characters. Defaults to nulls and newlines. Returns: Two strings which will xor to the given string. If no such two strings exist, then None is returned. Example: >>> xor_pair(b"test") (b'\\x01\\x01\\x01\\x01', b'udru') """ if isinstance(data, six.integer_types): data = packing.pack(data) if not (isinstance(avoid, bytes) or isinstance(avoid, bytearray)): avoid = avoid.encode('utf-8') avoid = bytearray(avoid) alphabet = list(packing._p8lu(n) for n in range(256) if n not in avoid) res1 = b'' res2 = b'' for c1 in bytearray(data): if context.randomize: random.shuffle(alphabet) for c2 in alphabet: c3 = packing._p8lu(c1 ^ packing.u8(c2)) if c3 in alphabet: res1 += c2 res2 += c3 break else: return None return res1, res2
def xor_pair(data, avoid = b'\x00\n'): """xor_pair(data, avoid = '\\x00\\n') -> None or (str, str) Finds two strings that will xor into a given string, while only using a given alphabet. Arguments: data (str): The desired string. avoid: The list of disallowed characters. Defaults to nulls and newlines. Returns: Two strings which will xor to the given string. If no such two strings exist, then None is returned. Example: >>> xor_pair(b"test") (b'\\x01\\x01\\x01\\x01', b'udru') """ if isinstance(data, six.integer_types): data = packing.pack(data) if not isinstance(avoid, (bytes, bytearray)): avoid = avoid.encode('utf-8') avoid = bytearray(avoid) alphabet = list(packing._p8lu(n) for n in range(256) if n not in avoid) res1 = b'' res2 = b'' for c1 in bytearray(data): if context.randomize: random.shuffle(alphabet) for c2 in alphabet: c3 = packing._p8lu(c1 ^ packing.u8(c2)) if c3 in alphabet: res1 += c2 res2 += c3 break else: return None return res1, res2
51,431
def _import_cftime_datetime_base(): try: from cftime import datetime_base as datetime except ImportError: from cftime import datetime return datetime
def _import_cftime_datetime_base(): try: from cftime import datetime_base except ImportError: from cftime import datetime as datetime_base return datetime_base
30,494
def get_indicator_type(indicator_type, item): """Checks the indicator type Args: indicator_type: IP, URL, domain or hash item: the indicator row from the csv response Returns: The indicator type per the indicators defined in Demisto """ if indicator_type == 'ip': return get_ip_type(item.get('Name')) elif indicator_type == 'hash': return 'File ' + item.get('Algorithm') elif indicator_type == 'domain': return 'Domain' elif indicator_type == 'url': return 'URL'
def get_indicator_type(indicator_type, item): """Checks the indicator type Args: indicator_type: IP, URL, domain or hash item: the indicator row from the csv response Returns: The indicator type per the indicators defined in Demisto """ if indicator_type == 'ip': return get_ip_type(item.get('Name')) elif indicator_type == 'hash': return 'File ' + item.get('Algorithm') elif indicator_type == 'domain': return FeedIndicatorType.Domain elif indicator_type == 'url': return 'URL'
29,205
def are_changes_mergeable(exp_id, version, change_list): """Checks whether the change list can be merged if the old_version is not equal to new_version. Args: exp_id: str. Id of the exploration in which changes are being made. version: int. Version of an exploration from frontend on which a user is working. change_list: list(ExplorationChange). List of the changes made by the user on the frontend, which needs to be checked for mergeability. Returns: boolean. A boolean value True if the changes are mergeable and False if the changes are not mergeable. """ latest_version = exp_fetchers.get_exploration_by_id(exp_id).version if latest_version == version: return True else: # A complete list of changes from one version to another # is composite_change_list. composite_change_list = get_composite_change_list( exp_id, version, latest_version) # Added_state_names: list(str). Name of the states added to the # exploration from prev_exp_version to current_exp_version. It stores # the newest name of the added state. # Deleted_state_names: list(str). Name of the states deleted from the # exploration from prev_exp_version to current_exp_version. It stores # the initial name of the deleted state from pre_exp_version. # New_to_old_state_names: dict. Dictionary mapping state names of # current_exp_version to the state names of prev_exp_version. # It doesn't include the name changes of added/deleted states. # Old_to_new_state_names: dict. Dictionary mapping state names of # prev_exp_version to the state names of current_exp_version. # It doesn't include the name changes of added/deleted states. # Changed_properties: dict. List of all the properties changed # according to the state and property name. added_state_names = [] deleted_state_names = [] new_to_old_state_names = {} changed_properties = {} old_version = exp_fetchers.get_exploration_by_id( exp_id, version=version) new_version = exp_fetchers.get_exploration_by_id( exp_id, version=latest_version) for change in composite_change_list: if change.cmd == exp_domain.CMD_ADD_STATE: added_state_names.append(change.state_name) elif change.cmd == exp_domain.CMD_DELETE_STATE: state_name = change.state_name if state_name in added_state_names: added_state_names.remove(state_name) else: original_state_name = state_name if original_state_name in new_to_old_state_names: original_state_name = new_to_old_state_names.pop( original_state_name) deleted_state_names.append(original_state_name) elif change.cmd == exp_domain.CMD_RENAME_STATE: old_state_name = change.old_state_name new_state_name = change.new_state_name if old_state_name in added_state_names: added_state_names.remove(old_state_name) added_state_names.append(new_state_name) elif old_state_name in new_to_old_state_names: new_to_old_state_names[new_state_name] = ( new_to_old_state_names.pop(old_state_name)) else: new_to_old_state_names[new_state_name] = old_state_name # A condition to store the name of the properties changed # in changed_properties dict. 
elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY: state_name = change.state_name if state_name in new_to_old_state_names: state_name = new_to_old_state_names[change.state_name] if state_name in changed_properties: if (change.property_name not in changed_properties[state_name]): changed_properties[state_name].append( change.property_name) else: changed_properties[state_name] = [change.property_name] old_to_new_state_names = { value: key for key, value in new_to_old_state_names.items() } if len(added_state_names) > 0 or len(deleted_state_names) > 0: # Here we will send the changelist, version, latest_version, # and exploration to the admin, so that the conditions # can we reviewed. return False changes_are_mergeable = False state_names_of_renamed_states = {} for change in change_list: change_is_mergeable = False if change.cmd == exp_domain.CMD_RENAME_STATE: old_state_name = change.old_state_name new_state_name = change.new_state_name if old_state_name in state_names_of_renamed_states: state_names_of_renamed_states[new_state_name] = ( state_names_of_renamed_states.pop(old_state_name)) else: state_names_of_renamed_states[new_state_name] = old_state_name # pylint: disable=line-too-long if (state_names_of_renamed_states[new_state_name] not in old_to_new_state_names): change_is_mergeable = True elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY: old_state_name = change.state_name new_state_name = change.state_name if change.state_name in state_names_of_renamed_states: old_state_name = state_names_of_renamed_states[change.state_name] # pylint: disable=line-too-long new_state_name = state_names_of_renamed_states[change.state_name] # pylint: disable=line-too-long if change.state_name in old_to_new_state_names: new_state_name = old_to_new_state_names[old_state_name] if change.property_name == exp_domain.STATE_PROPERTY_CONTENT: if (old_version.states[old_state_name].content.html == new_version.states[new_state_name].content.html): change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_ID): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if ('widget_customization_args' not in changed_properties[old_state_name] and 'answer_group' not in changed_properties[old_state_name] and 'solution' not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True # Customization args differ for every interaction, so in case of # different interactions merging is simply not possible, # but in case of same interaction, the values in the # customization_args are often lists so if someone changes # even one item of that list then determining which item is # changed is not feasible, so suppose there is long list of # values in item selection interaction and one user deletes # one value and another one edits another value, so after # deletion the indices of all the values will be changed and # it will not be possible to compare and know that which value # is changed by second user. # So we will not be handling the merge on the basis of # individual fields. 
elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if (change.property_name not in changed_properties[old_state_name] and 'answer_group' not in changed_properties[old_state_name] and 'solution' not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if ('widget_customization_args' not in changed_properties[old_state_name] and change.property_name not in changed_properties[old_state_name] and 'solution' not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME): if old_state_name in changed_properties: if (change.property_name not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX): change_is_mergeable = True # We’ll not be able to handle the merge if changelists affect # the different indices of the hint in the same state because # whenever there is even a small change in one field of any # hint, they treat the whole hints list as a new value. # So it will not be possible to find out the exact change. elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_HINTS): if old_state_name in changed_properties: if (change.property_name not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_SOLUTION): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if ('widget_customization_args' not in changed_properties[old_state_name] and 'answer_group' not in changed_properties[old_state_name]): old_solution = old_version.states[old_state_name].interaction.solution # pylint: disable=line-too-long new_solution = new_version.states[new_state_name].interaction.solution # pylint: disable=line-too-long if old_solution and new_solution: if (old_solution.answer_is_exclusive == new_solution.answer_is_exclusive and old_solution.correct_answer == new_solution.correct_answer and old_solution.explanation.html == new_solution.explanation.html): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id and # pylint: disable=line-too-long old_version.states[old_state_name].solicit_answer_details == # pylint: disable=line-too-long new_version.states[new_state_name].solicit_answer_details): # pylint: disable=line-too-long change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS): if old_state_name in changed_properties: if all(property not in changed_properties[old_state_name] for property in 
['content', 'solution', 'hints', 'written_translations', 'default_outcome', 'customization_args', 'recorded_voiceovers']): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_WRITTEN_TRANSLATIONS): if old_state_name in changed_properties: if all(property not in changed_properties[old_state_name] for property in ['content', 'solution', 'hints', 'written_translations', 'default_outcome', 'customization_args']): change_is_mergeable = True else: change_is_mergeable = True elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY: if change.property_name == 'title': if old_version.title == new_version.title: change_is_mergeable = True elif change.property_name == 'category': if old_version.category == new_version.category: change_is_mergeable = True elif change.property_name == 'objective': if old_version.objective == new_version.objective: change_is_mergeable = True elif change.property_name == 'language_code': if old_version.language_code == new_version.language_code: change_is_mergeable = True elif change.property_name == 'tags': if old_version.tags == new_version.tags: change_is_mergeable = True elif change.property_name == 'blurb': if old_version.blurb == new_version.blurb: change_is_mergeable = True elif change.property_name == 'author_notes': if old_version.author_notes == new_version.author_notes: change_is_mergeable = True elif change.property_name == 'init_state_name': if (old_version.init_state_name == new_version.init_state_name): change_is_mergeable = True elif change.property_name == 'auto_tts_enabled': if (old_version.auto_tts_enabled == new_version.auto_tts_enabled): change_is_mergeable = True elif change.property_name == 'correctness_feedback_enabled': if (old_version.correctness_feedback_enabled == new_version.correctness_feedback_enabled): change_is_mergeable = True if change_is_mergeable: changes_are_mergeable = True continue else: changes_are_mergeable = False return False return changes_are_mergeable
def are_changes_mergeable(exp_id, version, change_list): """Checks whether the change list can be merged if the old_version is not equal to new_version. Args: exp_id: str. Id of the exploration in which changes are being made. version: int. Version of an exploration from frontend on which a user is working. change_list: list(ExplorationChange). List of the changes made by the user on the frontend, which needs to be checked for mergeability. Returns: boolean. Whether the changes are mergeable. """ latest_version = exp_fetchers.get_exploration_by_id(exp_id).version if latest_version == version: return True else: # A complete list of changes from one version to another # is composite_change_list. composite_change_list = get_composite_change_list( exp_id, version, latest_version) # Added_state_names: list(str). Name of the states added to the # exploration from prev_exp_version to current_exp_version. It stores # the newest name of the added state. # Deleted_state_names: list(str). Name of the states deleted from the # exploration from prev_exp_version to current_exp_version. It stores # the initial name of the deleted state from pre_exp_version. # New_to_old_state_names: dict. Dictionary mapping state names of # current_exp_version to the state names of prev_exp_version. # It doesn't include the name changes of added/deleted states. # Old_to_new_state_names: dict. Dictionary mapping state names of # prev_exp_version to the state names of current_exp_version. # It doesn't include the name changes of added/deleted states. # Changed_properties: dict. List of all the properties changed # according to the state and property name. added_state_names = [] deleted_state_names = [] new_to_old_state_names = {} changed_properties = {} old_version = exp_fetchers.get_exploration_by_id( exp_id, version=version) new_version = exp_fetchers.get_exploration_by_id( exp_id, version=latest_version) for change in composite_change_list: if change.cmd == exp_domain.CMD_ADD_STATE: added_state_names.append(change.state_name) elif change.cmd == exp_domain.CMD_DELETE_STATE: state_name = change.state_name if state_name in added_state_names: added_state_names.remove(state_name) else: original_state_name = state_name if original_state_name in new_to_old_state_names: original_state_name = new_to_old_state_names.pop( original_state_name) deleted_state_names.append(original_state_name) elif change.cmd == exp_domain.CMD_RENAME_STATE: old_state_name = change.old_state_name new_state_name = change.new_state_name if old_state_name in added_state_names: added_state_names.remove(old_state_name) added_state_names.append(new_state_name) elif old_state_name in new_to_old_state_names: new_to_old_state_names[new_state_name] = ( new_to_old_state_names.pop(old_state_name)) else: new_to_old_state_names[new_state_name] = old_state_name # A condition to store the name of the properties changed # in changed_properties dict. 
elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY: state_name = change.state_name if state_name in new_to_old_state_names: state_name = new_to_old_state_names[change.state_name] if state_name in changed_properties: if (change.property_name not in changed_properties[state_name]): changed_properties[state_name].append( change.property_name) else: changed_properties[state_name] = [change.property_name] old_to_new_state_names = { value: key for key, value in new_to_old_state_names.items() } if len(added_state_names) > 0 or len(deleted_state_names) > 0: # Here we will send the changelist, version, latest_version, # and exploration to the admin, so that the conditions # can we reviewed. return False changes_are_mergeable = False state_names_of_renamed_states = {} for change in change_list: change_is_mergeable = False if change.cmd == exp_domain.CMD_RENAME_STATE: old_state_name = change.old_state_name new_state_name = change.new_state_name if old_state_name in state_names_of_renamed_states: state_names_of_renamed_states[new_state_name] = ( state_names_of_renamed_states.pop(old_state_name)) else: state_names_of_renamed_states[new_state_name] = old_state_name # pylint: disable=line-too-long if (state_names_of_renamed_states[new_state_name] not in old_to_new_state_names): change_is_mergeable = True elif change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY: old_state_name = change.state_name new_state_name = change.state_name if change.state_name in state_names_of_renamed_states: old_state_name = state_names_of_renamed_states[change.state_name] # pylint: disable=line-too-long new_state_name = state_names_of_renamed_states[change.state_name] # pylint: disable=line-too-long if change.state_name in old_to_new_state_names: new_state_name = old_to_new_state_names[old_state_name] if change.property_name == exp_domain.STATE_PROPERTY_CONTENT: if (old_version.states[old_state_name].content.html == new_version.states[new_state_name].content.html): change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_ID): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if ('widget_customization_args' not in changed_properties[old_state_name] and 'answer_group' not in changed_properties[old_state_name] and 'solution' not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True # Customization args differ for every interaction, so in case of # different interactions merging is simply not possible, # but in case of same interaction, the values in the # customization_args are often lists so if someone changes # even one item of that list then determining which item is # changed is not feasible, so suppose there is long list of # values in item selection interaction and one user deletes # one value and another one edits another value, so after # deletion the indices of all the values will be changed and # it will not be possible to compare and know that which value # is changed by second user. # So we will not be handling the merge on the basis of # individual fields. 
elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_CUST_ARGS): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if (change.property_name not in changed_properties[old_state_name] and 'answer_group' not in changed_properties[old_state_name] and 'solution' not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if ('widget_customization_args' not in changed_properties[old_state_name] and change.property_name not in changed_properties[old_state_name] and 'solution' not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_DEFAULT_OUTCOME): if old_state_name in changed_properties: if (change.property_name not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_NEXT_CONTENT_ID_INDEX): change_is_mergeable = True # We’ll not be able to handle the merge if changelists affect # the different indices of the hint in the same state because # whenever there is even a small change in one field of any # hint, they treat the whole hints list as a new value. # So it will not be possible to find out the exact change. elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_HINTS): if old_state_name in changed_properties: if (change.property_name not in changed_properties[old_state_name]): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_INTERACTION_SOLUTION): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id): # pylint: disable=line-too-long if ('widget_customization_args' not in changed_properties[old_state_name] and 'answer_group' not in changed_properties[old_state_name]): old_solution = old_version.states[old_state_name].interaction.solution # pylint: disable=line-too-long new_solution = new_version.states[new_state_name].interaction.solution # pylint: disable=line-too-long if old_solution and new_solution: if (old_solution.answer_is_exclusive == new_solution.answer_is_exclusive and old_solution.correct_answer == new_solution.correct_answer and old_solution.explanation.html == new_solution.explanation.html): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_SOLICIT_ANSWER_DETAILS): if old_state_name in changed_properties: if (old_version.states[old_state_name].interaction.id == new_version.states[new_state_name].interaction.id and # pylint: disable=line-too-long old_version.states[old_state_name].solicit_answer_details == # pylint: disable=line-too-long new_version.states[new_state_name].solicit_answer_details): # pylint: disable=line-too-long change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS): if old_state_name in changed_properties: if all(property not in changed_properties[old_state_name] for property in 
['content', 'solution', 'hints', 'written_translations', 'default_outcome', 'customization_args', 'recorded_voiceovers']): change_is_mergeable = True else: change_is_mergeable = True elif (change.property_name == exp_domain.STATE_PROPERTY_WRITTEN_TRANSLATIONS): if old_state_name in changed_properties: if all(property not in changed_properties[old_state_name] for property in ['content', 'solution', 'hints', 'written_translations', 'default_outcome', 'customization_args']): change_is_mergeable = True else: change_is_mergeable = True elif change.cmd == exp_domain.CMD_EDIT_EXPLORATION_PROPERTY: if change.property_name == 'title': if old_version.title == new_version.title: change_is_mergeable = True elif change.property_name == 'category': if old_version.category == new_version.category: change_is_mergeable = True elif change.property_name == 'objective': if old_version.objective == new_version.objective: change_is_mergeable = True elif change.property_name == 'language_code': if old_version.language_code == new_version.language_code: change_is_mergeable = True elif change.property_name == 'tags': if old_version.tags == new_version.tags: change_is_mergeable = True elif change.property_name == 'blurb': if old_version.blurb == new_version.blurb: change_is_mergeable = True elif change.property_name == 'author_notes': if old_version.author_notes == new_version.author_notes: change_is_mergeable = True elif change.property_name == 'init_state_name': if (old_version.init_state_name == new_version.init_state_name): change_is_mergeable = True elif change.property_name == 'auto_tts_enabled': if (old_version.auto_tts_enabled == new_version.auto_tts_enabled): change_is_mergeable = True elif change.property_name == 'correctness_feedback_enabled': if (old_version.correctness_feedback_enabled == new_version.correctness_feedback_enabled): change_is_mergeable = True if change_is_mergeable: changes_are_mergeable = True continue else: changes_are_mergeable = False return False return changes_are_mergeable
54,688
def upgrade_v34_to_v35(db: 'DBHandler') -> None: """Upgrades the DB from v34 to v35 - Change tables where time is used as column name to timestamp - Add user_notes table - Renames the asset identifiers to use CAIPS """ with db.user_write() as write_cursor: _rename_assets_identifiers(write_cursor) _refactor_time_columns(write_cursor) _create_new_tables(write_cursor) with db.conn.read_ctx() as read_cursor: _change_xpub_mappings_primary_key(write_cursor=write_cursor, read_cursor=read_cursor)
def upgrade_v34_to_v35(db: 'DBHandler') -> None: """Upgrades the DB from v34 to v35 - Change tables where time is used as column name to timestamp - Add user_notes table - Renames the asset identifiers to use CAIPS """ with db.user_write() as write_cursor, db.conn.read_ctx() as read_cursor: _rename_assets_identifiers(write_cursor) _refactor_time_columns(write_cursor) _create_new_tables(write_cursor) _change_xpub_mappings_primary_key(write_cursor=write_cursor, read_cursor=read_cursor)
48,607
def sdc_pandas_dataframe_append_codegen(df, other, _func_name, ignore_index_value, args): """ Input: df = pd.DataFrame({'A': ['cat', 'dog', np.nan], 'B': [.2, .3, np.nan]}) other = pd.DataFrame({'A': ['bird', 'fox', 'mouse'], 'C': ['a', np.nan, '']}) ignore_index=True Func generated: def sdc_pandas_dataframe_append_impl(df, other, ignore_index=False, verify_integrity=False, sort=None): len_df = len(get_dataframe_data(df, 0)) len_other = len(get_dataframe_data(other, 0)) new_col_A_data_df = get_dataframe_data(df, 0) new_col_A_data_other = get_dataframe_data(other, 0) new_col_A = init_series(new_col_A_data_df).append(init_series(new_col_A_data_other))._data new_col_B_data_df = get_dataframe_data(df, 1) new_col_B_data = init_series(new_col_B_data_df)._data new_col_B = fill_array(new_col_B_data, len_df+len_other) new_col_C_data_other = get_dataframe_data(other, 1) new_col_C_data = init_series(new_col_C_data_other)._data new_col_C = fill_str_array(new_col_C_data, len_df+len_other, push_back=False) return pandas.DataFrame({"A": new_col_A, "B": new_col_B, "C": new_col_C) """ indent = 4 * ' ' func_args = ['df', 'other'] for key, value in args: func_args.append(f'{key}={value}') df_columns_indx = {col_name: i for i, col_name in enumerate(df.columns)} other_columns_indx = {col_name: i for i, col_name in enumerate(other.columns)} # Keep columns that are StringArrayType string_type_columns = set(col_name for typ, col_name in zip(df.data, df.columns) if isinstance(typ, StringArrayType)) for typ, col_name in zip(other.data, other.columns): if isinstance(typ, StringArrayType): string_type_columns.add(col_name) func_definition = [f'def sdc_pandas_dataframe_{_func_name}_impl({", ".join(func_args)}):'] func_text = [] column_list = [] func_text.append(f'len_df = len(get_dataframe_data(df, 0))') func_text.append(f'len_other = len(get_dataframe_data(other, 0))') for col_name, col_id in df_columns_indx.items(): func_text.append(f'new_col_{col_id}_data_{"df"} = get_dataframe_data({"df"}, {col_id})') if col_name in other_columns_indx: func_text.append(f'new_col_{col_id}_data_{"other"} = ' f'get_dataframe_data({"other"}, {other_columns_indx.get(col_name)})') s1 = f'init_series(new_col_{col_id}_data_{"df"})' s2 = f'init_series(new_col_{col_id}_data_{"other"})' func_text.append(f'new_col_{col_id} = {s1}.append({s2})._data') else: func_text.append(f'new_col_{col_id}_data = init_series(new_col_{col_id}_data_df)._data') if col_name in string_type_columns: func_text.append(f'new_col_{col_id} = fill_str_array(new_col_{col_id}_data, len_df+len_other)') else: func_text.append(f'new_col_{col_id} = fill_array(new_col_{col_id}_data, len_df+len_other)') column_list.append((f'new_col_{col_id}', col_name)) for col_name, col_id in other_columns_indx.items(): if col_name not in df_columns_indx: func_text.append(f'new_col_{col_id}_data_{"other"} = get_dataframe_data({"other"}, {col_id})') func_text.append(f'new_col_{col_id}_data = init_series(new_col_{col_id}_data_other)._data') if col_name in string_type_columns: func_text.append( f'new_col_{col_id}_other = ' f'fill_str_array(new_col_{col_id}_data, len_df+len_other, push_back=False)') else: func_text.append(f'new_col_{col_id}_other = ' f'fill_array(new_col_{col_id}_data, len_df+len_other, push_back=False)') column_list.append((f'new_col_{col_id}_other', col_name)) data = ', '.join(f'"{column_name}": {column}' for column, column_name in column_list) if ignore_index_value == True: # noqa func_text.append(f'return pandas.DataFrame({{{data}}})\n') else: func_text.append(f'df_index = 
df.index') func_text.append(f'other_index = other.index') func_text.append(f'joined_index = hpat_arrays_append(df_index, other_index)\n') func_text.append(f'return pandas.DataFrame({{{data}}}, index=joined_index)\n') func_definition.extend([indent + func_line for func_line in func_text]) func_def = '\n'.join(func_definition) global_vars = {'pandas': pandas, 'get_dataframe_data': sdc.hiframes.pd_dataframe_ext.get_dataframe_data, 'init_series': sdc.hiframes.api.init_series, 'fill_array': sdc.datatypes.common_functions.fill_array, 'fill_str_array': sdc.datatypes.common_functions.fill_str_array, 'hpat_arrays_append': sdc.datatypes.common_functions.hpat_arrays_append} return func_def, global_vars
def sdc_pandas_dataframe_append_codegen(df, other, _func_name, ignore_index_value, args): """ Input: df = pd.DataFrame({'A': ['cat', 'dog', np.nan], 'B': [.2, .3, np.nan]}) other = pd.DataFrame({'A': ['bird', 'fox', 'mouse'], 'C': ['a', np.nan, '']}) ignore_index=True Func generated: def sdc_pandas_dataframe_append_impl(df, other, ignore_index=False, verify_integrity=False, sort=None): len_df = len(get_dataframe_data(df, 0)) len_other = len(get_dataframe_data(other, 0)) new_col_A_data_df = get_dataframe_data(df, 0) new_col_A_data_other = get_dataframe_data(other, 0) new_col_A = init_series(new_col_A_data_df).append(init_series(new_col_A_data_other))._data new_col_B_data_df = get_dataframe_data(df, 1) new_col_B_data = init_series(new_col_B_data_df)._data new_col_B = fill_array(new_col_B_data, len_df+len_other) new_col_C_data_other = get_dataframe_data(other, 1) new_col_C_data = init_series(new_col_C_data_other)._data new_col_C = fill_str_array(new_col_C_data, len_df+len_other, push_back=False) return pandas.DataFrame({"A": new_col_A, "B": new_col_B, "C": new_col_C) """ indent = 4 * ' ' func_args = ['df', 'other'] for key, value in args: func_args.append(f'{key}={value}') df_columns_indx = {col_name: i for i, col_name in enumerate(df.columns)} other_columns_indx = {col_name: i for i, col_name in enumerate(other.columns)} # Keep columns that are StringArrayType string_type_columns = set(col_name for typ, col_name in zip(df.data, df.columns) if isinstance(typ, StringArrayType)) for typ, col_name in zip(other.data, other.columns): if isinstance(typ, StringArrayType): string_type_columns.add(col_name) func_definition = [f'def sdc_pandas_dataframe_{_func_name}_impl({", ".join(func_args)}):'] func_text = [] column_list = [] func_text.append(f'len_df = len(get_dataframe_data(df, 0))') func_text.append(f'len_other = len(get_dataframe_data(other, 0))') for col_name, col_id in df_columns_indx.items(): func_text.append(f'new_col_{col_id}_data_{"df"} = get_dataframe_data({"df"}, {col_id})') if col_name in other_columns_indx: func_text.append(f'new_col_{col_id}_data_{"other"} = ' f'get_dataframe_data({"other"}, {other_columns_indx.get(col_name)})') s1 = f'init_series(new_col_{col_id}_data_{"df"})' s2 = f'init_series(new_col_{col_id}_data_{"other"})' func_text.append(f'new_col_{col_id} = {s1}.append({s2})._data') else: func_text.append(f'new_col_{col_id}_data = init_series(new_col_{col_id}_data_df)._data') if col_name in string_type_columns: func_text.append(f'new_col_{col_id} = fill_str_array(new_col_{col_id}_data, len_df+len_other)') else: func_text.append(f'new_col_{col_id} = fill_array(new_col_{col_id}_data, len_df+len_other)') column_list.append((f'new_col_{col_id}', col_name)) for col_name, col_id in other_columns_indx.items(): if col_name not in df_columns_indx: func_text.append(f'new_col_{col_id}_data_{"other"} = get_dataframe_data({"other"}, {col_id})') func_text.append(f'new_col_{col_id}_data = init_series(new_col_{col_id}_data_other)._data') if col_name in string_type_columns: func_text.append( f'new_col_{col_id}_other = ' f'fill_str_array(new_col_{col_id}_data, len_df+len_other, push_back=False)') else: func_text.append(f'new_col_{col_id}_other = ' f'fill_array(new_col_{col_id}_data, len_df+len_other, push_back=False)') column_list.append((f'new_col_{col_id}_other', col_name)) data = ', '.join(f'"{column_name}": {column}' for column, column_name in column_list) if ignore_index_value == True: # noqa func_text.append(f'return pandas.DataFrame({{{data}}})\n') else: func_text += [f'df_index = 
df.index', f'other_index = other.index', f'joined_index = hpat_arrays_append(df_index, other_index)\n', f'return pandas.DataFrame({{{data}}}, index=joined_index)\n'] func_definition.extend([indent + func_line for func_line in func_text]) func_def = '\n'.join(func_definition) global_vars = {'pandas': pandas, 'get_dataframe_data': sdc.hiframes.pd_dataframe_ext.get_dataframe_data, 'init_series': sdc.hiframes.api.init_series, 'fill_array': sdc.datatypes.common_functions.fill_array, 'fill_str_array': sdc.datatypes.common_functions.fill_str_array, 'hpat_arrays_append': sdc.datatypes.common_functions.hpat_arrays_append} return func_def, global_vars
41,689
def make_parser(parser): parser.description = ( "Build all the packages in a given directory\n\n" "Unless the --only option is provided\n\n" "Note: this is a private endpoint that should not be used " "outside of the pyodide Makefile." ) parser.add_argument( "dir", type=str, nargs=1, help="Input directory containing a tree of package definitions", ) parser.add_argument( "output", type=str, nargs=1, help="Output directory in which to put all built packages", ) parser.add_argument( "--cflags", type=str, nargs="?", default=None, help="Extra compiling flags. Default: SIDE_MODULE_CFLAGS", ) parser.add_argument( "--cxxflags", type=str, nargs="?", default=None, help=("Extra C++ specific compiling flags. " "Default: SIDE_MODULE_CXXFLAGS"), ) parser.add_argument( "--ldflags", type=str, nargs="?", default=None, help="Extra linking flags. Default: SIDE_MODULE_LDFLAGS", ) parser.add_argument( "--target-install-dir", type=str, nargs="?", default=None, help="The path to the target Python installation. Default: TARGETINSTALLDIR", ) parser.add_argument( "--host-install-dir", type=str, nargs="?", default=None, help=("Directory for installing built host packages. Default: HOSTINSTALLDIR"), ) parser.add_argument( "--log-dir", type=str, dest="log_dir", nargs="?", default=None, help=("Directory to place log files"), ) parser.add_argument( "--only", type=str, nargs="?", default=None, help=("Only build the specified packages, provided as a comma-separated list"), ) parser.add_argument( "--force", action="store_true", help=( "Force rebuild of all packages regardless of whether they appear to have been updated" ), ) parser.add_argument( "--n-jobs", type=int, nargs="?", default=4, help="Number of packages to build in parallel", ) return parser
def make_parser(parser): parser.description = ( "Build all the packages in a given directory\n\n" "Unless the --only option is provided\n\n" "Note: this is a private endpoint that should not be used " "outside of the pyodide Makefile." ) parser.add_argument( "dir", type=str, nargs=1, help="Input directory containing a tree of package definitions", ) parser.add_argument( "output", type=str, nargs=1, help="Output directory in which to put all built packages", ) parser.add_argument( "--cflags", type=str, nargs="?", default=None, help="Extra compiling flags. Default: SIDE_MODULE_CFLAGS", ) parser.add_argument( "--cxxflags", type=str, nargs="?", default=None, help=("Extra C++ specific compiling flags. " "Default: SIDE_MODULE_CXXFLAGS"), ) parser.add_argument( "--ldflags", type=str, nargs="?", default=None, help="Extra linking flags. Default: SIDE_MODULE_LDFLAGS", ) parser.add_argument( "--target-install-dir", type=str, nargs="?", default=None, help="The path to the target Python installation. Default: TARGETINSTALLDIR", ) parser.add_argument( "--host-install-dir", type=str, nargs="?", default=None, help=("Directory for installing built host packages. Default: HOSTINSTALLDIR"), ) parser.add_argument( "--log-dir", type=str, dest="log_dir", nargs="?", default=None, help=("Directory to place log files"), ) parser.add_argument( "--only", type=str, nargs="?", default=None, help=("Only build the specified packages, provided as a comma-separated list"), ) parser.add_argument( "--rebuild", action="store_true", help=( "Force rebuild of all packages regardless of whether they appear to have been updated" ), ) parser.add_argument( "--n-jobs", type=int, nargs="?", default=4, help="Number of packages to build in parallel", ) return parser
10,330
def get_connection_info(module): url = module.params.get('api_url') username = module.params.get('api_username') password = module.params.get('api_password') if not url: url = os.environ.get('ONE_URL') if not username: username = os.environ.get('ONE_USERNAME') if not password: password = os.environ.get('ONE_PASSWORD') if not username: if not password: authfile = os.environ.get('ONE_AUTH') if authfile is None: authfile = os.environ.get('HOME') + '/.one/one_auth' try: authstring = open(authfile, "r").read().rstrip() username = authstring.split(":")[0] password = authstring.split(":")[1] except BaseException: module.fail_json(msg="Could not read ONE_AUTH file") if not url: module.fail_json(msg="Opennebula API url (api_url) is not specified") from collections import namedtuple auth_params = namedtuple('auth', ('url', 'username', 'password')) return auth_params(url=url, username=username, password=password)
def get_connection_info(module): url = module.params.get('api_url') username = module.params.get('api_username') password = module.params.get('api_password') if not url: url = os.environ.get('ONE_URL') if not username: username = os.environ.get('ONE_USERNAME') if not password: password = os.environ.get('ONE_PASSWORD') if not username: if not password: authfile = os.environ.get('ONE_AUTH') if authfile is None: authfile = os.path.join(os.environ.get("HOME"), ".one", "one_auth") try: authstring = open(authfile, "r").read().rstrip() username = authstring.split(":")[0] password = authstring.split(":")[1] except BaseException: module.fail_json(msg="Could not read ONE_AUTH file") if not url: module.fail_json(msg="Opennebula API url (api_url) is not specified") from collections import namedtuple auth_params = namedtuple('auth', ('url', 'username', 'password')) return auth_params(url=url, username=username, password=password)
31,060
def main(): try: if demisto.command() == 'test-module': # Tests connectivity and credentails on login # generateStartEndDates(1) return "ok" elif demisto.command() == 'ironportQuarantineReleaseEmail': mesId = demisto.args().get('mid') ironportQuarantineReleaseEmail(mesId) elif demisto.command() == 'ironportSpamReleaseEmail': mesId = demisto.args().get('mid') ironportSpamReleaseEmail(mesId) elif demisto.command() == 'ironPortSearchQuarantines': period = demisto.args().get('periodInDays') # senderPattern="" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print("senderPattern :",senderPattern) ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearchSpam': period = demisto.args().get('periodInDays') # senderPattern="" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print("senderPattern :",senderPattern) ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearch': period = demisto.args().get('periodInDays') # senderPattern="" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print("senderPattern :",senderPattern) ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit) except Exception as e: LOG.print_log(e) #
def main(): try: if demisto.command() == 'test-module': # Tests connectivity and credentails on login # generateStartEndDates(1) return "ok" elif demisto.command() == 'ironportQuarantineReleaseEmail': mesId = demisto.args().get('mid') ironportQuarantineReleaseEmail(mesId) elif demisto.command() == 'ironportSpamReleaseEmail': mesId = demisto.args().get('mid') ironportSpamReleaseEmail(mesId) elif demisto.command() == 'iron-port-search-quarantines': period = demisto.args().get('periodInDays') # senderPattern="" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print("senderPattern :",senderPattern) ironPortSearchQuarantines(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearchSpam': period = demisto.args().get('periodInDays') # senderPattern="" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print("senderPattern :",senderPattern) ironPortSearchSpam(period, senderPattern, recipientPattern, subjectPattern, limit) elif demisto.command() == 'ironPortSearch': period = demisto.args().get('periodInDays') # senderPattern="" senderPattern = demisto.args().get('senderPattern') recipientPattern = demisto.args().get('recipientPattern') subjectPattern = demisto.args().get('subjectPattern') limit = demisto.args().get('limit') # print("senderPattern :",senderPattern) ironPortSearch(period, senderPattern, recipientPattern, subjectPattern, limit) except Exception as e: LOG.print_log(e) #
19,910
def clean_profile(opts, profile): # Load the context ctx = Context.load(opts.workspace, profile, opts, strict=True, load_env=False) if not ctx: if not opts.workspace: log( "[clean] Error: The current or desired workspace could not be " "determined. Please run `catkin clean` from within a catkin " "workspace or specify the workspace explicitly with the " "`--workspace` option.") else: log( "[clean] Error: Could not clean workspace \"%s\" because it " "either does not exist or it has no catkin_tools metadata." % opts.workspace) return False profile = ctx.profile # Check if the user wants to do something explicit actions = ['spaces', 'packages', 'clean_this', 'orphans', 'deinit', 'setup_files'] paths = {} # noqa paths_exists = {} # noqa paths['install'] = ( os.path.join(ctx.destdir, ctx.install_space_abs.lstrip(os.sep)) if ctx.destdir else ctx.install_space_abs) paths_exists['install'] = os.path.exists(paths['install']) and os.path.isdir(paths['install']) for space in Context.SPACES.keys(): if space in paths: continue paths[space] = getattr(ctx, '{}_space_abs'.format(space)) paths_exists[space] = getattr(ctx, '{}_space_exists'.format(space))() # Default is to clean all products for this profile no_specific_action = not any([ v for (k, v) in vars(opts).items() if k in actions]) clean_all = opts.deinit or no_specific_action # Initialize action options if clean_all: opts.spaces = [k for k in Context.SPACES.keys() if k != 'source'] # Make sure the user intends to clean everything spaces_to_clean_msgs = [] if opts.spaces and not (opts.yes or opts.dry_run): for space in opts.spaces: if getattr(ctx, '{}_space_exists'.format(space))(): space_name = Context.SPACES[space]['space'] space_abs = getattr(ctx, '{}_space_abs'.format(space)) spaces_to_clean_msgs.append(clr("[clean] {}: @{yf}{}").format(space_name, space_abs)) if len(spaces_to_clean_msgs) == 0 and not opts.deinit: log("[clean] Nothing to be cleaned for profile: `{}`".format(profile)) return True if len(spaces_to_clean_msgs) > 0: log("") log(clr("[clean] @!@{yf}Warning:@| This will completely remove the " "following directories. 
(Use `--yes` to skip this check)")) for msg in spaces_to_clean_msgs: log(msg) try: yes = yes_no_loop( "\n[clean] Are you sure you want to completely remove the directories listed above?") if not yes: log(clr("[clean] Not removing any workspace directories for" " this profile.")) return True except KeyboardInterrupt: log("\n[clean] No actions performed.") sys.exit(0) # Initialize flag to be used on the next invocation needs_force = False try: for space in opts.spaces: if space == 'devel': # Remove all develspace files if paths_exists['devel']: log("[clean] Removing {}: {}".format(Context.SPACES['devel']['space'], ctx.devel_space_abs)) if not opts.dry_run: safe_rmtree(ctx.devel_space_abs, ctx.workspace, opts.force) # Clear the cached metadata from the last build run _, build_metadata_file = get_metadata_paths(ctx.workspace, profile, 'build') if os.path.exists(build_metadata_file): os.unlink(build_metadata_file) # Clear the cached packages data, if it exists packages_metadata_path = ctx.package_metadata_path() if os.path.exists(packages_metadata_path): safe_rmtree(packages_metadata_path, ctx.workspace, opts.force) else: if paths_exists[space]: space_name = Context.SPACES[space]['space'] space_path = paths[space] log("[clean] Removing {}: {}".format(space_name, space_path)) if not opts.dry_run: safe_rmtree(space_path, ctx.workspace, opts.force) # Setup file removal if opts.setup_files: if paths_exists['devel']: log("[clean] Removing setup files from {}: {}".format(Context.SPACES['devel']['space'], paths['devel'])) opts.packages.append('catkin') opts.packages.append('catkin_tools_prebuild') else: log("[clean] No {} exists, no setup files to clean.".format(Context.SPACES['devel']['space'])) # Find orphaned packages if ctx.link_devel or ctx.isolate_devel and not ('devel' in opts.spaces or 'build' in opts.spaces): if opts.orphans: if os.path.exists(ctx.build_space_abs): log("[clean] Determining orphaned packages...") # Get all existing packages in source space and the # Suppress warnings since this is looking for packages which no longer exist found_source_packages = [ pkg.name for (path, pkg) in find_packages(ctx.source_space_abs, warnings=[]).items()] built_packages = [ pkg.name for (path, pkg) in find_packages(ctx.package_metadata_path(), warnings=[]).items()] # Look for orphaned products in the build space orphans = [p for p in built_packages if (p not in found_source_packages and p != 'catkin_tools_prebuild')] if len(orphans) > 0: opts.packages.extend(list(orphans)) else: log("[clean] No orphans in the workspace.") else: log("[clean] No {} exists, no potential for orphans.".format(Context.SPACES['build']['space'])) # Remove specific packages if len(opts.packages) > 0 or opts.clean_this: # Determine the enclosing package try: ws_path = find_enclosing_workspace(getcwd()) # Suppress warnings since this won't necessarily find all packages # in the workspace (it stops when it finds one package), and # relying on it for warnings could mislead people. this_package = find_enclosing_package( search_start_path=getcwd(), ws_path=ws_path, warnings=[]) except InvalidPackage as ex: sys.exit(clr("@{rf}Error:@| The file %s is an invalid package.xml file." 
" See below for details:\n\n%s" % (ex.package_path, ex.msg))) # Handle context-based package cleaning if opts.clean_this: if this_package: opts.packages += [this_package] else: sys.exit( "[clean] Error: In order to use --this, the current directory" " must be part of a catkin package.") try: # Clean the packages needs_force = clean_packages( ctx, opts.packages, opts.dependents, opts.verbose, opts.dry_run) except KeyboardInterrupt: wide_log("[clean] User interrupted!") return False elif opts.orphans or len(opts.packages) > 0 or opts.clean_this: log("[clean] Error: Individual packages cannot be cleaned from " "workspaces with merged develspaces, use a symbolically-linked " "or isolated develspace instead.") except: # noqa: E722 # Silencing E722 here since we immediately re-raise the exception. log("[clean] Failed to clean profile `{}`".format(profile)) needs_force = True raise finally: if needs_force: log(clr( "[clean] @/@!Note:@| @/Parts of the workspace have been cleaned which will " "necessitate re-configuring CMake on the next build.@|")) update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': True}) return True
def clean_profile(opts, profile): # Load the context ctx = Context.load(opts.workspace, profile, opts, strict=True, load_env=False) if not ctx: if not opts.workspace: log( "[clean] Error: The current or desired workspace could not be " "determined. Please run `catkin clean` from within a catkin " "workspace or specify the workspace explicitly with the " "`--workspace` option.") else: log( "[clean] Error: Could not clean workspace \"%s\" because it " "either does not exist or it has no catkin_tools metadata." % opts.workspace) return False profile = ctx.profile # Check if the user wants to do something explicit actions = ['spaces', 'packages', 'clean_this', 'orphans', 'deinit', 'setup_files'] paths = {} # noqa paths_exists = {} # noqa paths['install'] = ( os.path.join(ctx.destdir, ctx.install_space_abs.lstrip(os.sep)) if ctx.destdir else ctx.install_space_abs) paths_exists['install'] = os.path.exists(paths['install']) and os.path.isdir(paths['install']) for space in Context.SPACES.keys(): if space in paths: continue paths[space] = getattr(ctx, '{}_space_abs'.format(space)) paths_exists[space] = getattr(ctx, '{}_space_exists'.format(space))() # Default is to clean all products for this profile no_specific_action = not any([ v for (k, v) in vars(opts).items() if k in actions]) clean_all = opts.deinit or no_specific_action # Initialize action options if clean_all: opts.spaces = [k for k in Context.SPACES.keys() if k != 'source'] # Make sure the user intends to clean everything spaces_to_clean_msgs = [] if opts.spaces and not (opts.yes or opts.dry_run): for space in opts.spaces: if getattr(ctx, '{}_space_exists'.format(space))(): space_name = Context.SPACES[space]['space'] space_abs = getattr(ctx, '{}_space_abs'.format(space)) spaces_to_clean_msgs.append(clr("[clean] {:14} @{yf}{}").format(space_name + ':', space_abs)) if len(spaces_to_clean_msgs) == 0 and not opts.deinit: log("[clean] Nothing to be cleaned for profile: `{}`".format(profile)) return True if len(spaces_to_clean_msgs) > 0: log("") log(clr("[clean] @!@{yf}Warning:@| This will completely remove the " "following directories. 
(Use `--yes` to skip this check)")) for msg in spaces_to_clean_msgs: log(msg) try: yes = yes_no_loop( "\n[clean] Are you sure you want to completely remove the directories listed above?") if not yes: log(clr("[clean] Not removing any workspace directories for" " this profile.")) return True except KeyboardInterrupt: log("\n[clean] No actions performed.") sys.exit(0) # Initialize flag to be used on the next invocation needs_force = False try: for space in opts.spaces: if space == 'devel': # Remove all develspace files if paths_exists['devel']: log("[clean] Removing {}: {}".format(Context.SPACES['devel']['space'], ctx.devel_space_abs)) if not opts.dry_run: safe_rmtree(ctx.devel_space_abs, ctx.workspace, opts.force) # Clear the cached metadata from the last build run _, build_metadata_file = get_metadata_paths(ctx.workspace, profile, 'build') if os.path.exists(build_metadata_file): os.unlink(build_metadata_file) # Clear the cached packages data, if it exists packages_metadata_path = ctx.package_metadata_path() if os.path.exists(packages_metadata_path): safe_rmtree(packages_metadata_path, ctx.workspace, opts.force) else: if paths_exists[space]: space_name = Context.SPACES[space]['space'] space_path = paths[space] log("[clean] Removing {}: {}".format(space_name, space_path)) if not opts.dry_run: safe_rmtree(space_path, ctx.workspace, opts.force) # Setup file removal if opts.setup_files: if paths_exists['devel']: log("[clean] Removing setup files from {}: {}".format(Context.SPACES['devel']['space'], paths['devel'])) opts.packages.append('catkin') opts.packages.append('catkin_tools_prebuild') else: log("[clean] No {} exists, no setup files to clean.".format(Context.SPACES['devel']['space'])) # Find orphaned packages if ctx.link_devel or ctx.isolate_devel and not ('devel' in opts.spaces or 'build' in opts.spaces): if opts.orphans: if os.path.exists(ctx.build_space_abs): log("[clean] Determining orphaned packages...") # Get all existing packages in source space and the # Suppress warnings since this is looking for packages which no longer exist found_source_packages = [ pkg.name for (path, pkg) in find_packages(ctx.source_space_abs, warnings=[]).items()] built_packages = [ pkg.name for (path, pkg) in find_packages(ctx.package_metadata_path(), warnings=[]).items()] # Look for orphaned products in the build space orphans = [p for p in built_packages if (p not in found_source_packages and p != 'catkin_tools_prebuild')] if len(orphans) > 0: opts.packages.extend(list(orphans)) else: log("[clean] No orphans in the workspace.") else: log("[clean] No {} exists, no potential for orphans.".format(Context.SPACES['build']['space'])) # Remove specific packages if len(opts.packages) > 0 or opts.clean_this: # Determine the enclosing package try: ws_path = find_enclosing_workspace(getcwd()) # Suppress warnings since this won't necessarily find all packages # in the workspace (it stops when it finds one package), and # relying on it for warnings could mislead people. this_package = find_enclosing_package( search_start_path=getcwd(), ws_path=ws_path, warnings=[]) except InvalidPackage as ex: sys.exit(clr("@{rf}Error:@| The file %s is an invalid package.xml file." 
" See below for details:\n\n%s" % (ex.package_path, ex.msg))) # Handle context-based package cleaning if opts.clean_this: if this_package: opts.packages += [this_package] else: sys.exit( "[clean] Error: In order to use --this, the current directory" " must be part of a catkin package.") try: # Clean the packages needs_force = clean_packages( ctx, opts.packages, opts.dependents, opts.verbose, opts.dry_run) except KeyboardInterrupt: wide_log("[clean] User interrupted!") return False elif opts.orphans or len(opts.packages) > 0 or opts.clean_this: log("[clean] Error: Individual packages cannot be cleaned from " "workspaces with merged develspaces, use a symbolically-linked " "or isolated develspace instead.") except: # noqa: E722 # Silencing E722 here since we immediately re-raise the exception. log("[clean] Failed to clean profile `{}`".format(profile)) needs_force = True raise finally: if needs_force: log(clr( "[clean] @/@!Note:@| @/Parts of the workspace have been cleaned which will " "necessitate re-configuring CMake on the next build.@|")) update_metadata(ctx.workspace, ctx.profile, 'build', {'needs_force': True}) return True
17,389
def _nanpolyfit_1d(arr, x, rcond=None): out = np.full((x.shape[1] + 1,), np.nan) mask = np.isnan(arr) if not np.all(mask): out[:-1], resid, rank, _ = np.linalg.lstsq(x[~mask, :], arr[~mask], rcond=rcond) out[-1] = resid or np.nan warn_on_deficient_rank(rank, x.shape[1]) return out
def _nanpolyfit_1d(arr, x, rcond=None): out = np.full((x.shape[1] + 1,), np.nan) mask = np.isnan(arr) if not np.all(mask): out[:-1], resid, rank, _ = np.linalg.lstsq(x[~mask, :], arr[~mask], rcond=rcond) out[-1] = resid if resid.size > 0 else np.nan warn_on_deficient_rank(rank, x.shape[1]) return out
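The size check in the modified `_nanpolyfit_1d` matters because `np.linalg.lstsq` returns an empty residuals array whenever the fit is rank-deficient (or has fewer rows than columns), and truth-testing that array, as the original `resid or np.nan` does, is fragile. A small self-contained illustration with made-up data:

import numpy as np

# lstsq reports no residuals for a rank-deficient design matrix, so the
# result has to be size-checked before being stored in a scalar slot.
x = np.ones((3, 2))                         # rank 1 < 2 columns
y = np.array([1.0, 2.0, 3.0])
coef, resid, rank, _ = np.linalg.lstsq(x, y, rcond=None)
print(resid.shape)                          # (0,)
print(resid if resid.size > 0 else np.nan)  # nan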
34,651
def extract_patterns( training_data: TrainingData, use_lookup_tables: bool = True, use_regexes: bool = True, use_only_entities: bool = False, use_word_boundaries: bool = True, ) -> List[Dict[Text, Text]]: r"""Extract a list of patterns from the training data. The patterns are constructed using the regex features and lookup tables defined in the training data. Args: training_data: The training data. use_only_entities: If True only lookup tables and regex features with a name equal to a entity are considered. use_regexes: Boolean indicating whether to use regex features or not. use_lookup_tables: Boolean indicating whether to use lookup tables or not. use_word_boundaries: Boolean indicating whether to use `\b` around the lookup table regex expressions Returns: The list of regex patterns. """ if not training_data.lookup_tables and not training_data.regex_features: return [] patterns = [] if use_regexes: patterns.extend(_collect_regex_features(training_data, use_only_entities)) if use_lookup_tables: patterns.extend( _convert_lookup_tables_to_regex( training_data, use_only_entities, use_word_boundaries ) ) return patterns
def extract_patterns( training_data: TrainingData, use_lookup_tables: bool = True, use_regexes: bool = True, use_only_entities: bool = False, use_word_boundaries: bool = True, ) -> List[Dict[Text, Text]]: """Extract a list of patterns from the training data. The patterns are constructed using the regex features and lookup tables defined in the training data. Args: training_data: The training data. use_only_entities: If True only lookup tables and regex features with a name equal to a entity are considered. use_regexes: Boolean indicating whether to use regex features or not. use_lookup_tables: Boolean indicating whether to use lookup tables or not. use_word_boundaries: Boolean indicating whether to use `\b` around the lookup table regex expressions Returns: The list of regex patterns. """ if not training_data.lookup_tables and not training_data.regex_features: return [] patterns = [] if use_regexes: patterns.extend(_collect_regex_features(training_data, use_only_entities)) if use_lookup_tables: patterns.extend( _convert_lookup_tables_to_regex( training_data, use_only_entities, use_word_boundaries ) ) return patterns
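The `use_word_boundaries` flag above controls whether lookup-table patterns are wrapped in `\b` anchors. The helper itself is not shown here, so the following is only a hypothetical sketch of the idea, not Rasa's `_convert_lookup_tables_to_regex`:

import re

def lookup_to_pattern(name, elements, use_word_boundaries=True):
    # Join the lookup entries into one alternation; the word boundaries keep
    # "berlin" from also matching inside "berliner".
    body = "|".join(re.escape(e) for e in elements)
    pattern = f"\\b({body})\\b" if use_word_boundaries else f"({body})"
    return {"name": name, "pattern": pattern}

print(lookup_to_pattern("city", ["berlin", "amsterdam"]))
# {'name': 'city', 'pattern': '\\b(berlin|amsterdam)\\b'}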
56,637
def normalize_ddc(ddc): """ :param str ddc: :rtype: list of str """ ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '') results = [] for match in DDC_RE.finditer(ddc): parts = match.groupdict() prefix = '' suffix = '' # DDCs should start at word boundaries start = match.start() if start > 0 and re.search(r'\b', ddc[start - 1]): continue # And end at them end = match.end() if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]): continue # Some old standard which isn't used anymore; might need to filter these # out, but they should sort OK so let's keep them. if parts['neg']: prefix += '-' # Juvenile prefix if parts['j']: prefix += 'j' # Star should be at end if parts['prestar'] or parts['poststar']: suffix = '*' # Series suffix if parts['s']: suffix += ' s' # Biographical if parts['B']: suffix += ' B' # Not at all sure if parts['ninetwo']: suffix += parts['ninetwo'] # And now the actual number! if parts['number']: # Numbers in parenthesis are "series" numbers end = match.end('number') if end < len(ddc) and ddc[end] == ')': suffix += ' s' # pad the integer part of the number number_parts = parts['number'].split('.') integer = number_parts[0] # Copy decimal without losing precision decimal = '.' + number_parts[1] if len(number_parts) > 1 else '' number = '%03d%s' % (int(integer), decimal) # Handle [Fic] or [E] elif parts['fic']: number = '[%s]' % parts['fic'].title() else: continue results.append(prefix + number + suffix) return results
def normalize_ddc(ddc): """ :param str ddc: :rtype: list of str """ ddc = collapse_multiple_space(ddc.strip()).replace('/', '').replace("'", '') results = [] for match in DDC_RE.finditer(ddc): parts = match.groupdict() prefix = '' suffix = '' # DDCs should start at word boundaries start = match.start() if start > 0 and re.search(r'\b', ddc[start - 1]): continue # And end at them end = match.end() if end < (len(ddc) - 1) and re.search(r'\b', ddc[end]): continue # Some old standard which isn't used anymore; might need to filter these # out, but they should sort OK so let's keep them. for key, value in {'neg': '-', 'j': 'j'}.items(): if parts[key]: prefix += value # Star should be at end if parts['prestar'] or parts['poststar']: suffix = '*' # Series suffix if parts['s']: suffix += ' s' # Biographical if parts['B']: suffix += ' B' # Not at all sure if parts['ninetwo']: suffix += parts['ninetwo'] # And now the actual number! if parts['number']: # Numbers in parenthesis are "series" numbers end = match.end('number') if end < len(ddc) and ddc[end] == ')': suffix += ' s' # pad the integer part of the number number_parts = parts['number'].split('.') integer = number_parts[0] # Copy decimal without losing precision decimal = '.' + number_parts[1] if len(number_parts) > 1 else '' number = '%03d%s' % (int(integer), decimal) # Handle [Fic] or [E] elif parts['fic']: number = '[%s]' % parts['fic'].title() else: continue results.append(prefix + number + suffix) return results
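The core of the normalization is zero-padding the integer part so that DDCs sort lexicographically, while the decimal part is carried over verbatim. A tiny illustration of that formatting step with a hypothetical call number:

# "92.31" -> "092.31": pad the integer part to three digits, keep the
# decimal part as-is so no precision is lost.
number_parts = "92.31".split(".")
integer = number_parts[0]
decimal = "." + number_parts[1] if len(number_parts) > 1 else ""
print("%03d%s" % (int(integer), decimal))  # 092.31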
37,798
def test_unknown_platform(monkeypatch, capsys): monkeypatch.setattr(os, 'environ', {"CIBW_PLATFORM": "Something"}) apply_mock_protection(monkeypatch) with pytest.raises(SystemExit) as exit: main() _, err = capsys.readouterr() assert exit.value.code == 2 assert 'cibuildwheel: Unsupported platform: Something' in err
def test_unknown_platform(monkeypatch, capsys): monkeypatch.setattr(os, 'environ', {"CIBW_PLATFORM": "an-unsupported-platform"}) apply_mock_protection(monkeypatch) with pytest.raises(SystemExit) as exit: main() _, err = capsys.readouterr() assert exit.value.code == 2 assert 'cibuildwheel: Unsupported platform: Something' in err
20,257
def assemble_output(): rows = [] for page in EnforcementActionPage.objects.all(): if not page.live: continue url = 'https://consumerfinance.gov' + page.get_url() if 'enforcement/actions' not in url: continue page_categories = ','.join( c.get_name_display() for c in page.categories.all()) content = '' soup = BeautifulSoup(str(page.content), 'html.parser') para = soup.findAll(['p', 'h5']) for p in para: content += p.get_text() link = p.find('a', href=True) if link: content += ': ' content += link['href'] content += '\n' row = { 'Title': page.title, 'Content': content, 'Forum': page_categories, 'Docket Numbers': ','.join( d.docket_number for d in page.docket_numbers.all()), 'Initial Filing Date': page.initial_filing_date, 'Statuses': ','.join( d.status for d in page.statuses.all()), 'Products': ','.join( d.product for d in page.products.all()), 'URL': url } rows.append(row) return rows
def assemble_output(): rows = [] for page in EnforcementActionPage.objects.all(): if not page.live: continue url = 'https://consumerfinance.gov' + page.get_url() if 'enforcement/actions' not in url: continue page_categories = ','.join( c.get_name_display() for c in page.categories.all()) content = '' soup = BeautifulSoup(str(page.content), 'html.parser') para = soup.findAll(['p', 'h5']) for p in para: content += p.get_text() link = p.find('a', href=True) if link: content += ': ' content += link['href'] content += '\n' row = { 'Title': page.title, 'Content': content, 'Forum': page_categories, 'Docket Numbers': ','.join( d.docket_number for d in page.docket_numbers.all()), 'Initial Filing Date': page.initial_filing_date, 'Statuses': ','.join( d.status for d in page.statuses.all()), 'Products': ','.join( d.get_product_display() for d in page.products.all()), 'URL': url } rows.append(row) return rows
47,511
def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment accelerator = Accelerator(log_with="all", logging_dir=args.output_dir) if args.with_tracking else Accelerator() logger.info(accelerator.state) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset(args.dataset_name, task="image-classification") else: data_files = {} if args.train_dir is not None: data_files["train"] = os.path.join(args.train_dir, "**") if args.validation_dir is not None: data_files["validation"] = os.path.join(args.validation_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, task="image-classification", ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder. # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. 
labels = dataset["train"].features["labels"].names label2id = {label: str(i) for i, label in enumerate(labels)} id2label = {str(i): label for i, label in enumerate(labels)} # Load pretrained model and feature extractor # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=len(labels), i2label=id2label, label2id=label2id, finetuning_task="image-classification", ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForImageClassification.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image. normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) train_transforms = Compose( [ RandomResizedCrop(feature_extractor.size), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(feature_extractor.size), CenterCrop(feature_extractor.size), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """Apply _train_transforms across a batch.""" example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch def preprocess_val(example_batch): """Apply _val_transforms across a batch.""" example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) if args.max_eval_samples is not None: dataset["validation"] = dataset["validation"].shuffle(seed=args.seed).select(range(args.max_eval_samples)) # Set the validation transforms eval_dataset = dataset["validation"].with_transform(preprocess_val) # DataLoaders creation: def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example["labels"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels} train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Figure out how many steps we should save the Accelerator states if hasattr(args.checkpointing_steps, "isdigit"): checkpointing_steps = args.checkpointing_steps if args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("image_classification_no_trainer", experiment_config) # Get the metric function metric = load_metric("accuracy") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == starting_epoch: if resume_step is not None and step < resume_step: completed_steps += 1 continue outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps }" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress {completed_steps} steps", blocking=False, auto_lfs_prune=True, ) if completed_steps >= args.max_train_steps: break model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather((predictions, batch["labels"])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == len(eval_dataloader): predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() logger.info(f"epoch {epoch}: {eval_metric}") if args.with_tracking: accelerator.log( { "accuracy": eval_metric, "train_loss": total_loss, "epoch": epoch, "step": completed_steps, }, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: 
accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True) if args.output_dir is not None: with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump({"eval_accuracy": eval_metric["accuracy"]}, f)
def main(): args = parse_args() # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will pick up all supported trackers in the environment accelerator = Accelerator(log_with="all", logging_dir=args.output_dir) if args.with_tracking else Accelerator() logger.info(accelerator.state) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: if args.hub_model_id is None: repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token) else: repo_name = args.hub_model_id repo = Repository(args.output_dir, clone_from=repo_name) with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Get the datasets: you can either provide your own training and evaluation files (see below) # or specify a Dataset from the hub (the dataset will be downloaded automatically from the datasets Hub). # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset(args.dataset_name, task="image-classification") else: data_files = {} if args.train_dir is not None: data_files["train"] = os.path.join(args.train_dir, "**") if args.validation_dir is not None: data_files["validation"] = os.path.join(args.validation_dir, "**") dataset = load_dataset( "imagefolder", data_files=data_files, cache_dir=args.cache_dir, task="image-classification", ) # See more about loading custom images at # https://huggingface.co/docs/datasets/v2.0.0/en/image_process#imagefolder. # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. 
labels = dataset["train"].features["labels"].names label2id = {label: str(i) for i, label in enumerate(labels)} id2label = {str(i): label for i, label in enumerate(labels)} # Load pretrained model and feature extractor # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( args.model_name_or_path, num_labels=len(labels), i2label=id2label, label2id=label2id, finetuning_task="image-classification", ) feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_name_or_path) model = AutoModelForImageClassification.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ) # Preprocessing the datasets # Define torchvision transforms to be applied to each image. normalize = Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std) train_transforms = Compose( [ RandomResizedCrop(feature_extractor.size), RandomHorizontalFlip(), ToTensor(), normalize, ] ) val_transforms = Compose( [ Resize(feature_extractor.size), CenterCrop(feature_extractor.size), ToTensor(), normalize, ] ) def preprocess_train(example_batch): """Apply _train_transforms across a batch.""" example_batch["pixel_values"] = [train_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch def preprocess_val(example_batch): """Apply _val_transforms across a batch.""" example_batch["pixel_values"] = [val_transforms(image.convert("RGB")) for image in example_batch["image"]] return example_batch with accelerator.main_process_first(): if args.max_train_samples is not None: dataset["train"] = dataset["train"].shuffle(seed=args.seed).select(range(args.max_train_samples)) # Set the training transforms train_dataset = dataset["train"].with_transform(preprocess_train) if args.max_eval_samples is not None: dataset["validation"] = dataset["validation"].shuffle(seed=args.seed).select(range(args.max_eval_samples)) # Set the validation transforms eval_dataset = dataset["validation"].with_transform(preprocess_val) # DataLoaders creation: def collate_fn(examples): pixel_values = torch.stack([example["pixel_values"] for example in examples]) labels = torch.tensor([example["labels"] for example in examples]) return {"pixel_values": pixel_values, "labels": labels} train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=collate_fn, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader(eval_dataset, collate_fn=collate_fn, batch_size=args.per_device_eval_batch_size) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Scheduler and math around the number of training steps. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps, num_training_steps=args.max_train_steps, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Figure out how many steps we should save the Accelerator states if hasattr(args.checkpointing_steps, "isdigit"): checkpointing_steps = args.checkpointing_steps if args.checkpointing_steps.isdigit(): checkpointing_steps = int(args.checkpointing_steps) else: checkpointing_steps = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("image_classification_no_trainer", experiment_config) # Get the metric function metric = load_metric("accuracy") # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. 
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}") accelerator.load_state(args.resume_from_checkpoint) path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None else: resume_step = int(training_difference.replace("step_", "")) starting_epoch = resume_step // len(train_dataloader) resume_step -= starting_epoch * len(train_dataloader) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 for step, batch in enumerate(train_dataloader): # We need to skip steps until we reach the resumed step if args.resume_from_checkpoint and epoch == starting_epoch: if resume_step is not None and step < resume_step: completed_steps += 1 continue outputs = model(**batch) loss = outputs.loss # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0: output_dir = f"step_{completed_steps }" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress {completed_steps} steps", blocking=False, auto_lfs_prune=True, ) if completed_steps >= args.max_train_steps: break model.eval() samples_seen = 0 for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) predictions, references = accelerator.gather((predictions, batch["labels"])) # If we are in a multiprocess environment, the last batch has duplicates if accelerator.num_processes > 1: if step == len(eval_dataloader): predictions = predictions[: len(eval_dataloader.dataset) - samples_seen] references = references[: len(eval_dataloader.dataset) - samples_seen] else: samples_seen += references.shape[0] metric.add_batch( predictions=predictions, references=references, ) eval_metric = metric.compute() logger.info(f"epoch {epoch}: {eval_metric}") if args.with_tracking: accelerator.log( { "accuracy": eval_metric, "train_loss": total_loss, "epoch": epoch, "step": completed_steps, }, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: 
accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) repo.push_to_hub( commit_message=f"Training in progress epoch {epoch}", blocking=False, auto_lfs_prune=True ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: feature_extractor.save_pretrained(args.output_dir) if args.push_to_hub: repo.push_to_hub(commit_message="End of training", auto_lfs_prune=True) if args.output_dir is not None: with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump({"eval_accuracy": eval_metric["accuracy"]}, f)
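The comment added in the modified version ("as the size of the training dataloader may have changed") is easy to verify numerically: after `accelerator.prepare()` the dataloader may be sharded across processes, so the per-epoch update count, and therefore `max_train_steps`, has to be derived again. A toy calculation with made-up numbers:

import math

num_batches_after_sharding = 500   # hypothetical length of the prepared dataloader
gradient_accumulation_steps = 4
num_train_epochs = 3

num_update_steps_per_epoch = math.ceil(num_batches_after_sharding / gradient_accumulation_steps)
max_train_steps = num_train_epochs * num_update_steps_per_epoch
print(num_update_steps_per_epoch, max_train_steps)  # 125 375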
42,336
def get_role_list(collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get list of installed collection roles. Only roles that have an argument specification defined are returned. .. note:: Version added: 2.2 :param str collection: A fully qualified collection name used to filter the results. :param str playbook_dir: This parameter is used to sets the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. 
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_list_command(collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error
def get_role_list(collection=None, playbook_dir=None, **kwargs): ''' Run an ``ansible-doc`` command to get list of installed collection roles. Only roles that have an argument specification defined are returned. .. note:: Version added: 2.2 :param str collection: A fully qualified collection name used to filter the results. :param str playbook_dir: This parameter is used to set the relative path to handle playbook adjacent installed roles. :param str runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``. :param str host_cwd: The host current working directory to be mounted within the container (if enabled) and will be the work directory within container. :param dict envvars: Environment variables to be used when running Ansible. Environment variables will also be read from ``env/envvars`` in ``private_data_dir`` :param dict passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``. :param dict settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also be read from ``env/settings`` in ``private_data_dir``. :param str ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run. :param bool quiet: Disable all output :param bool json_mode: Store event data in place of stdout on the console and in the stdout file :param str artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir :param str project_dir: The path to the playbook content, this defaults to 'project' within the private data dir :param int rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default :param int timeout: The timeout value in seconds that will be passed to either ``pexpect`` of ``subprocess`` invocation (based on ``runner_mode`` selected) while executing command. It the timeout is triggered it will force cancel the execution. :param bool process_isolation: Enable process isolation, using a container engine (e.g. podman). :param str process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman) :param str container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel) :param list container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels. (default: None) :param list container_options: List of container options to pass to execution engine. :param str container_workdir: The working directory within the container. :param str fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory. This is only used for 'jsonfile' type fact caches. :param str fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'. :param str private_data_dir: The directory containing all runner metadata needed to invoke the runner module. Output artifacts will also be stored here for later consumption. :param str ident: The run identifier for this invocation of Runner. Will be used to create and name the artifact directory holding the results of the invocation. 
:param function event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event :param function cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False) :param function finished_callback: An optional callback that will be invoked at shutdown after process cleanup. :param function status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout) :param function artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run. :param bool check_job_event_data: Check if job events data is completely generated. If event data is not completely generated and if value is set to 'True' it will raise 'AnsibleRunnerException' exception, if set to 'False' it log a debug message and continue execution. Default value is 'False' :returns: A tuple of response and error string. The response is a python dictionary object (as returned by ansible-doc JSON output) containing each role found, or an empty dict if none are found. ''' event_callback_handler = kwargs.pop('event_handler', None) status_callback_handler = kwargs.pop('status_handler', None) artifacts_handler = kwargs.pop('artifacts_handler', None) cancel_callback = kwargs.pop('cancel_callback', None) finished_callback = kwargs.pop('finished_callback', None) rd = DocConfig(**kwargs) rd.prepare_role_list_command(collection, playbook_dir) r = Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler, cancel_callback=cancel_callback, finished_callback=finished_callback) r.run() response = r.stdout.read() error = r.stderr.read() if response: response = json.loads(sanitize_json_response(response)) return response, error
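A hedged usage sketch of the function documented above; the import path and the collection name are assumptions, not taken from the docstring:

from ansible_runner import get_role_list  # assumed public import path (ansible-runner >= 2.2)

# List roles from an installed collection; `response` is the ansible-doc JSON
# payload (or an empty dict) and `error` is whatever was written to stderr.
response, error = get_role_list(collection="community.general", quiet=True)
for role_name in (response or {}):
    print(role_name)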
39,964
def test_keystore_generate_report_mnemonic_blocked_by_interactive(tmpdir): with pytest.raises(ValueError): _keystore = Keystore.generate(INSECURE_DEVELOPMENT_PASSWORD, keystore_dir=tmpdir, report_mnemonic=True)
def test_keystore_generate_report_mnemonic_blocked_by_interactive(tmpdir): with pytest.raises(ValueError, match="The two values: report_mnemonic and interactive, may not both be `True`"): _keystore = Keystore.generate(INSECURE_DEVELOPMENT_PASSWORD, keystore_dir=tmpdir, report_mnemonic=True)
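The only change in the modified test is the `match` argument; `pytest.raises(..., match=...)` applies the string as a regular expression (via `re.search`) to the text of the raised exception, so a distinctive fragment of the expected message is enough. A standalone illustration:

import pytest

def test_match_is_a_regex_search():
    # A fragment of the message is sufficient; it is searched for, not
    # compared for equality.
    with pytest.raises(ValueError, match="may not both be"):
        raise ValueError("The two values: report_mnemonic and interactive, may not both be `True`")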
38,375
def load_sample(fn=None, progressbar: bool = True, timeout=None, **kwargs): """ Load sample data with yt. This is a simple wrapper around `yt.load` to include fetching data with pooch from remote source. yt sample data can be found at: https://yt-project.org/data. The data registry table can be retrieved and visualized using `yt.sample_data.api.get_data_registry_table()`. This function requires pandas and pooch to be installed. Parameters ---------- fn : str The `filename` of the dataset to load, as defined in the data registry table. progressbar: bool display a progress bar (tqdm). timeout: float or int (optional) Maximal waiting time, in seconds, after which download is aborted. `None` means "no limit". This parameter is directly passed to down to requests.get via pooch.HTTPDownloader Any additional keyword argument is passed down to `yt.load`. Note that in case of collision with predefined keyword arguments as set in the data registry, the ones passed to this function take priority. """ if fn is None: print( "One can see which sample datasets are available at: https://yt-project.org/data\n" "or alternatively by running: yt.sample_data.api.get_data_registry_table()", file=sys.stderr, ) return None from yt.sample_data.api import ( _download_sample_data_file, _get_test_data_dir_path, get_data_registry_table, ) pooch_logger = pooch.utils.get_logger() topdir, _, specific_file = str(fn).partition(os.path.sep) registry_table = get_data_registry_table() # PR 3089 # note: in the future the registry table should be reindexed # so that the following line can be replaced with # # specs = registry_table.loc[fn] # # however we don't want to do it right now because the "filename" column is # currently incomplete try: specs = registry_table.query(f"`filename` == '{topdir}'").iloc[0] except IndexError as err: raise KeyError(f"Could not find '{fn}' in the registry.") from err if not specs["load_name"]: raise ValueError( "Registry appears to be corrupted: could not find a 'load_name' entry for this dataset." ) if not isinstance(specs["load_kwargs"], dict): raise ValueError( "The requested dataset seems to be improperly registered.\n" "Tip: the entry in yt/sample_data_registry.json may inconstistent with " "https://github.com/yt-project/website/blob/master/data/datafiles.json\n" "Please report this to https://github.com/yt-project/yt/issues/new" ) kwargs = {**specs["load_kwargs"], **kwargs} try: data_dir = lookup_on_disk_data(fn) except FileNotFoundError: mylog.info("'%s' is not available locally. Looking up online.", fn) else: # if the data is already available locally, `load_sample` # only acts as a thin wrapper around `load` loadable_path = data_dir.joinpath(specs["load_name"], specific_file) mylog.info("Sample dataset found in '%s'", data_dir) if timeout is not None: mylog.info("Ignoring the `timeout` keyword argument received.") return load(loadable_path, **kwargs) try: save_dir = _get_test_data_dir_path() assert save_dir.is_dir() except (OSError, AssertionError): mylog.warning( "yt test data directory is not properly set up. " "Data will be saved to the current work directory instead." 
) save_dir = Path.cwd() # effectively silence the pooch's logger and create our own log instead pooch_logger.setLevel(100) mylog.info("Downloading from %s", specs["url"]) # downloading via a pooch.Pooch instance behind the scenes filename = urlsplit(specs["url"]).path.split("/")[-1] tmp_file = _download_sample_data_file( filename, progressbar=progressbar, timeout=timeout ) # pooch has functionalities to unpack downloaded archive files, # but it needs to be told in advance that we are downloading a tarball. # Since that information is not necessarily trival to guess from the filename, # we rely on the standard library to perform a conditional unpacking instead. if tarfile.is_tarfile(tmp_file): mylog.info("Untaring downloaded file to '%s'", save_dir) with tarfile.open(tmp_file) as fh: fh.extractall(save_dir) os.remove(tmp_file) else: os.replace(tmp_file, save_dir) loadable_path = Path.joinpath(save_dir, fn, specs["load_name"], specific_file) if specific_file and not loadable_path.exists(): raise ValueError(f"Could not find file '{specific_file}'.") return load(loadable_path, **kwargs)
def load_sample(fn=None, progressbar: bool = True, timeout=None, **kwargs): """ Load sample data with yt. This is a simple wrapper around `yt.load` to include fetching data with pooch from remote source. yt sample data can be found at: https://yt-project.org/data. The data registry table can be retrieved and visualized using `yt.sample_data.api.get_data_registry_table()`. This function requires pandas and pooch to be installed. Parameters ---------- fn : str The `filename` of the dataset to load, as defined in the data registry table. progressbar: bool display a progress bar (tqdm). timeout: float or int (optional) Maximal waiting time, in seconds, after which download is aborted. `None` means "no limit". This parameter is directly passed to down to requests.get via pooch.HTTPDownloader Any additional keyword argument is passed down to `yt.load`. Note that in case of collision with predefined keyword arguments as set in the data registry, the ones passed to this function take priority. """ if fn is None: print( "One can see which sample datasets are available at: https://yt-project.org/data\n" "or alternatively by running: yt.sample_data.api.get_data_registry_table()", file=sys.stderr, ) return None from yt.sample_data.api import ( _download_sample_data_file, _get_test_data_dir_path, get_data_registry_table, ) pooch_logger = pooch.utils.get_logger() topdir, _, specific_file = str(fn).partition(os.path.sep) registry_table = get_data_registry_table() # PR 3089 # note: in the future the registry table should be reindexed # so that the following line can be replaced with # # specs = registry_table.loc[fn] # # however we don't want to do it right now because the "filename" column is # currently incomplete try: specs = registry_table.query(f"`filename` == '{topdir}'").iloc[0] except IndexError as err: raise KeyError(f"Could not find '{fn}' in the registry.") from err if not specs["load_name"]: raise ValueError( "Registry appears to be corrupted: could not find a 'load_name' entry for this dataset." ) if not isinstance(specs["load_kwargs"], dict): raise ValueError( "The requested dataset seems to be improperly registered.\n" "Tip: the entry in yt/sample_data_registry.json may be inconststent with " "https://github.com/yt-project/website/blob/master/data/datafiles.json\n" "Please report this to https://github.com/yt-project/yt/issues/new" ) kwargs = {**specs["load_kwargs"], **kwargs} try: data_dir = lookup_on_disk_data(fn) except FileNotFoundError: mylog.info("'%s' is not available locally. Looking up online.", fn) else: # if the data is already available locally, `load_sample` # only acts as a thin wrapper around `load` loadable_path = data_dir.joinpath(specs["load_name"], specific_file) mylog.info("Sample dataset found in '%s'", data_dir) if timeout is not None: mylog.info("Ignoring the `timeout` keyword argument received.") return load(loadable_path, **kwargs) try: save_dir = _get_test_data_dir_path() assert save_dir.is_dir() except (OSError, AssertionError): mylog.warning( "yt test data directory is not properly set up. " "Data will be saved to the current work directory instead." 
) save_dir = Path.cwd() # effectively silence the pooch's logger and create our own log instead pooch_logger.setLevel(100) mylog.info("Downloading from %s", specs["url"]) # downloading via a pooch.Pooch instance behind the scenes filename = urlsplit(specs["url"]).path.split("/")[-1] tmp_file = _download_sample_data_file( filename, progressbar=progressbar, timeout=timeout ) # pooch has functionalities to unpack downloaded archive files, # but it needs to be told in advance that we are downloading a tarball. # Since that information is not necessarily trivial to guess from the filename, # we rely on the standard library to perform a conditional unpacking instead. if tarfile.is_tarfile(tmp_file): mylog.info("Untarring downloaded file to '%s'", save_dir) with tarfile.open(tmp_file) as fh: fh.extractall(save_dir) os.remove(tmp_file) else: os.replace(tmp_file, save_dir) loadable_path = Path.joinpath(save_dir, fn, specs["load_name"], specific_file) if specific_file and not loadable_path.exists(): raise ValueError(f"Could not find file '{specific_file}'.") return load(loadable_path, **kwargs)
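A short usage sketch for the function above; it assumes yt is installed and that "IsolatedGalaxy" is a dataset name present in yt's sample-data registry (assumptions about the environment, not guaranteed by the snippet).

import yt

# Download (or reuse a local copy of) the sample dataset and load it.
ds = yt.load_sample("IsolatedGalaxy", progressbar=True, timeout=300)
print(ds.domain_width)  # quick sanity check that the dataset loaded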
5,620
def decluster_peaks(x, x_th=None, method='mean', order=1, runs=0): """ Find and decluster peaks inside a signal. This function takes a one-dimensional array, finds all local maxima above a threshold, divides them into clusters, and ultimately applies a declustering method. Parameters ---------- x : sequence A signal with peaks. x_th : number or ndarray or sequence Only peaks above this threshold are selected. The threshold may also be used as a declustering parameter. If ``None`` is passed, the mean of the signal is used. method : str The declustering method to use. Accepts ``mean`` or ``runs``. See Notes. order : int N-th (>= 1) largest peaks to return from each cluster. For instance, ``order = 1`` yields the largest, ``order = 2`` yields the two largest, ..., from each cluster. runs : int Required parameter for declustering if ``method`` is ``runs``, and ignored otherwise. Returns ------- peaks : tuple Tuple with arrays containing the indecies of selected peaks in each cluster. Indecies are ordered (descending) with respect to value of the peaks. Notes ----- A useful assumption in statistical analysis of peaks, (e.g. extreme value analysis where data are fitted to suitable distribution functions), is that data are independent. However, this is necessarily NOT the case when dealing with many real world scenarios, e.g. dynamic response of mechanical systems to environmental loads. Declustering is a pragmatic method to sub-sample peaks from stationary stochastic signals to (hopefully) achieve statistical independence by spacing out peaks in a systematic manner. Here, 2 declustering approaches are implemented; mean-upcrossing and runs. Mean-upcrossing (``mean``) declustering is as following: 1. Identify clusters of exceedences, i.e., find all peaks between two upcrossings above the signal mean value. 2. Select only the n-th largest peaks from each cluster (cf. ``order``). 3. Exclude peaks that falls below the threshold (cf. ``x_th``). Runs (``runs``) declustering is as following: 1. Identify clusters of exceedences, i.e., find all peaks between two upcrossings above the threshold (cf. ``x_th``). 2. Merge clusters that are seperated with less than k runs (cf. ``runs``). The number of peaks below the threshold seperating a down-crossing and the subsequent up-crossing is called runs. 3. Select only the n-th largest peaks from each cluster (cf. ``order``). The difference between the methods are subtle, and they even overplap for ``x_th = x.mean()`` and ``runs = 0``. References ---------- .. [1] Coles S, (2001), An Introduction to Statistical Modelling of Extreme Values. Springer. See Also -------- find_peaks_cwt Find peaks using the wavelet transformation. find_peaks Find peaks inside a signal based on peak properties. 
""" x = _arg_x_as_expected(x) if x_th is None: x_th = x.mean() if method == 'mean': runs = 0 x_up = x.mean() elif method == 'runs': x_up = x_th else: raise ValueError("method must be 'mean' or 'runs.'") index = np.arange(len(x), dtype=int) crossups, = argcrossup(x, threshold=x_up) boolrelmax = np.zeros(len(x), dtype=bool) index_p, _, _ = _local_maxima_1d(x) boolrelmax[index_p] = True boolthreshold = (x > x_th) boolrelmax_above = boolrelmax & boolthreshold boolrelmax_below = boolrelmax & ~boolthreshold peaks_above = [] peaks_below = [] for i, j in zip(crossups[:-1], crossups[1:]): index_i = index[i:j] peaks_above.append(index_i[boolrelmax_above[i:j]]) peaks_below.append(index_i[boolrelmax_below[i:j]]) peaks = tuple( block[x[block].argsort()[-order:]] for block in _merge_clusters(peaks_above, peaks_below, runs=runs) ) return peaks
def decluster_peaks(x, x_th=None, method='mean', order=1, runs=0): """ Find and decluster peaks inside a signal. This function takes a one-dimensional array, finds all local maxima above a threshold, divides them into clusters, and ultimately applies a declustering method. Parameters ---------- x : sequence A signal with peaks. x_th : number or ndarray or sequence Only peaks above this threshold are selected. The threshold may also be used as a declustering parameter. If ``None`` is passed, the mean of the signal is used. method : str The declustering method to use. Accepts ``mean`` or ``runs``. See Notes. order : int N-th (>= 1) largest peaks to return from each cluster. For instance, ``order = 1`` yields the largest, ``order = 2`` yields the two largest, ..., from each cluster. runs : int Required parameter for declustering if ``method`` is ``runs``, and ignored otherwise. Returns ------- peaks : tuple Tuple with arrays containing the indices of selected peaks in each cluster. Indices are ordered (descending) with respect to value of the peaks. Notes ----- A useful assumption in statistical analysis of peaks (e.g. extreme value analysis where data are fitted to suitable distribution functions) is that data are independent. However, this is NOT necessarily the case when dealing with many real-world scenarios, e.g. dynamic response of mechanical systems to environmental loads. Declustering is a pragmatic method to sub-sample peaks from stationary stochastic signals to (hopefully) achieve statistical independence by spacing out peaks in a systematic manner. Here, two declustering approaches are implemented: mean-upcrossing and runs. Mean-upcrossing (``mean``) declustering is as follows: 1. Identify clusters of exceedances, i.e., find all peaks between two upcrossings above the signal mean value. 2. Select only the n-th largest peaks from each cluster (cf. ``order``). 3. Exclude peaks that fall below the threshold (cf. ``x_th``). Runs (``runs``) declustering is as follows: 1. Identify clusters of exceedances, i.e., find all peaks between two upcrossings above the threshold (cf. ``x_th``). 2. Merge clusters that are separated by fewer than k runs (cf. ``runs``). The number of peaks below the threshold separating a down-crossing and the subsequent up-crossing is called runs. 3. Select only the n-th largest peaks from each cluster (cf. ``order``). The differences between the methods are subtle, and they even overlap for ``x_th = x.mean()`` and ``runs = 0``. References ---------- .. [1] Coles S, (2001), An Introduction to Statistical Modelling of Extreme Values. Springer. See Also -------- find_peaks_cwt Find peaks using the wavelet transformation. find_peaks Find peaks inside a signal based on peak properties.
""" x = _arg_x_as_expected(x) if x_th is None: x_th = x.mean() if method == 'mean': runs = 0 x_up = x.mean() elif method == 'runs': x_up = x_th else: raise ValueError("method must be 'mean' or 'runs.'") index = np.arange(len(x), dtype=int) crossups, = argcrossup(x, threshold=x_up) boolrelmax = np.zeros(len(x), dtype=bool) index_p, _, _ = _local_maxima_1d(x) boolrelmax[index_p] = True boolthreshold = (x > x_th) boolrelmax_above = boolrelmax & boolthreshold boolrelmax_below = boolrelmax & ~boolthreshold peaks_above = [] peaks_below = [] for i, j in zip(crossups[:-1], crossups[1:]): index_i = index[i:j] peaks_above.append(index_i[boolrelmax_above[i:j]]) peaks_below.append(index_i[boolrelmax_below[i:j]]) peaks = tuple( block[x[block].argsort()[-order:]] for block in _merge_clusters(peaks_above, peaks_below, runs=runs) ) return peaks
56,245
def build_argparser(): parser = ArgumentParser() general = parser.add_argument_group('General') general.add_argument('-i', '--input', required=True, help='Required. An input to process. The input must be a single image, ' 'a folder of images, video file or camera id.') general.add_argument('--loop', default=False, action='store_true', help='Optional. Enable reading the input in a loop.') general.add_argument('-o', '--output', help='Optional. Name of output to save.') general.add_argument('-limit', '--output_limit', default=1000, type=int, help='Optional. Number of frames to store in output. ' 'If 0 is set, all frames are stored.') general.add_argument('--output_resolution', default=None, type=resolution, help='Optional. Specify the maximum output window resolution ' 'in (width x height) format. Example: 1280x720. ' 'Input frame size used by default.') general.add_argument('--no_show', action='store_true', help="Optional. Don't show output.") general.add_argument('-cw', '--crop_width', default=0, type=int, help='Optional. Crop the input stream to this width. ' 'Both -cw and -ch parameters should be specified ' 'to use crop.') general.add_argument('-ch', '--crop_height', default=0, type=int, help='Optional. Crop the input stream to this height. ' 'Both -cw and -ch parameters should be specified ' 'to use crop.') general.add_argument('--match_algo', default='HUNGARIAN', choices=('HUNGARIAN', 'MIN_DIST'), help='Optional. Algorithm for face matching. Default: HUNGARIAN.') general.add_argument('-u', '--utilization_monitors', default='', type=str, help='Optional. List of monitors to show initially.') gallery = parser.add_argument_group('Faces database') gallery.add_argument('-fg', type=Path, required=True, help='Required. Path to the face images directory.') gallery.add_argument('--run_detector', action='store_true', help='Optional. Use Face Detection model to find faces ' 'on the face images, otherwise use full images.') gallery.add_argument('--allow_grow', action='store_true', help='Optional. Allow to grow faces gallery and to dump on disk. ' 'Available only if --no_show option is off.') models = parser.add_argument_group('Models') models.add_argument('-m_fd', type=Path, required=True, help='Required. Path to an .xml file with Face Detection model.') models.add_argument('-m_lm', type=Path, required=True, help='Required. Path to an .xml file with Facial Landmarks Detection model.') models.add_argument('-m_reid', type=Path, required=True, help='Required. Path to an .xml file with Face Reidentification model.') models.add_argument('-fd_iw', '--fd_input_width', default=0, type=int, help='Optional. Specify the input width of detection model. ' 'Both -fd_iw and -fd_ih parameters should be specified ' 'for reshape.') models.add_argument('-fd_ih', '--fd_input_height', default=0, type=int, help='Optional. Specify the input height of detection model. ' 'Both -fd_iw and -fd_ih parameters should be specified ' 'for reshape.') infer = parser.add_argument_group('Inference options') infer.add_argument('-d_fd', default='CPU', choices=DEVICE_KINDS, help='Optional. Target device for Face Detection model. ' 'Default value is CPU.') infer.add_argument('-d_lm', default='CPU', choices=DEVICE_KINDS, help='Optional. Target device for Facial Landmarks Detection ' 'model. Default value is CPU.') infer.add_argument('-d_reid', default='CPU', choices=DEVICE_KINDS, help='Optional. Target device for Face Reidentification ' 'model. 
Default value is CPU.') infer.add_argument('-l', '--cpu_lib', metavar="PATH", default='', help='Optional. For MKLDNN (CPU)-targeted custom layers, ' 'if any. Path to a shared library with custom ' 'layers implementations.') infer.add_argument('-c', '--gpu_lib', metavar="PATH", default='', help='Optional. For clDNN (GPU)-targeted custom layers, ' 'if any. Path to the XML file with descriptions ' 'of the kernels.') infer.add_argument('-v', '--verbose', action='store_true', help='Optional. Be more verbose.') infer.add_argument('-pc', '--perf_stats', action='store_true', help='Optional. Output detailed per-layer performance stats.') infer.add_argument('-t_fd', metavar='[0..1]', type=float, default=0.6, help='Optional. Probability threshold for face detections.') infer.add_argument('-t_id', metavar='[0..1]', type=float, default=0.3, help='Optional. Cosine distance threshold between two vectors ' 'for face identification.') infer.add_argument('-exp_r_fd', metavar='NUMBER', type=float, default=1.15, help='Optional. Scaling ratio for bboxes passed to face recognition.') return parser
def build_argparser(): parser = ArgumentParser() general = parser.add_argument_group('General') general.add_argument('-i', '--input', required=True, help='Required. An input to process. The input must be a single image, ' 'a folder of images, video file or camera id.') general.add_argument('--loop', default=False, action='store_true', help='Optional. Enable reading the input in a loop.') general.add_argument('-o', '--output', help='Optional. Name of output to save.') general.add_argument('-limit', '--output_limit', default=1000, type=int, help='Optional. Number of frames to store in output. ' 'If 0 is set, all frames are stored.') general.add_argument('--output_resolution', default=None, type=resolution, help='Optional. Specify the maximum output window resolution ' 'in (width x height) format. Example: 1280x720. ' 'Input frame size used by default.') general.add_argument('--no_show', action='store_true', help="Optional. Don't show output.") general.add_argument('-cw', '--crop_width', default=0, type=int, help='Optional. Crop the input stream to this width. ' 'Both -cw and -ch parameters should be specified ' 'to use crop.') general.add_argument('-ch', '--crop_height', default=0, type=int, help='Optional. Crop the input stream to this height. ' 'Both -cw and -ch parameters should be specified ' 'to use crop.') general.add_argument('--match_algo', default='HUNGARIAN', choices=('HUNGARIAN', 'MIN_DIST'), help='Optional. Algorithm for face matching. Default: HUNGARIAN.') general.add_argument('-u', '--utilization_monitors', default='', type=str, help='Optional. List of monitors to show initially.') gallery = parser.add_argument_group('Faces database') gallery.add_argument('-fg', type=Path, required=True, help='Required. Path to the face images directory.') gallery.add_argument('--run_detector', action='store_true', help='Optional. Use Face Detection model to find faces ' 'on the face images, otherwise use full images.') gallery.add_argument('--allow_grow', action='store_true', help='Optional. Allow to grow faces gallery and to dump on disk. ' 'Available only if --no_show option is off.') models = parser.add_argument_group('Models') models.add_argument('-m_fd', type=Path, required=True, help='Required. Path to an .xml file with Face Detection model.') models.add_argument('-m_lm', type=Path, required=True, help='Required. Path to an .xml file with Facial Landmarks Detection model.') models.add_argument('-m_reid', type=Path, required=True, help='Required. Path to an .xml file with Face Reidentification model.') models.add_argument('-fd_iw', '--fd_input_width', default=0, type=int, help='Optional. Specify the input width of detection model. ' 'Both -fd_iw and -fd_ih parameters should be specified ' 'for reshape.') models.add_argument('-fd_ih', '--fd_input_height', default=0, type=int, help='Optional. Specify the input height of detection model. ' 'Both -fd_iw and -fd_ih parameters should be specified ' 'for reshape.') infer = parser.add_argument_group('Inference options') infer.add_argument('-d_fd', default='CPU', choices=DEVICE_KINDS, help='Optional. Target device for Face Detection model. ' 'Default value is CPU.') infer.add_argument('-d_lm', default='CPU', choices=DEVICE_KINDS, help='Optional. Target device for Facial Landmarks Detection ' 'model. Default value is CPU.') infer.add_argument('-d_reid', default='CPU', choices=DEVICE_KINDS, help='Optional. Target device for Face Reidentification ' 'model. 
Default value is CPU.') infer.add_argument('-l', '--cpu_lib', metavar="PATH", default='', help='Optional. For MKLDNN (CPU)-targeted custom layers, ' 'if any. Path to a shared library with custom ' 'layers implementations.') infer.add_argument('-c', '--gpu_lib', metavar="PATH", default='', help='Optional. For clDNN (GPU)-targeted custom layers, ' 'if any. Path to the XML file with descriptions ' 'of the kernels.') infer.add_argument('-v', '--verbose', action='store_true', help='Optional. Be more verbose.') infer.add_argument('-pc', '--perf_stats', action='store_true', help='Optional. Output detailed per-layer performance stats.') infer.add_argument('-t_fd', metavar='[0..1]', type=float, default=0.6, help='Optional. Probability threshold for face detections.') infer.add_argument('-t_id', metavar='[0..1]', type=float, default=0.3, help='Optional. Cosine distance threshold between two vectors ' 'for face identification.') infer.add_argument('-exp_r_fd', metavar='NUMBER', type=float, default=1.15, help='Optional. Scaling ratio for bboxes passed to face recognition.') return parser
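The sketch below shows how a parser built this way is typically consumed; the camera id and file paths are placeholders, and it assumes Path, resolution and DEVICE_KINDS are defined in the same script as above.

args = build_argparser().parse_args([
    '-i', '0',                       # camera id
    '-fg', 'face_gallery',           # placeholder gallery directory
    '-m_fd', 'face-detection.xml',   # placeholder model paths
    '-m_lm', 'landmarks.xml',
    '-m_reid', 'reid.xml',
])
print(args.t_fd, args.match_algo, args.d_fd)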
58,570
def _configure_subnet(config): # No-op out this function if subnets are explicitly specified # for all node types. (It is a user error if explicitly specified # subnets are invalid.) need_to_configure_subnets = False for key, node_type in config["available_node_types"].items(): node_config = node_type["node_config"] if "SubnetIds" not in node_config: need_to_configure_subnets = True break if not need_to_configure_subnets: return ec2 = _resource("ec2", config) use_internal_ips = config["provider"].get("use_internal_ips", False) # If head or worker security group is specified, filter down to subnets # belonging to the same VPC as the security group. sg_ids = [] for node_type in config["available_node_types"].values(): node_config = node_type["node_config"] sg_ids.extend(node_config.get("SecurityGroupIds", [])) if sg_ids: vpc_id_of_sg = _get_vpc_id_of_sg(sg_ids, config) else: vpc_id_of_sg = None try: candidate_subnets = ec2.subnets.all() if vpc_id_of_sg: candidate_subnets = [ s for s in candidate_subnets if s.vpc_id == vpc_id_of_sg ] subnets = sorted( (s for s in candidate_subnets if s.state == "available" and ( use_internal_ips or s.map_public_ip_on_launch)), reverse=True, # sort from Z-A key=lambda subnet: subnet.availability_zone) except botocore.exceptions.ClientError as exc: handle_boto_error(exc, "Failed to fetch available subnets from AWS.") raise exc if not subnets: cli_logger.abort( "No usable subnets found, try manually creating an instance in " "your specified region to populate the list of subnets " "and trying this again.\n" "Note that the subnet must map public IPs " "on instance launch unless you set `use_internal_ips: true` in " "the `provider` config.") if "availability_zone" in config["provider"]: azs = config["provider"]["availability_zone"].split(",") subnets = [ s for az in azs # Iterate over AZs first to maintain the ordering for s in subnets if s.availability_zone == az ] if not subnets: cli_logger.abort( "No usable subnets matching availability zone {} found.\n" "Choose a different availability zone or try " "manually creating an instance in your specified region " "to populate the list of subnets and trying this again.", config["provider"]["availability_zone"]) # Use subnets in only one VPC, so that _configure_security_groups only # needs to create a security group in this one VPC. Otherwise, we'd need # to set up security groups in all of the user's VPCs and set up networking # rules to allow traffic between these groups. # See https://github.com/ray-project/ray/pull/14868. subnet_ids = [ s.subnet_id for s in subnets if s.vpc_id == subnets[0].vpc_id ] # map from node type key -> source of SubnetIds field subnet_src_info = {} _set_config_info(subnet_src=subnet_src_info) for key, node_type in config["available_node_types"].items(): node_config = node_type["node_config"] if "SubnetIds" not in node_config: subnet_src_info[key] = "default" node_config["SubnetIds"] = subnet_ids else: subnet_src_info[key] = "config" return config
def _configure_subnet(config): # No-op out this function if subnets are explicitly specified # for all node types. (It is a user error if explicitly specified # subnets are invalid.) need_to_configure_subnets = any( "SubnetIds" not in node_type_config["node_config"] for node_type_config in config["available_node_types"].values() ) if not need_to_configure_subnets: return ec2 = _resource("ec2", config) use_internal_ips = config["provider"].get("use_internal_ips", False) # If head or worker security group is specified, filter down to subnets # belonging to the same VPC as the security group. sg_ids = [] for node_type in config["available_node_types"].values(): node_config = node_type["node_config"] sg_ids.extend(node_config.get("SecurityGroupIds", [])) if sg_ids: vpc_id_of_sg = _get_vpc_id_of_sg(sg_ids, config) else: vpc_id_of_sg = None try: candidate_subnets = ec2.subnets.all() if vpc_id_of_sg: candidate_subnets = [ s for s in candidate_subnets if s.vpc_id == vpc_id_of_sg ] subnets = sorted( (s for s in candidate_subnets if s.state == "available" and ( use_internal_ips or s.map_public_ip_on_launch)), reverse=True, # sort from Z-A key=lambda subnet: subnet.availability_zone) except botocore.exceptions.ClientError as exc: handle_boto_error(exc, "Failed to fetch available subnets from AWS.") raise exc if not subnets: cli_logger.abort( "No usable subnets found, try manually creating an instance in " "your specified region to populate the list of subnets " "and trying this again.\n" "Note that the subnet must map public IPs " "on instance launch unless you set `use_internal_ips: true` in " "the `provider` config.") if "availability_zone" in config["provider"]: azs = config["provider"]["availability_zone"].split(",") subnets = [ s for az in azs # Iterate over AZs first to maintain the ordering for s in subnets if s.availability_zone == az ] if not subnets: cli_logger.abort( "No usable subnets matching availability zone {} found.\n" "Choose a different availability zone or try " "manually creating an instance in your specified region " "to populate the list of subnets and trying this again.", config["provider"]["availability_zone"]) # Use subnets in only one VPC, so that _configure_security_groups only # needs to create a security group in this one VPC. Otherwise, we'd need # to set up security groups in all of the user's VPCs and set up networking # rules to allow traffic between these groups. # See https://github.com/ray-project/ray/pull/14868. subnet_ids = [ s.subnet_id for s in subnets if s.vpc_id == subnets[0].vpc_id ] # map from node type key -> source of SubnetIds field subnet_src_info = {} _set_config_info(subnet_src=subnet_src_info) for key, node_type in config["available_node_types"].items(): node_config = node_type["node_config"] if "SubnetIds" not in node_config: subnet_src_info[key] = "default" node_config["SubnetIds"] = subnet_ids else: subnet_src_info[key] = "config" return config
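A self-contained toy version of the early-exit check used above: subnet configuration proceeds only if at least one node type has no SubnetIds pinned. The config shape mirrors a Ray autoscaler config, but the ids are invented.

config = {
    "available_node_types": {
        "ray.head.default": {"node_config": {"SubnetIds": ["subnet-0abc"]}},
        "ray.worker.default": {"node_config": {}},
    }
}
need_to_configure_subnets = any(
    "SubnetIds" not in node_type["node_config"]
    for node_type in config["available_node_types"].values()
)
print(need_to_configure_subnets)  # True: the worker type still needs subnets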
32,140
def remove_duplicates_from_list_arg(args, field): """ Removes duplicates from a dict after calling argToList. For example: args: {'ids': "1,2,1"} , field='ids' The return output will be ["1","2"] :type args: dict :param args: Args to be converted (required) :type field: str :param field: Field in args to be converted into list without duplicates (required) :return: A python list of args without duplicates :rtype: ``list`` """ convert_to_list = argToList(args.get(field)) return list(set(convert_to_list))
def remove_duplicates_from_list_arg(args, field): """ Removes duplicates from a dict after calling argToList. For example: args: {'ids': "1,2,1"} , field='ids' The return output will be ["1","2"] :type args: dict :param args: Args to be converted (required) :type field: ``str`` :param field: Field in args to be converted into list without duplicates (required) :return: A python list of args without duplicates :rtype: ``list`` """ convert_to_list = argToList(args.get(field)) return list(set(convert_to_list))
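A minimal illustration of the helper's behaviour, with str.split standing in for argToList (which is not shown here); note that round-tripping through set() drops duplicates but does not preserve the original order.

args = {'ids': '1,2,1'}
values = args['ids'].split(',')   # stand-in for argToList(args.get('ids'))
print(list(set(values)))          # ['1', '2'] in arbitrary order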
5,191
def switch_backend(newbackend): """ Close all open figures and set the Matplotlib backend. The argument is case-insensitive. Switching to an interactive backend is possible only if no event loop for another interactive backend has started. Switching to and from non-interactive backends is always possible. Parameters ---------- newbackend : str The name of the backend to use. """ global _backend_mod # make sure the init is pulled up so we can assign to it later import matplotlib.backends close("all") if newbackend is rcsetup._auto_backend_sentinel: current_framework = cbook._get_running_interactive_framework() mapping = {'qt': 'qtagg', 'gtk3': 'gtk3agg', 'gtk4': 'gtk4agg', 'wx': 'wxagg', 'tk': 'tkagg', 'macosx': 'macosx', 'headless': 'agg'} best_guess = mapping.get(current_framework, None) if best_guess is not None: candidates = [best_guess] else: candidates = [] candidates += [ "macosx", "qtagg", "gtk4agg", "gtk3agg", "tkagg", "wxagg"] # Don't try to fallback on the cairo-based backends as they each have # an additional dependency (pycairo) over the agg-based backend, and # are of worse quality. for candidate in candidates: try: switch_backend(candidate) except ImportError: continue else: rcParamsOrig['backend'] = candidate return else: # Switching to Agg should always succeed; if it doesn't, let the # exception propagate out. switch_backend("agg") rcParamsOrig["backend"] = "agg" return backend_mod = importlib.import_module( cbook._backend_module_name(newbackend)) canvas_class = backend_mod.FigureCanvas required_framework = _get_required_interactive_framework(backend_mod) if required_framework is not None: current_framework = cbook._get_running_interactive_framework() if (current_framework and required_framework and current_framework != required_framework): raise ImportError( "Cannot load backend {!r} which requires the {!r} interactive " "framework, as {!r} is currently running".format( newbackend, required_framework, current_framework)) # Load the new_figure_manager(), draw_if_interactive(), and show() # functions from the backend. # Classically, backends can directly export these functions. This should # keep working for backcompat. new_figure_manager = getattr(backend_mod, "new_figure_manager", None) draw_if_interactive = getattr(backend_mod, "draw_if_interactive", None) show = getattr(backend_mod, "show", None) # In that classical approach are implemented as modules, but "inherit" # default method implementations from backend_bases._Backend. This is # achieved by creating a "class" that inherits from backend_bases._Backend # and whose body is filled with the module's globals. class backend_mod(matplotlib.backend_bases._Backend): locals().update(vars(backend_mod)) # However, the newer approach for defining new_figure_manager (and, in # the future, draw_if_interactive and show) is to derive them from canvas # methods. In that case, also update backend_mod accordingly. 
if new_figure_manager is None: def new_figure_manager_given_figure(num, figure): return canvas_class.new_manager(figure, num) def new_figure_manager(num, *args, FigureClass=Figure, **kwargs): fig = FigureClass(*args, **kwargs) return new_figure_manager_given_figure(num, fig) backend_mod.new_figure_manager_given_figure = \ new_figure_manager_given_figure backend_mod.new_figure_manager = new_figure_manager _log.debug("Loaded backend %s version %s.", newbackend, backend_mod.backend_version) rcParams['backend'] = rcParamsDefault['backend'] = newbackend _backend_mod = backend_mod for func_name in ["new_figure_manager", "draw_if_interactive", "show"]: globals()[func_name].__signature__ = inspect.signature( getattr(backend_mod, func_name)) # Need to keep a global reference to the backend for compatibility reasons. # See https://github.com/matplotlib/matplotlib/issues/6092 matplotlib.backends.backend = newbackend
def switch_backend(newbackend): """ Close all open figures and set the Matplotlib backend. The argument is case-insensitive. Switching to an interactive backend is possible only if no event loop for another interactive backend has started. Switching to and from non-interactive backends is always possible. Parameters ---------- newbackend : str The name of the backend to use. """ global _backend_mod # make sure the init is pulled up so we can assign to it later import matplotlib.backends close("all") if newbackend is rcsetup._auto_backend_sentinel: current_framework = cbook._get_running_interactive_framework() mapping = {'qt': 'qtagg', 'gtk3': 'gtk3agg', 'gtk4': 'gtk4agg', 'wx': 'wxagg', 'tk': 'tkagg', 'macosx': 'macosx', 'headless': 'agg'} best_guess = mapping.get(current_framework, None) if best_guess is not None: candidates = [best_guess] else: candidates = [] candidates += [ "macosx", "qtagg", "gtk4agg", "gtk3agg", "tkagg", "wxagg"] # Don't try to fallback on the cairo-based backends as they each have # an additional dependency (pycairo) over the agg-based backend, and # are of worse quality. for candidate in candidates: try: switch_backend(candidate) except ImportError: continue else: rcParamsOrig['backend'] = candidate return else: # Switching to Agg should always succeed; if it doesn't, let the # exception propagate out. switch_backend("agg") rcParamsOrig["backend"] = "agg" return backend_mod = importlib.import_module( cbook._backend_module_name(newbackend)) canvas_class = backend_mod.FigureCanvas required_framework = _get_required_interactive_framework(backend_mod) if required_framework is not None: current_framework = cbook._get_running_interactive_framework() if (current_framework and required_framework and current_framework != required_framework): raise ImportError( "Cannot load backend {!r} which requires the {!r} interactive " "framework, as {!r} is currently running".format( newbackend, required_framework, current_framework)) # Load the new_figure_manager(), draw_if_interactive(), and show() # functions from the backend. # Classically, backends can directly export these functions. This should # keep working for backcompat. new_figure_manager = getattr(backend_mod, "new_figure_manager", None) draw_if_interactive = getattr(backend_mod, "draw_if_interactive", None) show = getattr(backend_mod, "show", None) # In that classical approach backends are implemented as modules, but "inherit" # default method implementations from backend_bases._Backend. This is # achieved by creating a "class" that inherits from backend_bases._Backend # and whose body is filled with the module's globals. class backend_mod(matplotlib.backend_bases._Backend): locals().update(vars(backend_mod)) # However, the newer approach for defining new_figure_manager (and, in # the future, draw_if_interactive and show) is to derive them from canvas # methods. In that case, also update backend_mod accordingly. 
if new_figure_manager is None: def new_figure_manager_given_figure(num, figure): return canvas_class.new_manager(figure, num) def new_figure_manager(num, *args, FigureClass=Figure, **kwargs): fig = FigureClass(*args, **kwargs) return new_figure_manager_given_figure(num, fig) backend_mod.new_figure_manager_given_figure = \ new_figure_manager_given_figure backend_mod.new_figure_manager = new_figure_manager _log.debug("Loaded backend %s version %s.", newbackend, backend_mod.backend_version) rcParams['backend'] = rcParamsDefault['backend'] = newbackend _backend_mod = backend_mod for func_name in ["new_figure_manager", "draw_if_interactive", "show"]: globals()[func_name].__signature__ = inspect.signature( getattr(backend_mod, func_name)) # Need to keep a global reference to the backend for compatibility reasons. # See https://github.com/matplotlib/matplotlib/issues/6092 matplotlib.backends.backend = newbackend
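Typical use goes through the pyplot alias rather than calling the module-level function directly; switching to the non-interactive Agg backend should succeed even on a headless machine.

import matplotlib.pyplot as plt

plt.switch_backend("agg")   # non-interactive, always importable
print(plt.get_backend())    # -> 'agg'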
11,980
def add_shading(elevation, azimuth, altitude): """Add shading to SRTM elevation data, using azimuth and altitude of the sun. Parameters ---------- elevation SRTM elevation data (in meters) azimuth Azimuth of the Sun (in degrees) altitude Altitude of the Sun (in degrees) Return shaded SRTM relief map. """ azimuth = np.deg2rad(azimuth) altitude = np.deg2rad(altitude) x, y = np.gradient(elevation) slope = np.pi / 2. - np.arctan(np.sqrt(x * x + y * y)) # -x here because of pixel orders in the SRTM tile aspect = np.arctan2(-x, y) shaded = np.sin(altitude) * np.sin(slope)\ + np.cos(altitude) * np.cos(slope)\ * np.cos((azimuth - np.pi / 2.) - aspect) return shaded
def add_shading(elevation, azimuth, altitude): """Add shading to SRTM elevation data, using azimuth and altitude of the sun. Parameters ---------- elevation SRTM elevation data (in meters) azimuth Azimuth of the Sun (in degrees) altitude Altitude of the Sun (in degrees) Return shaded SRTM relief map. """ azimuth = np.deg2rad(azimuth) altitude = np.deg2rad(altitude) x, y = np.gradient(elevation) slope = np.pi / 2 - np.arctan(np.sqrt(x * x + y * y)) # -x here because of pixel orders in the SRTM tile aspect = np.arctan2(-x, y) shaded = np.sin(altitude) * np.sin(slope)\ + np.cos(altitude) * np.cos(slope)\ * np.cos((azimuth - np.pi / 2.) - aspect) return shaded
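A synthetic check of the hillshading formula above, with a Gaussian bump standing in for SRTM elevation data; it assumes numpy is imported as np in the module that defines add_shading.

import numpy as np

yy, xx = np.mgrid[-50:50, -50:50]
elevation = 1000.0 * np.exp(-(xx ** 2 + yy ** 2) / (2 * 20.0 ** 2))  # fake hill

shaded = add_shading(elevation, azimuth=315.0, altitude=45.0)
print(shaded.shape, float(shaded.min()), float(shaded.max()))  # values roughly in [-1, 1]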
3,688
def compile(source, modulename='untitled', extra_args='', verbose=True, source_fn=None, extension='.f', full_output=False ): """ Build extension module from a Fortran 77 source string with f2py. Parameters ---------- source : str or bytes Fortran source of module / subroutine to compile .. versionchanged:: 1.16.0 Accept str as well as bytes modulename : str, optional The name of the compiled python module extra_args : str or list, optional Additional parameters passed to f2py .. versionchanged:: 1.16.0 A list of args may also be provided. verbose : bool, optional Print f2py output to screen source_fn : str, optional Name of the file where the fortran source is written. The default is to use a temporary file with the extension provided by the `extension` parameter extension : {'.f', '.f90'}, optional Filename extension if `source_fn` is not provided. The extension tells which fortran standard is used. The default is `.f`, which implies F77 standard. .. versionadded:: 1.11.0 full_output : bool, optional If True, return a `subprocess.CompletedProcess` containing the stdout and stderr of the compile process, instead of just the status code. .. versionadded:: 1.20.0 Returns ------- result : int or `subprocess.CompletedProcess` 0 on success, or a `subprocess.CompletedProcess` if ``full_output=True`` Examples -------- .. include:: compile_session.dat :literal: """ import tempfile import shlex if source_fn is None: f, fname = tempfile.mkstemp(suffix=extension) # f is a file descriptor so need to close it # carefully -- not with .close() directly os.close(f) else: fname = source_fn if not isinstance(source, str): source = str(source, 'utf-8') try: with open(fname, 'w') as f: f.write(source) args = ['-c', '-m', modulename, f.name] if isinstance(extra_args, str): is_posix = (os.name == 'posix') extra_args = shlex.split(extra_args, posix=is_posix) args.extend(extra_args) c = [sys.executable, '-c', 'import numpy.f2py as f2py2e;f2py2e.main()'] + args try: cp = subprocess.run(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: # preserve historic status code used by exec_command() cp = subprocess.CompletedProcess(c, 127, stdout='', stderr='') if verbose: print(cp.stdout.decode()) finally: if source_fn is None: os.remove(fname) if full_output: return cp else: return cp.returncode
def compile(source, modulename='untitled', extra_args='', verbose=True, source_fn=None, extension='.f', full_output=False ): """ Build extension module from a Fortran 77 source string with f2py. Parameters ---------- source : str or bytes Fortran source of module / subroutine to compile .. versionchanged:: 1.16.0 Accept str as well as bytes modulename : str, optional The name of the compiled python module extra_args : str or list, optional Additional parameters passed to f2py .. versionchanged:: 1.16.0 A list of args may also be provided. verbose : bool, optional Print f2py output to screen source_fn : str, optional Name of the file where the fortran source is written. The default is to use a temporary file with the extension provided by the `extension` parameter extension : {'.f', '.f90'}, optional Filename extension if `source_fn` is not provided. The extension tells which fortran standard is used. The default is `.f`, which implies F77 standard. .. versionadded:: 1.11.0 full_output : bool, optional If True, return a `subprocess.CompletedProcess` containing the stdout and stderr of the compile process, instead of just the status code. .. versionadded:: 1.20.0 Returns ------- result : int or `subprocess.CompletedProcess` 0 on success, or a `subprocess.CompletedProcess` if ``full_output=True`` Examples -------- .. include:: compile_session.dat :literal: """ import tempfile import shlex if source_fn is None: f, fname = tempfile.mkstemp(suffix=extension) # f is a file descriptor so need to close it # carefully -- not with .close() directly os.close(f) else: fname = source_fn if not isinstance(source, str): source = str(source, 'utf-8') try: with open(fname, 'w') as f: f.write(source) args = ['-c', '-m', modulename, f.name] if isinstance(extra_args, str): is_posix = (os.name == 'posix') extra_args = shlex.split(extra_args, posix=is_posix) args.extend(extra_args) c = [sys.executable, '-c', 'import numpy.f2py as f2py2e;f2py2e.main()'] + args try: cp = subprocess.run(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) except OSError: # preserve historic status code used by exec_command() cp = subprocess.CompletedProcess(c, 127, stdout='', stderr='') if verbose: print(cp.stdout.decode()) finally: if source_fn is None: os.remove(fname) if full_output: return cp else: return cp.returncode
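A hedged example of driving numpy.f2py.compile; it needs a working Fortran compiler on PATH, so treat it as a sketch of the call signature rather than something guaranteed to build on every machine.

import numpy.f2py

fsource = '''
      subroutine addone(x, y)
      double precision x, y
cf2py intent(out) y
      y = x + 1.0d0
      end
'''
status = numpy.f2py.compile(fsource, modulename='addone_mod', verbose=False)
print(status)  # 0 on success; with full_output=True a CompletedProcess is returned instead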
39,636
def prepare_data(source_fnames: List[str], target_fname: str, source_vocabs: List[vocab.Vocab], target_vocab: vocab.Vocab, source_vocab_paths: List[Optional[str]], target_vocab_path: Optional[str], shared_vocab: bool, max_seq_len_source: int, max_seq_len_target: int, bucketing: bool, bucket_width: int, samples_per_shard: int, min_num_shards: int, output_prefix: str, bucket_scaling: bool = True, keep_tmp_shard_files: bool = False, max_processes: int = None): logger.info("Preparing data.") # write vocabularies to data folder vocab.save_source_vocabs(source_vocabs, output_prefix) vocab.save_target_vocab(target_vocab, output_prefix) # Pass 1: get target/source length ratios. length_statistics = analyze_sequence_lengths(source_fnames, target_fname, source_vocabs, target_vocab, max_seq_len_source, max_seq_len_target) check_condition(length_statistics.num_sents > 0, "No training sequences found with length smaller or equal than the maximum sequence length." "Consider increasing %s" % C.TRAINING_ARG_MAX_SEQ_LEN) # define buckets buckets = define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width, bucket_scaling, length_statistics.length_ratio_mean) if bucketing else [(max_seq_len_source, max_seq_len_target)] logger.info("Buckets: %s", buckets) # Pass 2: Randomly assign data to data shards # no pre-processing yet, just write the sentences to different files num_shards = get_num_shards(length_statistics.num_sents, samples_per_shard, min_num_shards) logger.info("%d samples will be split into %d shard(s) (requested samples/shard=%d, min_num_shards=%d)." % (length_statistics.num_sents, num_shards, samples_per_shard, min_num_shards)) shards, data_statistics = shard_data(source_fnames=source_fnames, target_fname=target_fname, source_vocabs=source_vocabs, target_vocab=target_vocab, num_shards=num_shards, buckets=buckets, length_ratio_mean=length_statistics.length_ratio_mean, length_ratio_std=length_statistics.length_ratio_std, output_prefix=output_prefix) data_statistics.log() data_loader = RawParallelDatasetLoader(buckets=buckets, eos_id=C.EOS_ID, pad_id=C.PAD_ID) # 3. 
convert each shard to serialized ndarrays if not max_processes: logger.info("Processing shards sequentily.") # Process shards sequantially woithout using multiprocessing for shard_idx, (shard_sources, shard_target, shard_stats) in enumerate(shards): process_shard(shard_idx, data_loader, shard_sources, shard_target, shard_stats, output_prefix, keep_tmp_shard_files) else: logger.info(f"Processing shards using {max_processes} processes.") # Process shards in parallel using max_processes process results = [] pool = multiprocessing.pool.Pool(processes=max_processes) for shard_idx, (shard_sources, shard_target, shard_stats) in enumerate(shards): result = pool.apply_async(process_shard, args=(shard_idx, data_loader, shard_sources, shard_target, shard_stats, output_prefix, keep_tmp_shard_files)) results.append(result) pool.close() pool.join() for result in results: if not result.successful(): logger.error("Process ended in error.") raise RuntimeError("Shard processing fail") data_info = DataInfo(sources=[os.path.abspath(fname) for fname in source_fnames], target=os.path.abspath(target_fname), source_vocabs=source_vocab_paths, target_vocab=target_vocab_path, shared_vocab=shared_vocab, num_shards=num_shards) data_info_fname = os.path.join(output_prefix, C.DATA_INFO) logger.info("Writing data info to '%s'", data_info_fname) data_info.save(data_info_fname) config_data = DataConfig(data_statistics=data_statistics, max_seq_len_source=max_seq_len_source, max_seq_len_target=max_seq_len_target, num_source_factors=len(source_fnames)) config_data_fname = os.path.join(output_prefix, C.DATA_CONFIG) logger.info("Writing data config to '%s'", config_data_fname) config_data.save(config_data_fname) version_file = os.path.join(output_prefix, C.PREPARED_DATA_VERSION_FILE) with open(version_file, "w") as version_out: version_out.write(str(C.PREPARED_DATA_VERSION))
def prepare_data(source_fnames: List[str], target_fname: str, source_vocabs: List[vocab.Vocab], target_vocab: vocab.Vocab, source_vocab_paths: List[Optional[str]], target_vocab_path: Optional[str], shared_vocab: bool, max_seq_len_source: int, max_seq_len_target: int, bucketing: bool, bucket_width: int, samples_per_shard: int, min_num_shards: int, output_prefix: str, bucket_scaling: bool = True, keep_tmp_shard_files: bool = False, max_processes: int = None): logger.info("Preparing data.") # write vocabularies to data folder vocab.save_source_vocabs(source_vocabs, output_prefix) vocab.save_target_vocab(target_vocab, output_prefix) # Pass 1: get target/source length ratios. length_statistics = analyze_sequence_lengths(source_fnames, target_fname, source_vocabs, target_vocab, max_seq_len_source, max_seq_len_target) check_condition(length_statistics.num_sents > 0, "No training sequences found with length smaller or equal than the maximum sequence length." "Consider increasing %s" % C.TRAINING_ARG_MAX_SEQ_LEN) # define buckets buckets = define_parallel_buckets(max_seq_len_source, max_seq_len_target, bucket_width, bucket_scaling, length_statistics.length_ratio_mean) if bucketing else [(max_seq_len_source, max_seq_len_target)] logger.info("Buckets: %s", buckets) # Pass 2: Randomly assign data to data shards # no pre-processing yet, just write the sentences to different files num_shards = get_num_shards(length_statistics.num_sents, samples_per_shard, min_num_shards) logger.info("%d samples will be split into %d shard(s) (requested samples/shard=%d, min_num_shards=%d)." % (length_statistics.num_sents, num_shards, samples_per_shard, min_num_shards)) shards, data_statistics = shard_data(source_fnames=source_fnames, target_fname=target_fname, source_vocabs=source_vocabs, target_vocab=target_vocab, num_shards=num_shards, buckets=buckets, length_ratio_mean=length_statistics.length_ratio_mean, length_ratio_std=length_statistics.length_ratio_std, output_prefix=output_prefix) data_statistics.log() data_loader = RawParallelDatasetLoader(buckets=buckets, eos_id=C.EOS_ID, pad_id=C.PAD_ID) # 3. 
convert each shard to serialized ndarrays if not max_processes: logger.info("Processing shards sequentially.") # Process shards sequantially woithout using multiprocessing for shard_idx, (shard_sources, shard_target, shard_stats) in enumerate(shards): process_shard(shard_idx, data_loader, shard_sources, shard_target, shard_stats, output_prefix, keep_tmp_shard_files) else: logger.info(f"Processing shards using {max_processes} processes.") # Process shards in parallel using max_processes process results = [] pool = multiprocessing.pool.Pool(processes=max_processes) for shard_idx, (shard_sources, shard_target, shard_stats) in enumerate(shards): result = pool.apply_async(process_shard, args=(shard_idx, data_loader, shard_sources, shard_target, shard_stats, output_prefix, keep_tmp_shard_files)) results.append(result) pool.close() pool.join() for result in results: if not result.successful(): logger.error("Process ended in error.") raise RuntimeError("Shard processing fail") data_info = DataInfo(sources=[os.path.abspath(fname) for fname in source_fnames], target=os.path.abspath(target_fname), source_vocabs=source_vocab_paths, target_vocab=target_vocab_path, shared_vocab=shared_vocab, num_shards=num_shards) data_info_fname = os.path.join(output_prefix, C.DATA_INFO) logger.info("Writing data info to '%s'", data_info_fname) data_info.save(data_info_fname) config_data = DataConfig(data_statistics=data_statistics, max_seq_len_source=max_seq_len_source, max_seq_len_target=max_seq_len_target, num_source_factors=len(source_fnames)) config_data_fname = os.path.join(output_prefix, C.DATA_CONFIG) logger.info("Writing data config to '%s'", config_data_fname) config_data.save(config_data_fname) version_file = os.path.join(output_prefix, C.PREPARED_DATA_VERSION_FILE) with open(version_file, "w") as version_out: version_out.write(str(C.PREPARED_DATA_VERSION))
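The parallel shard-processing branch above follows a standard multiprocessing pattern; the isolated, runnable sketch below shows the same submit / close / join / successful() sequence without any of the project-specific pieces.

import multiprocessing.pool

def work(i):
    return i * i

if __name__ == "__main__":
    pool = multiprocessing.pool.Pool(processes=2)
    results = [pool.apply_async(work, args=(i,)) for i in range(4)]
    pool.close()
    pool.join()
    for result in results:
        if not result.successful():
            raise RuntimeError("Shard processing failed")
    print([result.get() for result in results])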
42,823
def path_update_prompt(config): """ Ask user if they'd like to update the backup path or not. If yes, update. If no... don't. """ current_path = config["backup_path"] print_path_blue("Current shallow-backup path:", current_path) if prompt_yes_no("Would you like to update this?", Fore.GREEN): while True: print_green_bold("Enter relative or absolute path:") abs_path = expand_to_abs_path(input()) if os.path.isfile(abs_path): print_path_red('New path is an existing file:', abs_path) print_red_bold('Please enter a directory.\n') continue print_path_blue("\nUpdating shallow-backup path to:", abs_path) mkdir_warn_overwrite(abs_path) move_git_repo(current_path, abs_path) config["backup_path"] = abs_path write_config(config) break
def path_update_prompt(config): """ Ask user if they'd like to update the backup path or not. If yes, update. If no... don't. """ current_path = config["backup_path"] print_path_blue("Current shallow-backup path:", current_path) if prompt_yes_no("Would you like to update this?", Fore.GREEN): while True: print_green_bold("Enter relative or absolute path:") abs_path = expand_to_abs_path(input()) if os.path.isfile(abs_path): print_path_red('New path is an existing file:', abs_path) print_red_bold('Please enter a directory.\n') continue print_path_blue("\nUpdating shallow-backup path to:", abs_path) mkdir_warn_overwrite(abs_path) move_git_repo(current_path, abs_path) config["backup_path"] = abs_path write_config(config) return
47,354
def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: from .integrations import is_wandb_available assert is_wandb_available(), "This function needs wandb installed: `pip " "install wandb`" import wandb # add WandbCallback if not already added in trainer callbacks reporting_to_wandb = False for callback in trainer.callback_handler.callbacks: if isinstance(callback, WandbCallback): reporting_to_wandb = True break if not reporting_to_wandb: trainer.add_callback(WandbCallback()) trainer.args.report_to = "wandb" best_trial = {"run_id": None, "objective": None, "hyperparameters": None} sweep_id = kwargs.pop("sweep_id", None) project = kwargs.pop("project", None) name = kwargs.pop("name", None) entity = kwargs.pop("entity", None) metric = kwargs.pop("metric", None) or "eval/loss" sweep_config = trainer.hp_space(None) sweep_config["metric"]["goal"] = direction sweep_config["metric"]["name"] = metric if name: sweep_config["name"] = name def _objective(): run = wandb.run if wandb.run else wandb.init() trainer.state.trial_name = run.name run.config.update({"assignments": {}, "metric": metric}) config = wandb.config trainer.objective = None trainer.train(resume_from_checkpoint=None, trial=vars(config)["_items"]) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) format_metrics = rewrite_logs(metrics) if metric not in format_metrics: logger.warning( f"Provided metric {metric} not found. This might result in expected sweeps charts. The available metrics are {format_metrics.keys()}" ) best_score = False if best_trial["run_id"] is not None: if direction == "minimize": best_score = trainer.objective < best_trial["objective"] elif direction == "maximize": best_score = trainer.objective > best_trial["objective"] if best_score or best_trial["run_id"] is None: best_trial["run_id"] = run.id best_trial["objective"] = trainer.objective best_trial["hyperparameters"] = dict(config) return trainer.objective sweep_id = wandb.sweep(sweep_config, project=project, entity=entity) if not sweep_id else sweep_id logger.info(f"wandb sweep id - {sweep_id}") wandb.agent(sweep_id, function=_objective, count=n_trials) return BestRun(best_trial["run_id"], best_trial["objective"], best_trial["hyperparameters"])
def run_hp_search_wandb(trainer, n_trials: int, direction: str, **kwargs) -> BestRun: from .integrations import is_wandb_available assert is_wandb_available(), "This function needs wandb installed: `pip " "install wandb`" import wandb # add WandbCallback if not already added in trainer callbacks reporting_to_wandb = False for callback in trainer.callback_handler.callbacks: if isinstance(callback, WandbCallback): reporting_to_wandb = True break if not reporting_to_wandb: trainer.add_callback(WandbCallback()) trainer.args.report_to = "wandb" best_trial = {"run_id": None, "objective": None, "hyperparameters": None} sweep_id = kwargs.pop("sweep_id", None) project = kwargs.pop("project", None) name = kwargs.pop("name", None) entity = kwargs.pop("entity", None) metric = kwargs.pop("metric", "eval/loss") sweep_config = trainer.hp_space(None) sweep_config["metric"]["goal"] = direction sweep_config["metric"]["name"] = metric if name: sweep_config["name"] = name def _objective(): run = wandb.run if wandb.run else wandb.init() trainer.state.trial_name = run.name run.config.update({"assignments": {}, "metric": metric}) config = wandb.config trainer.objective = None trainer.train(resume_from_checkpoint=None, trial=vars(config)["_items"]) # If there hasn't been any evaluation during the training loop. if getattr(trainer, "objective", None) is None: metrics = trainer.evaluate() trainer.objective = trainer.compute_objective(metrics) format_metrics = rewrite_logs(metrics) if metric not in format_metrics: logger.warning( f"Provided metric {metric} not found. This might result in expected sweeps charts. The available metrics are {format_metrics.keys()}" ) best_score = False if best_trial["run_id"] is not None: if direction == "minimize": best_score = trainer.objective < best_trial["objective"] elif direction == "maximize": best_score = trainer.objective > best_trial["objective"] if best_score or best_trial["run_id"] is None: best_trial["run_id"] = run.id best_trial["objective"] = trainer.objective best_trial["hyperparameters"] = dict(config) return trainer.objective sweep_id = wandb.sweep(sweep_config, project=project, entity=entity) if not sweep_id else sweep_id logger.info(f"wandb sweep id - {sweep_id}") wandb.agent(sweep_id, function=_objective, count=n_trials) return BestRun(best_trial["run_id"], best_trial["objective"], best_trial["hyperparameters"])
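For context, trainer.hp_space(None) is expected to return a wandb sweep configuration like the one sketched below; the hyperparameter ranges are invented, and the metric name matches the 'eval/loss' default used above.

def wandb_hp_space(trial):
    # Sweep configuration in the wandb schema; the "metric" entry is
    # overridden by run_hp_search_wandb itself.
    return {
        "method": "random",
        "metric": {"name": "eval/loss", "goal": "minimize"},
        "parameters": {
            "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
            "per_device_train_batch_size": {"values": [16, 32, 64]},
        },
    }

In practice such a space is usually handed to trainer.hyperparameter_search(direction="minimize", backend="wandb", hp_space=wandb_hp_space, n_trials=...), which in turn calls the function above.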
57,486
def test_secretstr_idempotent(): class Foobar(BaseModel): password: SecretStr # Should not raise an exception _ = Foobar(password=SecretStr('1234'))
def test_secretstr_idempotent(): class Foobar(BaseModel): password: SecretStr # Should not raise an exception m = Foobar(password=SecretStr('1234')) assert m.password.get_secret_value() == '1234'
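A small, runnable illustration of the SecretStr semantics this test relies on (assuming pydantic is installed): the value is masked everywhere except through get_secret_value().

from pydantic import BaseModel, SecretStr

class Credentials(BaseModel):
    password: SecretStr

c = Credentials(password=SecretStr('1234'))
print(c.password)                     # **********
print(c.password.get_secret_value())  # 1234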
44,347
def steady_state(lindblad, sparse=None, method="ed", rho0=None, **kwargs): r"""Computes the numerically exact steady-state of a lindblad master equation. The computation is performed either through the exact diagonalization of the hermitian L^\dagger L matrix, or by means of an iterative solver (bicgstabl) targeting the solution of the non-hermitian system L\rho = 0 && \Tr[\rho] = 1. Note that for systems with 7 or more sites it is usually computationally impossible to build the full lindblad operator and therefore only `iterative` will work. Note that for systems with hilbert spaces with dimensions above 40k, tol should be set to a lower value if the steady state has non-trivial correlations. Args: lindblad: The lindbladian encoding the master equation. sparse: Whever to use sparse matrices (default: False for ed, True for iterative) method: 'ed' (exact diagonalization) or 'iterative' (iterative bicgstabl) rho0: starting density matrix for the iterative diagonalization (default: None) kwargs...: additional kwargs passed to bicgstabl Optional args for iterative: For full docs please consult SciPy documentation at https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicgstab.html maxiter: maximum number of iterations for the iterative solver (default: None) tol: The precision for the calculation (default: 1e-05) callback: User-supplied function to call after each iteration. It is called as callback(xk), where xk is the current solution vector Returns: The steady-state density matrix. """ from numpy import sqrt, array M = lindblad.hilbert.physical.n_states if method == "ed": if sparse is None: sparse = False if not sparse: from numpy.linalg import eigh lind_mat = lindblad.to_dense() ldagl = lind_mat.conj().T * lind_mat w, v = eigh(ldagl) else: from scipy.sparse.linalg import eigsh lind_mat = lindblad.to_sparse() ldagl = lind_mat.H * lind_mat w, v = eigsh(ldagl, which="SM", k=2) print("Minimum eigenvalue is: ", w[0]) rho = array(v[:, 0].reshape((M, M))) rho = rho / rho.trace() elif method == "iterative": if sparse is None: sparse = True # An extra row is added at the bottom of the therefore M^2+1 long array, # with the trace of the density matrix. This is needed to enforce the # trace-1 condition. L = lindblad.to_linear_operator(sparse=sparse, append_trace=True) # Initial density matrix ( + trace condition) Lrho_start = np.zeros((M ** 2 + 1), dtype=L.dtype) if rho0 is None: Lrho_start[0] = 1.0 Lrho_start[-1] = 1.0 else: Lrho_start[:-1] = rho0.reshape(-1) Lrho_start[-1] = rho0.trace() # Target residual (everything 0 and trace 1) Lrho_target = np.zeros((M ** 2 + 1), dtype=L.dtype) Lrho_target[-1] = 1.0 # Iterative solver print("Starting iterative solver...") res, info = bicgstab(L, Lrho_target, x0=Lrho_start, **kwargs) rho = res[:-1].reshape((M, M)) if info == 0: print("Converged trace is ", rho.trace()) elif info > 0: print("Failed to converge after ", info, " ( trace is ", rho.trace(), " )") elif info < 0: print("An error occured: ", info) else: raise ValueError("method must be 'ed' or 'iterative'") return rho
def steady_state(lindblad, sparse=None, method="ed", rho0=None, **kwargs): r"""Computes the numerically exact steady-state of a lindblad master equation. The computation is performed either through the exact diagonalization of the hermitian L^\dagger L matrix, or by means of an iterative solver (bicgstabl) targeting the solution of the non-hermitian system L\rho = 0 && \Tr[\rho] = 1. Note that for systems with 7 or more sites it is usually computationally impossible to build the full lindblad operator and therefore only `iterative` will work. Note that for systems with hilbert spaces with dimensions above 40k, tol should be set to a lower value if the steady state has non-trivial correlations. Args: lindblad: The lindbladian encoding the master equation. sparse: Whever to use sparse matrices (default: False for ed, True for iterative) method: 'ed' (exact diagonalization) or 'iterative' (iterative bicgstabl) rho0: starting density matrix for the iterative diagonalization (default: None) kwargs...: additional kwargs passed to bicgstabl Optional args for iterative: For full docs please consult SciPy documentation at https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.bicgstab.html maxiter: maximum number of iterations for the iterative solver (default: None) tol: The precision for the calculation (default: 1e-05) callback: User-supplied function to call after each iteration. It is called as callback(xk), where xk is the current solution vector Returns: The steady-state density matrix. """ from numpy import sqrt, array M = lindblad.hilbert.physical.n_states if method == "ed": if sparse is None: sparse = False if not sparse: from numpy.linalg import eigh lind_mat = lindblad.to_dense() ldagl = lind_mat.conj().T * lind_mat w, v = eigh(ldagl) else: from scipy.sparse.linalg import eigsh lind_mat = lindblad.to_sparse() ldagl = lind_mat.H * lind_mat w, v = eigsh(ldagl, which="SM", k=2) print("Minimum eigenvalue is: ", w[0]) rho = v[:, 0].reshape((M, M)) rho = rho / rho.trace() elif method == "iterative": if sparse is None: sparse = True # An extra row is added at the bottom of the therefore M^2+1 long array, # with the trace of the density matrix. This is needed to enforce the # trace-1 condition. L = lindblad.to_linear_operator(sparse=sparse, append_trace=True) # Initial density matrix ( + trace condition) Lrho_start = np.zeros((M ** 2 + 1), dtype=L.dtype) if rho0 is None: Lrho_start[0] = 1.0 Lrho_start[-1] = 1.0 else: Lrho_start[:-1] = rho0.reshape(-1) Lrho_start[-1] = rho0.trace() # Target residual (everything 0 and trace 1) Lrho_target = np.zeros((M ** 2 + 1), dtype=L.dtype) Lrho_target[-1] = 1.0 # Iterative solver print("Starting iterative solver...") res, info = bicgstab(L, Lrho_target, x0=Lrho_start, **kwargs) rho = res[:-1].reshape((M, M)) if info == 0: print("Converged trace is ", rho.trace()) elif info > 0: print("Failed to converge after ", info, " ( trace is ", rho.trace(), " )") elif info < 0: print("An error occured: ", info) else: raise ValueError("method must be 'ed' or 'iterative'") return rho
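The iterative branch appends a trace row so that L rho = 0 and Tr[rho] = 1 are solved as a single linear system. The toy below reproduces that trick with plain numpy on a random singular matrix standing in for the vectorized Lindbladian; it illustrates the idea only and does not use netket's API.

import numpy as np

M = 3
rng = np.random.default_rng(1)
L = rng.standard_normal((M * M, M * M))
null = rng.standard_normal(M * M)
L -= np.outer(L @ null, null) / (null @ null)   # force a non-trivial null space

trace_row = np.eye(M).reshape(1, -1)            # Tr[rho] as a row acting on vec(rho)
A = np.vstack([L, trace_row])
b = np.zeros(M * M + 1)
b[-1] = 1.0                                     # enforce trace 1

rho_vec, *_ = np.linalg.lstsq(A, b, rcond=None)
rho = rho_vec.reshape(M, M)
print(np.trace(rho))                            # ~1.0 by construction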
56,918
def _strip_spoiler(filename: str) -> Tuple[str, bool]: spoiler = False while filename.startswith('SPOILER_'): spoiler = True filename = filename[8:] return filename, spoiler
def _strip_spoiler(filename: str) -> Tuple[str, bool]: stripped = filename while stripped.startswith('SPOILER_'): stripped = stripped[len('SPOILER_'):] spoiler = stripped != filename return stripped, spoiler
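Quick checks of the prefix-stripping helper above; they assume Tuple has been imported from typing in the same module.

print(_strip_spoiler('SPOILER_SPOILER_cat.png'))  # ('cat.png', True)
print(_strip_spoiler('cat.png'))                  # ('cat.png', False)
print(_strip_spoiler('RESPONSE.txt'))             # ('RESPONSE.txt', False): only the literal prefix is removed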