column                  dtype          min   max
body_hash               stringlengths  64    64
body                    stringlengths  23    109k
docstring               stringlengths  1     57k
path                    stringlengths  4     198
name                    stringlengths  1     115
repository_name         stringlengths  7     111
repository_stars        float64        0     191k
lang                    stringclasses  1 value
body_without_docstring  stringlengths  14    108k
unified                 stringlengths  45    133k
a2213862f26d42b4d72b5048e200dc1a42660be06bb04300025ef44ca441e6ec
def tract_to_cd(self, df):
    """tract to cd"""
    df = df.merge(
        self.lookup_geo[['geoid_tract', 'cd']].drop_duplicates(),
        how='left', right_on='geoid_tract', left_on='census_geoid',
    )
    output = AggregatedGeography.create_output(df, 'cd')
    output['pff_variable'] = df['pff_variable'].to_list()[0]
    output['geotype'] = 'cd'
    return output[['census_geoid', 'pff_variable', 'geotype', 'e', 'm']]
tract to cd
factfinder/geography/2010.py
tract_to_cd
EricaMaurer/db-factfinder
0
python
e626566b8975edc5fedb3b42ef1440251a4f8c11a59b6f52e80a6b5d0f942bee
def after_iter(self):
    """
    `after_iter` contains two parts of logic:
    * log information
    * reset setting of resize
    """
    if (self.iter + 1) % self.exp.print_interval == 0:
        left_iters = self.max_iter * self.max_epoch - (self.progress_in_iter + 1)
        eta_seconds = self.meter['iter_time'].global_avg * left_iters
        eta_str = 'ETA: {}'.format(datetime.timedelta(seconds=int(eta_seconds)))
        progress_str = 'epoch: {}/{}, iter: {}/{}'.format(
            self.epoch + 1, self.max_epoch, self.iter + 1, self.max_iter
        )
        loss_meter = self.meter.get_filtered_meter('loss')
        loss_str = ', '.join(
            ['{}: {:.1f}'.format(k, v.latest) for (k, v) in loss_meter.items()]
        )
        time_meter = self.meter.get_filtered_meter('time')
        time_str = ', '.join(
            ['{}: {:.3f}s'.format(k, v.avg) for (k, v) in time_meter.items()]
        )
        logger.info(
            '{}, mem: {:.0f}Mb, {}, {}, lr: {:.3e}'.format(
                progress_str, gpu_mem_usage(), time_str, loss_str,
                self.meter['lr'].latest,
            )
            + ', size: {:d}, {}'.format(self.input_size[0], eta_str)
        )
        self.meter.clear_meters()
    if self.exp.random_size is not None and (self.progress_in_iter + 1) % 10 == 0:
        self.input_size = self.exp.random_resize(
            self.train_loader, self.epoch, self.rank, self.is_distributed
        )
`after_iter` contains two parts of logic: * log information * reset setting of resize
yolox/core/trainer.py
after_iter
willy541222/YOLOX
0
python
530cd284c057c3022cc54405acd616b1202d452e21be0476c1ed9864de67ffef
def model_map_util(map_model, data):
    """
    Recursive map method based on a previously defined model
    Args:
        map_model: dict with the desired model, whose values name the data wanted
        data: payload with the raw data

    Returns:
        dict shaped in the specified form
    """
    for (k, v) in map_model.items():
        if isinstance(v, dict):
            map_model[k] = model_map_util(v, data)
        elif isinstance(v, list):
            map_model[k] = []
            for x in v:
                map_model[k].append(model_map_util(x, data))
        else:
            try:
                if is_string_value(v):
                    map_model[k] = get_value_string(v)
                else:
                    map_model[k] = get_values(v.split('.'), data)
            except AttributeError:
                map_model[k] = v
            except TypeError:
                map_model[k] = v
    return map_model
Recursive map method based on a previously defined model Args: map_model: dict with the desired model, whose values name the data wanted data: payload with the raw data Returns: dict shaped in the specified form
src/middleware-kafka/utils/rules_util.py
model_map_util
LeoNog96/IntegradorRedmine
0
python
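A minimal usage sketch for model_map_util, assuming the function above is in scope. The real helpers is_string_value, get_value_string, and get_values live elsewhere in rules_util.py; the stand-ins below are hypothetical, and the '$' convention for literal values is an assumption made only for this illustration:

import copy

def is_string_value(v):
    # hypothetical stand-in: treat strings starting with '$' as literal values
    return isinstance(v, str) and v.startswith('$')

def get_value_string(v):
    # hypothetical stand-in: strip the '$' marker
    return v.lstrip('$')

def get_values(keys, data):
    # same traversal as the real helper shown in the next record
    for key in keys:
        data = data.get(key)
    return data

payload = {'issue': {'id': 42, 'status': {'name': 'open'}}}
model = {'ticket_id': 'issue.id', 'state': 'issue.status.name', 'source': '$redmine'}
# the model is mutated in place, so copy it if the template must be reused
print(model_map_util(copy.deepcopy(model), payload))
# {'ticket_id': 42, 'state': 'open', 'source': 'redmine'}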
4a08c94a4ea0b3bf4a75e96a87c3ad064bce72da2b18e83e2b2fb11f06141b9f
def get_values(string, dictionary):
    """
    Method to fetch values from a dict regardless of the hierarchy level at which they sit
    Args:
        string: desired level. ex: teste.objeto.id
        dictionary: dict that contains the desired value

    Returns:
        Returns the value of the specified key
    """
    new_dictionary = dictionary
    for x in string:
        new_dictionary = new_dictionary.get(x)
    return new_dictionary
Method to fetch values from a dict regardless of the hierarchy level at which they sit Args: string: desired level. ex: teste.objeto.id dictionary: dict that contains the desired value Returns: Returns the value of the specified key
src/middleware-kafka/utils/rules_util.py
get_values
LeoNog96/IntegradorRedmine
0
python
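A quick check of get_values with the dot path from its own docstring (a sketch assuming the function above is in scope; note the caller does the split):

data = {'teste': {'objeto': {'id': 7}}}
print(get_values('teste.objeto.id'.split('.'), data))  # 7

# Caveat: if an intermediate key is missing, dict.get returns None and the
# next level raises AttributeError (None has no .get); model_map_util above
# relies on catching exactly that AttributeError.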
e87e39940d9d9c854a5404865df901f8528b2c926dc1f66c6123c0e8ce938dcb
def post(self, request, *args, **kwargs):
    """
    Activates a token

    :param request:
    :type request:
    :param args:
    :type args:
    :param kwargs:
    :type kwargs:
    :return: 200 / 400
    :rtype:
    """
    serializer = self.get_serializer(data=self.request.data)
    if not serializer.is_valid():
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    token = serializer.validated_data['token']
    token.active = True
    token.user_validator = None
    token.save()
    return Response({
        'user': {
            'id': request.user.id,
            'authentication': 'AUTHKEY',
            'email': decrypt_with_db_secret(request.user.email),
            'secret_key': request.user.secret_key,
            'secret_key_nonce': request.user.secret_key_nonce,
        }
    }, status=status.HTTP_200_OK)
Activates a token :param request: :type request: :param args: :type args: :param kwargs: :type kwargs: :return: 200 / 400 :rtype:
psono/restapi/views/activate_token.py
post
dirigeant/psono-server
48
python
7970f90afb13ac84f3f6f16dde2b23aa6c458608cafcf322fb58a33c585df4d3
def check_max_depth(self):
    """
    check the maximum subdirectory depth (relative to the PlateMicroscopy dir)
    Depth is three for 'plate_dir/exp_dir/sortday_dir/'
    Depth is two for either 'plate_dir/exp_dir/' or 'plate_dir/PublicationQuality/'
    """
    maxx = 0
    for row in self.os_walk:
        (path, subdirs, filenames) = row
        filenames = [name for name in filenames if '.tif' in name]
        if not filenames:
            continue
        maxx = max(maxx, len(path.replace(self.root_dir, '').split(os.sep)))
    return maxx
check the maximum subdirectory depth (relative to the PlateMicroscopy dir) Depth is three for 'plate_dir/exp_dir/sortday_dir/' Depth is two for either 'plate_dir/exp_dir/' or 'plate_dir/PublicationQuality/'
opencell/imaging/managers.py
check_max_depth
czbiohub/opencell-portal-pub
2
python
e16593e490848904791385c58bdf767a1ef5a357a9165e4a1e014bcb6c2f3139
@staticmethod
def parse_raw_tiff_filename(filename):
    """
    Parse well_id, site_num, and target name from a raw TIFF filename

    For almost all filenames, the format is '{well_id}_{site_num}_{target_name}.ome.tif'

    The exception is 'Jin' lines, which appear in plate6 and plate7;
    here, the format is '{well_id}_{site_num}_Jin_{well_id}_{target_name}.ome.tif',
    and it is the first well_id that is the 'real', pipeline-relevant, well_id

    Note that the target name sometimes includes the terminus that was tagged,
    in the form of a trailing '-N', '-C', '_N', '_C', '_Int', '-Int'

    Also, two target names include a trailing '-A' or '-B'
    (these are 'HLA-A' and 'ARHGAP11A-B')
    """
    well_id = '[A-H][1-9][0-2]?'
    site_num = '[1-9][0-9]?'
    target_name = '[a-zA-Z0-9]+'
    appendix = '[\\-|_][a-zA-Z]+'
    raw_pattern = f'^({well_id})_({site_num})_({target_name})({appendix})?.ome.tif$'
    raw_jin_pattern = f'^({well_id})_({site_num})_Jin_(?:{well_id})_({target_name})({appendix})?.ome.tif$'
    filename_was_parsed = False
    for pattern in [raw_pattern, raw_jin_pattern]:
        result = re.match(pattern, filename)
        if result:
            filename_was_parsed = True
            (well_id, site_num, target_name, appendix) = result.groups()
            break
    if not filename_was_parsed:
        return None
    site_num = int(site_num)
    return (well_id, site_num, target_name)
Parse well_id, site_num, and target name from a raw TIFF filename For almost all filenames, the format is '{well_id}_{site_num}_{target_name}.ome.tif' The exception is 'Jin' lines, which appear in plate6 and plate7; here, the format is '{well_id}_{site_num}_Jin_{well_id}_{target_name}.ome.tif', and it is the first well_id that is the 'real', pipeline-relevant, well_id Note that the target name sometimes includes the terminus that was tagged, in the form of a trailing '-N', '-C', '_N', '_C', '_Int', '-Int' Also, two target names include a trailing '-A' or '-B' (these are 'HLA-A' and 'ARHGAP11A-B')
opencell/imaging/managers.py
parse_raw_tiff_filename
czbiohub/opencell-portal-pub
2
python
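A few spot checks of the two filename formats (a sketch; it assumes `import re` and that the staticmethod above is reachable as a plain function, and the target names here are made up for illustration):

print(parse_raw_tiff_filename('B10_3_RAB5A-N.ome.tif'))
# ('B10', 3, 'RAB5A')  -- the '-N' appendix (tagged terminus) is matched but dropped
print(parse_raw_tiff_filename('A1_12_Jin_A1_ATL3.ome.tif'))
# ('A1', 12, 'ATL3')   -- 'Jin' format: the first well_id is the one returned
print(parse_raw_tiff_filename('A1_1_MMStack_Pos0.ome.tif'))
# None                 -- names matching neither pattern return None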
f7f93c68b41971b942e3da2797e92e81dd8c439db7816ec3852dd74991895153
def construct_metadata(self, paths_only=False):
    """Create metadata dataframe from the os.walk results"""
    rows = []
    for row in self.os_walk:
        (path, subdirs, filenames) = row
        filenames = [name for name in filenames if '.tif' in name]
        if not filenames:
            continue
        rel_path = path.replace(self.root_dir, '')
        if not re.match('^mNG96wp[1-9]([0-9])?(_Thawed|/)', rel_path):
            continue
        path_dirs = rel_path.split(os.sep)
        path_info = {('level_%d' % ind): path_dir for (ind, path_dir) in enumerate(path_dirs)}
        (plate_num, imaging_round_num) = self.parse_src_plate_dir(path_dirs[0])
        plate_info = {'plate_num': plate_num, 'imaging_round_num': imaging_round_num}
        if paths_only:
            rows.append({**path_info, **plate_info})
            continue
        for filename in filenames:
            rows.append({'filename': filename, **path_info, **plate_info})
    md = pd.DataFrame(data=rows)
    md = md.rename(columns={'level_0': 'plate_dir', 'level_1': 'exp_dir', 'level_2': 'exp_subdir'})
    md['is_raw'] = md.exp_dir.apply(lambda s: re.match('^ML[0-9]{4}_[0-9]{8}$', s) is not None)
    self.md = md
Create metadata dataframe from the os.walk results
opencell/imaging/managers.py
construct_metadata
czbiohub/opencell-portal-pub
2
python
a63a6a41c110117d3d0d8b93848e5ff3f377d303c7ef7fab0101752ed5693fa3
@staticmethod
def parse_src_plate_dir(src_plate_dir):
    """
    Parse the plate number from a src plate_dir
    Example: 'mNG96wp19' -> '19'
    """
    plate_num = int(re.findall('^mNG96wp([0-9]{1,2})', src_plate_dir.split(os.sep)[0])[0])
    if 'Thawed2' in src_plate_dir:
        imaging_round_num = 3
    elif 'Thawed' in src_plate_dir:
        imaging_round_num = 2
    else:
        imaging_round_num = 1
    return (plate_num, imaging_round_num)
Parse the plate number from a src plate_dir Example: 'mNG96wp19' -> '19'
opencell/imaging/managers.py
parse_src_plate_dir
czbiohub/opencell-portal-pub
2
python
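Spot checks for the three imaging-round cases (a sketch assuming `import re`, `import os`, and the staticmethod above in scope):

print(parse_src_plate_dir('mNG96wp19'))          # (19, 1) -- first imaging round
print(parse_src_plate_dir('mNG96wp19_Thawed'))   # (19, 2)
print(parse_src_plate_dir('mNG96wp19_Thawed2'))  # (19, 3)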
127415b00716c490f63554ef561dca55b01bbfe3f2c4a4e151c273db36ef6573
def construct_raw_metadata(self):
    """Construct the raw metadata (a subset of self.md)"""
    md_raw = self.md.loc[self.md.is_raw].copy()
    for column in ['well_id', 'site_num', 'target_name', 'fov_id']:
        md_raw[column] = None
    dropped_inds = []
    for (ind, row) in md_raw.iterrows():
        result = self.parse_raw_tiff_filename(row.filename)
        if not result:
            if 'MMStack' not in row.filename:
                logger.warning('Unparseable raw filename %s' % row.filename)
            dropped_inds.append(ind)
            continue
        (well_id, site_num, target_name) = result
        md_raw.at[ind, 'site_num'] = site_num
        md_raw.at[ind, 'target_name'] = target_name
        (well_row, well_col) = re.match('([A-H])([0-9]{1,2})', well_id).groups()
        md_raw.at[ind, 'well_id'] = '%s%02d' % (well_row, int(well_col))
    logger.warning('Dropping %s rows of unparseable raw metadata' % len(dropped_inds))
    md_raw.drop(dropped_inds, inplace=True)
    md_raw['parental_line'] = constants.PARENTAL_LINE_NAME
    md_raw['ep_id'] = 'EP01'
    md_raw['exp_id'] = [exp_dir.split('_')[0] for exp_dir in md_raw.exp_dir]
    md_raw['pml_id'] = [f'P{ml_id}' for ml_id in md_raw.exp_id]
    md_raw['site_id'] = ['S%02d' % int(num) for num in md_raw.site_num]
    md_raw['plate_id'] = ['P%04d' % num for num in md_raw.plate_num]
    md_raw['imaging_round_id'] = ['R%02d' % num for num in md_raw.imaging_round_num]
    for (ind, row) in md_raw.iterrows():
        md_raw.at[ind, 'raw_filepath'] = os.path.join(
            row.plate_dir,
            row.exp_dir,
            row.exp_subdir if not pd.isna(row.exp_subdir) else '',
            row.filename,
        )
    for (ind, row) in md_raw.iterrows():
        md_raw.at[ind, 'fov_id'] = '%s-%s-%s' % (row.exp_id, row.well_id, row.site_id)
    n = md_raw.groupby('fov_id').count()
    degenerate_fov_ids = n.loc[n.filename > 1].index
    md_raw = md_raw.loc[~md_raw.fov_id.isin(degenerate_fov_ids)]
    logger.warning('Dropping non-unique fov_ids %s' % list(degenerate_fov_ids))
    self.md_raw = md_raw
Construct the raw metadata (a subset of self.md)
opencell/imaging/managers.py
construct_raw_metadata
czbiohub/opencell-portal-pub
2
python
a6d19fc7a8f1050b91df166ffbda98ccb88e22c46e50f344877194eccd2c4198
def append_file_info(self):
    """
    Append the file size and creation time to the metadata
    (requires that the partition be mounted)
    """
    if not os.path.isdir(self.root_dir):
        logger.warning('Cannot determine file info unless the partition is mounted')
        return
    md = self.md.replace(to_replace=np.nan, value='')
    for (ind, row) in md.iterrows():
        if not np.mod(ind, 10000):
            print(ind)
        s = os.stat(os.path.join(self.root_dir, row.plate_dir, row.exp_dir, row.exp_subdir, row.filename))
        md.at[ind, 'filesize'] = s.st_size
        md.at[ind, 'ctime'] = s.st_ctime
    self.md = md
Append the file size and creation time to the metadata (requires that the partition be mounted)
opencell/imaging/managers.py
append_file_info
czbiohub/opencell-portal-pub
2
python
08e8ba9fa5e9cc92b58108e4078cc1b71b6dec2905baac34109407b37a61618d
def test_create_valid_user_success(self):
    """Test creating user with valid payload is successful"""
    payload = {'email': '[email protected]', 'password': 'testpass', 'name': 'Test name'}
    res = self.client.post(CREATE_USER_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_201_CREATED)
    user = get_user_model().objects.get(**res.data)
    self.assertTrue(user.check_password(payload['password']))
    self.assertNotIn('password', res.data)
Test creating user with valid payload is successful
app/user/test/test_user_api.py
test_create_valid_user_success
mandeepdhillon01/recipe-app-api
0
python
7e5bcd300e29b4c7c2bc1c2d57c32bf58b16f90be81f933d7e39cc53171b3d00
def test_user_exists(self):
    """test creating a user that already exists (fails)"""
    payload = {'email': '[email protected]', 'password': 'testpass'}
    create_user(**payload)
    res = self.client.post(CREATE_USER_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
test creating a user that already exists (fails)
app/user/test/test_user_api.py
test_user_exists
mandeepdhillon01/recipe-app-api
0
python
ee6a088135fd4e74d3c8268d34dbec10ed7bff1347d1c3ac6875bfeb925c278b
def test_password_too_Short(self):
    """test password must be more than 5 characters"""
    payload = {'email': '[email protected]', 'password': 'pw'}
    res = self.client.post(CREATE_USER_URL, payload)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
    user_exists = get_user_model().objects.filter(email=payload['email']).exists()
    self.assertFalse(user_exists)
test password must be more than 5 characters
app/user/test/test_user_api.py
test_password_too_Short
mandeepdhillon01/recipe-app-api
0
python
7f334e4809941ba0e10812faaff6b4bafc38c0b3631e240220f6a26063392248
def test_create_token_for_user(self):
    """Test that a token is created for the user"""
    payload = {'email': '[email protected]', 'password': 'testpass'}
    create_user(**payload)
    res = self.client.post(TOKEN_URL, payload)
    self.assertIn('token', res.data)
    self.assertEqual(res.status_code, status.HTTP_200_OK)
Test that a token is created for the user
app/user/test/test_user_api.py
test_create_token_for_user
mandeepdhillon01/recipe-app-api
0
python
cf4fdbc7c0250b9ae2fdde7a39604f74e97ecf649eaaec1ef378c8ae55224ef1
def test_create_token_invalid_credentials(self):
    """Test that token is not created if invalid credentials are given"""
    create_user(email='[email protected]', password='testpass')
    payload = {'email': '[email protected]', 'password': 'wrong'}
    res = self.client.post(TOKEN_URL, payload)
    self.assertNotIn('token', res.data)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test that token is not created if invalid credentials are given
app/user/test/test_user_api.py
test_create_token_invalid_credentials
mandeepdhillon01/recipe-app-api
0
python
f0c56dd21d007bba8e7413288d0d131c294b757a9cbfcdce813dbac6329e5b94
def test_create_token_no_user(self):
    """Test that token is not created if user doesn't exist"""
    payload = {'email': '[email protected]', 'password': 'testpass'}
    res = self.client.post(TOKEN_URL, payload)
    self.assertNotIn('token', res.data)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test that token is not created if user doesn't exist
app/user/test/test_user_api.py
test_create_token_no_user
mandeepdhillon01/recipe-app-api
0
python
8a88ea42e4be0997f62e198cc99550dc219441658b2c1062a81b62ef62e7e5d3
def test_create_token_missing_field(self):
    """Test that email and password are required"""
    res = self.client.post(TOKEN_URL, {'email': 'one', 'password': ''})
    self.assertNotIn('token', res.data)
    self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
Test that email and password are required
app/user/test/test_user_api.py
test_create_token_missing_field
mandeepdhillon01/recipe-app-api
0
python
3729b3b9c9c71f6ce56af0403f5c9035e7bf6d8dad73498043fa4c78015f9a42
def group_lasso_regularizer(X: Tensor, groups: List[List[int]]) -> Tensor:
    """Computes the group lasso regularization function for the given point.

    Args:
        X: A bxd tensor representing the points to evaluate the regularization at.
        groups: List of indices of different groups.

    Returns:
        Computed group lasso norm at the given points.
    """
    return torch.sum(
        torch.stack(
            [math.sqrt(len(g)) * torch.norm(X[..., g], p=2, dim=-1) for g in groups],
            dim=-1,
        ),
        dim=-1,
    )
Computes the group lasso regularization function for the given point. Args: X: A bxd tensor representing the points to evaluate the regularization at. groups: List of indices of different groups. Returns: Computed group lasso norm at the given points.
botorch/acquisition/penalized.py
group_lasso_regularizer
saitcakmak/botorch
2,344
python
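A worked example of the function, assuming the definition above plus the imports it relies on:

import math
from typing import List

import torch
from torch import Tensor

X = torch.tensor([[3.0, 4.0, 2.0]])  # b x d with b=1, d=3
groups = [[0, 1], [2]]
# sqrt(2) * ||(3, 4)|| + sqrt(1) * ||(2,)|| = sqrt(2)*5 + 2 ≈ 9.0711
print(group_lasso_regularizer(X, groups))  # tensor([9.0711])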
b096ef366e3cb765b4c731b36852c99e0a5d4c3f81341d5b4c4e8464e7a0aba9
def __init__(self, init_point: Tensor):
    """Initializing L2 regularization.

    Args:
        init_point: The "1 x dim" reference point against which
            we want to regularize.
    """
    super().__init__()
    self.init_point = init_point
Initializing L2 regularization. Args: init_point: The "1 x dim" reference point against which we want to regularize.
botorch/acquisition/penalized.py
__init__
saitcakmak/botorch
2,344
python
ec07626d17009766fcf8c4fe747a479a859189172b2fe25d1c7359d67506b08c
def forward(self, X: Tensor) -> Tensor:
    """
    Args:
        X: A "batch_shape x q x dim" representing the points to be evaluated.

    Returns:
        A tensor of size "batch_shape" representing the acqfn for each q-batch.
    """
    regularization_term = torch.norm(X - self.init_point, p=2, dim=-1).max(dim=-1).values ** 2
    return regularization_term
Args: X: A "batch_shape x q x dim" representing the points to be evaluated. Returns: A tensor of size "batch_shape" representing the acqfn for each q-batch.
botorch/acquisition/penalized.py
forward
saitcakmak/botorch
2,344
python
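A sketch of evaluating the penalty, assuming this __init__/forward pair belongs to the L2Penalty module in botorch.acquisition.penalized (the path field above):

import torch
from botorch.acquisition.penalized import L2Penalty

penalty = L2Penalty(init_point=torch.zeros(1, 3))
X = torch.tensor([[[3.0, 4.0, 0.0]]])  # batch_shape=1, q=1, dim=3
print(penalty(X))  # tensor([25.]) -- squared L2 distance, maxed over q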
234f16bf58f7591c53c85c0e6418d2bd024e919bfba39bbcfd711f61e3949da5
def __init__(self, init_point: Tensor):
    """Initializing L1 regularization.

    Args:
        init_point: The "1 x dim" reference point against which
            we want to regularize.
    """
    super().__init__()
    self.init_point = init_point
Initializing L1 regularization. Args: init_point: The "1 x dim" reference point against which we want to regularize.
botorch/acquisition/penalized.py
__init__
saitcakmak/botorch
2,344
python
dcbbdb38e254735456937d1fb2662673868cfeab522909abf36922189deac86e
def forward(self, X: Tensor) -> Tensor:
    """
    Args:
        X: A "batch_shape x q x dim" representing the points to be evaluated.

    Returns:
        A tensor of size "batch_shape" representing the acqfn for each q-batch.
    """
    regularization_term = torch.norm(X - self.init_point, p=1, dim=-1).max(dim=-1).values
    return regularization_term
Args: X: A "batch_shape x q x dim" representing the points to be evaluated. Returns: A tensor of size "batch_shape" representing the acqfn for each q-batch.
botorch/acquisition/penalized.py
forward
saitcakmak/botorch
2,344
python
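The same pattern for the L1 version (a sketch, assuming these methods define the L1Penalty module per the path above):

import torch
from botorch.acquisition.penalized import L1Penalty

penalty = L1Penalty(init_point=torch.zeros(1, 3))
X = torch.tensor([[[1.0, -2.0, 0.5]]])
print(penalty(X))  # tensor([3.5000]) -- L1 distance, maxed over the q dimension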
09783f562ce99297075fa1b9bfaa88ba09bef82f06b4fd9d53922a56179b6679
def __init__(self, init_point: Tensor, sigma: float):
    """Initializing Gaussian regularization.

    Args:
        init_point: The "1 x dim" reference point against which
            we want to regularize.
        sigma: The parameter used in gaussian function.
    """
    super().__init__()
    self.init_point = init_point
    self.sigma = sigma
Initializing Gaussian regularization. Args: init_point: The "1 x dim" reference point against which we want to regularize. sigma: The parameter used in gaussian function.
botorch/acquisition/penalized.py
__init__
saitcakmak/botorch
2,344
python
d8e7e504191874dec496334c4308a29a1fe76694ed7718296a2973c48df23ace
def forward(self, X: Tensor) -> Tensor:
    """
    Args:
        X: A "batch_shape x q x dim" representing the points to be evaluated.

    Returns:
        A tensor of size "batch_shape" representing the acqfn for each q-batch.
    """
    sq_diff = torch.norm(X - self.init_point, p=2, dim=-1) ** 2
    pdf = torch.exp(sq_diff / 2 / self.sigma ** 2)
    regularization_term = pdf.max(dim=-1).values
    return regularization_term
Args: X: A "batch_shape x q x dim" representing the points to be evaluated. Returns: A tensor of size "batch_shape" representing the acqfn for each q-batch.
botorch/acquisition/penalized.py
forward
saitcakmak/botorch
2,344
python
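Note that the exponent here is positive, so despite the `pdf` name the term grows with distance from init_point. A quick evaluation sketch, assuming the GaussianPenalty module from botorch.acquisition.penalized:

import torch
from botorch.acquisition.penalized import GaussianPenalty

penalty = GaussianPenalty(init_point=torch.zeros(1, 2), sigma=1.0)
X = torch.tensor([[[1.0, 1.0]]])
# sq_diff = 2, so the term is exp(2 / 2 / 1**2) = e
print(penalty(X))  # tensor([2.7183])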
abe3143986e73453cfd3abe39f0df275557e823dce5275aa2d9580b12cbed905
def __init__(self, init_point: Tensor, groups: List[List[int]]):
    """Initializing Group-Lasso regularization.

    Args:
        init_point: The "1 x dim" reference point against which we want
            to regularize.
        groups: Groups of indices used in group lasso.
    """
    super().__init__()
    self.init_point = init_point
    self.groups = groups
Initializing Group-Lasso regularization. Args: init_point: The "1 x dim" reference point against which we want to regularize. groups: Groups of indices used in group lasso.
botorch/acquisition/penalized.py
__init__
saitcakmak/botorch
2,344
python
9b748132cf48839d42bb27c8a4a4b9f95415ba50764057ae590afe8acecb5ecf
def forward(self, X: Tensor) -> Tensor:
    """
    X should be batch_shape x 1 x dim tensor. Evaluation for q-batch is not
    implemented yet.
    """
    if X.shape[-2] != 1:
        raise NotImplementedError('group-lasso has not been implemented for q>1 yet.')
    regularization_term = group_lasso_regularizer(
        X=X.squeeze(-2) - self.init_point, groups=self.groups
    )
    return regularization_term
X should be batch_shape x 1 x dim tensor. Evaluation for q-batch is not implemented yet.
botorch/acquisition/penalized.py
forward
saitcakmak/botorch
2,344
python
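An evaluation sketch with q=1 (q>1 raises the NotImplementedError above), assuming the GroupLassoPenalty module and the group_lasso_regularizer function shown earlier:

import torch
from botorch.acquisition.penalized import GroupLassoPenalty

penalty = GroupLassoPenalty(init_point=torch.zeros(1, 4), groups=[[0, 1], [2, 3]])
X = torch.tensor([[[3.0, 4.0, 0.0, 1.0]]])  # batch_shape x q x dim, q=1
# sqrt(2)*||(3,4)|| + sqrt(2)*||(0,1)|| = 6*sqrt(2) ≈ 8.4853
print(penalty(X))  # tensor([8.4853])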
b0df18c09595d582b477d218d7166e0f98cd986528ae58490a1a47ecbc4763e6
def __init__(
    self,
    raw_acqf: AcquisitionFunction,
    penalty_func: torch.nn.Module,
    regularization_parameter: float,
) -> None:
    """Initializing Group-Lasso regularization.

    Args:
        raw_acqf: The raw acquisition function that is going to be regularized.
        penalty_func: The regularization function.
        regularization_parameter: Regularization parameter used in optimization.
    """
    super().__init__(model=raw_acqf.model)
    self.raw_acqf = raw_acqf
    self.penalty_func = penalty_func
    self.regularization_parameter = regularization_parameter
Initializing Group-Lasso regularization. Args: raw_acqf: The raw acquisition function that is going to be regularized. penalty_func: The regularization function. regularization_parameter: Regularization parameter used in optimization.
botorch/acquisition/penalized.py
__init__
saitcakmak/botorch
2,344
python
018c2ad035650fde97d697f0f52aecdf3ace9784b72e28664c111f0baa4c389a
def __init__(self, init_point: Tensor):
    """Initializing L1 penalty objective.

    Args:
        init_point: The "1 x dim" reference point against which
            we want to regularize.
    """
    super().__init__()
    self.init_point = init_point
Initializing L1 penalty objective. Args: init_point: The "1 x dim" reference point against which we want to regularize.
botorch/acquisition/penalized.py
__init__
saitcakmak/botorch
2,344
python
9b0d0d06d440223d7be2b5de01987ec4a8069ba65f07ddcc4af3b246d86e745b
def forward(self, X: Tensor) -> Tensor: '\n Args:\n X: A "batch_shape x q x dim" tensor representing the points to be evaluated.\n\n Returns:\n A "1 x batch_shape x q" tensor representing the penalty for each point.\n The first dimension corresponds to the dimension of MC samples.\n ' return torch.norm((X - self.init_point), p=1, dim=(- 1)).unsqueeze(dim=0)
Args: X: A "batch_shape x q x dim" tensor representing the points to be evaluated. Returns: A "1 x batch_shape x q" tensor representing the penalty for each point. The first dimension corresponds to the dimension of MC samples.
botorch/acquisition/penalized.py
forward
saitcakmak/botorch
2,344
python
def forward(self, X: Tensor) -> Tensor: '\n Args:\n X: A "batch_shape x q x dim" tensor representing the points to be evaluated.\n\n Returns:\n A "1 x batch_shape x q" tensor representing the penalty for each point.\n The first dimension corresponds to the dimension of MC samples.\n ' return torch.norm((X - self.init_point), p=1, dim=(- 1)).unsqueeze(dim=0)
def forward(self, X: Tensor) -> Tensor: '\n Args:\n X: A "batch_shape x q x dim" tensor representing the points to be evaluated.\n\n Returns:\n A "1 x batch_shape x q" tensor representing the penalty for each point.\n The first dimension corresponds to the dimension of MC samples.\n ' return torch.norm((X - self.init_point), p=1, dim=(- 1)).unsqueeze(dim=0)<|docstring|>Args: X: A "batch_shape x q x dim" tensor representing the points to be evaluated. Returns: A "1 x batch_shape x q" tensor representing the penalty for each point. The first dimension corresponds to the dimension of MC samples.<|endoftext|>
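A quick shape check of the forward above, reproducing its body on random inputs: a batch of 3 q=2 candidates in 4 dimensions yields a 1 x 3 x 2 penalty tensor (the leading dimension is the MC-sample dimension).

import torch

init_point = torch.zeros(1, 4)
X = torch.randn(3, 2, 4)
penalty = torch.norm(X - init_point, p=1, dim=-1).unsqueeze(dim=0)
print(penalty.shape)  # torch.Size([1, 3, 2])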
b4c06dd14ff34d12929bc8c09cd139bbca59d0a7d6d5d570d578279de8390621
def __init__(self, objective: Callable[([Tensor, Optional[Tensor]], Tensor)], penalty_objective: torch.nn.Module, regularization_parameter: float) -> None: 'Penalized MC objective.\n\n Args:\n objective: A callable `f(samples, X)` mapping a\n `sample_shape x batch-shape x q x m`-dim Tensor `samples` and\n an optional `batch-shape x q x d`-dim Tensor `X` to a\n `sample_shape x batch-shape x q`-dim Tensor of objective values.\n penalty_objective: A torch.nn.Module `f(X)` that takes in a\n `batch-shape x q x d`-dim Tensor `X` and outputs a\n `1 x batch-shape x q`-dim Tensor of penalty objective values.\n regularization_parameter: weight of the penalty (regularization) term\n ' super().__init__(objective=objective) self.penalty_objective = penalty_objective self.regularization_parameter = regularization_parameter
Penalized MC objective. Args: objective: A callable `f(samples, X)` mapping a `sample_shape x batch-shape x q x m`-dim Tensor `samples` and an optional `batch-shape x q x d`-dim Tensor `X` to a `sample_shape x batch-shape x q`-dim Tensor of objective values. penalty_objective: A torch.nn.Module `f(X)` that takes in a `batch-shape x q x d`-dim Tensor `X` and outputs a `1 x batch-shape x q`-dim Tensor of penalty objective values. regularization_parameter: weight of the penalty (regularization) term
botorch/acquisition/penalized.py
__init__
saitcakmak/botorch
2,344
python
def __init__(self, objective: Callable[([Tensor, Optional[Tensor]], Tensor)], penalty_objective: torch.nn.Module, regularization_parameter: float) -> None: 'Penalized MC objective.\n\n Args:\n objective: A callable `f(samples, X)` mapping a\n `sample_shape x batch-shape x q x m`-dim Tensor `samples` and\n an optional `batch-shape x q x d`-dim Tensor `X` to a\n `sample_shape x batch-shape x q`-dim Tensor of objective values.\n penalty_objective: A torch.nn.Module `f(X)` that takes in a\n `batch-shape x q x d`-dim Tensor `X` and outputs a\n `1 x batch-shape x q`-dim Tensor of penalty objective values.\n regularization_parameter: weight of the penalty (regularization) term\n ' super().__init__(objective=objective) self.penalty_objective = penalty_objective self.regularization_parameter = regularization_parameter
def __init__(self, objective: Callable[([Tensor, Optional[Tensor]], Tensor)], penalty_objective: torch.nn.Module, regularization_parameter: float) -> None: 'Penalized MC objective.\n\n Args:\n objective: A callable `f(samples, X)` mapping a\n `sample_shape x batch-shape x q x m`-dim Tensor `samples` and\n an optional `batch-shape x q x d`-dim Tensor `X` to a\n `sample_shape x batch-shape x q`-dim Tensor of objective values.\n penalty_objective: A torch.nn.Module `f(X)` that takes in a\n `batch-shape x q x d`-dim Tensor `X` and outputs a\n `1 x batch-shape x q`-dim Tensor of penalty objective values.\n regularization_parameter: weight of the penalty (regularization) term\n ' super().__init__(objective=objective) self.penalty_objective = penalty_objective self.regularization_parameter = regularization_parameter<|docstring|>Penalized MC objective. Args: objective: A callable `f(samples, X)` mapping a `sample_shape x batch-shape x q x m`-dim Tensor `samples` and an optional `batch-shape x q x d`-dim Tensor `X` to a `sample_shape x batch-shape x q`-dim Tensor of objective values. penalty_objective: A torch.nn.Module `f(X)` that takes in a `batch-shape x q x d`-dim Tensor `X` and outputs a `1 x batch-shape x q`-dim Tensor of penalty objective values. regularization_parameter: weight of the penalty (regularization) term<|endoftext|>
1acb0aeeffe72af997bfdc95c79168b3ee6d064840ceb8077e6ddfd625176c9e
def forward(self, samples: Tensor, X: Optional[Tensor]=None) -> Tensor: 'Evaluate the penalized objective on the samples.\n\n Args:\n samples: A `sample_shape x batch_shape x q x m`-dim Tensor of\n samples from a model posterior.\n X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if\n the objective depends on the inputs explicitly.\n\n Returns:\n A `sample_shape x batch_shape x q`-dim Tensor of objective values\n with the penalty subtracted for each point.\n ' obj = super().forward(samples=samples, X=X) penalty_obj = self.penalty_objective(X) return (obj - (self.regularization_parameter * penalty_obj))
Evaluate the penalized objective on the samples. Args: samples: A `sample_shape x batch_shape x q x m`-dim Tensor of samples from a model posterior. X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if the objective depends on the inputs explicitly. Returns: A `sample_shape x batch_shape x q`-dim Tensor of objective values with the penalty subtracted for each point.
botorch/acquisition/penalized.py
forward
saitcakmak/botorch
2,344
python
def forward(self, samples: Tensor, X: Optional[Tensor]=None) -> Tensor: 'Evaluate the penalized objective on the samples.\n\n Args:\n samples: A `sample_shape x batch_shape x q x m`-dim Tensor of\n samples from a model posterior.\n X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if\n the objective depends on the inputs explicitly.\n\n Returns:\n A `sample_shape x batch_shape x q`-dim Tensor of objective values\n with the penalty subtracted for each point.\n ' obj = super().forward(samples=samples, X=X) penalty_obj = self.penalty_objective(X) return (obj - (self.regularization_parameter * penalty_obj))
def forward(self, samples: Tensor, X: Optional[Tensor]=None) -> Tensor: 'Evaluate the penalized objective on the samples.\n\n Args:\n samples: A `sample_shape x batch_shape x q x m`-dim Tensor of\n samples from a model posterior.\n X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if\n the objective depends on the inputs explicitly.\n\n Returns:\n A `sample_shape x batch_shape x q`-dim Tensor of objective values\n with the penalty subtracted for each point.\n ' obj = super().forward(samples=samples, X=X) penalty_obj = self.penalty_objective(X) return (obj - (self.regularization_parameter * penalty_obj))<|docstring|>Evaluate the penalized objective on the samples. Args: samples: A `sample_shape x batch_shape x q x m`-dim Tensor of samples from a model posterior. X: A `batch_shape x q x d`-dim tensor of inputs. Relevant only if the objective depends on the inputs explicitly. Returns: A `sample_shape x batch_shape x q`-dim Tensor of objective values with the penalty subtracted for each point.<|endoftext|>
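A minimal end-to-end sketch of the evaluation above, combining PenalizedMCObjective with the L1PenaltyObjective from this file; the toy sum objective and the 0.5 weight are illustrative assumptions.

import torch
from botorch.acquisition.penalized import L1PenaltyObjective, PenalizedMCObjective

objective = PenalizedMCObjective(
    objective=lambda samples, X=None: samples.sum(dim=-1),
    penalty_objective=L1PenaltyObjective(init_point=torch.zeros(1, 3)),
    regularization_parameter=0.5,
)
samples = torch.ones(4, 2, 1, 3)   # sample_shape x batch_shape x q x m
X = torch.ones(2, 1, 3)            # batch_shape x q x d
print(objective(samples, X=X))     # 3 - 0.5 * 3 = 1.5 everywhere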
e3c27b973760366b4a35656d3c616961d25eac9dab315671be7426e3a34d2f63
def remove_all_whitespace(str): '\n Strips all whitespace from a given string.\n :return: new string without whitespaces, will return the original string if it is empty or None\n ' if str: return re.sub('\\s+', '', str) else: return str
Strips all whitespace from a given string. :return: new string without whitespaces, will return the original string if it is empty or None
lsh/utils/strings_utils.py
remove_all_whitespace
singhj/locality-sensitive-hashing
19
python
def remove_all_whitespace(str): '\n Strips all whitespace from a given string.\n :return: new string without whitespaces, will return the original string if it is empty or None\n ' if str: return re.sub('\\s+', '', str) else: return str
def remove_all_whitespace(str): '\n Strips all whitespace from a given string.\n :return: new string without whitespaces, will return the original string if it is empty or None\n ' if str: return re.sub('\\s+', '', str) else: return str<|docstring|>Strips all whitespace from a given string. :return: new string without whitespaces, will return the original string if it is empty or None<|endoftext|>
b38c8fe918fb34b6d74f7bb648ebdfa21c01f38b5461e6ed650c6c2061bf9d82
def tokenize(str, delimiter=DEFAULT_TOKENIZER_DELIMITER): '\n Splits a string by a given delimiter. Default delimiter is a single whitespace.\n :return: list of string tokens, will return the original string if it is empty or None\n ' if str: return str.split(delimiter) else: return str
Splits a string by a given delimiter. Default delimiter is a single whitespace. :return: list of string tokens, will return the original string if it is empty or None
lsh/utils/strings_utils.py
tokenize
singhj/locality-sensitive-hashing
19
python
def tokenize(str, delimiter=DEFAULT_TOKENIZER_DELIMITER): '\n Splits a string by a given delimiter. Default delimiter is a single whitespace.\n :return: list of string tokens, will return the original string if it is empty or None\n ' if str: return str.split(delimiter) else: return str
def tokenize(str, delimiter=DEFAULT_TOKENIZER_DELIMITER): '\n Splits a string by a given delimiter. Default delimiter is a single whitespace.\n :return: list of string tokens, will return the original string if it is empty or None\n ' if str: return str.split(delimiter) else: return str<|docstring|>Splits a string by a given delimiter. Default delimiter is a single whitespace. :return: list of string tokens, will return the original string if it is empty or None<|endoftext|>
c4fb3f32a343a99df5ed9e7583fa26186fcd21ddb574a62436c0ac8a5c718dec
def normalize(str): '\n Normalizes the string, making it all lower case and removing all punctuation.\n :param str: string to be normalized\n :return: normalized string, if str is None or empty it returns the original string\n ' if str: if isinstance(str, unicode): not_letters_or_digits = u'!"#%\'()*+,-./:;<=>?@[\\]^_`{|}~' translate_to = u'' translate_table = dict(((ord(char), translate_to) for char in not_letters_or_digits)) return str.translate(translate_table) else: return str.lower().translate(string.maketrans('', ''), string.punctuation) else: return str
Normalizes the string, making it all lower case and removing all punctuation. :param str: string to be normalized :return: normalized string, if str is None or empty it returns the original string
lsh/utils/strings_utils.py
normalize
singhj/locality-sensitive-hashing
19
python
def normalize(str): '\n Normalizes the string, making it all lower case and removing all punctuation.\n :param str: string to be normalized\n :return: normalized string, if str is None or empty it returns the original string\n ' if str: if isinstance(str, unicode): not_letters_or_digits = u'!"#%\'()*+,-./:;<=>?@[\\]^_`{|}~' translate_to = u'' translate_table = dict(((ord(char), translate_to) for char in not_letters_or_digits)) return str.translate(translate_table) else: return str.lower().translate(string.maketrans('', ''), string.punctuation) else: return str
def normalize(str): '\n Normalizes the string, making it all lower case and removing all punctuation.\n :param str: string to be normalized\n :return: normalized string, if str is None or empty it returns the original string\n ' if str: if isinstance(str, unicode): not_letters_or_digits = u'!"#%\'()*+,-./:;<=>?@[\\]^_`{|}~' translate_to = u'' translate_table = dict(((ord(char), translate_to) for char in not_letters_or_digits)) return str.translate(translate_table) else: return str.lower().translate(string.maketrans('', ''), string.punctuation) else: return str<|docstring|>Normalizes the string, making it all lower case and removing all punctuation. :param str: string to be normalized :return: normalized string, if str is None or empty it returns the original string<|endoftext|>
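Expected behavior of the three string helpers above, traced from their bodies (note that normalize relies on unicode and string.maketrans, so it is Python 2 code; the default tokenizer delimiter is assumed to be a single space, per the docstring).

remove_all_whitespace('a b\tc')    # -> 'abc'
tokenize('a b c')                  # -> ['a', 'b', 'c']
tokenize('a,b,c', delimiter=',')   # -> ['a', 'b', 'c']
normalize('Hello, World!')         # -> 'hello world' (lower-cased, punctuation removed, spaces kept)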
3b858f1ba69824973de9265ad73569b495a79305981a6d80e2d5e39a295b7268
def mirror(self, plane: str='M'): "\n Mirror all previous moves and scramble moves across a plane.\n\n Parameters\n ----------\n plane : {'M', 'E', 'S'}, optional\n Plane the moves are reflected across.\n\n If `plane` == 'M': the moves are mirrored left to right.\n If `plane` == 'E': the moves are mirrored top to bottom.\n If `plane` == 'S': the moves are mirrored front to back.\n " def mirror_moves(moves, swaps, planes): mirrored_moves = [] for move in moves: for (char, swap) in swaps: if (char in move): move = move.replace(char, swap) break if (not set(planes).intersection(move)): if (move[(- 1)] == "'"): move = move[:(- 1)] elif (move[(- 1)] != '2'): move += "'" mirrored_moves.append(move) return mirrored_moves if (plane.upper() == 'M'): swaps = ('RL', 'LR', 'rl', 'lr') planes = 'Mm' elif (plane.upper() == 'E'): swaps = ('UD', 'DU', 'ud', 'du') planes = 'Ee' elif (plane.upper() == 'S'): swaps = ('FB', 'BF', 'fb', 'bf') planes = 'Ss' else: raise ValueError moves = self.moves smoves = self.smoves self.reset(self.size) self.smoves = mirror_moves(smoves, swaps, planes) self.move(self.smoves) self.moves = [] self.move(mirror_moves(moves, swaps, planes))
Mirror all previous moves and scramble moves across a plane. Parameters ---------- plane : {'M', 'E', 'S'}, optional Plane the moves are reflected across. If `plane` == 'M': the moves are mirrored left to right. If `plane` == 'E': the moves are mirrored top to bottom. If `plane` == 'S': the moves are mirrored front to back.
cube/_mirror.py
mirror
17LangF/virtual-cube
0
python
def mirror(self, plane: str='M'): "\n Mirror all previous moves and scramble moves across a plane.\n\n Parameters\n ----------\n plane : {'M', 'E', 'S'}, optional\n Plane the moves are reflected across.\n\n If `plane` == 'M': the moves are mirrored left to right.\n If `plane` == 'E': the moves are mirrored top to bottom.\n If `plane` == 'S': the moves are mirrored front to back.\n " def mirror_moves(moves, swaps, planes): mirrored_moves = [] for move in moves: for (char, swap) in swaps: if (char in move): move = move.replace(char, swap) break if (not set(planes).intersection(move)): if (move[(- 1)] == "'"): move = move[:(- 1)] elif (move[(- 1)] != '2'): move += "'" mirrored_moves.append(move) return mirrored_moves if (plane.upper() == 'M'): swaps = ('RL', 'LR', 'rl', 'lr') planes = 'Mm' elif (plane.upper() == 'E'): swaps = ('UD', 'DU', 'ud', 'du') planes = 'Ee' elif (plane.upper() == 'S'): swaps = ('FB', 'BF', 'fb', 'bf') planes = 'Ss' else: raise ValueError moves = self.moves smoves = self.smoves self.reset(self.size) self.smoves = mirror_moves(smoves, swaps, planes) self.move(self.smoves) self.moves = [] self.move(mirror_moves(moves, swaps, planes))
def mirror(self, plane: str='M'): "\n Mirror all previous moves and scramble moves across a plane.\n\n Parameters\n ----------\n plane : {'M', 'E', 'S'}, optional\n Plane the moves are reflected across.\n\n If `plane` == 'M': the moves are mirrored left to right.\n If `plane` == 'E': the moves are mirrored top to bottom.\n If `plane` == 'S': the moves are mirrored front to back.\n " def mirror_moves(moves, swaps, planes): mirrored_moves = [] for move in moves: for (char, swap) in swaps: if (char in move): move = move.replace(char, swap) break if (not set(planes).intersection(move)): if (move[(- 1)] == "'"): move = move[:(- 1)] elif (move[(- 1)] != '2'): move += "'" mirrored_moves.append(move) return mirrored_moves if (plane.upper() == 'M'): swaps = ('RL', 'LR', 'rl', 'lr') planes = 'Mm' elif (plane.upper() == 'E'): swaps = ('UD', 'DU', 'ud', 'du') planes = 'Ee' elif (plane.upper() == 'S'): swaps = ('FB', 'BF', 'fb', 'bf') planes = 'Ss' else: raise ValueError moves = self.moves smoves = self.smoves self.reset(self.size) self.smoves = mirror_moves(smoves, swaps, planes) self.move(self.smoves) self.moves = [] self.move(mirror_moves(moves, swaps, planes))<|docstring|>Mirror all previous moves and scramble moves across a plane. Parameters ---------- plane : {'M', 'E', 'S'}, optional Plane the moves are reflected across. If `plane` == 'M': the moves are mirrored left to right. If `plane` == 'E': the moves are mirrored top to bottom. If `plane` == 'S': the moves are mirrored front to back.<|endoftext|>
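To see what the inner helper does, here is a standalone copy of mirror_moves with the M-plane settings, runnable without the Cube class: R U R' mirrored left-to-right becomes L' U' L, while moves on the mirror plane itself keep their direction.

def mirror_moves(moves, swaps=('RL', 'LR', 'rl', 'lr'), planes='Mm'):
    mirrored = []
    for move in moves:
        for char, swap in swaps:          # swap the mirrored face letters
            if char in move:
                move = move.replace(char, swap)
                break
        if not set(planes).intersection(move):
            if move[-1] == "'":           # invert the turn direction
                move = move[:-1]
            elif move[-1] != '2':
                move += "'"
        mirrored.append(move)
    return mirrored

print(mirror_moves(["R", "U", "R'", "M2"]))  # ["L'", "U'", 'L', 'M2']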
83eee5e57c7a3a5b5a997eecc5fd0fbd6aef8afda49d29b07fec097658b2f3ab
def initializeDB(self): ' Deletes item from database.\n\n Args:\n key (String): name of item to delete\n Returns:\n None\n ' client = pymongo.MongoClient(host=self.host, port=self.port) db = client[self.databaseName] return MongoDatabase(db, client)
Deletes item from database. Args: key (String): name of item to delete Returns: None
overwatch/database/mongoDatabaseFactory.py
initializeDB
ostr00000/OVERWATCH
0
python
def initializeDB(self): ' Deletes item from database.\n\n Args:\n key (String): name of item to delete\n Returns:\n None\n ' client = pymongo.MongoClient(host=self.host, port=self.port) db = client[self.databaseName] return MongoDatabase(db, client)
def initializeDB(self): ' Deletes item from database.\n\n Args:\n key (String): name of item to delete\n Returns:\n None\n ' client = pymongo.MongoClient(host=self.host, port=self.port) db = client[self.databaseName] return MongoDatabase(db, client)<|docstring|>Deletes item from database. Args: key (String): name of item to delete Returns: None<|endoftext|>
3e893c77b678997edbc9a45a1cebd1f34458bd68e4f8ce69afa753e353fe6e85
def __init__(self, capacity, operation, neutral_element): "Build a Segment Tree data structure.\n\n https://en.wikipedia.org/wiki/Segment_tree\n\n Can be used as a regular array, but with two\n important differences:\n\n a) setting item's value is slightly slower.\n It is O(lg capacity) instead of O(1).\n b) user has access to an efficient `reduce`\n operation which reduces `operation` over\n a contiguous subsequence of items in the\n array.\n\n Parameters\n ----------\n capacity: int\n Total size of the array - must be a power of two.\n operation: lambda obj, obj -> obj\n an operation for combining elements (e.g. sum, max);\n must form a mathematical group together with the set of\n possible values for array elements.\n neutral_element: obj\n neutral element for the operation above. e.g. float('-inf')\n for max and 0 for sum.\n " assert ((capacity > 0) and ((capacity & (capacity - 1)) == 0)), 'capacity must be positive and a power of 2.' self._capacity = capacity self._value = [neutral_element for _ in range((2 * capacity))] self._operation = operation
Build a Segment Tree data structure. https://en.wikipedia.org/wiki/Segment_tree Can be used as a regular array, but with two important differences: a) setting item's value is slightly slower. It is O(lg capacity) instead of O(1). b) user has access to an efficient `reduce` operation which reduces `operation` over a contiguous subsequence of items in the array. Parameters ---------- capacity: int Total size of the array - must be a power of two. operation: lambda obj, obj -> obj an operation for combining elements (e.g. sum, max); must form a mathematical group together with the set of possible values for array elements. neutral_element: obj neutral element for the operation above. e.g. float('-inf') for max and 0 for sum.
utils/segment_tree.py
__init__
ManUtdMoon/Safe_Reachability_RL
6
python
def __init__(self, capacity, operation, neutral_element): "Build a Segment Tree data structure.\n\n https://en.wikipedia.org/wiki/Segment_tree\n\n Can be used as a regular array, but with two\n important differences:\n\n a) setting item's value is slightly slower.\n It is O(lg capacity) instead of O(1).\n b) user has access to an efficient `reduce`\n operation which reduces `operation` over\n a contiguous subsequence of items in the\n array.\n\n Parameters\n ----------\n capacity: int\n Total size of the array - must be a power of two.\n operation: lambda obj, obj -> obj\n an operation for combining elements (e.g. sum, max);\n must form a mathematical group together with the set of\n possible values for array elements.\n neutral_element: obj\n neutral element for the operation above. e.g. float('-inf')\n for max and 0 for sum.\n " assert ((capacity > 0) and ((capacity & (capacity - 1)) == 0)), 'capacity must be positive and a power of 2.' self._capacity = capacity self._value = [neutral_element for _ in range((2 * capacity))] self._operation = operation
def __init__(self, capacity, operation, neutral_element): "Build a Segment Tree data structure.\n\n https://en.wikipedia.org/wiki/Segment_tree\n\n Can be used as a regular array, but with two\n important differences:\n\n a) setting item's value is slightly slower.\n It is O(lg capacity) instead of O(1).\n b) user has access to an efficient `reduce`\n operation which reduces `operation` over\n a contiguous subsequence of items in the\n array.\n\n Parameters\n ----------\n capacity: int\n Total size of the array - must be a power of two.\n operation: lambda obj, obj -> obj\n an operation for combining elements (e.g. sum, max);\n must form a mathematical group together with the set of\n possible values for array elements.\n neutral_element: obj\n neutral element for the operation above. e.g. float('-inf')\n for max and 0 for sum.\n " assert ((capacity > 0) and ((capacity & (capacity - 1)) == 0)), 'capacity must be positive and a power of 2.' self._capacity = capacity self._value = [neutral_element for _ in range((2 * capacity))] self._operation = operation<|docstring|>Build a Segment Tree data structure. https://en.wikipedia.org/wiki/Segment_tree Can be used as a regular array, but with two important differences: a) setting item's value is slightly slower. It is O(lg capacity) instead of O(1). b) user has access to an efficient `reduce` operation which reduces `operation` over a contiguous subsequence of items in the array. Parameters ---------- capacity: int Total size of the array - must be a power of two. operation: lambda obj, obj -> obj an operation for combining elements (e.g. sum, max); must form a mathematical group together with the set of possible values for array elements. neutral_element: obj neutral element for the operation above. e.g. float('-inf') for max and 0 for sum.<|endoftext|>
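The tree is stored as an implicit binary heap: the root lives at index 1, the children of node i at 2*i and 2*i+1, and the leaves (the user-visible array) at capacity .. 2*capacity - 1. The class's __setitem__ is not part of this record, but a standard point update for this layout (a sketch, not the repo's code) looks like:

def setitem_sketch(tree, idx, val):
    idx += tree._capacity              # jump to the leaf for array position `idx`
    tree._value[idx] = val
    idx //= 2
    while idx >= 1:                    # recompute every ancestor up to the root
        tree._value[idx] = tree._operation(
            tree._value[2 * idx], tree._value[2 * idx + 1]
        )
        idx //= 2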
d901b74222ee1e56f3b21acfb5f0cd35926f921c1468c318ae0dc16787945445
def reduce(self, start=0, end=None): 'Returns result of applying `self.operation`\n to a contiguous subsequence of the array.\n\n self.operation(\n arr[start], operation(arr[start+1], operation(... arr[end])))\n\n Parameters\n ----------\n start: int\n beginning of the subsequence\n end: int\n end of the subsequence\n\n Returns\n -------\n reduced: obj\n result of reducing self.operation over the specified range of array\n elements.\n ' if (end is None): end = (self._capacity - 1) if (end < 0): end += self._capacity return self._reduce_helper(start, end, 1, 0, (self._capacity - 1))
Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation( arr[start], operation(arr[start+1], operation(... arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequence Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements.
utils/segment_tree.py
reduce
ManUtdMoon/Safe_Reachability_RL
6
python
def reduce(self, start=0, end=None): 'Returns result of applying `self.operation`\n to a contiguous subsequence of the array.\n\n self.operation(\n arr[start], operation(arr[start+1], operation(... arr[end])))\n\n Parameters\n ----------\n start: int\n beginning of the subsequence\n end: int\n end of the subsequence\n\n Returns\n -------\n reduced: obj\n result of reducing self.operation over the specified range of array\n elements.\n ' if (end is None): end = (self._capacity - 1) if (end < 0): end += self._capacity return self._reduce_helper(start, end, 1, 0, (self._capacity - 1))
def reduce(self, start=0, end=None): 'Returns result of applying `self.operation`\n to a contiguous subsequence of the array.\n\n self.operation(\n arr[start], operation(arr[start+1], operation(... arr[end])))\n\n Parameters\n ----------\n start: int\n beginning of the subsequence\n end: int\n end of the subsequence\n\n Returns\n -------\n reduced: obj\n result of reducing self.operation over the specified range of array\n elements.\n ' if (end is None): end = (self._capacity - 1) if (end < 0): end += self._capacity return self._reduce_helper(start, end, 1, 0, (self._capacity - 1))<|docstring|>Returns result of applying `self.operation` to a contiguous subsequence of the array. self.operation( arr[start], operation(arr[start+1], operation(... arr[end]))) Parameters ---------- start: int beginning of the subsequence end: int end of the subsequence Returns ------- reduced: obj result of reducing self.operation over the specified range of array elements.<|endoftext|>
b865f2c7b22b1d5dbb2ab89af7160f270cc5f8b790496f91e7b183fd0ae51262
def sum(self, start=0, end=None): 'Returns arr[start] + ... + arr[end]' return super(SumSegmentTree, self).reduce(start, end)
Returns arr[start] + ... + arr[end]
utils/segment_tree.py
sum
ManUtdMoon/Safe_Reachability_RL
6
python
def sum(self, start=0, end=None): return super(SumSegmentTree, self).reduce(start, end)
def sum(self, start=0, end=None): return super(SumSegmentTree, self).reduce(start, end)<|docstring|>Returns arr[start] + ... + arr[end]<|endoftext|>
b9f306c13c878cdb5fddb0e74608b5b86fefa21ca87d88481b6157aea83b069f
def find_prefixsum_idx(self, prefixsum): 'Find the highest index `i` in the array such that\n sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum\n\n if array values are probabilities, this function\n allows sampling indexes according to the discrete\n probability efficiently.\n\n Parameters\n ----------\n prefixsum: float\n upper bound on the sum of the array prefix\n\n Returns\n -------\n idx: int\n highest index satisfying the prefixsum constraint\n ' assert (0 <= prefixsum <= (self.sum() + 1e-05)) idx = 1 while (idx < self._capacity): if (self._value[(2 * idx)] > prefixsum): idx = (2 * idx) else: prefixsum -= self._value[(2 * idx)] idx = ((2 * idx) + 1) return (idx - self._capacity)
Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum if array values are probabilities, this function allows sampling indexes according to the discrete probability efficiently. Parameters ---------- prefixsum: float upper bound on the sum of the array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint
utils/segment_tree.py
find_prefixsum_idx
ManUtdMoon/Safe_Reachability_RL
6
python
def find_prefixsum_idx(self, prefixsum): 'Find the highest index `i` in the array such that\n sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum\n\n if array values are probabilities, this function\n allows sampling indexes according to the discrete\n probability efficiently.\n\n Parameters\n ----------\n prefixsum: float\n upper bound on the sum of the array prefix\n\n Returns\n -------\n idx: int\n highest index satisfying the prefixsum constraint\n ' assert (0 <= prefixsum <= (self.sum() + 1e-05)) idx = 1 while (idx < self._capacity): if (self._value[(2 * idx)] > prefixsum): idx = (2 * idx) else: prefixsum -= self._value[(2 * idx)] idx = ((2 * idx) + 1) return (idx - self._capacity)
def find_prefixsum_idx(self, prefixsum): 'Find the highest index `i` in the array such that\n sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum\n\n if array values are probabilities, this function\n allows sampling indexes according to the discrete\n probability efficiently.\n\n Parameters\n ----------\n prefixsum: float\n upper bound on the sum of the array prefix\n\n Returns\n -------\n idx: int\n highest index satisfying the prefixsum constraint\n ' assert (0 <= prefixsum <= (self.sum() + 1e-05)) idx = 1 while (idx < self._capacity): if (self._value[(2 * idx)] > prefixsum): idx = (2 * idx) else: prefixsum -= self._value[(2 * idx)] idx = ((2 * idx) + 1) return (idx - self._capacity)<|docstring|>Find the highest index `i` in the array such that sum(arr[0] + arr[1] + ... + arr[i - 1]) <= prefixsum if array values are probabilities, this function allows sampling indexes according to the discrete probability efficiently. Parameters ---------- prefixsum: float upper bound on the sum of the array prefix Returns ------- idx: int highest index satisfying the prefixsum constraint<|endoftext|>
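A usage sketch for proportional sampling with the search above; it assumes the standard __setitem__ update, which is not shown in this record.

import random

tree = SumSegmentTree(capacity=4)
for i, p in enumerate([0.1, 0.2, 0.3, 0.4]):
    tree[i] = p                         # assumed point-update, as sketched earlier
mass = random.random() * tree.sum()     # uniform in [0, total mass)
idx = tree.find_prefixsum_idx(mass)     # index i is drawn with probability p_i / sum(p)
print(idx)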
21a0600b1e8f3f3b59049a775f634c24cdf19850ed7d21db3ce18494772f7b57
def min(self, start=0, end=None): 'Returns min(arr[start], ..., arr[end])' return super(MinSegmentTree, self).reduce(start, end)
Returns min(arr[start], ..., arr[end])
utils/segment_tree.py
min
ManUtdMoon/Safe_Reachability_RL
6
python
def min(self, start=0, end=None): return super(MinSegmentTree, self).reduce(start, end)
def min(self, start=0, end=None): return super(MinSegmentTree, self).reduce(start, end)<|docstring|>Returns min(arr[start], ..., arr[end])<|endoftext|>
742080c91610fd131489cd6a606045e3a6616ad112c8e7a902a2e516cb71c153
def generate(env): '\n Add Builders and construction variables for C compilers to an Environment.\n ' (static_obj, shared_obj) = SCons.Tool.createObjBuilders(env) for suffix in CSuffixes: static_obj.add_action(suffix, SCons.Defaults.CAction) shared_obj.add_action(suffix, SCons.Defaults.ShCAction) static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter) shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter) env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS' env['FRAMEWORKS'] = SCons.Util.CLVar('') env['FRAMEWORKPATH'] = SCons.Util.CLVar('') if (env['PLATFORM'] == 'darwin'): env['_CCCOMCOM'] = (env['_CCCOMCOM'] + ' $_FRAMEWORKPATH') env['CC'] = 'cc' env['CCFLAGS'] = SCons.Util.CLVar('') env['CCCOM'] = '$CC -o $TARGET -c $CCFLAGS $_CCCOMCOM $SOURCES' env['SHCC'] = '$CC' env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS') env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCCFLAGS $_CCCOMCOM $SOURCES' env['CPPDEFPREFIX'] = '-D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '-I' env['INCSUFFIX'] = '' env['SHOBJSUFFIX'] = '.os' env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0 env['CFILESUFFIX'] = '.c'
Add Builders and construction variables for C compilers to an Environment.
okl4_kernel/okl4_2.1.1-patch.9/tools/SCons/Tool/cc.py
generate
CyberQueenMara/baseband-research
77
python
def generate(env): '\n \n ' (static_obj, shared_obj) = SCons.Tool.createObjBuilders(env) for suffix in CSuffixes: static_obj.add_action(suffix, SCons.Defaults.CAction) shared_obj.add_action(suffix, SCons.Defaults.ShCAction) static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter) shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter) env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS' env['FRAMEWORKS'] = SCons.Util.CLVar('') env['FRAMEWORKPATH'] = SCons.Util.CLVar('') if (env['PLATFORM'] == 'darwin'): env['_CCCOMCOM'] = (env['_CCCOMCOM'] + ' $_FRAMEWORKPATH') env['CC'] = 'cc' env['CCFLAGS'] = SCons.Util.CLVar('') env['CCCOM'] = '$CC -o $TARGET -c $CCFLAGS $_CCCOMCOM $SOURCES' env['SHCC'] = '$CC' env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS') env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCCFLAGS $_CCCOMCOM $SOURCES' env['CPPDEFPREFIX'] = '-D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '-I' env['INCSUFFIX'] = '' env['SHOBJSUFFIX'] = '.os' env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0 env['CFILESUFFIX'] = '.c'
def generate(env): '\n \n ' (static_obj, shared_obj) = SCons.Tool.createObjBuilders(env) for suffix in CSuffixes: static_obj.add_action(suffix, SCons.Defaults.CAction) shared_obj.add_action(suffix, SCons.Defaults.ShCAction) static_obj.add_emitter(suffix, SCons.Defaults.StaticObjectEmitter) shared_obj.add_emitter(suffix, SCons.Defaults.SharedObjectEmitter) env['_CCCOMCOM'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS' env['FRAMEWORKS'] = SCons.Util.CLVar('') env['FRAMEWORKPATH'] = SCons.Util.CLVar('') if (env['PLATFORM'] == 'darwin'): env['_CCCOMCOM'] = (env['_CCCOMCOM'] + ' $_FRAMEWORKPATH') env['CC'] = 'cc' env['CCFLAGS'] = SCons.Util.CLVar('') env['CCCOM'] = '$CC -o $TARGET -c $CCFLAGS $_CCCOMCOM $SOURCES' env['SHCC'] = '$CC' env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS') env['SHCCCOM'] = '$SHCC -o $TARGET -c $SHCCFLAGS $_CCCOMCOM $SOURCES' env['CPPDEFPREFIX'] = '-D' env['CPPDEFSUFFIX'] = '' env['INCPREFIX'] = '-I' env['INCSUFFIX'] = '' env['SHOBJSUFFIX'] = '.os' env['STATIC_AND_SHARED_OBJECTS_ARE_THE_SAME'] = 0 env['CFILESUFFIX'] = '.c'<|docstring|>Add Builders and construction variables for C compilers to an Environment.<|endoftext|>
69bfccf3b2a8d6732ace3e4025de0ff5e44877ee6a04fcd40d3d50bd7cdf9587
def __enumerate_all(self, node, value): ' We iterate through all values of all non-evidence nodes. For each state of the evidence we obtain its probability by summing the joint probability over all configurations of the non-evidence nodes.\n ' oldValue = self.evidence[node] self.evidence[node] = value nonEvidence = self.evidence.empty() self.__initialize(nonEvidence) prob = self.__probability(self.evidence) while self.__next_state(nonEvidence): prob += self.__probability(self.evidence) self.evidence[nonEvidence] = (- 1) self.evidence[node] = oldValue return prob
We iterate through all values of all non-evidence nodes. For each state of the evidence we obtain its probability by summing the joint probability over all configurations of the non-evidence nodes.
PBNT/Inference.py
__enumerate_all
NunoEdgarGFlowHub/quantum-fog
87
python
def __enumerate_all(self, node, value): ' \n ' oldValue = self.evidence[node] self.evidence[node] = value nonEvidence = self.evidence.empty() self.__initialize(nonEvidence) prob = self.__probability(self.evidence) while self.__next_state(nonEvidence): prob += self.__probability(self.evidence) self.evidence[nonEvidence] = (- 1) self.evidence[node] = oldValue return prob
def __enumerate_all(self, node, value): ' \n ' oldValue = self.evidence[node] self.evidence[node] = value nonEvidence = self.evidence.empty() self.__initialize(nonEvidence) prob = self.__probability(self.evidence) while self.__next_state(nonEvidence): prob += self.__probability(self.evidence) self.evidence[nonEvidence] = (- 1) self.evidence[node] = oldValue return prob<|docstring|>We iterate through all values of all non-evidence nodes. For each state of the evidence we obtain its probability by summing the joint probability over all configurations of the non-evidence nodes.<|endoftext|>
0e63b9a173ff3450256c9649fc33fc9e668acffef0092aae4d55dd497793d51b
def project(self, clique, sepset): ' We want to project from the clique to the sepset. We do this by marginalizing the clique potential into the sepset potential.\n ' oldSepsetPotential = copy.deepcopy(sepset.potential) sepset.potential = clique.potential.marginalize(sepset.potential) return oldSepsetPotential
We want to project from the clique to the sepset. We do this by marginalizing the clique potential into the sepset potential.
PBNT/Inference.py
project
NunoEdgarGFlowHub/quantum-fog
87
python
def project(self, clique, sepset): ' \n ' oldSepsetPotential = copy.deepcopy(sepset.potential) sepset.potential = clique.potential.marginalize(sepset.potential) return oldSepsetPotential
def project(self, clique, sepset): ' \n ' oldSepsetPotential = copy.deepcopy(sepset.potential) sepset.potential = clique.potential.marginalize(sepset.potential) return oldSepsetPotential<|docstring|>We want to project from the clique to the sepset. We do this by marginalizing the clique potential into the sepset potential.<|endoftext|>
13fe5c5f3f6fe292014c040a3a584f4ad097535a7b3ff5d9cb45da950514e8a3
def absorb(self, clique, sepset, oldPotential): " absorb divides the sepset's potential by the old potential. The result is multiplied by the clique's potential. Please see C. Huang and A. Darwiche 96. As with project, this could be optimized by finding the best set of axes to iterate over (either the sepsets, or the clique's axes that are not in the sepset). The best solution would be to define a multiplication operation on a Potential that hides the details.\n " oldPotential[repr((sepset.potential.table == 0))] = 1 sepset.potential /= oldPotential clique.potential *= sepset.potential
absorb divides the sepset's potential by the old potential. The result is multiplied by the clique's potential. Please see C. Huang and A. Darwiche 96. As with project, this could be optimized by finding the best set of axes to iterate over (either the sepsets, or the clique's axes that are not in the sepset). The best solution would be to define a multiplication operation on a Potential that hides the details.
PBNT/Inference.py
absorb
NunoEdgarGFlowHub/quantum-fog
87
python
def absorb(self, clique, sepset, oldPotential): " \n " oldPotential[repr((sepset.potential.table == 0))] = 1 sepset.potential /= oldPotential clique.potential *= sepset.potential
def absorb(self, clique, sepset, oldPotential): " \n " oldPotential[repr((sepset.potential.table == 0))] = 1 sepset.potential /= oldPotential clique.potential *= sepset.potential<|docstring|>absorb divides the sepset's potential by the old potential. The result is multiplied by the clique's potential. Please see C. Huang and A. Darwiche 96. As with project, this could be optimized by finding the best set of axes to iterate over (either the sepsets, or the clique's axes that are not in the sepset). The best solution would be to define a multiplication operation on a Potential that hides the details.<|endoftext|>
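A numpy sketch of the Hugin project/absorb step that these two methods implement, with plain arrays standing in for the repo's Potential objects; the marginalize/divide/multiply semantics are assumed from the docstrings.

import numpy as np

clique_a = np.array([[0.2, 0.3], [0.1, 0.4]])   # potential over variables (A, B)
clique_b = np.array([[0.5, 0.5], [0.5, 0.5]])   # potential over variables (B, C)
sepset_old = np.ones(2)                          # old sepset potential over B

sepset_new = clique_a.sum(axis=0)                # project: marginalize A out
sepset_old[sepset_new == 0] = 1.0                # guard the 0/0 case, as in absorb
clique_b *= (sepset_new / sepset_old)[:, None]   # absorb into the neighboring clique
print(sepset_new, clique_b)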
6b1408021b75cf8d93b6fdfdd4a9d427768a2bef7cc21f2ada03b80cc09c5633
def create_sepset_priority_queue(self, cliques): ' Create a sepset (with a unique id) for every unique pair of cliques, and insert it into a priority queue.\n ' sepsetHeap = [] id = 0 for i in range((len(cliques) - 1)): for clique in cliques[(i + 1):]: sepset = Sepset(id, cliques[i], clique) id += 1 heapq.heappush(sepsetHeap, sepset) return sepsetHeap
Create a sepset (with a unique id) for every unique pair of cliques, and insert it into a priority queue.
PBNT/Inference.py
create_sepset_priority_queue
NunoEdgarGFlowHub/quantum-fog
87
python
def create_sepset_priority_queue(self, cliques): ' \n ' sepsetHeap = [] id = 0 for i in range((len(cliques) - 1)): for clique in cliques[(i + 1):]: sepset = Sepset(id, cliques[i], clique) id += 1 heapq.heappush(sepsetHeap, sepset) return sepsetHeap
def create_sepset_priority_queue(self, cliques): ' \n ' sepsetHeap = [] id = 0 for i in range((len(cliques) - 1)): for clique in cliques[(i + 1):]: sepset = Sepset(id, cliques[i], clique) id += 1 heapq.heappush(sepsetHeap, sepset) return sepsetHeap<|docstring|>Create a sepset (with a unique id) for every unique pair of cliques, and insert it into a priority queue.<|endoftext|>
450e8a9db3cb3a461146db4e808f201351e9f183582c990a9182dc18b4595e1a
def test_success(database): ' Tests that SF 133 amount for line 1000 matches Appropriation budget_authority_unobligat_fyb for the specified\n fiscal year and period\n ' tas_1 = 'tas_one_line_1' tas_2 = 'tas_one_line_2' sf_1 = SF133(line=1000, tas=tas_1, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000') sf_2 = SF133(line=1000, tas=tas_2, period=1, fiscal_year=2016, amount=0, agency_identifier='sys', main_account_code='000', sub_account_code='000') ap_1 = Appropriation(job_id=1, row_number=1, tas=tas_1, budget_authority_unobligat_fyb=1) ap_2 = Appropriation(job_id=2, row_number=1, tas=tas_2, budget_authority_unobligat_fyb=None) assert (number_of_errors(_FILE, database, models=[sf_1, sf_2, ap_1, ap_2]) == 0) tas = 'tas_two_lines' sf_1 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='n') sf_2 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=4, agency_identifier='sys', main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='o') ap = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=5) assert (number_of_errors(_FILE, database, models=[sf_1, sf_2, ap]) == 0)
Tests that SF 133 amount for line 1000 matches Appropriation budget_authority_unobligat_fyb for the specified fiscal year and period
tests/unit/dataactvalidator/test_a7_appropriations.py
test_success
RonSherfey/data-act-broker-backend
0
python
def test_success(database): ' Tests that SF 133 amount for line 1000 matches Appropriation budget_authority_unobligat_fyb for the specified\n fiscal year and period\n ' tas_1 = 'tas_one_line_1' tas_2 = 'tas_one_line_2' sf_1 = SF133(line=1000, tas=tas_1, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000') sf_2 = SF133(line=1000, tas=tas_2, period=1, fiscal_year=2016, amount=0, agency_identifier='sys', main_account_code='000', sub_account_code='000') ap_1 = Appropriation(job_id=1, row_number=1, tas=tas_1, budget_authority_unobligat_fyb=1) ap_2 = Appropriation(job_id=2, row_number=1, tas=tas_2, budget_authority_unobligat_fyb=None) assert (number_of_errors(_FILE, database, models=[sf_1, sf_2, ap_1, ap_2]) == 0) tas = 'tas_two_lines' sf_1 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='n') sf_2 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=4, agency_identifier='sys', main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='o') ap = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=5) assert (number_of_errors(_FILE, database, models=[sf_1, sf_2, ap]) == 0)
def test_success(database): ' Tests that SF 133 amount for line 1000 matches Appropriation budget_authority_unobligat_fyb for the specified\n fiscal year and period\n ' tas_1 = 'tas_one_line_1' tas_2 = 'tas_one_line_2' sf_1 = SF133(line=1000, tas=tas_1, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000') sf_2 = SF133(line=1000, tas=tas_2, period=1, fiscal_year=2016, amount=0, agency_identifier='sys', main_account_code='000', sub_account_code='000') ap_1 = Appropriation(job_id=1, row_number=1, tas=tas_1, budget_authority_unobligat_fyb=1) ap_2 = Appropriation(job_id=2, row_number=1, tas=tas_2, budget_authority_unobligat_fyb=None) assert (number_of_errors(_FILE, database, models=[sf_1, sf_2, ap_1, ap_2]) == 0) tas = 'tas_two_lines' sf_1 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='n') sf_2 = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=4, agency_identifier='sys', main_account_code='000', sub_account_code='000', disaster_emergency_fund_code='o') ap = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=5) assert (number_of_errors(_FILE, database, models=[sf_1, sf_2, ap]) == 0)<|docstring|>Tests that SF 133 amount for line 1000 matches Appropriation budget_authority_unobligat_fyb for the specified fiscal year and period<|endoftext|>
420030a41a8d04aea1b7786b9ff33ca2be9c94bcf27f1c764148da7440e3906b
def test_failure(database): ' Tests that SF 133 amount for line 1000 does not match Appropriation budget_authority_unobligat_fyb for the\n specified fiscal year and period\n ' tas = 'fail_tas' sf = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000') ap_1 = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=0) ap_2 = Appropriation(job_id=2, row_number=1, tas=tas, budget_authority_unobligat_fyb=None) assert (number_of_errors(_FILE, database, models=[sf, ap_1, ap_2]) == 2)
Tests that SF 133 amount for line 1000 does not match Appropriation budget_authority_unobligat_fyb for the specified fiscal year and period
tests/unit/dataactvalidator/test_a7_appropriations.py
test_failure
RonSherfey/data-act-broker-backend
0
python
def test_failure(database): ' Tests that SF 133 amount for line 1000 does not match Appropriation budget_authority_unobligat_fyb for the\n specified fiscal year and period\n ' tas = 'fail_tas' sf = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000') ap_1 = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=0) ap_2 = Appropriation(job_id=2, row_number=1, tas=tas, budget_authority_unobligat_fyb=None) assert (number_of_errors(_FILE, database, models=[sf, ap_1, ap_2]) == 2)
def test_failure(database): ' Tests that SF 133 amount for line 1000 does not match Appropriation budget_authority_unobligat_fyb for the\n specified fiscal year and period\n ' tas = 'fail_tas' sf = SF133(line=1000, tas=tas, period=1, fiscal_year=2016, amount=1, agency_identifier='sys', main_account_code='000', sub_account_code='000') ap_1 = Appropriation(job_id=1, row_number=1, tas=tas, budget_authority_unobligat_fyb=0) ap_2 = Appropriation(job_id=2, row_number=1, tas=tas, budget_authority_unobligat_fyb=None) assert (number_of_errors(_FILE, database, models=[sf, ap_1, ap_2]) == 2)<|docstring|>Tests that SF 133 amount for line 1000 does not match Appropriation budget_authority_unobligat_fyb for the specified fiscal year and period<|endoftext|>
840cc42b9755e97e4fcbfa8824fa93eaa02b82afed1bc57b8bb905b6ed9df7a7
def clean_product_version(version: str) -> str: '\n Replace / alias "latest" with "release"\n\n :type version: str\n :rtype: str\n :param version: The current version being used\n :return: The cleaned/replaced version\n ' if (version == 'latest'): version = 'release' return version
Replace / alias "latest" with "release" :type version: str :rtype: str :param version: The current version being used :return: The cleaned/replaced version
get-version.py
clean_product_version
rstudio/rstudio-docker-products
31
python
def clean_product_version(version: str) -> str: '\n Replace / alias "latest" with "release"\n\n :type version: str\n :rtype: str\n :param version: The current version being used\n :return: The cleaned/replaced version\n ' if (version == 'latest'): version = 'release' return version
def clean_product_version(version: str) -> str: '\n Replace / alias "latest" with "release"\n\n :type version: str\n :rtype: str\n :param version: The current version being used\n :return: The cleaned/replaced version\n ' if (version == 'latest'): version = 'release' return version<|docstring|>Replace / alias "latest" with "release" :type version: str :rtype: str :param version: The current version being used :return: The cleaned/replaced version<|endoftext|>
007b4b1311f011ad5d85ddd48961f499557b34c4583374058acd4cffe791cd0f
def clean_product_selection(product: str) -> str: '\n Clean up / alias products together for version determination\n - Remove rstudio- prefix\n - Remove -preview suffix\n - Convert r-session prefixed products to workbench\n - Convert connect- prefixed products to connect\n - Convert rsw -> workbench, rsc -> connect, rspm -> package-manager\n\n :rtype: str\n :param product: The current product being requested\n :return: The cleaned/replaced product name\n ' pref = re.compile('^rstudio-') product = pref.sub('', product) suffix = re.compile('-preview$') product = suffix.sub('', product) rsw = re.compile('^rsw$') if rsw.match(product): product = 'workbench' rsc = re.compile('^rsc$') if rsc.match(product): product = 'connect' rspm = re.compile('^rspm$') if rspm.match(product): product = 'package-manager' session_pref = re.compile('^r-session') if session_pref.match(product): print(f"Swapping product '{product}' for 'workbench'", file=sys.stderr) product = 'workbench' connect_pref = re.compile('^connect-') if connect_pref.match(product): print(f"Swapping product '{product}' for 'connect'", file=sys.stderr) product = 'connect' return product
Clean up / alias products together for version determination - Remove rstudio- prefix - Remove -preview suffix - Convert r-session prefixed products to workbench - Convert connect- prefixed products to connect - Convert rsw -> workbench, rsc -> connect, rspm -> package-manager :rtype: str :param product: The current product being requested :return: The cleaned/replaced product name
get-version.py
clean_product_selection
rstudio/rstudio-docker-products
31
python
def clean_product_selection(product: str) -> str: '\n Clean up / alias products together for version determination\n - Remove rstudio- prefix\n - Remove -preview suffix\n - Convert r-session prefixed products to workbench\n - Convert connect- prefixed products to connect\n - Convert rsw -> workbench, rsc -> connect, rspm -> package-manager\n\n :rtype: str\n :param product: The current product being requested\n :return: The cleaned/replaced product name\n ' pref = re.compile('^rstudio-') product = pref.sub('', product) suffix = re.compile('-preview$') product = suffix.sub('', product) rsw = re.compile('^rsw$') if rsw.match(product): product = 'workbench' rsc = re.compile('^rsc$') if rsc.match(product): product = 'connect' rspm = re.compile('^rspm$') if rspm.match(product): product = 'package-manager' session_pref = re.compile('^r-session') if session_pref.match(product): print(f"Swapping product '{product}' for 'workbench'", file=sys.stderr) product = 'workbench' connect_pref = re.compile('^connect-') if connect_pref.match(product): print(f"Swapping product '{product}' for 'connect'", file=sys.stderr) product = 'connect' return product
def clean_product_selection(product: str) -> str: '\n Clean up / alias products together for version determination\n - Remove rstudio- prefix\n - Remove -preview suffix\n - Convert r-session prefixed products to workbench\n - Convert connect- prefixed products to connect\n - Convert rsw -> workbench, rsc -> connect, rspm -> package-manager\n\n :rtype: str\n :param product: The current product being requested\n :return: The cleaned/replaced product name\n ' pref = re.compile('^rstudio-') product = pref.sub('', product) suffix = re.compile('-preview$') product = suffix.sub('', product) rsw = re.compile('^rsw$') if rsw.match(product): product = 'workbench' rsc = re.compile('^rsc$') if rsc.match(product): product = 'connect' rspm = re.compile('^rspm$') if rspm.match(product): product = 'package-manager' session_pref = re.compile('^r-session') if session_pref.match(product): print(f"Swapping product '{product}' for 'workbench'", file=sys.stderr) product = 'workbench' connect_pref = re.compile('^connect-') if connect_pref.match(product): print(f"Swapping product '{product}' for 'connect'", file=sys.stderr) product = 'connect' return product<|docstring|>Clean up / alias products together for version determination - Remove rstudio- prefix - Remove -preview suffix - Convert r-session prefixed products to workbench - Convert connect- prefixed products to connect - Convert rsw -> workbench, rsc -> connect, rspm -> package-manager :rtype: str :param product: The current product being requested :return: The cleaned/replaced product name<|endoftext|>
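Expected behavior of the two helpers above, traced from their bodies (the version string is a hypothetical input):

clean_product_version('latest')                       # -> 'release'
clean_product_version('2023.03.0')                    # -> '2023.03.0' (unchanged)
clean_product_selection('rstudio-workbench-preview')  # -> 'workbench'
clean_product_selection('rsw')                        # -> 'workbench'
clean_product_selection('r-session-complete')         # -> 'workbench' (logs the swap to stderr)
clean_product_selection('connect-content-init')       # -> 'connect' (logs the swap to stderr)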
f1000af643dd1fc16ce572c2c4bdddf9171ef100caa29ca41c0db5f896ea9bfc
def get_child_accounts(self): "\n Return a list of all child accounts of the integration's account.\n " params = {} response = self.json_api_call('POST', '/accounts/v1/account/list', params) return response
Return a list of all child accounts of the integration's account.
duo_client/accounts.py
get_child_accounts
cavemanpi/duo_client_python
96
python
def get_child_accounts(self): "\n \n " params = {} response = self.json_api_call('POST', '/accounts/v1/account/list', params) return response
def get_child_accounts(self): "\n \n " params = {} response = self.json_api_call('POST', '/accounts/v1/account/list', params) return response<|docstring|>Return a list of all child accounts of the integration's account.<|endoftext|>
a67e36fa94c5820f1e265b1a6bd1f9d251b79fafbcbb99437aa3492882439251
def create_account(self, name): "\n Create a new child account of the integration's account.\n " params = {'name': name} response = self.json_api_call('POST', '/accounts/v1/account/create', params) return response
Create a new child account of the integration's account.
duo_client/accounts.py
create_account
cavemanpi/duo_client_python
96
python
def create_account(self, name): "\n \n " params = {'name': name} response = self.json_api_call('POST', '/accounts/v1/account/create', params) return response
def create_account(self, name): "\n \n " params = {'name': name} response = self.json_api_call('POST', '/accounts/v1/account/create', params) return response<|docstring|>Create a new child account of the integration's account.<|endoftext|>
62adf5d4ce736b81b82ade51e01477d0f263925bc39a412181482254e60e1a73
def delete_account(self, account_id): "\n Delete a child account of the integration's account.\n " params = {'account_id': account_id} response = self.json_api_call('POST', '/accounts/v1/account/delete', params) return response
Delete a child account of the integration's account.
duo_client/accounts.py
delete_account
cavemanpi/duo_client_python
96
python
def delete_account(self, account_id): "\n \n " params = {'account_id': account_id} response = self.json_api_call('POST', '/accounts/v1/account/delete', params) return response
def delete_account(self, account_id): "\n \n " params = {'account_id': account_id} response = self.json_api_call('POST', '/accounts/v1/account/delete', params) return response<|docstring|>Delete a child account of the integration's account.<|endoftext|>
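A usage sketch for the three Accounts API methods above; it assumes the client is constructed like other duo_client clients, and the credentials and the 'account_id' response field are placeholders/assumptions about the Duo Accounts API.

import duo_client

accounts_api = duo_client.Accounts(
    ikey='<integration key>',               # placeholder credentials
    skey='<secret key>',
    host='api-XXXXXXXX.duosecurity.com',
)
child = accounts_api.create_account('Example Child')
print(accounts_api.get_child_accounts())
accounts_api.delete_account(child['account_id'])  # 'account_id' field assumed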
cff373a1f5167813af901d862641e8c1c7afd0feaaeca4c2de320feb0815ec4d
def handle_file_download(url: str, filename: str): 'Handle file download from url, write to file and save to static directory.' if (isfile(filename) is False): response = io.StringIO(urllib.request.urlopen(url).read().decode('utf-8')) with open(filename, 'w') as f: f.write(response.read())
Handle file download from url, write to file and save to static directory.
viewer/src/utils.py
handle_file_download
DecoPath/DecoPath
1
python
def handle_file_download(url: str, filename: str): if (isfile(filename) is False): response = io.StringIO(urllib.request.urlopen(url).read().decode('utf-8')) with open(filename, 'w') as f: f.write(response.read())
def handle_file_download(url: str, filename: str): if (isfile(filename) is False): response = io.StringIO(urllib.request.urlopen(url).read().decode('utf-8')) with open(filename, 'w') as f: f.write(response.read())<|docstring|>Handle file download from url, write to file and save to static directory.<|endoftext|>
76638dbb4e37687fba78e5411529df92f9dd3081c8199bacf0d783aebfa46897
def read_data_file(file_path: str, filename: str) -> Union[(pd.DataFrame, str)]: 'Check read data file.' logger.info(f'Reading {file_path}') try: if file_path.endswith(CSV): return pd.read_csv(file_path, sep=',') elif file_path.endswith(TSV): return pd.read_csv(file_path, sep='\t') else: return pd.read_csv(file_path, sep=None, engine='python') except IOError: logger.error(f'Failed to read {filename} {file_path}. File exists: {os.path.isfile(file_path)}') return f'There is a problem with your file {filename}. Please check that it meets the criteria.'
Check read data file.
viewer/src/utils.py
read_data_file
DecoPath/DecoPath
1
python
def read_data_file(file_path: str, filename: str) -> Union[(pd.DataFrame, str)]: logger.info(f'Reading {file_path}') try: if file_path.endswith(CSV): return pd.read_csv(file_path, sep=',') elif file_path.endswith(TSV): return pd.read_csv(file_path, sep='\t') else: return pd.read_csv(file_path, sep=None, engine='python') except IOError: logger.error(f'Failed to read {filename} {file_path}. File exists: {os.path.isfile(file_path)}') return f'There is a problem with your file {filename}. Please check that it meets the criteria.'
def read_data_file(file_path: str, filename: str) -> Union[(pd.DataFrame, str)]: logger.info(f'Reading {file_path}') try: if file_path.endswith(CSV): return pd.read_csv(file_path, sep=',') elif file_path.endswith(TSV): return pd.read_csv(file_path, sep='\t') else: return pd.read_csv(file_path, sep=None, engine='python') except IOError: logger.error(f'Failed to read {filename} {file_path}. File exists: {os.path.isfile(file_path)}') return f'There is a problem with your file {filename}. Please check that it meets the criteria.'<|docstring|>Check read data file.<|endoftext|>
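A minimal, self-contained usage sketch for the read_data_file record above. The CSV and TSV constants are assumed to be the literal extensions '.csv' and '.tsv', and the logging calls are dropped; treat this as an illustration of the delimiter dispatch, not the project's exact module.

import os
import tempfile
from typing import Union

import pandas as pd

CSV, TSV = '.csv', '.tsv'  # assumed values of the module constants

def read_data_file(file_path: str, filename: str) -> Union[pd.DataFrame, str]:
    # trimmed copy of the record above, without the logger
    try:
        if file_path.endswith(CSV):
            return pd.read_csv(file_path, sep=',')
        if file_path.endswith(TSV):
            return pd.read_csv(file_path, sep='\t')
        # unknown extension: let pandas sniff the delimiter
        return pd.read_csv(file_path, sep=None, engine='python')
    except IOError:
        return f'There is a problem with your file {filename}. Please check that it meets the criteria.'

with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as handle:
    handle.write('gene,log2fc\nTP53,1.5\nEGFR,-0.7\n')
    path = handle.name
result = read_data_file(path, 'example.csv')
print(result.shape)  # (2, 2)
os.remove(path)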
ed7f755d0ef8a1553fdf1f5c24d7a1e4657bf217294231b7a88b6dba7fb2e0f9
@functools.lru_cache() def _get_hgnc_mapping_dict(hgnc_mappings=HGNC_MAPPINGS): 'Load HGNC name-id mappings.' with open(hgnc_mappings) as json_file: return json.load(json_file)
Load HGNC name-id mappings.
viewer/src/utils.py
_get_hgnc_mapping_dict
DecoPath/DecoPath
1
python
@functools.lru_cache() def _get_hgnc_mapping_dict(hgnc_mappings=HGNC_MAPPINGS): with open(hgnc_mappings) as json_file: return json.load(json_file)
@functools.lru_cache() def _get_hgnc_mapping_dict(hgnc_mappings=HGNC_MAPPINGS): with open(hgnc_mappings) as json_file: return json.load(json_file)<|docstring|>Load HGNC name-id mappings.<|endoftext|>
2941252ddf2425c6068a3218e3a678525025d50ced5d2566245dbecb172f3395
def concatenate_files(files_list: List, databases_list: List) -> str: 'Concatenate GMT files in a list of files and write to a new file.' databases = '_'.join((str(x) for x in sorted(databases_list))) concatenated_file = (databases + GMT_FILE_EXTENSION) file_path = os.path.join(GMT_FILES_DIR, concatenated_file) if (not isfile(file_path)): with open(file_path, 'w') as outfile: for file in files_list: with open(file) as infile: for line in infile: outfile.write(line) return file_path
Concatenate GMT files in a list of files and write to a new file.
viewer/src/utils.py
concatenate_files
DecoPath/DecoPath
1
python
def concatenate_files(files_list: List, databases_list: List) -> str: databases = '_'.join((str(x) for x in sorted(databases_list))) concatenated_file = (databases + GMT_FILE_EXTENSION) file_path = os.path.join(GMT_FILES_DIR, concatenated_file) if (not isfile(file_path)): with open(file_path, 'w') as outfile: for file in files_list: with open(file) as infile: for line in infile: outfile.write(line) return file_path
def concatenate_files(files_list: List, databases_list: List) -> str: databases = '_'.join((str(x) for x in sorted(databases_list))) concatenated_file = (databases + GMT_FILE_EXTENSION) file_path = os.path.join(GMT_FILES_DIR, concatenated_file) if (not isfile(file_path)): with open(file_path, 'w') as outfile: for file in files_list: with open(file) as infile: for line in infile: outfile.write(line) return file_path<|docstring|>Concatenate GMT files in a list of files and write to a new file.<|endoftext|>
d2927dffa8af134858bf6ba4b02591113ccfe2db067487527b0d976029f93a4d
def spliterate(lines: Iterable[str], sep='\t') -> Iterable[Tuple[(str, ...)]]: 'Split each line in the iterable by the given separator.' for line in lines: (yield line.strip().split(sep))
Split each line in the iterable by the given separator.
viewer/src/utils.py
spliterate
DecoPath/DecoPath
1
python
def spliterate(lines: Iterable[str], sep='\t') -> Iterable[Tuple[(str, ...)]]: for line in lines: (yield line.strip().split(sep))
def spliterate(lines: Iterable[str], sep='\t') -> Iterable[Tuple[(str, ...)]]: for line in lines: (yield line.strip().split(sep))<|docstring|>Split each line in the iterable by the given separator.<|endoftext|>
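Because spliterate is a pure generator, a tiny self-contained sketch shows its behaviour directly; the sample lines are invented. Note that str.split returns lists, so the record's Tuple annotation is slightly off.

from typing import Iterable, List

def spliterate(lines: Iterable[str], sep: str = '\t') -> Iterable[List[str]]:
    # same logic as the record above, with the return type written as List
    for line in lines:
        yield line.strip().split(sep)

rows = list(spliterate(['hsa00010\tGlycolysis\n', 'R-HSA-1640170\tCell Cycle\n']))
print(rows)  # [['hsa00010', 'Glycolysis'], ['R-HSA-1640170', 'Cell Cycle']]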
a55ac557637078656f731ff72508b29def9edcfb8332cf7fae64f58d5fb2e03a
def query_pathway_db_model(): 'Query pathway database model for all databases to display as multiple choice field for user selection.' return tuple([(index, DATABASES[str(obj)]) for (index, obj) in enumerate(PathwayDatabase.objects.filter(database_name__in=[KEGG, REACTOME, PATHBANK, WIKIPATHWAYS]))])
Query pathway database model for all databases to display as multiple choice field for user selection.
viewer/src/utils.py
query_pathway_db_model
DecoPath/DecoPath
1
python
def query_pathway_db_model(): return tuple([(index, DATABASES[str(obj)]) for (index, obj) in enumerate(PathwayDatabase.objects.filter(database_name__in=[KEGG, REACTOME, PATHBANK, WIKIPATHWAYS]))])
def query_pathway_db_model(): return tuple([(index, DATABASES[str(obj)]) for (index, obj) in enumerate(PathwayDatabase.objects.filter(database_name__in=[KEGG, REACTOME, PATHBANK, WIKIPATHWAYS]))])<|docstring|>Query pathway database model for all databases to display as multiple choice field for user selection.<|endoftext|>
c0831def5d2a93dda517813b6e4dec2e9531a600aa0a8a3c22c05f716b95a896
def get_database_by_id(row: pd.Series): 'Get database by identifier suffix if default databases used for analysis.' if row.startswith('hsa'): return KEGG elif row.startswith('PW'): return PATHBANK elif row.startswith('R-HSA'): return REACTOME elif row.startswith('WP'): return WIKIPATHWAYS elif row.startswith('DC'): return DECOPATH return CUSTOM
Get database by identifier suffix if default databases used for analysis.
viewer/src/utils.py
get_database_by_id
DecoPath/DecoPath
1
python
def get_database_by_id(row: pd.Series): if row.startswith('hsa'): return KEGG elif row.startswith('PW'): return PATHBANK elif row.startswith('R-HSA'): return REACTOME elif row.startswith('WP'): return WIKIPATHWAYS elif row.startswith('DC'): return DECOPATH return CUSTOM
def get_database_by_id(row: pd.Series): if row.startswith('hsa'): return KEGG elif row.startswith('PW'): return PATHBANK elif row.startswith('R-HSA'): return REACTOME elif row.startswith('WP'): return WIKIPATHWAYS elif row.startswith('DC'): return DECOPATH return CUSTOM<|docstring|>Get database by identifier suffix if default databases used for analysis.<|endoftext|>
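A hedged sketch of the prefix dispatch in get_database_by_id. The database constants are local stand-ins, since their real values live elsewhere in the project; the record annotates the argument as a pandas Series, but any string with the right prefix behaves the same.

# Assumed placeholder values for the module constants.
KEGG, PATHBANK, REACTOME, WIKIPATHWAYS, DECOPATH, CUSTOM = (
    'kegg', 'pathbank', 'reactome', 'wikipathways', 'decopath', 'custom')

def get_database_by_id(pathway_id: str) -> str:
    # dispatch purely on the identifier prefix, mirroring the record above
    if pathway_id.startswith('hsa'):
        return KEGG
    if pathway_id.startswith('PW'):
        return PATHBANK
    if pathway_id.startswith('R-HSA'):
        return REACTOME
    if pathway_id.startswith('WP'):
        return WIKIPATHWAYS
    if pathway_id.startswith('DC'):
        return DECOPATH
    return CUSTOM

assert get_database_by_id('hsa00010') == KEGG
assert get_database_by_id('R-HSA-1640170') == REACTOME
assert get_database_by_id('SMP0000055') == CUSTOM  # unknown prefix falls through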
8aa489d7589fddace2fbb551f3487dbdf49f2437c74403aa8dc4665bb8f69575
def get_missing_columns(column_labels: set, df: pd.DataFrame) -> List: 'Get missing column labels.' return [column for column in column_labels if (column not in df)]
Get missing column labels.
viewer/src/utils.py
get_missing_columns
DecoPath/DecoPath
1
python
def get_missing_columns(column_labels: set, df: pd.DataFrame) -> List: return [column for column in column_labels if (column not in df)]
def get_missing_columns(column_labels: set, df: pd.DataFrame) -> List: return [column for column in column_labels if (column not in df)]<|docstring|>Get missing column labels.<|endoftext|>
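A quick illustration of get_missing_columns; the column names are invented. The membership test `column not in df` checks the DataFrame's columns, which is what makes the one-liner work.

import pandas as pd

df = pd.DataFrame({'gene_symbol': ['TP53'], 'log2fc': [1.2]})
required = {'gene_symbol', 'log2fc', 'padj'}

# same comprehension as the record above
missing = [column for column in required if column not in df]
print(missing)  # ['padj']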
893809846cd53a19c31877316768b03814bc7f06562fdf6d86d45d71854538eb
def get_equivalent_dicts(equivalent_rows): 'Return equivalent pathways.' equivalent_pathways = defaultdict(list) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in equivalent_rows.values: equivalent_pathways[(source_db, source_id)].append((target_db, target_id)) equivalent_pathways[(target_db, target_id)].append((source_db, source_id)) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in equivalent_rows.values: if (len(equivalent_pathways[(source_db, source_id)]) != len(equivalent_pathways[(target_db, target_id)])): raise ValueError(f'{(source_db, source_id)} has {len(equivalent_pathways[(source_db, source_id)])} mappings; {(target_db, target_id)} has {len(equivalent_pathways[(target_db, target_id)])} mappings') return dict(equivalent_pathways)
Return equivalent pathways.
viewer/src/utils.py
get_equivalent_dicts
DecoPath/DecoPath
1
python
def get_equivalent_dicts(equivalent_rows): equivalent_pathways = defaultdict(list) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in equivalent_rows.values: equivalent_pathways[(source_db, source_id)].append((target_db, target_id)) equivalent_pathways[(target_db, target_id)].append((source_db, source_id)) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in equivalent_rows.values: if (len(equivalent_pathways[(source_db, source_id)]) != len(equivalent_pathways[(target_db, target_id)])): raise ValueError(f'{(source_db, source_id)} has {len(equivalent_pathways[(source_db, source_id)])} mappings; {(target_db, target_id)} has {len(equivalent_pathways[(target_db, target_id)])} mappings') return dict(equivalent_pathways)
def get_equivalent_dicts(equivalent_rows): equivalent_pathways = defaultdict(list) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in equivalent_rows.values: equivalent_pathways[(source_db, source_id)].append((target_db, target_id)) equivalent_pathways[(target_db, target_id)].append((source_db, source_id)) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in equivalent_rows.values: if (len(equivalent_pathways[(source_db, source_id)]) != len(equivalent_pathways[(target_db, target_id)])): raise ValueError(f'{(source_db, source_id)} has {len(equivalent_pathways[(source_db, source_id)])} mappings; {(target_db, target_id)} has {len(equivalent_pathways[(target_db, target_id)])} mappings') return dict(equivalent_pathways)<|docstring|>Return equivalent pathways.<|endoftext|>
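The symmetric insertion in get_equivalent_dicts is easiest to see on a one-row example; the pathway identifiers below are invented.

from collections import defaultdict
import pandas as pd

# One invented equivalence row: a KEGG pathway and a Reactome counterpart.
rows = pd.DataFrame(
    [['kegg', 'hsa00010', 'Glycolysis', 'equivalentTo',
      'reactome', 'R-HSA-70171', 'Glycolysis']],
    columns=['source_db', 'source_id', 'source_name', 'mapping',
             'target_db', 'target_id', 'target_name'])

equivalent = defaultdict(list)
for src_db, src_id, _, _, tgt_db, tgt_id, _ in rows.values:
    # each mapping is stored in both directions, as in the record above
    equivalent[(src_db, src_id)].append((tgt_db, tgt_id))
    equivalent[(tgt_db, tgt_id)].append((src_db, src_id))

print(dict(equivalent))
# {('kegg', 'hsa00010'): [('reactome', 'R-HSA-70171')],
#  ('reactome', 'R-HSA-70171'): [('kegg', 'hsa00010')]}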
72ce83d2870e9e804301ce4c259f157088839cb0b6c1d975c01743c25b0d4d42
def _check_duplicates(df): 'Check quality of df.' hierarchy_df = df[(df[MAPPING_TYPE] == IS_PART_OF)] duplicates_hierarchy = [df.iloc[i] for (i, duplicated) in hierarchy_df[SOURCE_ID].duplicated().items() if duplicated] if duplicates_hierarchy: raise ValueError(f'Duplicate hierarchy: {duplicates_hierarchy}')
Check quality of df.
viewer/src/utils.py
_check_duplicates
DecoPath/DecoPath
1
python
def _check_duplicates(df): hierarchy_df = df[(df[MAPPING_TYPE] == IS_PART_OF)] duplicates_hierarchy = [df.iloc[i] for (i, duplicated) in hierarchy_df[SOURCE_ID].duplicated().items() if duplicated] if duplicates_hierarchy: raise ValueError(f'Duplicate hierarchy: {duplicates_hierarchy}')
def _check_duplicates(df): hierarchy_df = df[(df[MAPPING_TYPE] == IS_PART_OF)] duplicates_hierarchy = [df.iloc[i] for (i, duplicated) in hierarchy_df[SOURCE_ID].duplicated().items() if duplicated] if duplicates_hierarchy: raise ValueError(f'Duplicate hierarchy: {duplicates_hierarchy}')<|docstring|>Check quality of df.<|endoftext|>
31c94a74fa826edb120c1295379e4a774fe389c6d5c995eee67890fd32e1a831
def _add_gsea_cmap(nodes, node_to_score, fdr_dict, significance_value): 'Return dictionary with the nodes and their corresponding normalized colors.' cmap = matplotlib.colors.LinearSegmentedColormap.from_list('DecoPath ColorMap', COLORMAP_VALUES) norm = matplotlib.colors.Normalize(min(node_to_score.items(), key=operator.itemgetter(1))[1], max(node_to_score.items(), key=operator.itemgetter(1))[1]) color_map = {} for node_id in nodes: if (node_id in node_to_score): if (fdr_dict[node_id] < significance_value): color_map[node_id] = matplotlib.colors.rgb2hex(cmap(norm(float(node_to_score[node_id])))) else: color_map[node_id] = '#b4b4b4' else: color_map[node_id] = '#dadada' return color_map
Return dictionary with the nodes and their corresponding normalized colors.
viewer/src/utils.py
_add_gsea_cmap
DecoPath/DecoPath
1
python
def _add_gsea_cmap(nodes, node_to_score, fdr_dict, significance_value): cmap = matplotlib.colors.LinearSegmentedColormap.from_list('DecoPath ColorMap', COLORMAP_VALUES) norm = matplotlib.colors.Normalize(min(node_to_score.items(), key=operator.itemgetter(1))[1], max(node_to_score.items(), key=operator.itemgetter(1))[1]) color_map = {} for node_id in nodes: if (node_id in node_to_score): if (fdr_dict[node_id] < significance_value): color_map[node_id] = matplotlib.colors.rgb2hex(cmap(norm(float(node_to_score[node_id])))) else: color_map[node_id] = '#b4b4b4' else: color_map[node_id] = '#dadada' return color_map
def _add_gsea_cmap(nodes, node_to_score, fdr_dict, significance_value): cmap = matplotlib.colors.LinearSegmentedColormap.from_list('DecoPath ColorMap', COLORMAP_VALUES) norm = matplotlib.colors.Normalize(min(node_to_score.items(), key=operator.itemgetter(1))[1], max(node_to_score.items(), key=operator.itemgetter(1))[1]) color_map = {} for node_id in nodes: if (node_id in node_to_score): if (fdr_dict[node_id] < significance_value): color_map[node_id] = matplotlib.colors.rgb2hex(cmap(norm(float(node_to_score[node_id])))) else: color_map[node_id] = '#b4b4b4' else: color_map[node_id] = '#dadada' return color_map<|docstring|>Return dictionary with the nodes and their corresponding normalized colors.<|endoftext|>
568431f30ad740e9117d98bb715d8069553403761a3dda00887ef3836ec956dd
def _add_pvalue_cmap(nodes, node_to_score, significance_value): 'Return dictionary with the nodes and their corresponding normalized colors according to p value.' cmap = matplotlib.cm.get_cmap('Reds').reversed() norm = matplotlib.colors.Normalize((- 0.01), 0.05) color_map = {} for node_id in nodes: if ((node_id in node_to_score) and (node_to_score[node_id] < significance_value)): color_map[node_id] = matplotlib.colors.rgb2hex(cmap(norm(float(node_to_score[node_id])))) else: color_map[node_id] = '#94989c' return color_map
Return dictionary with the nodes and their corresponding normalized colors according to p value.
viewer/src/utils.py
_add_pvalue_cmap
DecoPath/DecoPath
1
python
def _add_pvalue_cmap(nodes, node_to_score, significance_value): cmap = matplotlib.cm.get_cmap('Reds').reversed() norm = matplotlib.colors.Normalize((- 0.01), 0.05) color_map = {} for node_id in nodes: if ((node_id in node_to_score) and (node_to_score[node_id] < significance_value)): color_map[node_id] = matplotlib.colors.rgb2hex(cmap(norm(float(node_to_score[node_id])))) else: color_map[node_id] = '#94989c' return color_map
def _add_pvalue_cmap(nodes, node_to_score, significance_value): cmap = matplotlib.cm.get_cmap('Reds').reversed() norm = matplotlib.colors.Normalize((- 0.01), 0.05) color_map = {} for node_id in nodes: if ((node_id in node_to_score) and (node_to_score[node_id] < significance_value)): color_map[node_id] = matplotlib.colors.rgb2hex(cmap(norm(float(node_to_score[node_id])))) else: color_map[node_id] = '#94989c' return color_map<|docstring|>Return dictionary with the nodes and their corresponding normalized colors according to p value.<|endoftext|>
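Both colour helpers above boil down to matplotlib's Normalize plus rgb2hex. This sketch reproduces the p-value variant on invented scores; it uses the matplotlib.colormaps registry where the record uses the older matplotlib.cm.get_cmap spelling, and the COLORMAP_VALUES constant from the GSEA variant is not needed here.

import matplotlib
import matplotlib.colors

node_to_score = {'hsa00010': 0.001, 'hsa04110': 0.2}  # invented p-values
significance_value = 0.05

cmap = matplotlib.colormaps['Reds'].reversed()  # darker red = smaller p
norm = matplotlib.colors.Normalize(-0.01, 0.05)

color_map = {}
for node_id in ['hsa00010', 'hsa04110', 'unscored']:
    score = node_to_score.get(node_id)
    if score is not None and score < significance_value:
        color_map[node_id] = matplotlib.colors.rgb2hex(cmap(norm(score)))
    else:
        color_map[node_id] = '#94989c'  # grey: non-significant or unscored

print(color_map['hsa04110'])  # '#94989c'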
ee774bdcfeebd6c95e877a0ea693de9197cceb3717a0b3d7737210afa41d8751
def _label_tree(network, id_to_database, id_to_name): 'Add attributes to the nodes in the tree.' nx.set_node_attributes(network, id_to_database, 'database') nx.set_node_attributes(network, {node_id: (id_to_name[node_id] if (node_id in id_to_name) else '') for node_id in network.nodes()}, 'name') mapping = {node_name: node_name.replace('path:', '') for node_name in network if ('path:' in node_name)} nx.relabel_nodes(network, mapping, copy=False)
Add attributes to the nodes in the tree.
viewer/src/utils.py
_label_tree
DecoPath/DecoPath
1
python
def _label_tree(network, id_to_database, id_to_name): nx.set_node_attributes(network, id_to_database, 'database') nx.set_node_attributes(network, {node_id: (id_to_name[node_id] if (node_id in id_to_name) else '') for node_id in network.nodes()}, 'name') mapping = {node_name: node_name.replace('path:', '') for node_name in network if ('path:' in node_name)} nx.relabel_nodes(network, mapping, copy=False)
def _label_tree(network, id_to_database, id_to_name): nx.set_node_attributes(network, id_to_database, 'database') nx.set_node_attributes(network, {node_id: (id_to_name[node_id] if (node_id in id_to_name) else '') for node_id in network.nodes()}, 'name') mapping = {node_name: node_name.replace('path:', '') for node_name in network if ('path:' in node_name)} nx.relabel_nodes(network, mapping, copy=False)<|docstring|>Add attributes to the nodes in the tree.<|endoftext|>
fa9c942ce9dc31f660a311e45b9faedadeb9f56aa03e91ec613c2b113093c764
def parse_hierarchy_excel(url): 'Parse hierarchy file.' xls = pd.ExcelFile(DECOPATH_ONTOLOGY) root_node = 'SuperPathway' tree_network = DiGraph() id_to_database = {} id_to_name = {} for sheet_name in xls.sheet_names: if (sheet_name == 'equivalence_same_db'): continue df = pd.read_excel(io=xls, sheet_name=sheet_name, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str) equivalent_pathways_dict = get_equivalent_dicts(df[(df[MAPPING_TYPE] == EQUIVALENT_TO)]) _check_duplicates(df) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in df.values: if ((mapping == EQUIVALENT_TO) or pd.isna(mapping)): continue id_to_name[target_id] = target_name id_to_database[target_id] = target_db id_to_name[source_id] = source_name id_to_database[source_id] = source_db 'Logic to generate the tree' if (mapping == 'SuperPathway'): tree_network.add_edge(root_node, target_id) elif (mapping == IS_PART_OF): tree_network.add_edge(target_id, source_id) else: raise ValueError(f'invalid {mapping}') _label_tree(tree_network, id_to_database, id_to_name) return (tree_data(tree_network, root_node), tree_network, equivalent_pathways_dict, root_node)
Parse hierarchy file.
viewer/src/utils.py
parse_hierarchy_excel
DecoPath/DecoPath
1
python
def parse_hierarchy_excel(url): xls = pd.ExcelFile(DECOPATH_ONTOLOGY) root_node = 'SuperPathway' tree_network = DiGraph() id_to_database = {} id_to_name = {} for sheet_name in xls.sheet_names: if (sheet_name == 'equivalence_same_db'): continue df = pd.read_excel(io=xls, sheet_name=sheet_name, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str) equivalent_pathways_dict = get_equivalent_dicts(df[(df[MAPPING_TYPE] == EQUIVALENT_TO)]) _check_duplicates(df) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in df.values: if ((mapping == EQUIVALENT_TO) or pd.isna(mapping)): continue id_to_name[target_id] = target_name id_to_database[target_id] = target_db id_to_name[source_id] = source_name id_to_database[source_id] = source_db 'Logic to generate the tree' if (mapping == 'SuperPathway'): tree_network.add_edge(root_node, target_id) elif (mapping == IS_PART_OF): tree_network.add_edge(target_id, source_id) else: raise ValueError(f'invalid {mapping}') _label_tree(tree_network, id_to_database, id_to_name) return (tree_data(tree_network, root_node), tree_network, equivalent_pathways_dict, root_node)
def parse_hierarchy_excel(url): xls = pd.ExcelFile(DECOPATH_ONTOLOGY) root_node = 'SuperPathway' tree_network = DiGraph() id_to_database = {} id_to_name = {} for sheet_name in xls.sheet_names: if (sheet_name == 'equivalence_same_db'): continue df = pd.read_excel(io=xls, sheet_name=sheet_name, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str) equivalent_pathways_dict = get_equivalent_dicts(df[(df[MAPPING_TYPE] == EQUIVALENT_TO)]) _check_duplicates(df) for (source_db, source_id, source_name, mapping, target_db, target_id, target_name) in df.values: if ((mapping == EQUIVALENT_TO) or pd.isna(mapping)): continue id_to_name[target_id] = target_name id_to_database[target_id] = target_db id_to_name[source_id] = source_name id_to_database[source_id] = source_db 'Logic to generate the tree' if (mapping == 'SuperPathway'): tree_network.add_edge(root_node, target_id) elif (mapping == IS_PART_OF): tree_network.add_edge(target_id, source_id) else: raise ValueError(f'invalid {mapping}') _label_tree(tree_network, id_to_database, id_to_name) return (tree_data(tree_network, root_node), tree_network, equivalent_pathways_dict, root_node)<|docstring|>Parse hierarchy file.<|endoftext|>
2f46ff20163e633cbd8c55a29d71844a0ab9577dea68253ffa70a401de0ad121
def map_results_to_hierarchy(network_hierarchy, root_node, results: List[Tuple[(str, str, str, str, str)]], enrichment_method: str, significance_value: float) -> Dict[(Any, Any)]: 'Map results of GSEA experiment to hierarchy.\n\n :param network_hierarchy: network representing the hierarchy\n :param root_node: hierarchy root\n :param results: results of GSEA experiment\n :param enrichment_method: enrichment method\n :return: hierarchy with mapped results\n ' geneset_size_mapping = {} node_to_score = {} fdr_dict = {} for (db, pathway_id, score, fdr, geneset_size) in results: if (pathway_id not in network_hierarchy): continue if (enrichment_method == ORA): node_to_score[pathway_id] = fdr elif ((enrichment_method == GSEA) or (enrichment_method == PRERANK)): node_to_score[pathway_id] = score fdr_dict[pathway_id] = fdr else: logger.warning(f'unknown enrichment method {enrichment_method}') raise ValueError() geneset_size_mapping[pathway_id] = geneset_size if (not node_to_score): raise ValueError('could not map to any pathway in the hierarchy') elif all(((i is None) for i in node_to_score.values())): logger.warning('error with pathway scores') raise ValueError() nx.set_node_attributes(network_hierarchy, geneset_size_mapping, 'geneset_size') nx.set_node_attributes(network_hierarchy, node_to_score, 'direction') if (enrichment_method == ORA): nx.set_node_attributes(network_hierarchy, _add_pvalue_cmap(network_hierarchy.nodes(), node_to_score, significance_value), 'color') else: nx.set_node_attributes(network_hierarchy, fdr_dict, 'fdr') nx.set_node_attributes(network_hierarchy, _add_gsea_cmap(network_hierarchy.nodes(), node_to_score, fdr_dict, significance_value), 'color') return tree_data(network_hierarchy, root_node)
Map results of GSEA experiment to hierarchy. :param network_hierarchy: network representing the hierarchy :param root_node: hierarchy root :param results: results of GSEA experiment :param enrichment_method: enrichment method :return: hierarchy with mapped results
viewer/src/utils.py
map_results_to_hierarchy
DecoPath/DecoPath
1
python
def map_results_to_hierarchy(network_hierarchy, root_node, results: List[Tuple[(str, str, str, str, str)]], enrichment_method: str, significance_value: float) -> Dict[(Any, Any)]: 'Map results of GSEA experiment to hierarchy.\n\n :param network_hierarchy: network representing the hierarchy\n :param root_node: hierarchy root\n :param results: results of GSEA experiment\n :param enrichment_method: enrichment method\n :return: hierarchy with mapped results\n ' geneset_size_mapping = {} node_to_score = {} fdr_dict = {} for (db, pathway_id, score, fdr, geneset_size) in results: if (pathway_id not in network_hierarchy): continue if (enrichment_method == ORA): node_to_score[pathway_id] = fdr elif ((enrichment_method == GSEA) or (enrichment_method == PRERANK)): node_to_score[pathway_id] = score fdr_dict[pathway_id] = fdr else: logger.warning(f'unknown enrichment method {enrichment_method}') raise ValueError() geneset_size_mapping[pathway_id] = geneset_size if (not node_to_score): raise ValueError('could not map to any pathway in the hierarchy') elif all(((i is None) for i in node_to_score.values())): logger.warning('error with pathway scores') raise ValueError() nx.set_node_attributes(network_hierarchy, geneset_size_mapping, 'geneset_size') nx.set_node_attributes(network_hierarchy, node_to_score, 'direction') if (enrichment_method == ORA): nx.set_node_attributes(network_hierarchy, _add_pvalue_cmap(network_hierarchy.nodes(), node_to_score, significance_value), 'color') else: nx.set_node_attributes(network_hierarchy, fdr_dict, 'fdr') nx.set_node_attributes(network_hierarchy, _add_gsea_cmap(network_hierarchy.nodes(), node_to_score, fdr_dict, significance_value), 'color') return tree_data(network_hierarchy, root_node)
def map_results_to_hierarchy(network_hierarchy, root_node, results: List[Tuple[(str, str, str, str, str)]], enrichment_method: str, significance_value: float) -> Dict[(Any, Any)]: 'Map results of GSEA experiment to hierarchy.\n\n :param network_hierarchy: network representing the hierarchy\n :param root_node: hierarchy root\n :param results: results of GSEA experiment\n :param enrichment_method: enrichment method\n :return: hierarchy with mapped results\n ' geneset_size_mapping = {} node_to_score = {} fdr_dict = {} for (db, pathway_id, score, fdr, geneset_size) in results: if (pathway_id not in network_hierarchy): continue if (enrichment_method == ORA): node_to_score[pathway_id] = fdr elif ((enrichment_method == GSEA) or (enrichment_method == PRERANK)): node_to_score[pathway_id] = score fdr_dict[pathway_id] = fdr else: logger.warning(f'unknown enrichment method {enrichment_method}') raise ValueError() geneset_size_mapping[pathway_id] = geneset_size if (not node_to_score): raise ValueError('could not map to any pathway in the hierarchy') elif all(((i is None) for i in node_to_score.values())): logger.warning('error with pathway scores') raise ValueError() nx.set_node_attributes(network_hierarchy, geneset_size_mapping, 'geneset_size') nx.set_node_attributes(network_hierarchy, node_to_score, 'direction') if (enrichment_method == ORA): nx.set_node_attributes(network_hierarchy, _add_pvalue_cmap(network_hierarchy.nodes(), node_to_score, significance_value), 'color') else: nx.set_node_attributes(network_hierarchy, fdr_dict, 'fdr') nx.set_node_attributes(network_hierarchy, _add_gsea_cmap(network_hierarchy.nodes(), node_to_score, fdr_dict, significance_value), 'color') return tree_data(network_hierarchy, root_node)<|docstring|>Map results of GSEA experiment to hierarchy. :param network_hierarchy: network representing the hierarchy :param root_node: hierarchy root :param results: results of GSEA experiment :param enrichment_method: enrichment method :return: hierarchy with mapped results<|endoftext|>
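A reduced sketch of the mapping step above: build a two-level hierarchy, attach invented ORA q-values and colours as node attributes, then serialise with networkx's tree_data, as the record does after colouring. Identifiers and scores are made up.

import networkx as nx
from networkx.readwrite.json_graph import tree_data

hierarchy = nx.DiGraph()
hierarchy.add_edge('SuperPathway', 'DC_GLYCOLYSIS')  # root -> super pathway
hierarchy.add_edge('DC_GLYCOLYSIS', 'hsa00010')      # super pathway -> member

node_to_score = {'DC_GLYCOLYSIS': 0.01, 'hsa00010': 0.04}  # invented q-values

nx.set_node_attributes(hierarchy, node_to_score, 'direction')
nx.set_node_attributes(
    hierarchy,
    {n: ('#a50f15' if node_to_score.get(n, 1.0) < 0.05 else '#94989c')
     for n in hierarchy},
    'color')

print(tree_data(hierarchy, 'SuperPathway'))
# nested dict: {'id': 'SuperPathway', 'children': [{'id': 'DC_GLYCOLYSIS', ...}]}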
72031f4735ca9b873889ad5c573606712d097691e012d1c12d51d206d6a63055
def export_geneset(geneset_dict, database, temp_file, gmt_file): 'Export gene set to gmt file format.' df = pd.DataFrame.from_dict(data=geneset_dict, orient='index') df['Resource'] = pd.Series(database, index=df.index) df = df[(['Resource'] + [col for col in df.columns if (col != 'Resource')])] df.to_csv(temp_file, header=False, sep='\t') _export_geneset_to_gmt(temp_file, gmt_file) os.remove(temp_file)
Export gene set to gmt file format.
viewer/src/utils.py
export_geneset
DecoPath/DecoPath
1
python
def export_geneset(geneset_dict, database, temp_file, gmt_file): df = pd.DataFrame.from_dict(data=geneset_dict, orient='index') df['Resource'] = pd.Series(database, index=df.index) df = df[(['Resource'] + [col for col in df.columns if (col != 'Resource')])] df.to_csv(temp_file, header=False, sep='\t') _export_geneset_to_gmt(temp_file, gmt_file) os.remove(temp_file)
def export_geneset(geneset_dict, database, temp_file, gmt_file): df = pd.DataFrame.from_dict(data=geneset_dict, orient='index') df['Resource'] = pd.Series(database, index=df.index) df = df[(['Resource'] + [col for col in df.columns if (col != 'Resource')])] df.to_csv(temp_file, header=False, sep='\t') _export_geneset_to_gmt(temp_file, gmt_file) os.remove(temp_file)<|docstring|>Export gene set to gmt file format.<|endoftext|>
a82ade716434f997429b86bd7bfba43636c90f9013028b54f77748c06b3e7408
def _export_geneset_to_gmt(geneset_file, outfile): 'Export gene set to gmt file format.' with open(geneset_file, 'r') as file: with open(outfile, 'w') as f: for line in file: line = line.rstrip() f.write((line + '\n')) f.close()
Export gene set to gmt file format.
viewer/src/utils.py
_export_geneset_to_gmt
DecoPath/DecoPath
1
python
def _export_geneset_to_gmt(geneset_file, outfile): with open(geneset_file, 'r') as file: with open(outfile, 'w') as f: for line in file: line = line.rstrip() f.write((line + '\n')) f.close()
def _export_geneset_to_gmt(geneset_file, outfile): with open(geneset_file, 'r') as file: with open(outfile, 'w') as f: for line in file: line = line.rstrip() f.write((line + '\n')) f.close()<|docstring|>Export gene set to gmt file format.<|endoftext|>
78a48253fa6023c105a8981efe27ab69ff426b1566934768188020e8656bb3d4
def get_equivalent_pathway_dc_ids(decopath_ontology): 'Parse DecoPath ontology file and get DC IDs for equivalent super pathways.' sheets_dict = pd.read_excel(io=decopath_ontology, engine='openpyxl', sheet_name=None, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str, index_col=None) sheets_dict.pop('equivalence_same_db', None) frames = [sheet for (name, sheet) in sheets_dict.items()] df = pd.concat(frames) df[SOURCE_ID] = df[SOURCE_ID].str.replace('path:', '') df[TARGET_ID] = df[TARGET_ID].str.replace('path:', '') equivalence_df = df.loc[(df[MAPPING_TYPE] == EQUIVALENT_TO)] equivalent_pathways = (equivalence_df[SOURCE_ID].to_list() + equivalence_df[TARGET_ID].to_list()) id_to_dc_id = pd.DataFrame.from_dict({source_id: {'pathway_id': source_id, 'dc_id': target_id, 'dc_name': target_name} for (source_db, source_id, source_name, mapping_type, target_db, target_id, target_name) in df.values if (mapping_type == IS_PART_OF) if (source_id in equivalent_pathways)}, orient='index') id_to_dc_id = id_to_dc_id.reset_index(drop=True) return (id_to_dc_id, df)
Parse DecoPath ontology file and get DC IDs for equivalent super pathways.
viewer/src/utils.py
get_equivalent_pathway_dc_ids
DecoPath/DecoPath
1
python
def get_equivalent_pathway_dc_ids(decopath_ontology): sheets_dict = pd.read_excel(io=decopath_ontology, engine='openpyxl', sheet_name=None, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str, index_col=None) sheets_dict.pop('equivalence_same_db', None) frames = [sheet for (name, sheet) in sheets_dict.items()] df = pd.concat(frames) df[SOURCE_ID] = df[SOURCE_ID].str.replace('path:', '') df[TARGET_ID] = df[TARGET_ID].str.replace('path:', '') equivalence_df = df.loc[(df[MAPPING_TYPE] == EQUIVALENT_TO)] equivalent_pathways = (equivalence_df[SOURCE_ID].to_list() + equivalence_df[TARGET_ID].to_list()) id_to_dc_id = pd.DataFrame.from_dict({source_id: {'pathway_id': source_id, 'dc_id': target_id, 'dc_name': target_name} for (source_db, source_id, source_name, mapping_type, target_db, target_id, target_name) in df.values if (mapping_type == IS_PART_OF) if (source_id in equivalent_pathways)}, orient='index') id_to_dc_id = id_to_dc_id.reset_index(drop=True) return (id_to_dc_id, df)
def get_equivalent_pathway_dc_ids(decopath_ontology): sheets_dict = pd.read_excel(io=decopath_ontology, engine='openpyxl', sheet_name=None, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str, index_col=None) sheets_dict.pop('equivalence_same_db', None) frames = [sheet for (name, sheet) in sheets_dict.items()] df = pd.concat(frames) df[SOURCE_ID] = df[SOURCE_ID].str.replace('path:', '') df[TARGET_ID] = df[TARGET_ID].str.replace('path:', '') equivalence_df = df.loc[(df[MAPPING_TYPE] == EQUIVALENT_TO)] equivalent_pathways = (equivalence_df[SOURCE_ID].to_list() + equivalence_df[TARGET_ID].to_list()) id_to_dc_id = pd.DataFrame.from_dict({source_id: {'pathway_id': source_id, 'dc_id': target_id, 'dc_name': target_name} for (source_db, source_id, source_name, mapping_type, target_db, target_id, target_name) in df.values if (mapping_type == IS_PART_OF) if (source_id in equivalent_pathways)}, orient='index') id_to_dc_id = id_to_dc_id.reset_index(drop=True) return (id_to_dc_id, df)<|docstring|>Parse DecoPath ontology file and get DC IDs for equivalent super pathways.<|endoftext|>
ef9451b05787a5f319f7e1a23210c2759370b169a9b1e0aaed04a7413633b970
def get_dc_pathway_resources(decopath_ontology): 'Parse DecoPath ontology file and get source resources for DC super pathways.' sheets_dict = pd.read_excel(io=decopath_ontology, engine='openpyxl', sheet_name=None, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str, index_col=None) sheets_dict.pop('equivalence_same_db', None) frames = [sheet for (name, sheet) in sheets_dict.items()] df = pd.concat(frames) df[SOURCE_ID] = df[SOURCE_ID].str.replace('path:', '') df[TARGET_ID] = df[TARGET_ID].str.replace('path:', '') equivalence_df = df.loc[(df[MAPPING_TYPE] == EQUIVALENT_TO)] equivalent_pathways = (equivalence_df[SOURCE_ID].to_list() + equivalence_df[TARGET_ID].to_list()) dc_source_dict = defaultdict(set) dc_source_id_dict = defaultdict(set) for (source_db, source_id, source_name, mapping_type, target_db, target_id, target_name) in df.values: if (mapping_type == IS_PART_OF): if (source_id in equivalent_pathways): dc_source_dict[target_id].add(source_db) dc_source_id_dict[target_id].add(source_id) return (dc_source_dict, dc_source_id_dict)
Parse DecoPath ontology file and get source resources for DC super pathways.
viewer/src/utils.py
get_dc_pathway_resources
DecoPath/DecoPath
1
python
def get_dc_pathway_resources(decopath_ontology): sheets_dict = pd.read_excel(io=decopath_ontology, engine='openpyxl', sheet_name=None, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str, index_col=None) sheets_dict.pop('equivalence_same_db', None) frames = [sheet for (name, sheet) in sheets_dict.items()] df = pd.concat(frames) df[SOURCE_ID] = df[SOURCE_ID].str.replace('path:', '') df[TARGET_ID] = df[TARGET_ID].str.replace('path:', '') equivalence_df = df.loc[(df[MAPPING_TYPE] == EQUIVALENT_TO)] equivalent_pathways = (equivalence_df[SOURCE_ID].to_list() + equivalence_df[TARGET_ID].to_list()) dc_source_dict = defaultdict(set) dc_source_id_dict = defaultdict(set) for (source_db, source_id, source_name, mapping_type, target_db, target_id, target_name) in df.values: if (mapping_type == IS_PART_OF): if (source_id in equivalent_pathways): dc_source_dict[target_id].add(source_db) dc_source_id_dict[target_id].add(source_id) return (dc_source_dict, dc_source_id_dict)
def get_dc_pathway_resources(decopath_ontology): sheets_dict = pd.read_excel(io=decopath_ontology, engine='openpyxl', sheet_name=None, usecols=[SOURCE_RESOURCE, SOURCE_ID, SOURCE_NAME, MAPPING_TYPE, TARGET_RESOURCE, TARGET_ID, TARGET_NAME], dtype=str, index_col=None) sheets_dict.pop('equivalence_same_db', None) frames = [sheet for (name, sheet) in sheets_dict.items()] df = pd.concat(frames) df[SOURCE_ID] = df[SOURCE_ID].str.replace('path:', '') df[TARGET_ID] = df[TARGET_ID].str.replace('path:', '') equivalence_df = df.loc[(df[MAPPING_TYPE] == EQUIVALENT_TO)] equivalent_pathways = (equivalence_df[SOURCE_ID].to_list() + equivalence_df[TARGET_ID].to_list()) dc_source_dict = defaultdict(set) dc_source_id_dict = defaultdict(set) for (source_db, source_id, source_name, mapping_type, target_db, target_id, target_name) in df.values: if (mapping_type == IS_PART_OF): if (source_id in equivalent_pathways): dc_source_dict[target_id].add(source_db) dc_source_id_dict[target_id].add(source_id) return (dc_source_dict, dc_source_id_dict)<|docstring|>Parse DecoPath ontology file and get source resources for DC super pathways.<|endoftext|>
56519b1db746458ff850eebc3c5eee960af435937d0834a62512150652cf8791
def _get_gmt_dict(filename): 'Parse gmt files and get gene sets.' with open(filename, 'r') as f: content = [line.strip().split('\t') for line in f] return {pathway[0]: pathway[2:] for pathway in content}
Parse gmt files and get gene sets.
viewer/src/utils.py
_get_gmt_dict
DecoPath/DecoPath
1
python
def _get_gmt_dict(filename): with open(filename, 'r') as f: content = [line.strip().split('\t') for line in f] return {pathway[0]: pathway[2:] for pathway in content}
def _get_gmt_dict(filename): with open(filename, 'r') as f: content = [line.strip().split('\t') for line in f] return {pathway[0]: pathway[2:] for pathway in content}<|docstring|>Parse gmt files and get gene sets.<|endoftext|>
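The GMT convention the parser above relies on is one pathway per line: identifier, then a description or source column, then tab-separated genes, which is why it keeps pathway[2:]. A throwaway file makes the slicing explicit; the gene sets are invented.

import os
import tempfile

gmt_text = 'hsa00010\tKEGG\tHK1\tPFKM\tPKM\nhsa04110\tKEGG\tCDK1\tCCNB1\n'
with tempfile.NamedTemporaryFile('w', suffix='.gmt', delete=False) as f:
    f.write(gmt_text)
    path = f.name

with open(path) as handle:
    content = [line.strip().split('\t') for line in handle]
genesets = {row[0]: row[2:] for row in content}  # drop id and source columns

print(genesets['hsa00010'])  # ['HK1', 'PFKM', 'PKM']
os.remove(path)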
fd0f23f364e9144ff4c2e255bcfdd28939311ef64877e1752bbe2f4b1d6c5297
def get_decopath_genesets(decopath_ontology, gmt_dir: str): 'Generate DecoPath gene sets with super pathways.' concatenated_genesets_dict = {} dc_mapping = defaultdict(list) if (not os.path.isdir(gmt_dir)): make_geneset_dir() (super_pathway_mappings, ontology_df) = get_equivalent_pathway_dc_ids(decopath_ontology) gmt_files = [os.path.join(GMT_FILES_DIR, filename) for filename in os.listdir(gmt_dir) if filename.endswith('.gmt')] genesets = [_get_gmt_dict(file) for file in gmt_files] for geneset in genesets: concatenated_genesets_dict.update(geneset) for (pathway_id, dc_id, dc_name) in super_pathway_mappings.values: if (pathway_id in concatenated_genesets_dict): dc_mapping[dc_id].append(concatenated_genesets_dict[pathway_id]) return {pathway_id: {gene for sublist in geneset for gene in sublist} for (pathway_id, geneset) in dc_mapping.items()}
Generate DecoPath gene sets with super pathways.
viewer/src/utils.py
get_decopath_genesets
DecoPath/DecoPath
1
python
def get_decopath_genesets(decopath_ontology, gmt_dir: str): concatenated_genesets_dict = {} dc_mapping = defaultdict(list) if (not os.path.isdir(gmt_dir)): make_geneset_dir() (super_pathway_mappings, ontology_df) = get_equivalent_pathway_dc_ids(decopath_ontology) gmt_files = [os.path.join(GMT_FILES_DIR, filename) for filename in os.listdir(gmt_dir) if filename.endswith('.gmt')] genesets = [_get_gmt_dict(file) for file in gmt_files] for geneset in genesets: concatenated_genesets_dict.update(geneset) for (pathway_id, dc_id, dc_name) in super_pathway_mappings.values: if (pathway_id in concatenated_genesets_dict): dc_mapping[dc_id].append(concatenated_genesets_dict[pathway_id]) return {pathway_id: {gene for sublist in geneset for gene in sublist} for (pathway_id, geneset) in dc_mapping.items()}
def get_decopath_genesets(decopath_ontology, gmt_dir: str): concatenated_genesets_dict = {} dc_mapping = defaultdict(list) if (not os.path.isdir(gmt_dir)): make_geneset_dir() (super_pathway_mappings, ontology_df) = get_equivalent_pathway_dc_ids(decopath_ontology) gmt_files = [os.path.join(GMT_FILES_DIR, filename) for filename in os.listdir(gmt_dir) if filename.endswith('.gmt')] genesets = [_get_gmt_dict(file) for file in gmt_files] for geneset in genesets: concatenated_genesets_dict.update(geneset) for (pathway_id, dc_id, dc_name) in super_pathway_mappings.values: if (pathway_id in concatenated_genesets_dict): dc_mapping[dc_id].append(concatenated_genesets_dict[pathway_id]) return {pathway_id: {gene for sublist in geneset for gene in sublist} for (pathway_id, geneset) in dc_mapping.items()}<|docstring|>Generate DecoPath gene sets with super pathways.<|endoftext|>
8f5fe7a69d5baa5ac4c36f4abcc04a149f1433cb363284c0ca6f130eb23ccf1b
def get_name_id_mapping(db_file, pathway, outfile): 'Create a database connection to the SQLite database specified by the db_file and get pathway name/ID mappings.' conn = sqlite3.connect(db_file) cur = conn.cursor() cur.execute(('SELECT * FROM ' + pathway)) rows = cur.fetchall() name_id_mappings = {identifier: name for (_, identifier, name) in rows} export_to_json(name_id_mappings, outfile)
Create a database connection to the SQLite database specified by the db_file and get pathway name/ID mappings.
viewer/src/utils.py
get_name_id_mapping
DecoPath/DecoPath
1
python
def get_name_id_mapping(db_file, pathway, outfile): conn = sqlite3.connect(db_file) cur = conn.cursor() cur.execute(('SELECT * FROM ' + pathway)) rows = cur.fetchall() name_id_mappings = {identifier: name for (_, identifier, name) in rows} export_to_json(name_id_mappings, outfile)
def get_name_id_mapping(db_file, pathway, outfile): conn = sqlite3.connect(db_file) cur = conn.cursor() cur.execute(('SELECT * FROM ' + pathway)) rows = cur.fetchall() name_id_mappings = {identifier: name for (_, identifier, name) in rows} export_to_json(name_id_mappings, outfile)<|docstring|>Create a database connection to the SQLite database specified by the db_file and get pathway name/ID mappings.<|endoftext|>
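A self-contained version of the SQLite lookup above, using an in-memory database; the table layout (rowid, identifier, name) is inferred from the record's tuple unpacking, and the project's export_to_json helper is replaced by a plain print.

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE kegg (idx INTEGER, identifier TEXT, name TEXT)')
cur.executemany('INSERT INTO kegg VALUES (?, ?, ?)',
                [(1, 'hsa00010', 'Glycolysis'), (2, 'hsa04110', 'Cell cycle')])

cur.execute('SELECT * FROM kegg')
# same unpacking as the record: the first column is ignored
name_id_mappings = {identifier: name for (_, identifier, name) in cur.fetchall()}
print(name_id_mappings)  # {'hsa00010': 'Glycolysis', 'hsa04110': 'Cell cycle'}
# the record then hands this dict to the project's export_to_json helper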
7c29cf330571e216d2b7f99d8c1797399851063951520c285fe2fd43a32a995b
def handle_zipfile_download(path: str) -> None: 'Download and extract zip file content.' r = requests.get(path) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall()
Download and extract zip file content.
viewer/src/utils.py
handle_zipfile_download
DecoPath/DecoPath
1
python
def handle_zipfile_download(path: str) -> None: r = requests.get(path) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall()
def handle_zipfile_download(path: str) -> None: r = requests.get(path) z = zipfile.ZipFile(io.BytesIO(r.content)) z.extractall()<|docstring|>Download and extract zip file content.<|endoftext|>
bd3f7b0f904588bad0a884b5e5a6681551e75a99d7906a86d576a7a7b2199a3a
def export_pathbank_name_id_mappings(outfile, pathbank_pathways_url: str=PATHBANK_PATHWAY_URL): 'Download PathBank pathway metadata and export as json.' logger.info('Downloading PathBank content.') handle_zipfile_download(pathbank_pathways_url) df = pd.read_csv(PATHBANK_PATHWAYS_FILE, sep=',') pathbank_mappings = pd.Series(df['Name'].values, index=df['PW ID']).to_dict() export_to_json(pathbank_mappings, outfile)
Download PathBank pathway metadata and export as json.
viewer/src/utils.py
export_pathbank_name_id_mappings
DecoPath/DecoPath
1
python
def export_pathbank_name_id_mappings(outfile, pathbank_pathways_url: str=PATHBANK_PATHWAY_URL): logger.info('Downloading PathBank content.') handle_zipfile_download(pathbank_pathways_url) df = pd.read_csv(PATHBANK_PATHWAYS_FILE, sep=',') pathbank_mappings = pd.Series(df['Name'].values, index=df['PW ID']).to_dict() export_to_json(pathbank_mappings, outfile)
def export_pathbank_name_id_mappings(outfile, pathbank_pathways_url: str=PATHBANK_PATHWAY_URL): logger.info('Downloading PathBank content.') handle_zipfile_download(pathbank_pathways_url) df = pd.read_csv(PATHBANK_PATHWAYS_FILE, sep=',') pathbank_mappings = pd.Series(df['Name'].values, index=df['PW ID']).to_dict() export_to_json(pathbank_mappings, outfile)<|docstring|>Download PathBank pathway metadata and export as json.<|endoftext|>
90504d1f4ecf61d58c8926bbcd49e33b5a3ef47048f38b2eb906eb5fa1d3f0cd
def new_file(self, *args, **kwargs): '\n Create the file object to append to as data is coming in.\n ' super().new_file(*args, **kwargs) self.file = TransientUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)
Create the file object to append to as data is coming in.
viewer/src/utils.py
new_file
DecoPath/DecoPath
1
python
def new_file(self, *args, **kwargs): '\n \n ' super().new_file(*args, **kwargs) self.file = TransientUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)
def new_file(self, *args, **kwargs): '\n \n ' super().new_file(*args, **kwargs) self.file = TransientUploadedFile(self.file_name, self.content_type, 0, self.charset, self.content_type_extra)<|docstring|>Create the file object to append to as data is coming in.<|endoftext|>
af7b83a4a07bbf82707bf6bc24a5a57c93eb0d73c7d8c2bd93a4b9a0108a156b
def transient_file_path(self): 'Return the full path of this file.' return self.file.name
Return the full path of this file.
viewer/src/utils.py
transient_file_path
DecoPath/DecoPath
1
python
def transient_file_path(self): return self.file.name
def transient_file_path(self): return self.file.name<|docstring|>Return the full path of this file.<|endoftext|>
6f26e611979c750541a74681d120595edb7a19d4a551f39cdad0c60053ae4254
def get_argument_parser() -> ArgumentParser: 'Create an ArgumentParser which will parse arguments from\n the command line parameters passed to this tool.\n\n :return: The argument parser\n ' usage = 'Parse an $UpCase:$Info file and display the output. v{}'.format(VERSION) arguments = ArgumentParser(description=usage) arguments.add_argument('-s', '--source', dest='source', action='store', required=True, help='The source $UpCase:$Info file to parse.') arguments.add_argument('-f', '--format', dest='format', action='store', choices=['text', 'json'], default='json', required=True, help='The output format.') arguments.add_argument('--logging', dest='logging', action='store', default='INFO', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'], help='Logging level [default=INFO]') return arguments
Create an ArgumentParser which will parse arguments from the command line parameters passed to this tool. :return: The argument parser
scripts/upcaseinfo_parser.py
get_argument_parser
forensicmatt/upcaseinfo-py
1
python
def get_argument_parser() -> ArgumentParser: 'Create an ArgumentParser which will parse arguments from\n the command line parameters passed to this tool.\n\n :return: The argument parser\n ' usage = 'Parse an $UpCase:$Info file and display the output. v{}'.format(VERSION) arguments = ArgumentParser(description=usage) arguments.add_argument('-s', '--source', dest='source', action='store', required=True, help='The source $UpCase:$Info file to parse.') arguments.add_argument('-f', '--format', dest='format', action='store', choices=['text', 'json'], default='json', required=True, help='The output format.') arguments.add_argument('--logging', dest='logging', action='store', default='INFO', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'], help='Logging level [default=INFO]') return arguments
def get_argument_parser() -> ArgumentParser: 'Create an ArgumentParser which will parse arguments from\n the command line parameters passed to this tool.\n\n :return: The argument parser\n ' usage = 'Parse an $UpCase:$Info file and display the output. v{}'.format(VERSION) arguments = ArgumentParser(description=usage) arguments.add_argument('-s', '--source', dest='source', action='store', required=True, help='The source $UpCase:$Info file to parse.') arguments.add_argument('-f', '--format', dest='format', action='store', choices=['text', 'json'], default='json', required=True, help='The output format.') arguments.add_argument('--logging', dest='logging', action='store', default='INFO', choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'], help='Logging level [default=INFO]') return arguments<|docstring|>Create an ArgumentParser which will parse arguments from the command line parameters passed to this tool. :return: The argument parser<|endoftext|>
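Exercising the parser above with an explicit argv shows the defaults; the source path is invented and the parser below is a trimmed local copy, without the version banner or help strings.

from argparse import ArgumentParser

def get_argument_parser() -> ArgumentParser:
    # trimmed copy of the record above
    p = ArgumentParser()
    p.add_argument('-s', '--source', required=True)
    p.add_argument('-f', '--format', choices=['text', 'json'],
                   default='json', required=True)
    p.add_argument('--logging', default='INFO',
                   choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'])
    return p

args = get_argument_parser().parse_args(['-s', '$UpCase_Info.bin', '-f', 'json'])
print(args.source, args.format, args.logging)  # $UpCase_Info.bin json INFO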
28adb0e700d904402cba41f723db83a64e4dd4c9c3a055a513e942c11dda1f9e
def set_logging_level(logging_level: str): "Set the logging level to use.\n\n :param logging_level: The logging level's variable name as used in the logging lib.\n :return:\n " logging.basicConfig(level=getattr(logging, logging_level))
Set the logging level to use. :param logging_level: The logging level's variable name as used in the logging lib. :return:
scripts/upcaseinfo_parser.py
set_logging_level
forensicmatt/upcaseinfo-py
1
python
def set_logging_level(logging_level: str): "Set the logging level to use.\n\n :param logging_level: The logging level's variable name as used in the logging lib.\n :return:\n " logging.basicConfig(level=getattr(logging, logging_level))
def set_logging_level(logging_level: str): "Set the logging level to use.\n\n :param logging_level: The logging level's variable name as used in the logging lib.\n :return:\n " logging.basicConfig(level=getattr(logging, logging_level))<|docstring|>Set the logging level to use. :param logging_level: The logging level's variable name as used in the logging lib. :return:<|endoftext|>
eccc6fbda01fa7b138dc6c98f16c8afc4d8ea94660b9a1df94cd16d2a1a2b662
@register_make_test_function() def make_space_to_batch_nd_tests(options): 'Make a set of tests to do space_to_batch_nd.' test_parameters = [{'dtype': [tf.int32, tf.int64, tf.float32], 'input_shape': [[1, 2, 2, 3], [2, 2, 4, 1]], 'block_shape': [[1, 3], [2, 2]], 'paddings': [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[2, 3, 7, 3]], 'block_shape': [[1, 3], [2, 2]], 'paddings': [[[0, 0], [2, 0]], [[1, 0], [1, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[1, 4, 4, 4, 1, 1]], 'block_shape': [[2, 2, 2]], 'paddings': [[[0, 0], [0, 0], [0, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[1, 4, 4]], 'block_shape': [[2]], 'paddings': [[[0, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}] def build_graph(parameters): 'Build a space_to_batch graph given `parameters`.' input_tensor = tf.compat.v1.placeholder(dtype=parameters['dtype'], name='input', shape=parameters['input_shape']) input_tensors = [input_tensor] if parameters['constant_block_shape']: block_shape = parameters['block_shape'] else: shape = [len(parameters['block_shape'])] block_shape = tf.compat.v1.placeholder(dtype=tf.int32, name='shape', shape=shape) input_tensors.append(block_shape) if parameters['constant_paddings']: paddings = parameters['paddings'] else: shape = [len(parameters['paddings']), 2] paddings = tf.compat.v1.placeholder(dtype=tf.int32, name='paddings', shape=shape) input_tensors.append(paddings) out = tf.space_to_batch_nd(input_tensor, block_shape, paddings) return (input_tensors, [out]) def build_inputs(parameters, sess, inputs, outputs): values = [create_tensor_data(parameters['dtype'], parameters['input_shape'])] if (not parameters['constant_block_shape']): values.append(np.array(parameters['block_shape'])) if (not parameters['constant_paddings']): values.append(np.array(parameters['paddings'])) return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))) if options.use_experimental_converter: test_parameters = [test_parameters[0], test_parameters[1], test_parameters[3]] make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=56)
Make a set of tests to do space_to_batch_nd.
tensorflow/lite/testing/op_tests/space_to_batch_nd.py
make_space_to_batch_nd_tests
antlad/tensorflow
190993
python
@register_make_test_function() def make_space_to_batch_nd_tests(options): test_parameters = [{'dtype': [tf.int32, tf.int64, tf.float32], 'input_shape': [[1, 2, 2, 3], [2, 2, 4, 1]], 'block_shape': [[1, 3], [2, 2]], 'paddings': [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[2, 3, 7, 3]], 'block_shape': [[1, 3], [2, 2]], 'paddings': [[[0, 0], [2, 0]], [[1, 0], [1, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[1, 4, 4, 4, 1, 1]], 'block_shape': [[2, 2, 2]], 'paddings': [[[0, 0], [0, 0], [0, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[1, 4, 4]], 'block_shape': [[2]], 'paddings': [[[0, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}] def build_graph(parameters): 'Build a space_to_batch graph given `parameters`.' input_tensor = tf.compat.v1.placeholder(dtype=parameters['dtype'], name='input', shape=parameters['input_shape']) input_tensors = [input_tensor] if parameters['constant_block_shape']: block_shape = parameters['block_shape'] else: shape = [len(parameters['block_shape'])] block_shape = tf.compat.v1.placeholder(dtype=tf.int32, name='shape', shape=shape) input_tensors.append(block_shape) if parameters['constant_paddings']: paddings = parameters['paddings'] else: shape = [len(parameters['paddings']), 2] paddings = tf.compat.v1.placeholder(dtype=tf.int32, name='paddings', shape=shape) input_tensors.append(paddings) out = tf.space_to_batch_nd(input_tensor, block_shape, paddings) return (input_tensors, [out]) def build_inputs(parameters, sess, inputs, outputs): values = [create_tensor_data(parameters['dtype'], parameters['input_shape'])] if (not parameters['constant_block_shape']): values.append(np.array(parameters['block_shape'])) if (not parameters['constant_paddings']): values.append(np.array(parameters['paddings'])) return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))) if options.use_experimental_converter: test_parameters = [test_parameters[0], test_parameters[1], test_parameters[3]] make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=56)
@register_make_test_function() def make_space_to_batch_nd_tests(options): test_parameters = [{'dtype': [tf.int32, tf.int64, tf.float32], 'input_shape': [[1, 2, 2, 3], [2, 2, 4, 1]], 'block_shape': [[1, 3], [2, 2]], 'paddings': [[[0, 0], [0, 0]], [[0, 0], [2, 0]], [[1, 1], [1, 1]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[2, 3, 7, 3]], 'block_shape': [[1, 3], [2, 2]], 'paddings': [[[0, 0], [2, 0]], [[1, 0], [1, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[1, 4, 4, 4, 1, 1]], 'block_shape': [[2, 2, 2]], 'paddings': [[[0, 0], [0, 0], [0, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}, {'dtype': [tf.float32], 'input_shape': [[1, 4, 4]], 'block_shape': [[2]], 'paddings': [[[0, 0]]], 'constant_block_shape': [True, False], 'constant_paddings': [True, False]}] def build_graph(parameters): 'Build a space_to_batch graph given `parameters`.' input_tensor = tf.compat.v1.placeholder(dtype=parameters['dtype'], name='input', shape=parameters['input_shape']) input_tensors = [input_tensor] if parameters['constant_block_shape']: block_shape = parameters['block_shape'] else: shape = [len(parameters['block_shape'])] block_shape = tf.compat.v1.placeholder(dtype=tf.int32, name='shape', shape=shape) input_tensors.append(block_shape) if parameters['constant_paddings']: paddings = parameters['paddings'] else: shape = [len(parameters['paddings']), 2] paddings = tf.compat.v1.placeholder(dtype=tf.int32, name='paddings', shape=shape) input_tensors.append(paddings) out = tf.space_to_batch_nd(input_tensor, block_shape, paddings) return (input_tensors, [out]) def build_inputs(parameters, sess, inputs, outputs): values = [create_tensor_data(parameters['dtype'], parameters['input_shape'])] if (not parameters['constant_block_shape']): values.append(np.array(parameters['block_shape'])) if (not parameters['constant_paddings']): values.append(np.array(parameters['paddings'])) return (values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))) if options.use_experimental_converter: test_parameters = [test_parameters[0], test_parameters[1], test_parameters[3]] make_zip_of_tests(options, test_parameters, build_graph, build_inputs, expected_tf_failures=56)<|docstring|>Make a set of tests to do space_to_batch_nd.<|endoftext|>
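Independent of the test harness above, a tiny numeric example makes the op's contract concrete: the batch dimension grows by prod(block_shape) while the spatial dimensions shrink accordingly. This assumes tf.space_to_batch_nd is available in eager mode, as in TF 2.x.

import numpy as np
import tensorflow as tf

x = np.arange(1, 5, dtype=np.float32).reshape(1, 2, 2, 1)  # one 2x2 image
out = tf.space_to_batch_nd(x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])
print(out.shape)                      # (4, 1, 1, 1)
print(tf.reshape(out, [-1]).numpy())  # [1. 2. 3. 4.]: one pixel per batch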
36306c67bb7c8b778dd03fa67ba01a750e3daf66a1690f294d9e4ed5375d0e49
def build_graph(parameters): 'Build a space_to_batch graph given `parameters`.' input_tensor = tf.compat.v1.placeholder(dtype=parameters['dtype'], name='input', shape=parameters['input_shape']) input_tensors = [input_tensor] if parameters['constant_block_shape']: block_shape = parameters['block_shape'] else: shape = [len(parameters['block_shape'])] block_shape = tf.compat.v1.placeholder(dtype=tf.int32, name='shape', shape=shape) input_tensors.append(block_shape) if parameters['constant_paddings']: paddings = parameters['paddings'] else: shape = [len(parameters['paddings']), 2] paddings = tf.compat.v1.placeholder(dtype=tf.int32, name='paddings', shape=shape) input_tensors.append(paddings) out = tf.space_to_batch_nd(input_tensor, block_shape, paddings) return (input_tensors, [out])
Build a space_to_batch graph given `parameters`.
tensorflow/lite/testing/op_tests/space_to_batch_nd.py
build_graph
antlad/tensorflow
190993
python
def build_graph(parameters): input_tensor = tf.compat.v1.placeholder(dtype=parameters['dtype'], name='input', shape=parameters['input_shape']) input_tensors = [input_tensor] if parameters['constant_block_shape']: block_shape = parameters['block_shape'] else: shape = [len(parameters['block_shape'])] block_shape = tf.compat.v1.placeholder(dtype=tf.int32, name='shape', shape=shape) input_tensors.append(block_shape) if parameters['constant_paddings']: paddings = parameters['paddings'] else: shape = [len(parameters['paddings']), 2] paddings = tf.compat.v1.placeholder(dtype=tf.int32, name='paddings', shape=shape) input_tensors.append(paddings) out = tf.space_to_batch_nd(input_tensor, block_shape, paddings) return (input_tensors, [out])
def build_graph(parameters): input_tensor = tf.compat.v1.placeholder(dtype=parameters['dtype'], name='input', shape=parameters['input_shape']) input_tensors = [input_tensor] if parameters['constant_block_shape']: block_shape = parameters['block_shape'] else: shape = [len(parameters['block_shape'])] block_shape = tf.compat.v1.placeholder(dtype=tf.int32, name='shape', shape=shape) input_tensors.append(block_shape) if parameters['constant_paddings']: paddings = parameters['paddings'] else: shape = [len(parameters['paddings']), 2] paddings = tf.compat.v1.placeholder(dtype=tf.int32, name='paddings', shape=shape) input_tensors.append(paddings) out = tf.space_to_batch_nd(input_tensor, block_shape, paddings) return (input_tensors, [out])<|docstring|>Build a space_to_batch graph given `parameters`.<|endoftext|>
e3a3530dd3d8434b9432976251f27033c861163cc30fdadeb900aa72fdf0aa77
@staticmethod def s1ap_callback(msg_type, msg_p, msg_len): ' S1ap tester compatible callback' with S1ApUtil._cond: S1ApUtil._msg.put(S1ApUtil.Msg(msg_type, msg_p, msg_len)) S1ApUtil._cond.notify_all()
S1ap tester compatible callback
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
s1ap_callback
marosmars/magma
2
python
@staticmethod def s1ap_callback(msg_type, msg_p, msg_len): ' ' with S1ApUtil._cond: S1ApUtil._msg.put(S1ApUtil.Msg(msg_type, msg_p, msg_len)) S1ApUtil._cond.notify_all()
@staticmethod def s1ap_callback(msg_type, msg_p, msg_len): ' ' with S1ApUtil._cond: S1ApUtil._msg.put(S1ApUtil.Msg(msg_type, msg_p, msg_len)) S1ApUtil._cond.notify_all()<|docstring|>S1ap tester compatible callback<|endoftext|>
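Note: a self-contained sketch of the callback idiom in this record, using only the standard library; the queue and condition below stand in for S1ApUtil._msg and S1ApUtil._cond, which are not shown in the record itself.

import ctypes
import queue
import threading

_msgs = queue.Queue()
_cond = threading.Condition()
CALLBACK = ctypes.CFUNCTYPE(None, ctypes.c_short, ctypes.c_void_p, ctypes.c_short)

def _on_msg(msg_type, msg_p, msg_len):
    # Enqueue the event and wake any thread blocked waiting for a response.
    with _cond:
        _msgs.put((msg_type, msg_p, msg_len))
        _cond.notify_all()

cb = CALLBACK(_on_msg)  # keep a reference alive, or the ctypes trampoline is GC'd
cb(1, None, 0)          # in the real harness the C library invokes this pointer
print(_msgs.get_nowait())  # (1, None, 0)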
c42698c72090f07aae8b1c4386c086dd9afcff6b55b5deb84e7e23dacfc23356
def __init__(self): '\n Initialize the s1ap library and its callbacks.\n ' lib_path = os.environ['S1AP_TESTER_ROOT'] lib = os.path.join(lib_path, 'bin', S1ApUtil.lib_name) os.chdir(lib_path) self._test_lib = ctypes.cdll.LoadLibrary(lib) self._callback_type = ctypes.CFUNCTYPE(None, ctypes.c_short, ctypes.c_void_p, ctypes.c_short) self._callback_fn = self._callback_type(S1ApUtil.s1ap_callback) self._test_lib.initTestFrameWork(self._callback_fn) self._test_api = self._test_lib.tfwApi self._test_api.restype = ctypes.c_int16 self._test_api.argtypes = [ctypes.c_uint16, ctypes.c_void_p] self._lock = threading.RLock() self._ue_ip_map = {}
Initialize the s1ap library and its callbacks.
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
__init__
marosmars/magma
2
python
def __init__(self): '\n \n ' lib_path = os.environ['S1AP_TESTER_ROOT'] lib = os.path.join(lib_path, 'bin', S1ApUtil.lib_name) os.chdir(lib_path) self._test_lib = ctypes.cdll.LoadLibrary(lib) self._callback_type = ctypes.CFUNCTYPE(None, ctypes.c_short, ctypes.c_void_p, ctypes.c_short) self._callback_fn = self._callback_type(S1ApUtil.s1ap_callback) self._test_lib.initTestFrameWork(self._callback_fn) self._test_api = self._test_lib.tfwApi self._test_api.restype = ctypes.c_int16 self._test_api.argtypes = [ctypes.c_uint16, ctypes.c_void_p] self._lock = threading.RLock() self._ue_ip_map = {}
def __init__(self): '\n \n ' lib_path = os.environ['S1AP_TESTER_ROOT'] lib = os.path.join(lib_path, 'bin', S1ApUtil.lib_name) os.chdir(lib_path) self._test_lib = ctypes.cdll.LoadLibrary(lib) self._callback_type = ctypes.CFUNCTYPE(None, ctypes.c_short, ctypes.c_void_p, ctypes.c_short) self._callback_fn = self._callback_type(S1ApUtil.s1ap_callback) self._test_lib.initTestFrameWork(self._callback_fn) self._test_api = self._test_lib.tfwApi self._test_api.restype = ctypes.c_int16 self._test_api.argtypes = [ctypes.c_uint16, ctypes.c_void_p] self._lock = threading.RLock() self._ue_ip_map = {}<|docstring|>Initialize the s1ap library and its callbacks.<|endoftext|>
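Note: a hedged demonstration of the restype/argtypes idiom applied to tfwApi above, shown against libm's pow on an assumed POSIX host; without the annotations, ctypes defaults to int for arguments and return values.

import ctypes
import ctypes.util

libm = ctypes.CDLL(ctypes.util.find_library('m'))  # assumes libm is findable
libm.pow.restype = ctypes.c_double
libm.pow.argtypes = [ctypes.c_double, ctypes.c_double]
print(libm.pow(2.0, 10.0))  # 1024.0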
9164fa2fc816710b8a2a31bc34da182fd05f6ec74beeaf6824907cfd05030f0a
def cleanup(self): "\n Cleanup the dll loaded explicitly so the next run doesn't reuse the\n same globals as ctypes LoadLibrary uses dlopen under the covers\n\n Also clear out the UE ID: IP mappings\n " self._test_lib = None self._ue_ip_map = {}
Cleanup the dll loaded explicitly so the next run doesn't reuse the same globals, as ctypes LoadLibrary uses dlopen under the covers Also clear out the UE ID: IP mappings
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
cleanup
marosmars/magma
2
python
def cleanup(self): "\n Cleanup the dll loaded explicitly so the next run doesn't reuse the\n same globals as ctypes LoadLibrary uses dlopen under the covers\n\n Also clear out the UE ID: IP mappings\n " self._test_lib = None self._ue_ip_map = {}
def cleanup(self): "\n Cleanup the dll loaded explicitly so the next run doesn't reuse the\n same globals as ctypes LoadLibrary uses dlopen under the covers\n\n Also clear out the UE ID: IP mappings\n " self._test_lib = None self._ue_ip_map = {}<|docstring|>Cleanup the dll loaded explicitly so the next run doesn't reuse the same globals as ctypes LoadLibrary uses dlopen under the covers Also clear out the UE ID: IP mappings<|endoftext|>
455a87e47e8856799fb97afb5891d5c6ab60e36dac9632b6cbe818584fddc3f7
def issue_cmd(self, cmd_type, req): '\n Issue a command to the s1aptester and blocks until response is recvd.\n Args:\n cmd_type: The cmd type enum\n req: The request Structure\n Returns:\n None\n ' c_req = None if req: c_req = ctypes.byref(req) with self._cond: rc = self._test_api(cmd_type.value, c_req) if rc: logging.error(('Error executing command %s' % repr(cmd_type))) return rc return 0
Issue a command to the s1aptester and blocks until response is recvd. Args: cmd_type: The cmd type enum req: The request Structure Returns: None
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
issue_cmd
marosmars/magma
2
python
def issue_cmd(self, cmd_type, req): '\n Issue a command to the s1aptester and blocks until response is recvd.\n Args:\n cmd_type: The cmd type enum\n req: The request Structure\n Returns:\n None\n ' c_req = None if req: c_req = ctypes.byref(req) with self._cond: rc = self._test_api(cmd_type.value, c_req) if rc: logging.error(('Error executing command %s' % repr(cmd_type))) return rc return 0
def issue_cmd(self, cmd_type, req): '\n Issue a command to the s1aptester and blocks until response is recvd.\n Args:\n cmd_type: The cmd type enum\n req: The request Structure\n Returns:\n None\n ' c_req = None if req: c_req = ctypes.byref(req) with self._cond: rc = self._test_api(cmd_type.value, c_req) if rc: logging.error(('Error executing command %s' % repr(cmd_type))) return rc return 0<|docstring|>Issue a command to the s1aptester and blocks until response is recvd. Args: cmd_type: The cmd type enum req: The request Structure Returns: None<|endoftext|>
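Note: a minimal runnable sketch of what issue_cmd hands across the C boundary, a pointer to a ctypes Structure or None when the command carries no payload; DemoReq is a made-up structure, not one of the s1ap_types.

import ctypes

class DemoReq(ctypes.Structure):
    _fields_ = [('ue_Id', ctypes.c_uint16)]

req = DemoReq(ue_Id=7)
ref = ctypes.byref(req)          # what issue_cmd passes as c_req
ptr = ctypes.pointer(req)        # equivalent view, readable from Python
assert ptr.contents.ue_Id == 7   # the callee sees the same underlying memory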
835b30e5b38969c01064235cd03a8fbc92324cb1eafbdf8b5f99cb93a89b04dc
def get_ip(self, ue_id): ' Returns the IP assigned to a given UE ID\n\n Args:\n ue_id: the ue_id to query\n\n Returns an ipaddress.ip_address for the given UE ID, or None if no IP\n has been observed to be assigned to this UE\n ' with self._lock: if (ue_id in self._ue_ip_map): return self._ue_ip_map[ue_id] return None
Returns the IP assigned to a given UE ID Args: ue_id: the ue_id to query Returns an ipaddress.ip_address for the given UE ID, or None if no IP has been observed to be assigned to this UE
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
get_ip
marosmars/magma
2
python
def get_ip(self, ue_id): ' Returns the IP assigned to a given UE ID\n\n Args:\n ue_id: the ue_id to query\n\n Returns an ipaddress.ip_address for the given UE ID, or None if no IP\n has been observed to be assigned to this UE\n ' with self._lock: if (ue_id in self._ue_ip_map): return self._ue_ip_map[ue_id] return None
def get_ip(self, ue_id): ' Returns the IP assigned to a given UE ID\n\n Args:\n ue_id: the ue_id to query\n\n Returns an ipaddress.ip_address for the given UE ID, or None if no IP\n has been observed to be assigned to this UE\n ' with self._lock: if (ue_id in self._ue_ip_map): return self._ue_ip_map[ue_id] return None<|docstring|>Returns the IP assigned to a given UE ID Args: ue_id: the ue_id to query Returns an ipaddress.ip_address for the given UE ID, or None if no IP has been observed to be assigned to this UE<|endoftext|>
d9ea3cc6cd6e9d928bd9380100b9a0aec222d98ec1ec1d24c3cdb20cd642cbde
def attach(self, ue_id, attach_type, resp_type, resp_msg_type, sec_ctxt=s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT, id_type=s1ap_types.TFW_MID_TYPE_IMSI, eps_type=s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH): '\n Given a UE issue the attach request of specified type\n\n Caches the assigned IP address, if any is assigned\n\n Args:\n ue_id: The eNB ue_id\n attach_type: The type of attach e.g. UE_END_TO_END_ATTACH_REQUEST\n resp_type: enum type of the expected response\n sec_ctxt: Optional param allows for the reuse of the security\n context, defaults to creating a new security context.\n id_type: Optional param allows for changing up the ID type,\n defaults to s1ap_types.TFW_MID_TYPE_IMSI.\n eps_type: Optional param allows for variation in the EPS attach\n type, defaults to s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH.\n ' attach_req = s1ap_types.ueAttachRequest_t() attach_req.ue_Id = ue_id attach_req.mIdType = id_type attach_req.epsAttachType = eps_type attach_req.useOldSecCtxt = sec_ctxt assert (self.issue_cmd(attach_type, attach_req) == 0) response = self.get_response() if (s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value == response.msg_type): response = self.get_response() elif (s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND.value == response.msg_type): context_setup = self.get_response() assert (context_setup.msg_type == s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value) logging.debug('s1ap response expected, received: %d, %d', resp_type.value, response.msg_type) assert (resp_type.value == response.msg_type) msg = response.cast(resp_msg_type) if (resp_msg_type == s1ap_types.ueAttachAccept_t): pdn_type = msg.esmInfo.pAddr.pdnType addr = msg.esmInfo.pAddr.addrInfo if (S1ApUtil.CM_ESM_PDN_IPV4 == pdn_type): ip = ipaddress.ip_address(bytes(addr[:4])) with self._lock: self._ue_ip_map[ue_id] = ip else: raise ValueError(('PDN TYPE %s not supported' % pdn_type)) return msg
Given a UE issue the attach request of specified type Caches the assigned IP address, if any is assigned Args: ue_id: The eNB ue_id attach_type: The type of attach e.g. UE_END_TO_END_ATTACH_REQUEST resp_type: enum type of the expected response sec_ctxt: Optional param allows for the reuse of the security context, defaults to creating a new security context. id_type: Optional param allows for changing up the ID type, defaults to s1ap_types.TFW_MID_TYPE_IMSI. eps_type: Optional param allows for variation in the EPS attach type, defaults to s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH.
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
attach
marosmars/magma
2
python
def attach(self, ue_id, attach_type, resp_type, resp_msg_type, sec_ctxt=s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT, id_type=s1ap_types.TFW_MID_TYPE_IMSI, eps_type=s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH): '\n Given a UE issue the attach request of specified type\n\n Caches the assigned IP address, if any is assigned\n\n Args:\n ue_id: The eNB ue_id\n attach_type: The type of attach e.g. UE_END_TO_END_ATTACH_REQUEST\n resp_type: enum type of the expected response\n sec_ctxt: Optional param allows for the reuse of the security\n context, defaults to creating a new security context.\n id_type: Optional param allows for changing up the ID type,\n defaults to s1ap_types.TFW_MID_TYPE_IMSI.\n eps_type: Optional param allows for variation in the EPS attach\n type, defaults to s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH.\n ' attach_req = s1ap_types.ueAttachRequest_t() attach_req.ue_Id = ue_id attach_req.mIdType = id_type attach_req.epsAttachType = eps_type attach_req.useOldSecCtxt = sec_ctxt assert (self.issue_cmd(attach_type, attach_req) == 0) response = self.get_response() if (s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value == response.msg_type): response = self.get_response() elif (s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND.value == response.msg_type): context_setup = self.get_response() assert (context_setup.msg_type == s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value) logging.debug('s1ap response expected, received: %d, %d', resp_type.value, response.msg_type) assert (resp_type.value == response.msg_type) msg = response.cast(resp_msg_type) if (resp_msg_type == s1ap_types.ueAttachAccept_t): pdn_type = msg.esmInfo.pAddr.pdnType addr = msg.esmInfo.pAddr.addrInfo if (S1ApUtil.CM_ESM_PDN_IPV4 == pdn_type): ip = ipaddress.ip_address(bytes(addr[:4])) with self._lock: self._ue_ip_map[ue_id] = ip else: raise ValueError(('PDN TYPE %s not supported' % pdn_type)) return msg
def attach(self, ue_id, attach_type, resp_type, resp_msg_type, sec_ctxt=s1ap_types.TFW_CREATE_NEW_SECURITY_CONTEXT, id_type=s1ap_types.TFW_MID_TYPE_IMSI, eps_type=s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH): '\n Given a UE issue the attach request of specified type\n\n Caches the assigned IP address, if any is assigned\n\n Args:\n ue_id: The eNB ue_id\n attach_type: The type of attach e.g. UE_END_TO_END_ATTACH_REQUEST\n resp_type: enum type of the expected response\n sec_ctxt: Optional param allows for the reuse of the security\n context, defaults to creating a new security context.\n id_type: Optional param allows for changing up the ID type,\n defaults to s1ap_types.TFW_MID_TYPE_IMSI.\n eps_type: Optional param allows for variation in the EPS attach\n type, defaults to s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH.\n ' attach_req = s1ap_types.ueAttachRequest_t() attach_req.ue_Id = ue_id attach_req.mIdType = id_type attach_req.epsAttachType = eps_type attach_req.useOldSecCtxt = sec_ctxt assert (self.issue_cmd(attach_type, attach_req) == 0) response = self.get_response() if (s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value == response.msg_type): response = self.get_response() elif (s1ap_types.tfwCmd.UE_ATTACH_ACCEPT_IND.value == response.msg_type): context_setup = self.get_response() assert (context_setup.msg_type == s1ap_types.tfwCmd.INT_CTX_SETUP_IND.value) logging.debug('s1ap response expected, received: %d, %d', resp_type.value, response.msg_type) assert (resp_type.value == response.msg_type) msg = response.cast(resp_msg_type) if (resp_msg_type == s1ap_types.ueAttachAccept_t): pdn_type = msg.esmInfo.pAddr.pdnType addr = msg.esmInfo.pAddr.addrInfo if (S1ApUtil.CM_ESM_PDN_IPV4 == pdn_type): ip = ipaddress.ip_address(bytes(addr[:4])) with self._lock: self._ue_ip_map[ue_id] = ip else: raise ValueError(('PDN TYPE %s not supported' % pdn_type)) return msg<|docstring|>Given a UE issue the attach request of specified type Caches the assigned IP address, if any is assigned Args: ue_id: The eNB ue_id attach_type: The type of attach e.g. UE_END_TO_END_ATTACH_REQUEST resp_type: enum type of the expected response sec_ctxt: Optional param allows for the reuse of the security context, defaults to creating a new security context. id_type: Optional param allows for changing up the ID type, defaults to s1ap_types.TFW_MID_TYPE_IMSI. eps_type: Optional param allows for variation in the EPS attach type, defaults to s1ap_types.TFW_EPS_ATTACH_TYPE_EPS_ATTACH.<|endoftext|>
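Note: a runnable sketch of the address conversion inside attach; the byte values are invented, and only the bytes(addr[:4]) step mirrors the record above.

import ipaddress

# For an IPv4 PDN, the first four addrInfo bytes are the UE's address.
addr = [192, 168, 128, 17, 0, 0, 0, 0]
ip = ipaddress.ip_address(bytes(addr[:4]))
print(ip)  # 192.168.128.17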
3c91152350c066d6bc0f14854e9ec929b8d5e2732a0f9662aba3e3e31481f465
def detach(self, ue_id, reason_type, wait_for_s1_ctxt_release=True): ' Given a UE issue a detach request ' detach_req = s1ap_types.uedetachReq_t() detach_req.ue_Id = ue_id detach_req.ueDetType = reason_type assert (self.issue_cmd(s1ap_types.tfwCmd.UE_DETACH_REQUEST, detach_req) == 0) if (reason_type == s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value): response = self.get_response() assert (s1ap_types.tfwCmd.UE_DETACH_ACCEPT_IND.value == response.msg_type) if wait_for_s1_ctxt_release: response = self.get_response() assert (s1ap_types.tfwCmd.UE_CTX_REL_IND.value == response.msg_type) with self._lock: del self._ue_ip_map[ue_id]
Given a UE issue a detach request
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
detach
marosmars/magma
2
python
def detach(self, ue_id, reason_type, wait_for_s1_ctxt_release=True): ' ' detach_req = s1ap_types.uedetachReq_t() detach_req.ue_Id = ue_id detach_req.ueDetType = reason_type assert (self.issue_cmd(s1ap_types.tfwCmd.UE_DETACH_REQUEST, detach_req) == 0) if (reason_type == s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value): response = self.get_response() assert (s1ap_types.tfwCmd.UE_DETACH_ACCEPT_IND.value == response.msg_type) if wait_for_s1_ctxt_release: response = self.get_response() assert (s1ap_types.tfwCmd.UE_CTX_REL_IND.value == response.msg_type) with self._lock: del self._ue_ip_map[ue_id]
def detach(self, ue_id, reason_type, wait_for_s1_ctxt_release=True): ' ' detach_req = s1ap_types.uedetachReq_t() detach_req.ue_Id = ue_id detach_req.ueDetType = reason_type assert (self.issue_cmd(s1ap_types.tfwCmd.UE_DETACH_REQUEST, detach_req) == 0) if (reason_type == s1ap_types.ueDetachType_t.UE_NORMAL_DETACH.value): response = self.get_response() assert (s1ap_types.tfwCmd.UE_DETACH_ACCEPT_IND.value == response.msg_type) if wait_for_s1_ctxt_release: response = self.get_response() assert (s1ap_types.tfwCmd.UE_CTX_REL_IND.value == response.msg_type) with self._lock: del self._ue_ip_map[ue_id]<|docstring|>Given a UE issue a detach request<|endoftext|>
3b7e9ed8d4a8b86834d0dfe6be79dd7aaccd0bb1529cdaaf3574836afe40dd6d
def __init__(self, subscriber_client): '\n Initialize subscriber util.\n\n Args:\n subscriber_client (subscriber_db_client.SubscriberDbClient):\n client interacting with our subscriber APIs\n ' self._sid_idx = 1 self._ue_id = 1 self._ue_cfgs = [] self._subscriber_client = subscriber_client
Initialize subscriber util. Args: subscriber_client (subscriber_db_client.SubscriberDbClient): client interacting with our subscriber APIs
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
__init__
marosmars/magma
2
python
def __init__(self, subscriber_client): '\n Initialize subscriber util.\n\n Args:\n subscriber_client (subscriber_db_client.SubscriberDbClient):\n client interacting with our subscriber APIs\n ' self._sid_idx = 1 self._ue_id = 1 self._ue_cfgs = [] self._subscriber_client = subscriber_client
def __init__(self, subscriber_client): '\n Initialize subscriber util.\n\n Args:\n subscriber_client (subscriber_db_client.SubscriberDbClient):\n client interacting with our subscriber APIs\n ' self._sid_idx = 1 self._ue_id = 1 self._ue_cfgs = [] self._subscriber_client = subscriber_client<|docstring|>Initialize subscriber util. Args: subscriber_client (subscriber_db_client.SubscriberDbClient): client interacting with our subscriber APIs<|endoftext|>
c1b878a448529ff6095bdd8d9d16b1cdee5c5d4a7a6628162f3f189960d1e60f
def _gen_next_sid(self): '\n Generate the sid based on index offset and prefix\n ' idx = str(self._sid_idx) padding = ((self.IMSI_LEN - len(idx)) - len(self.SID_PREFIX[4:])) sid = ((self.SID_PREFIX + ('0' * padding)) + idx) self._sid_idx += 1 print(('Using subscriber IMSI %s' % sid)) return sid
Generate the sid based on index offset and prefix
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
_gen_next_sid
marosmars/magma
2
python
def _gen_next_sid(self): '\n \n ' idx = str(self._sid_idx) padding = ((self.IMSI_LEN - len(idx)) - len(self.SID_PREFIX[4:])) sid = ((self.SID_PREFIX + ('0' * padding)) + idx) self._sid_idx += 1 print(('Using subscriber IMSI %s' % sid)) return sid
def _gen_next_sid(self): '\n \n ' idx = str(self._sid_idx) padding = ((self.IMSI_LEN - len(idx)) - len(self.SID_PREFIX[4:])) sid = ((self.SID_PREFIX + ('0' * padding)) + idx) self._sid_idx += 1 print(('Using subscriber IMSI %s' % sid)) return sid<|docstring|>Generate the sid based on index offset and prefix<|endoftext|>
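Note: a worked example of the padding arithmetic above. SID_PREFIX here is an assumed value ('IMSI' plus a PLMN), since only IMSI_LEN and the SID_PREFIX[4:] slice appear in the record itself.

IMSI_LEN = 15
SID_PREFIX = 'IMSI001010'  # hypothetical 'IMSI' tag plus PLMN digits
idx = '7'
padding = IMSI_LEN - len(idx) - len(SID_PREFIX[4:])  # 15 - 1 - 6 = 8
sid = SID_PREFIX + '0' * padding + idx
print(sid)  # IMSI001010000000007 (15 digits after the 'IMSI' tag)
# Equivalent formulation using zfill:
assert sid == SID_PREFIX + idx.zfill(IMSI_LEN - len(SID_PREFIX[4:]))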
4a74c2c7abcd968da926fafe120b480964144d3876e07650d7dd33794b2cf374
def _get_s1ap_sub(self, sid): '\n Get the subscriber data in s1aptester format.\n Args:\n sid: The string representation of the subscriber id\n ' ue_cfg = s1ap_types.ueConfig_t() ue_cfg.ue_id = self._ue_id ue_cfg.auth_key = 1 for i in range(0, 15): ue_cfg.imsi[i] = ctypes.c_ubyte(int(sid[(4 + i)])) ue_cfg.imei[i] = ctypes.c_ubyte(int('1')) ue_cfg.imei[15] = ctypes.c_ubyte(int('1')) ue_cfg.imsiLen = self.IMSI_LEN self._ue_cfgs.append(ue_cfg) self._ue_id += 1 return ue_cfg
Get the subscriber data in s1aptester format. Args: sid: The string representation of the subscriber id
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
_get_s1ap_sub
marosmars/magma
2
python
def _get_s1ap_sub(self, sid): '\n Get the subscriber data in s1aptester format.\n Args:\n sid: The string representation of the subscriber id\n ' ue_cfg = s1ap_types.ueConfig_t() ue_cfg.ue_id = self._ue_id ue_cfg.auth_key = 1 for i in range(0, 15): ue_cfg.imsi[i] = ctypes.c_ubyte(int(sid[(4 + i)])) ue_cfg.imei[i] = ctypes.c_ubyte(int('1')) ue_cfg.imei[15] = ctypes.c_ubyte(int('1')) ue_cfg.imsiLen = self.IMSI_LEN self._ue_cfgs.append(ue_cfg) self._ue_id += 1 return ue_cfg
def _get_s1ap_sub(self, sid): '\n Get the subscriber data in s1aptester format.\n Args:\n sid: The string representation of the subscriber id\n ' ue_cfg = s1ap_types.ueConfig_t() ue_cfg.ue_id = self._ue_id ue_cfg.auth_key = 1 for i in range(0, 15): ue_cfg.imsi[i] = ctypes.c_ubyte(int(sid[(4 + i)])) ue_cfg.imei[i] = ctypes.c_ubyte(int('1')) ue_cfg.imei[15] = ctypes.c_ubyte(int('1')) ue_cfg.imsiLen = self.IMSI_LEN self._ue_cfgs.append(ue_cfg) self._ue_id += 1 return ue_cfg<|docstring|>Get the subscriber data in s1aptester format. Args: sid: The string representation of the subscriber id<|endoftext|>
48e7e674a96afc86c6615305f7749422e9f3d1bd35f98cfd9b2d8ced29853a6e
def add_sub(self, num_ues=1): ' Add subscribers to the EPC; this is a blocking call ' subscribers = [] for _ in range(num_ues): sid = self._gen_next_sid() self._subscriber_client.add_subscriber(sid) subscribers.append(self._get_s1ap_sub(sid)) self._subscriber_client.wait_for_changes() return subscribers
Add subscribers to the EPC; this is a blocking call
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
add_sub
marosmars/magma
2
python
def add_sub(self, num_ues=1): ' ' subscribers = [] for _ in range(num_ues): sid = self._gen_next_sid() self._subscriber_client.add_subscriber(sid) subscribers.append(self._get_s1ap_sub(sid)) self._subscriber_client.wait_for_changes() return subscribers
def add_sub(self, num_ues=1): ' ' subscribers = [] for _ in range(num_ues): sid = self._gen_next_sid() self._subscriber_client.add_subscriber(sid) subscribers.append(self._get_s1ap_sub(sid)) self._subscriber_client.wait_for_changes() return subscribers<|docstring|>Add subscribers to the EPC; this is a blocking call<|endoftext|>
4b3508f5c2085260fc0e8003e659fcfb07159967ba950eb5a91a28ea0dc8b647
def config_apn_data(self, imsi, apn_list): ' Add APN details ' self._subscriber_client.config_apn_details(imsi, apn_list)
Add APN details
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
config_apn_data
marosmars/magma
2
python
def config_apn_data(self, imsi, apn_list): ' ' self._subscriber_client.config_apn_details(imsi, apn_list)
def config_apn_data(self, imsi, apn_list): ' ' self._subscriber_client.config_apn_details(imsi, apn_list)<|docstring|>Add APN details<|endoftext|>
dd448000e10101c86386e6485727c10502ecb85a1d8fe586bcef1fb8a0356cb9
def cleanup(self): ' Cleanup added subscriber from subscriberdb ' self._subscriber_client.clean_up() self._subscriber_client.wait_for_changes()
Cleanup added subscriber from subscriberdb
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
cleanup
marosmars/magma
2
python
def cleanup(self): ' ' self._subscriber_client.clean_up() self._subscriber_client.wait_for_changes()
def cleanup(self): ' ' self._subscriber_client.clean_up() self._subscriber_client.wait_for_changes()<|docstring|>Cleanup added subscriber from subscriberdb<|endoftext|>
37ff60b85f2dcd59040e913db6351b59348a8b9775f179bb090de62ebf584298
def __init__(self, magmad_client): '\n Init magmad util.\n\n Args:\n magmad_client: MagmadServiceClient\n ' self._magmad_client = magmad_client self._data = {'user': 'vagrant', 'host': '192.168.60.142', 'password': 'vagrant', 'command': 'test'} self._command = 'sshpass -p {password} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {user}@{host} {command}'
Init magmad util. Args: magmad_client: MagmadServiceClient
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
__init__
marosmars/magma
2
python
def __init__(self, magmad_client): '\n Init magmad util.\n\n Args:\n magmad_client: MagmadServiceClient\n ' self._magmad_client = magmad_client self._data = {'user': 'vagrant', 'host': '192.168.60.142', 'password': 'vagrant', 'command': 'test'} self._command = 'sshpass -p {password} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {user}@{host} {command}'
def __init__(self, magmad_client): '\n Init magmad util.\n\n Args:\n magmad_client: MagmadServiceClient\n ' self._magmad_client = magmad_client self._data = {'user': 'vagrant', 'host': '192.168.60.142', 'password': 'vagrant', 'command': 'test'} self._command = 'sshpass -p {password} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {user}@{host} {command}'<|docstring|>Init magmad util. Args: magmad_client: MagmadServiceClient<|endoftext|>
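Note: how the ssh wrapper template above expands with the defaults from __init__; this is string assembly only, nothing is executed, and the surrounding quotes on the command are the ones exec_command adds by hand.

data = {'user': 'vagrant', 'host': '192.168.60.142',
        'password': 'vagrant', 'command': '"ls /etc/magma"'}
tpl = ('sshpass -p {password} ssh -o UserKnownHostsFile=/dev/null '
       '-o StrictHostKeyChecking=no {user}@{host} {command}')
print(tpl.format(**data))
# sshpass -p vagrant ssh -o UserKnownHostsFile=/dev/null
#   -o StrictHostKeyChecking=no vagrant@192.168.60.142 "ls /etc/magma"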
952473ab51e7191b128d3dcd208894425e087dc4abcc96afcad3f31f780c460b
def exec_command(self, command): "\n Run a command remotely on magma_dev VM.\n\n Args:\n command: command (str) to be executed on remote host\n e.g. 'sed -i 's/config1/config2/g' /etc/magma/mme.yml'\n\n " data = self._data data['command'] = (('"' + command) + '"') os.system(self._command.format(**data))
Run a command remotely on magma_dev VM. Args: command: command (str) to be executed on remote host e.g. 'sed -i 's/config1/config2/g' /etc/magma/mme.yml'
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
exec_command
marosmars/magma
2
python
def exec_command(self, command): "\n Run a command remotely on magma_dev VM.\n\n Args:\n command: command (str) to be executed on remote host\n e.g. 'sed -i 's/config1/config2/g' /etc/magma/mme.yml'\n\n " data = self._data data['command'] = (('"' + command) + '"') os.system(self._command.format(**data))
def exec_command(self, command): "\n Run a command remotely on magma_dev VM.\n\n Args:\n command: command (str) to be executed on remote host\n e.g. 'sed -i 's/config1/config2/g' /etc/magma/mme.yml'\n\n " data = self._data data['command'] = (('"' + command) + '"') os.system(self._command.format(**data))<|docstring|>Run a command remotely on magma_dev VM. Args: command: command (str) to be executed on remote host e.g. 'sed -i 's/config1/config2/g' /etc/magma/mme.yml'<|endoftext|>
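Note: a hedged argv-based alternative to the os.system call above, not the module's actual implementation; passing arguments as a list avoids the hand-built quoting, and it assumes sshpass and ssh exist on the local host.

import shlex
import subprocess

def exec_command_argv(data, command):
    argv = ['sshpass', '-p', data['password'], 'ssh',
            '-o', 'UserKnownHostsFile=/dev/null',
            '-o', 'StrictHostKeyChecking=no',
            '{user}@{host}'.format(**data), command]
    print(shlex.join(argv))  # shlex.join needs Python 3.8+; inspect before running
    return subprocess.run(argv).returncode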
67a18b8a802db7c371c0cbeb00900d75c190ff339595dfaefce745fc1e72cc46
def set_config_stateless(self, enabled): '\n Sets the use_stateless flag in mme.yml file\n\n Args:\n enabled: sets the flag to true if enabled\n\n ' if enabled: self.exec_command("sed -i 's/use_stateless: false/use_stateless: true/g' /etc/magma/mme.yml") else: self.exec_command("sed -i 's/use_stateless: true/use_stateless: false/g' /etc/magma/mme.yml")
Sets the use_stateless flag in mme.yml file Args: enabled: sets the flag to true if enabled
lte/gateway/python/integ_tests/s1aptests/s1ap_utils.py
set_config_stateless
marosmars/magma
2
python
def set_config_stateless(self, enabled): '\n Sets the use_stateless flag in mme.yml file\n\n Args:\n enabled: sets the flag to true if enabled\n\n ' if enabled: self.exec_command("sed -i 's/use_stateless: false/use_stateless: true/g' /etc/magma/mme.yml") else: self.exec_command("sed -i 's/use_stateless: true/use_stateless: false/g' /etc/magma/mme.yml")
def set_config_stateless(self, enabled): '\n Sets the use_stateless flag in mme.yml file\n\n Args:\n enabled: sets the flag to true if enabled\n\n ' if enabled: self.exec_command("sed -i 's/use_stateless: false/use_stateless: true/g' /etc/magma/mme.yml") else: self.exec_command("sed -i 's/use_stateless: true/use_stateless: false/g' /etc/magma/mme.yml")<|docstring|>Sets the use_stateless flag in mme.yml file Args: enabled: sets the flag to true if enabled<|endoftext|>
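Note: a pure-string equivalent of the sed toggle above, shown on a single sample line; the real helper rewrites /etc/magma/mme.yml in place over ssh.

line = 'use_stateless: false'
enabled = True
if enabled:
    line = line.replace('use_stateless: false', 'use_stateless: true')
else:
    line = line.replace('use_stateless: true', 'use_stateless: false')
print(line)  # use_stateless: true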