| Column | Type | Min | Max |
|---|---|---|---|
| body_hash | stringlengths | 64 | 64 |
| body | stringlengths | 23 | 109k |
| docstring | stringlengths | 1 | 57k |
| path | stringlengths | 4 | 198 |
| name | stringlengths | 1 | 115 |
| repository_name | stringlengths | 7 | 111 |
| repository_stars | float64 | 0 | 191k |
| lang | stringclasses | 1 value | |
| body_without_docstring | stringlengths | 14 | 108k |
| unified | stringlengths | 45 | 133k |
body_hash: b5e7e07122593b6fa96664307ade09dd691618950e5515ba8f16de1dccfffa83
```python
import numpy as np
from PIL import Image


def _crop(inname, outname, color):
    """
    Crop the image and colour it in a flat colour. The alpha channel is left unchanged.
    :param inname: base name of the input file. The input image is {inname}1.png (the 1 is added by latex).
    :param outname: base name of the output file. The output image is {outname}.png.
    :param color: Color object defining the required flat colour.
    :return: a tuple of the output filename and the (width, height) of the cropped image.
    """
    image = Image.open('{}1.png'.format(inname))
    image.load()
    image_data = np.asarray(image)
    shape = image_data.shape
    shape = (shape[0], shape[1], 4)
    image_temp = np.zeros(shape, dtype=np.uint8)
    # Derive the alpha channel from the inverted red channel of the latex render.
    for i in range(shape[0]):
        for j in range(shape[1]):
            image_temp[i][j][3] = 255 - image_data[i][j][0]
    image_data = image_temp
    # Find the bounding box of the non-empty rows and columns.
    image_data_bw = image_data.max(axis=2)
    non_empty_columns = np.where(image_data_bw.max(axis=0) > 0)[0]
    non_empty_rows = np.where(image_data_bw.max(axis=1) > 0)[0]
    cropbox = (min(non_empty_rows), max(non_empty_rows),
               min(non_empty_columns), max(non_empty_columns))
    image_data_new = image_data[cropbox[0]:cropbox[1] + 1, cropbox[2]:cropbox[3] + 1, :]
    # Fill the RGB channels with the flat colour, copying the alpha channel across.
    image_data_colored = np.zeros_like(image_data_new)
    color_data = (color.r * 255, color.g * 255, color.b * 255)
    for i in range(image_data_new.shape[0]):
        for j in range(image_data_new.shape[1]):
            image_data_colored[i][j][3] = image_data_new[i][j][3]
            image_data_colored[i][j][0] = color_data[0]
            image_data_colored[i][j][1] = color_data[1]
            image_data_colored[i][j][2] = color_data[2]
    new_image = Image.fromarray(image_data_colored)
    filename = '{}.png'.format(outname)
    new_image.save(filename)
    return (filename, (cropbox[3] - cropbox[2], cropbox[1] - cropbox[0]))
```
path: generativepy/formulas.py · name: _crop · repository: sthagen/martinmcbride-generativepy · stars: 0 · lang: python
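The per-pixel loops above work but are slow on large renders. As a point of comparison, here is a minimal vectorized sketch of the colouring step, assuming the same NumPy import, an HxWx4 uint8 array whose alpha channel is already set, and a color object with r, g, b attributes in the 0-1 range (the helper name is illustrative, not part of generativepy):

```python
import numpy as np


def colorize_flat(image_data, color):
    """Fill the RGB channels with a flat colour, leaving alpha unchanged."""
    colored = np.zeros_like(image_data)
    colored[..., 0] = int(color.r * 255)
    colored[..., 1] = int(color.g * 255)
    colored[..., 2] = int(color.b * 255)
    colored[..., 3] = image_data[..., 3]  # alpha copied verbatim
    return colored
```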
body_hash: ac41ec54f3d8116288088ea4ea8e288e6b0cf2ce6880a35424b64ce27afa1402
```python
import os


def _remove_ignore_errors(filename):
    """
    Remove a file but ignore errors. We shouldn't fail just because a temp file didn't get deleted.
    :param filename: name of the file to remove.
    :return: None
    """
    try:
        os.remove(filename)
    except Exception:
        pass
```
path: generativepy/formulas.py · name: _remove_ignore_errors · repository: sthagen/martinmcbride-generativepy · stars: 0 · lang: python
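The standard library offers an equivalent idiom; here is a sketch of the same behaviour using contextlib.suppress, narrowed to OSError (which covers the missing-file and permission errors os.remove can raise) rather than the broad Exception the original catches:

```python
import contextlib
import os


def _remove_ignore_errors(filename):
    """Remove a file, ignoring OS-level errors such as a missing file."""
    with contextlib.suppress(OSError):
        os.remove(filename)
```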
body_hash: 4855c1cc2ed6f44e0e81920fafe16843af3ff5ca85d165e16aa2f038d7661c1d
```python
import random
import subprocess


def rasterise_formula(name, formula, color, dpi=600):
    """
    Convert a latex formula into a PNG image. The PNG image will be tightly cropped, with a transparent background
    and text in the selected colour.
    :param name: Name of the output image. String with no extension, eg "myformula". The final output will be stored
    under this name, in the current working folder, so if you are creating multiple formulae give each one a unique name.
    :param formula: The formula, as a latex string.
    :param color: Color object defining the required colour of the output.
    :param dpi: The resolution, in dpi. This indirectly controls the size of the image.
    :return: A tuple containing the filename of the result (with a png extension) and the (width, height) of the image
    in pixels.
    """
    unique_name = '{}-{}'.format(name, random.randint(100000, 999999))
    # tex1 and tex2 are module-level latex templates that wrap the formula.
    tex = '\n'.join([tex1, formula, tex2])
    tex_fn = '{}.tex'.format(unique_name)
    with open(tex_fn, 'w') as tex_file:
        tex_file.write(tex)
    # Run latex to produce a dvi file, then dvipng to rasterise it.
    process = subprocess.Popen('latex {}.tex'.format(unique_name),
                               shell=True, stdout=subprocess.PIPE)
    process.wait()
    process = subprocess.Popen('dvipng -T tight -D {} {}.dvi'.format(dpi, unique_name),
                               shell=True, stdout=subprocess.PIPE)
    process.wait()
    filename, size = _crop(unique_name, name, color)
    # Clean up the intermediate files; failures here are not fatal.
    _remove_ignore_errors('{}.aux'.format(unique_name))
    _remove_ignore_errors('{}.log'.format(unique_name))
    _remove_ignore_errors('{}.tex'.format(unique_name))
    _remove_ignore_errors('{}.dvi'.format(unique_name))
    _remove_ignore_errors('{}1.png'.format(unique_name))
    return (filename, size)
```
path: generativepy/formulas.py · name: rasterise_formula · repository: sthagen/martinmcbride-generativepy · stars: 0 · lang: python
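A hypothetical usage sketch, assuming generativepy's Color class, a working latex and dvipng installation on the PATH, and write access to the current working folder:

```python
from generativepy.color import Color
from generativepy.formulas import rasterise_formula

filename, (width, height) = rasterise_formula(
    'pythagoras', r'a^2 + b^2 = c^2', Color(0, 0, 0.5), dpi=400
)
print(filename, width, height)  # pythagoras.png plus its size in pixels
```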
body_hash: 80e8e516ac9d2e6922f9d15498998e0745a761c501e7448d3f1830ec89c42859
```python
import tensorflow as tf  # TensorFlow 1.x APIs (tf.Session, SavedModelBuilder)
from tensorflow.python.saved_model import signature_constants, signature_def_utils


def export_model():
    """Exports the model as a SavedModel."""
    trained_checkpoint_prefix = 'linear_regression'
    loaded_graph = tf.Graph()
    with tf.Session(graph=loaded_graph) as sess:
        sess.run(tf.global_variables_initializer())
        # Restore the trained graph and weights from the checkpoint.
        loader = tf.train.import_meta_graph(trained_checkpoint_prefix + '.meta')
        loader.restore(sess, trained_checkpoint_prefix)
        graph = tf.get_default_graph()
        # Build tensor info for the model's input and output tensors.
        inputs = tf.saved_model.utils.build_tensor_info(graph.get_tensor_by_name('X:0'))
        outputs = tf.saved_model.utils.build_tensor_info(graph.get_tensor_by_name('y_model:0'))
        signature = signature_def_utils.build_signature_def(
            inputs={'X': inputs},
            outputs={'y_model': outputs},
            method_name=signature_constants.PREDICT_METHOD_NAME)
        signature_map = {signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
        # Save the graph and variables to ./my_model with the serving tag.
        builder = tf.saved_model.builder.SavedModelBuilder('./my_model')
        builder.add_meta_graph_and_variables(
            sess, signature_def_map=signature_map,
            tags=[tf.saved_model.tag_constants.SERVING])
        builder.save()
```
path: Chapter12/01-chapter-content/tensorflow/linear_regression_tensorflow/tensorflow_save_and_load_using_model_builder.py · name: export_model · repository: ProgrammerWaterworth/Mastering-OpenCV-4-with-Python · stars: 2 · lang: python
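A hypothetical smoke test for the exported model, again assuming TensorFlow 1.x (tf.Session and the SavedModel loader used here were removed from TF 2's default API) and assuming the model's X placeholder accepts a batch of scalar features:

```python
import tensorflow as tf

with tf.Session(graph=tf.Graph()) as sess:
    # Load the SavedModel written by export_model() above.
    tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], './my_model')
    predictions = sess.run('y_model:0', feed_dict={'X:0': [1.0, 2.0, 3.0]})
    print(predictions)
```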
body_hash: 167afe5d948d22c647ae9c15bd36c521cc41c237b3098f94481870407a4fdaa0
```python
from typing import TypeVar

_TDefaultType = TypeVar('_TDefaultType')


def Default(value: _TDefaultType) -> _TDefaultType:
    """
    You shouldn't use this function directly.

    It's used internally to recognize when a default value has been overwritten, even
    if the overridden default value was truthy.
    """
    # _DefaultPlaceholder is a small dataclass defined in the same module.
    return _DefaultPlaceholder(value)
```
path: sqlmodel/default.py · name: Default · repository: strickvl/sqlmodel · stars: 5,490 · lang: python
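A minimal self-contained sketch of the placeholder pattern this enables; the _DefaultPlaceholder dataclass here mirrors what sqlmodel defines alongside Default, and configure is purely illustrative:

```python
from dataclasses import dataclass
from typing import Any


@dataclass
class _DefaultPlaceholder:
    value: Any


def Default(value):
    return _DefaultPlaceholder(value)


def configure(echo=Default(True)):
    if isinstance(echo, _DefaultPlaceholder):
        echo = echo.value              # caller kept the default
        print('using default:', echo)
    else:
        print('caller passed:', echo)


configure()      # using default: True
configure(True)  # caller passed: True (distinguishable even though truthy)
```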
body_hash: 78e92c7265fb801c2a051d5e7003ccfbd93129a4c744d42d8a80e95dbe628dd4
```python
def __init__(self, *,
             host: str = 'compute.googleapis.com',
             credentials: ga_credentials.Credentials = None,
             credentials_file: str = None,
             scopes: Sequence[str] = None,
             client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
             quota_project_id: Optional[str] = None,
             client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
             always_use_jwt_access: Optional[bool] = False,
             url_scheme: str = 'https') -> None:
    """Instantiate the transport.

    Args:
        host (Optional[str]):
            The hostname to connect to.
        credentials (Optional[google.auth.credentials.Credentials]): The
            authorization credentials to attach to requests. These
            credentials identify the application to the service; if none
            are specified, the client will attempt to ascertain the
            credentials from the environment.
        credentials_file (Optional[str]): A file with credentials that can
            be loaded with :func:`google.auth.load_credentials_from_file`.
            This argument is ignored if ``channel`` is provided.
        scopes (Optional(Sequence[str])): A list of scopes. This argument is
            ignored if ``channel`` is provided.
        client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
            certificate to configure mutual TLS HTTP channel. It is ignored
            if ``channel`` is provided.
        quota_project_id (Optional[str]): An optional project to use for billing
            and quota.
        client_info (google.api_core.gapic_v1.client_info.ClientInfo):
            The client info used to send a user-agent string along with
            API requests. If ``None``, then default info will be used.
            Generally, you only need to set this if you are developing
            your own client library.
        always_use_jwt_access (Optional[bool]): Whether self signed JWT should
            be used for service account credentials.
        url_scheme: the protocol scheme for the API endpoint. Normally
            "https", but for testing or local servers,
            "http" can be specified.
    """
    super().__init__(host=host, credentials=credentials, client_info=client_info,
                     always_use_jwt_access=always_use_jwt_access)
    # All requests go through an authorized HTTP session bound to the credentials.
    self._session = AuthorizedSession(self._credentials, default_host=self.DEFAULT_HOST)
    if client_cert_source_for_mtls:
        self._session.configure_mtls_channel(client_cert_source_for_mtls)
    self._prep_wrapped_messages(client_info)
```
path: google/cloud/compute_v1/services/region_commitments/transports/rest.py · name: __init__ · repository: LaudateCorpus1/python-compute · stars: 0 · lang: python
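A hypothetical way to reach this transport from the public client, assuming google-cloud-compute is installed and application default credentials are available; passing transport="rest" selects the REST transport this constructor belongs to:

```python
from google.cloud import compute_v1

# The client resolves credentials, host, and client_info and passes
# them into the transport's __init__ shown above.
client = compute_v1.RegionCommitmentsClient(transport='rest')
```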
body_hash: cb32e08094e510b8f83280cfddbf61066c6f954004cf64d1bc84e6ca6a5ed169
```python
def __call__(self,
             request: compute.AggregatedListRegionCommitmentsRequest, *,
             retry: OptionalRetry = gapic_v1.method.DEFAULT,
             timeout: float = None,
             metadata: Sequence[Tuple[str, str]] = ()) -> compute.CommitmentAggregatedList:
    """Call the aggregated list method over HTTP.

    Args:
        request (~.compute.AggregatedListRegionCommitmentsRequest):
            The request object. A request message for
            RegionCommitments.AggregatedList. See
            the method description for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.CommitmentAggregatedList:
    """
    http_options = [{'method': 'get',
                     'uri': '/compute/v1/projects/{project}/aggregated/commitments'}]
    request_kwargs = compute.AggregatedListRegionCommitmentsRequest.to_dict(request)
    # Fill the URI template from the request fields.
    transcoded_request = path_template.transcode(http_options, **request_kwargs)
    uri = transcoded_request['uri']
    method = transcoded_request['method']
    query_params = json.loads(compute.AggregatedListRegionCommitmentsRequest.to_json(
        compute.AggregatedListRegionCommitmentsRequest(transcoded_request['query_params']),
        including_default_value_fields=False, use_integers_for_enums=False))
    query_params.update(self._get_unset_required_fields(query_params))
    headers = dict(metadata)
    headers['Content-Type'] = 'application/json'
    # Send the request over the authorized HTTP session.
    response = getattr(self._session, method)(
        'https://{host}{uri}'.format(host=self._host, uri=uri),
        timeout=timeout, headers=headers,
        params=rest_helpers.flatten_query_params(query_params))
    # Raise a gapic exception for any 4xx/5xx status.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)
    return compute.CommitmentAggregatedList.from_json(response.content, ignore_unknown_fields=True)
```
path: google/cloud/compute_v1/services/region_commitments/transports/rest.py · name: __call__ · repository: LaudateCorpus1/python-compute · stars: 0 · lang: python
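A hypothetical client-level call that is ultimately served by this method; the pager yields (scope, CommitmentsScopedList) pairs, one per region scope (the project name is illustrative):

```python
from google.cloud import compute_v1

client = compute_v1.RegionCommitmentsClient()
for scope, scoped_list in client.aggregated_list(project='my-project'):
    for commitment in scoped_list.commitments:
        print(scope, commitment.name)
```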
body_hash: 08913fe9f5edfa6a8db11e72ce4bfc9ecca3f248d8e29920cfaf389a544908bd
```python
def __call__(self,
             request: compute.GetRegionCommitmentRequest, *,
             retry: OptionalRetry = gapic_v1.method.DEFAULT,
             timeout: float = None,
             metadata: Sequence[Tuple[str, str]] = ()) -> compute.Commitment:
    """Call the get method over HTTP.

    Args:
        request (~.compute.GetRegionCommitmentRequest):
            The request object. A request message for
            RegionCommitments.Get. See the method
            description for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.Commitment:
            Represents a regional Commitment
            resource. Creating a commitment resource
            means that you are purchasing a
            committed use contract with an explicit
            start and end time. You can create
            commitments based on vCPUs and memory
            usage and receive discounted rates. For
            full details, read Signing Up for
            Committed Use Discounts.
    """
    http_options = [{'method': 'get',
                     'uri': '/compute/v1/projects/{project}/regions/{region}/commitments/{commitment}'}]
    request_kwargs = compute.GetRegionCommitmentRequest.to_dict(request)
    # Fill the URI template from the request fields.
    transcoded_request = path_template.transcode(http_options, **request_kwargs)
    uri = transcoded_request['uri']
    method = transcoded_request['method']
    query_params = json.loads(compute.GetRegionCommitmentRequest.to_json(
        compute.GetRegionCommitmentRequest(transcoded_request['query_params']),
        including_default_value_fields=False, use_integers_for_enums=False))
    query_params.update(self._get_unset_required_fields(query_params))
    headers = dict(metadata)
    headers['Content-Type'] = 'application/json'
    # Send the request over the authorized HTTP session.
    response = getattr(self._session, method)(
        'https://{host}{uri}'.format(host=self._host, uri=uri),
        timeout=timeout, headers=headers,
        params=rest_helpers.flatten_query_params(query_params))
    # Raise a gapic exception for any 4xx/5xx status.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)
    return compute.Commitment.from_json(response.content, ignore_unknown_fields=True)
```
path: google/cloud/compute_v1/services/region_commitments/transports/rest.py · name: __call__ · repository: LaudateCorpus1/python-compute · stars: 0 · lang: python
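A hypothetical client-level fetch of a single commitment (all resource names are illustrative):

```python
from google.cloud import compute_v1

client = compute_v1.RegionCommitmentsClient()
commitment = client.get(project='my-project', region='us-central1',
                        commitment='my-commitment')
print(commitment.plan, commitment.status)
```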
body_hash: 6b928ae2d07010fb58ac355413d872bd9eddad1e9e8f9827a99c68762b5bc551
```python
def __call__(self,
             request: compute.InsertRegionCommitmentRequest, *,
             retry: OptionalRetry = gapic_v1.method.DEFAULT,
             timeout: float = None,
             metadata: Sequence[Tuple[str, str]] = ()) -> compute.Operation:
    """Call the insert method over HTTP.

    Args:
        request (~.compute.InsertRegionCommitmentRequest):
            The request object. A request message for
            RegionCommitments.Insert. See the method
            description for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.Operation:
            Represents an Operation resource. Google Compute Engine
            has three Operation resources: \*
            `Global </compute/docs/reference/rest/v1/globalOperations>`__
            \*
            `Regional </compute/docs/reference/rest/v1/regionOperations>`__
            \*
            `Zonal </compute/docs/reference/rest/v1/zoneOperations>`__
            You can use an operation resource to manage asynchronous
            API requests. For more information, read Handling API
            responses. Operations can be global, regional or zonal.
            - For global operations, use the ``globalOperations``
            resource. - For regional operations, use the
            ``regionOperations`` resource. - For zonal operations,
            use the ``zonalOperations`` resource. For more
            information, read Global, Regional, and Zonal Resources.
    """
    http_options = [{'method': 'post',
                     'uri': '/compute/v1/projects/{project}/regions/{region}/commitments',
                     'body': 'commitment_resource'}]
    request_kwargs = compute.InsertRegionCommitmentRequest.to_dict(request)
    transcoded_request = path_template.transcode(http_options, **request_kwargs)
    # Serialize the Commitment resource into the JSON request body.
    body = compute.Commitment.to_json(
        compute.Commitment(transcoded_request['body']),
        including_default_value_fields=False, use_integers_for_enums=False)
    uri = transcoded_request['uri']
    method = transcoded_request['method']
    query_params = json.loads(compute.InsertRegionCommitmentRequest.to_json(
        compute.InsertRegionCommitmentRequest(transcoded_request['query_params']),
        including_default_value_fields=False, use_integers_for_enums=False))
    query_params.update(self._get_unset_required_fields(query_params))
    headers = dict(metadata)
    headers['Content-Type'] = 'application/json'
    # Send the request over the authorized HTTP session.
    response = getattr(self._session, method)(
        'https://{host}{uri}'.format(host=self._host, uri=uri),
        timeout=timeout, headers=headers,
        params=rest_helpers.flatten_query_params(query_params), data=body)
    # Raise a gapic exception for any 4xx/5xx status.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)
    return compute.Operation.from_json(response.content, ignore_unknown_fields=True)
```
path: google/cloud/compute_v1/services/region_commitments/transports/rest.py · name: __call__ · repository: LaudateCorpus1/python-compute · stars: 0 · lang: python
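A hypothetical client-level insert; unlike the read-only calls above, this serializes a Commitment resource into the request body, and the POST returns an operation that tracks the asynchronous creation (all values are illustrative, and a real commitment would need its resources populated):

```python
from google.cloud import compute_v1

client = compute_v1.RegionCommitmentsClient()
commitment = compute_v1.Commitment(name='my-commitment', plan='TWELVE_MONTH')
operation = client.insert(project='my-project', region='us-central1',
                          commitment_resource=commitment)
# Depending on the library version this is a plain Operation message or an
# ExtendedOperation wrapper; either way it describes the pending insert.
```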
body_hash: 384b70f0890d75055cff8255a5bffe134aacf97d35443df4784d073961905956
```python
def __call__(self,
             request: compute.ListRegionCommitmentsRequest, *,
             retry: OptionalRetry = gapic_v1.method.DEFAULT,
             timeout: float = None,
             metadata: Sequence[Tuple[str, str]] = ()) -> compute.CommitmentList:
    """Call the list method over HTTP.

    Args:
        request (~.compute.ListRegionCommitmentsRequest):
            The request object. A request message for
            RegionCommitments.List. See the method
            description for details.
        retry (google.api_core.retry.Retry): Designation of what errors, if any,
            should be retried.
        timeout (float): The timeout for this request.
        metadata (Sequence[Tuple[str, str]]): Strings which should be
            sent along with the request as metadata.

    Returns:
        ~.compute.CommitmentList:
            Contains a list of Commitment
            resources.
    """
    http_options = [{'method': 'get',
                     'uri': '/compute/v1/projects/{project}/regions/{region}/commitments'}]
    request_kwargs = compute.ListRegionCommitmentsRequest.to_dict(request)
    # Fill the URI template from the request fields.
    transcoded_request = path_template.transcode(http_options, **request_kwargs)
    uri = transcoded_request['uri']
    method = transcoded_request['method']
    query_params = json.loads(compute.ListRegionCommitmentsRequest.to_json(
        compute.ListRegionCommitmentsRequest(transcoded_request['query_params']),
        including_default_value_fields=False, use_integers_for_enums=False))
    query_params.update(self._get_unset_required_fields(query_params))
    headers = dict(metadata)
    headers['Content-Type'] = 'application/json'
    # Send the request over the authorized HTTP session.
    response = getattr(self._session, method)(
        'https://{host}{uri}'.format(host=self._host, uri=uri),
        timeout=timeout, headers=headers,
        params=rest_helpers.flatten_query_params(query_params))
    # Raise a gapic exception for any 4xx/5xx status.
    if response.status_code >= 400:
        raise core_exceptions.from_http_response(response)
    return compute.CommitmentList.from_json(response.content, ignore_unknown_fields=True)
```
path: google/cloud/compute_v1/services/region_commitments/transports/rest.py · name: __call__ · repository: LaudateCorpus1/python-compute · stars: 0 · lang: python
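A hypothetical client-level listing; the returned pager follows page_token automatically, so the transport method above may be invoked several times during iteration (names are illustrative):

```python
from google.cloud import compute_v1

client = compute_v1.RegionCommitmentsClient()
for commitment in client.list(project='my-project', region='us-central1'):
    print(commitment.name)
```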
4f5857e111db227bac06443a57ac15211bee235a59be6030a1237993ad22dc6a
def __init__(self, W3=0.003, H2=0.003, Zs=36, init_dict=None): 'Constructor of the class. Can be used in two ways :\n - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value\n for Matrix, None will initialise the property with an empty Matrix\n for pyleecan type, None will call the default constructor\n - __init__ (init_dict = d) d must be a dictionary with every property as a key\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object' if (init_dict is not None): check_init_dict(init_dict, ['W3', 'H2', 'Zs']) if ('W3' in list(init_dict.keys())): W3 = init_dict['W3'] if ('H2' in list(init_dict.keys())): H2 = init_dict['H2'] if ('Zs' in list(init_dict.keys())): Zs = init_dict['Zs'] self.W3 = W3 self.H2 = H2 super(SlotW24, self).__init__(Zs=Zs)
Constructor of the class. Can be used in two ways :
- __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value
for Matrix, None will initialise the property with an empty Matrix
for pyleecan type, None will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with every property as a key

ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object
Classes/SlotW24.py
__init__
Superomeg4/pyleecan
0
python
def __init__(self, W3=0.003, H2=0.003, Zs=36, init_dict=None): 'Constructor of the class. Can be used in two ways :\n - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value\n for Matrix, None will initialise the property with an empty Matrix\n for pyleecan type, None will call the default constructor\n - __init__ (init_dict = d) d must be a dictionary with every property as a key\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object' if (init_dict is not None): check_init_dict(init_dict, ['W3', 'H2', 'Zs']) if ('W3' in list(init_dict.keys())): W3 = init_dict['W3'] if ('H2' in list(init_dict.keys())): H2 = init_dict['H2'] if ('Zs' in list(init_dict.keys())): Zs = init_dict['Zs'] self.W3 = W3 self.H2 = H2 super(SlotW24, self).__init__(Zs=Zs)
def __init__(self, W3=0.003, H2=0.003, Zs=36, init_dict=None): 'Constructor of the class. Can be used in two ways :\n - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value\n for Matrix, None will initialise the property with an empty Matrix\n for pyleecan type, None will call the default constructor\n - __init__ (init_dict = d) d must be a dictionary with every property as a key\n\n ndarray or list can be given for Vector and Matrix\n object or dict can be given for pyleecan Object' if (init_dict is not None): check_init_dict(init_dict, ['W3', 'H2', 'Zs']) if ('W3' in list(init_dict.keys())): W3 = init_dict['W3'] if ('H2' in list(init_dict.keys())): H2 = init_dict['H2'] if ('Zs' in list(init_dict.keys())): Zs = init_dict['Zs'] self.W3 = W3 self.H2 = H2 super(SlotW24, self).__init__(Zs=Zs)<|docstring|>Constructor of the class. Can be used in two ways :
- __init__ (arg1 = 1, arg3 = 5) every parameter has a name and a default value
for Matrix, None will initialise the property with an empty Matrix
for pyleecan type, None will call the default constructor
- __init__ (init_dict = d) d must be a dictionary with every property as a key

ndarray or list can be given for Vector and Matrix
object or dict can be given for pyleecan Object<|endoftext|>
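A minimal usage sketch of the two construction paths described above. The import path is a guess based on this record's path field, and the values are illustrative:

from pyleecan.Classes.SlotW24 import SlotW24

# Way 1: plain keyword arguments with defaults
slot_a = SlotW24(W3=0.004, H2=0.002, Zs=36)

# Way 2: an init_dict holding the same properties as keys
slot_b = SlotW24(init_dict={'W3': 0.004, 'H2': 0.002, 'Zs': 36})

assert slot_a == slot_b  # __eq__ (below) compares W3, H2 and the parent's Zs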
a3b0a7082993d47c043d10502823a761823c8d990c5f1c34a52ec6fa06156248
def __str__(self): 'Convert this object into a readable string (for print)' SlotW24_str = '' SlotW24_str += (super(SlotW24, self).__str__() + linesep) SlotW24_str += (('W3 = ' + str(self.W3)) + linesep) SlotW24_str += ('H2 = ' + str(self.H2)) return SlotW24_str
Convert this object into a readable string (for print)
Classes/SlotW24.py
__str__
Superomeg4/pyleecan
0
python
def __str__(self): SlotW24_str = '' SlotW24_str += (super(SlotW24, self).__str__() + linesep) SlotW24_str += (('W3 = ' + str(self.W3)) + linesep) SlotW24_str += ('H2 = ' + str(self.H2)) return SlotW24_str
def __str__(self): SlotW24_str = '' SlotW24_str += (super(SlotW24, self).__str__() + linesep) SlotW24_str += (('W3 = ' + str(self.W3)) + linesep) SlotW24_str += ('H2 = ' + str(self.H2)) return SlotW24_str<|docstring|>Convert this object into a readable string (for print)<|endoftext|>
5d6049173519bd4f579b8d222eefe4e83aab6131bffebff2577d821c76838d76
def __eq__(self, other): 'Compare two objects (skip parent)' if (type(other) != type(self)): return False if (not super(SlotW24, self).__eq__(other)): return False if (other.W3 != self.W3): return False if (other.H2 != self.H2): return False return True
Compare two objects (skip parent)
Classes/SlotW24.py
__eq__
Superomeg4/pyleecan
0
python
def __eq__(self, other): if (type(other) != type(self)): return False if (not super(SlotW24, self).__eq__(other)): return False if (other.W3 != self.W3): return False if (other.H2 != self.H2): return False return True
def __eq__(self, other): if (type(other) != type(self)): return False if (not super(SlotW24, self).__eq__(other)): return False if (other.W3 != self.W3): return False if (other.H2 != self.H2): return False return True<|docstring|>Compare two objects (skip parent)<|endoftext|>
a3a84312826d203ccb175ae53e3ac047657c6ca093bdc4a4c3df2e47cd8d3e76
def as_dict(self): 'Convert this object into a JSON serializable dict (can be used in __init__)\n ' SlotW24_dict = super(SlotW24, self).as_dict() SlotW24_dict['W3'] = self.W3 SlotW24_dict['H2'] = self.H2 SlotW24_dict['__class__'] = 'SlotW24' return SlotW24_dict
Convert this object into a JSON serializable dict (can be used in __init__)
Classes/SlotW24.py
as_dict
Superomeg4/pyleecan
0
python
def as_dict(self): SlotW24_dict = super(SlotW24, self).as_dict() SlotW24_dict['W3'] = self.W3 SlotW24_dict['H2'] = self.H2 SlotW24_dict['__class__'] = 'SlotW24' return SlotW24_dict
def as_dict(self): SlotW24_dict = super(SlotW24, self).as_dict() SlotW24_dict['W3'] = self.W3 SlotW24_dict['H2'] = self.H2 SlotW24_dict['__class__'] = 'SlotW24' return SlotW24_dict<|docstring|>Convert this object into a JSON serializable dict (can be used in __init__)<|endoftext|>
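A hedged sketch of the as_dict/init_dict round trip the docstring mentions. The printed dict contents and the need to drop '__class__' are assumptions, since check_init_dict is not part of this record:

slot = SlotW24(W3=0.004, H2=0.002, Zs=36)
d = slot.as_dict()   # parent keys plus 'W3', 'H2' and '__class__'
d.pop('__class__')   # precaution: check_init_dict may expect only property keys
assert SlotW24(init_dict=d) == slot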
63ca1f80e8c3fae78e12a57e74bbb47d462678abddd28e823e40c77079d040d5
def _set_None(self): 'Set all the properties to None (except pyleecan object)' self.W3 = None self.H2 = None super(SlotW24, self)._set_None()
Set all the properties to None (except pyleecan object)
Classes/SlotW24.py
_set_None
Superomeg4/pyleecan
0
python
def _set_None(self): self.W3 = None self.H2 = None super(SlotW24, self)._set_None()
def _set_None(self): self.W3 = None self.H2 = None super(SlotW24, self)._set_None()<|docstring|>Set all the properties to None (except pyleecan object)<|endoftext|>
db0c5c7ddabe7b22defd211bd4745c62c7d9d73ea2de24847b390acf80ec10c8
def _get_W3(self): 'getter of W3' return self._W3
getter of W3
Classes/SlotW24.py
_get_W3
Superomeg4/pyleecan
0
python
def _get_W3(self): return self._W3
def _get_W3(self): return self._W3<|docstring|>getter of W3<|endoftext|>
79d5927136e0d2b72442510bb9b888d900483e8fd43dcb357ea75fdf9776efdd
def _set_W3(self, value): 'setter of W3' check_var('W3', value, 'float', Vmin=0) self._W3 = value
setter of W3
Classes/SlotW24.py
_set_W3
Superomeg4/pyleecan
0
python
def _set_W3(self, value): check_var('W3', value, 'float', Vmin=0) self._W3 = value
def _set_W3(self, value): check_var('W3', value, 'float', Vmin=0) self._W3 = value<|docstring|>setter of W3<|endoftext|>
4456531bfd45b3d2d5de2ff45ac780cdc30fa16f31b405d7791a922380a33bda
def _get_H2(self): 'getter of H2' return self._H2
getter of H2
Classes/SlotW24.py
_get_H2
Superomeg4/pyleecan
0
python
def _get_H2(self): return self._H2
def _get_H2(self): return self._H2<|docstring|>getter of H2<|endoftext|>
76bcd51e088a21cae6a9cb34a9240aceed84295946f028b70abace9304ee6fa1
def _set_H2(self, value): 'setter of H2' check_var('H2', value, 'float', Vmin=0) self._H2 = value
setter of H2
Classes/SlotW24.py
_set_H2
Superomeg4/pyleecan
0
python
def _set_H2(self, value): check_var('H2', value, 'float', Vmin=0) self._H2 = value
def _set_H2(self, value): check_var('H2', value, 'float', Vmin=0) self._H2 = value<|docstring|>setter of H2<|endoftext|>
d4bb2bfbc03277fba32a700c3582b96dbe2d91e41ccfc8f09657764d652e9dfa
def get_coordinates(gnd_id): ' takes a GeoNames-ID and returns a dict with lat, lng ' g = Graph() WSG84 = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#') try: parsed = g.parse('https://www.geonames.org/{}/about.rdf'.format(gnd_id)) except Exception as e: print(e) return None if parsed: lat = [x for x in parsed.subject_objects(WSG84.lat)][0][1].value lng = [x for x in parsed.subject_objects(WSG84.long)][0][1].value return {'lat': lat, 'lng': lng} else: return None
takes a GeoNames-ID and returns a dict with lat, lng
entities/utils.py
get_coordinates
reading-in-the-alps/vfbr
0
python
def get_coordinates(gnd_id): g = Graph() WSG84 = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#') try: parsed = g.parse('https://www.geonames.org/{}/about.rdf'.format(gnd_id)) except Exception as e: print(e) return None if parsed: lat = [x for x in parsed.subject_objects(WSG84.lat)][0][1].value lng = [x for x in parsed.subject_objects(WSG84.long)][0][1].value return {'lat': lat, 'lng': lng} else: return None
def get_coordinates(gnd_id): g = Graph() WSG84 = Namespace('http://www.w3.org/2003/01/geo/wgs84_pos#') try: parsed = g.parse('https://www.geonames.org/{}/about.rdf'.format(gnd_id)) except Exception as e: print(e) return None if parsed: lat = [x for x in parsed.subject_objects(WSG84.lat)][0][1].value lng = [x for x in parsed.subject_objects(WSG84.long)][0][1].value return {'lat': lat, 'lng': lng} else: return None<|docstring|>takes a GeoNames-ID and returns a dict with lat, lng<|endoftext|>
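An illustrative call of the function above. It performs a live HTTP request to geonames.org via rdflib, and the GeoNames ID below is an example, not taken from the source:

coords = get_coordinates('2761369')  # e.g. Vienna
if coords is not None:
    print(coords['lat'], coords['lng'])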
be49493c67d047790713dcb22dd0bdca11e7e4a0127c1c5604a71c24ffb484fe
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): 'Set up the Bravia TV platform.' host = config[CONF_HOST] bravia_config_file_path = hass.config.path(BRAVIA_CONFIG_FILE) bravia_config = (await hass.async_add_executor_job(load_json, bravia_config_file_path)) if (not bravia_config): _LOGGER.error('Configuration import failed, there is no bravia.conf file in the configuration folder') return while bravia_config: (host_ip, host_config) = bravia_config.popitem() if (host_ip == host): pin = host_config[CONF_PIN] hass.async_create_task(hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_IMPORT}, data={CONF_HOST: host, CONF_PIN: pin})) return
Set up the Bravia TV platform.
homeassistant/components/braviatv/media_player.py
async_setup_platform
maurerle/core
5
python
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): host = config[CONF_HOST] bravia_config_file_path = hass.config.path(BRAVIA_CONFIG_FILE) bravia_config = (await hass.async_add_executor_job(load_json, bravia_config_file_path)) if (not bravia_config): _LOGGER.error('Configuration import failed, there is no bravia.conf file in the configuration folder') return while bravia_config: (host_ip, host_config) = bravia_config.popitem() if (host_ip == host): pin = host_config[CONF_PIN] hass.async_create_task(hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_IMPORT}, data={CONF_HOST: host, CONF_PIN: pin})) return
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None): host = config[CONF_HOST] bravia_config_file_path = hass.config.path(BRAVIA_CONFIG_FILE) bravia_config = (await hass.async_add_executor_job(load_json, bravia_config_file_path)) if (not bravia_config): _LOGGER.error('Configuration import failed, there is no bravia.conf file in the configuration folder') return while bravia_config: (host_ip, host_config) = bravia_config.popitem() if (host_ip == host): pin = host_config[CONF_PIN] hass.async_create_task(hass.config_entries.flow.async_init(DOMAIN, context={'source': SOURCE_IMPORT}, data={CONF_HOST: host, CONF_PIN: pin})) return<|docstring|>Set up the Bravia TV platform.<|endoftext|>
157515fdaf1e3a98f01d1167599d1133d3395308911797a4ea577ee41ed912a3
async def async_setup_entry(hass, config_entry, async_add_entities): 'Set up Bravia TV Media Player from a config_entry.' coordinator = hass.data[DOMAIN][config_entry.entry_id][BRAVIA_COORDINATOR] unique_id = config_entry.unique_id device_info = {'identifiers': {(DOMAIN, unique_id)}, 'name': DEFAULT_NAME, 'manufacturer': ATTR_MANUFACTURER, 'model': config_entry.title} async_add_entities([BraviaTVMediaPlayer(coordinator, DEFAULT_NAME, unique_id, device_info)])
Set up Bravia TV Media Player from a config_entry.
homeassistant/components/braviatv/media_player.py
async_setup_entry
maurerle/core
5
python
async def async_setup_entry(hass, config_entry, async_add_entities): coordinator = hass.data[DOMAIN][config_entry.entry_id][BRAVIA_COORDINATOR] unique_id = config_entry.unique_id device_info = {'identifiers': {(DOMAIN, unique_id)}, 'name': DEFAULT_NAME, 'manufacturer': ATTR_MANUFACTURER, 'model': config_entry.title} async_add_entities([BraviaTVMediaPlayer(coordinator, DEFAULT_NAME, unique_id, device_info)])
async def async_setup_entry(hass, config_entry, async_add_entities): coordinator = hass.data[DOMAIN][config_entry.entry_id][BRAVIA_COORDINATOR] unique_id = config_entry.unique_id device_info = {'identifiers': {(DOMAIN, unique_id)}, 'name': DEFAULT_NAME, 'manufacturer': ATTR_MANUFACTURER, 'model': config_entry.title} async_add_entities([BraviaTVMediaPlayer(coordinator, DEFAULT_NAME, unique_id, device_info)])<|docstring|>Set up Bravia TV Media Player from a config_entry.<|endoftext|>
81bd22d92d779836399da9b0aa58b30fd6918354ec63dee00598526a395b5220
def __init__(self, coordinator, name, unique_id, device_info): 'Initialize the entity.' self._name = name self._unique_id = unique_id self._device_info = device_info super().__init__(coordinator)
Initialize the entity.
homeassistant/components/braviatv/media_player.py
__init__
maurerle/core
5
python
def __init__(self, coordinator, name, unique_id, device_info): self._name = name self._unique_id = unique_id self._device_info = device_info super().__init__(coordinator)
def __init__(self, coordinator, name, unique_id, device_info): self._name = name self._unique_id = unique_id self._device_info = device_info super().__init__(coordinator)<|docstring|>Initialize the entity.<|endoftext|>
959514ff3ad36bfa3d9e493ec80719a74614a7e62c186c5799451855e3d3f810
@property def name(self): 'Return the name of the device.' return self._name
Return the name of the device.
homeassistant/components/braviatv/media_player.py
name
maurerle/core
5
python
@property def name(self): return self._name
@property def name(self): return self._name<|docstring|>Return the name of the device.<|endoftext|>
bbc24990453d9ce2e5567ef31f40b20a343650e2fe4e97cfc322928b08150336
@property def unique_id(self): 'Return a unique_id for this entity.' return self._unique_id
Return a unique_id for this entity.
homeassistant/components/braviatv/media_player.py
unique_id
maurerle/core
5
python
@property def unique_id(self): return self._unique_id
@property def unique_id(self): return self._unique_id<|docstring|>Return a unique_id for this entity.<|endoftext|>
5ed6caad8ee1bdd3d6694fbffe8bbec4befb02e267da8cf31285b977b31c8f2b
@property def device_info(self): 'Return the device info.' return self._device_info
Return the device info.
homeassistant/components/braviatv/media_player.py
device_info
maurerle/core
5
python
@property def device_info(self): return self._device_info
@property def device_info(self): return self._device_info<|docstring|>Return the device info.<|endoftext|>
cf65832884c6cbdd856af47c748a86a6d95084629097db8e799b80d25fe461da
@property def state(self): 'Return the state of the device.' if self.coordinator.is_on: return (STATE_PLAYING if self.coordinator.playing else STATE_PAUSED) return STATE_OFF
Return the state of the device.
homeassistant/components/braviatv/media_player.py
state
maurerle/core
5
python
@property def state(self): if self.coordinator.is_on: return (STATE_PLAYING if self.coordinator.playing else STATE_PAUSED) return STATE_OFF
@property def state(self): if self.coordinator.is_on: return (STATE_PLAYING if self.coordinator.playing else STATE_PAUSED) return STATE_OFF<|docstring|>Return the state of the device.<|endoftext|>
e83d969892985171cf3d5a379d5618fc5f6f15198b6ceedf8d7490209e1bacf8
@property def source(self): 'Return the current input source.' return self.coordinator.source
Return the current input source.
homeassistant/components/braviatv/media_player.py
source
maurerle/core
5
python
@property def source(self): return self.coordinator.source
@property def source(self): return self.coordinator.source<|docstring|>Return the current input source.<|endoftext|>
f9fc13a045390847e0aab4e6a18dceb8a9994ee5fcfdd4133a70879e6c45a4a0
@property def source_list(self): 'List of available input sources.' return self.coordinator.source_list
List of available input sources.
homeassistant/components/braviatv/media_player.py
source_list
maurerle/core
5
python
@property def source_list(self): return self.coordinator.source_list
@property def source_list(self): return self.coordinator.source_list<|docstring|>List of available input sources.<|endoftext|>
2880e5abd50aa4a57d98bcd2433f2ac830985827a8908186313941604523ef71
@property def volume_level(self): 'Volume level of the media player (0..1).' if (self.coordinator.volume is not None): return (self.coordinator.volume / 100) return None
Volume level of the media player (0..1).
homeassistant/components/braviatv/media_player.py
volume_level
maurerle/core
5
python
@property def volume_level(self): if (self.coordinator.volume is not None): return (self.coordinator.volume / 100) return None
@property def volume_level(self): if (self.coordinator.volume is not None): return (self.coordinator.volume / 100) return None<|docstring|>Volume level of the media player (0..1).<|endoftext|>
2f81d6a2bf069b560ac9a96e7897109fa5e6ed57c76f4df1f1d135028d527936
@property def is_volume_muted(self): 'Boolean if volume is currently muted.' return self.coordinator.muted
Boolean if volume is currently muted.
homeassistant/components/braviatv/media_player.py
is_volume_muted
maurerle/core
5
python
@property def is_volume_muted(self): return self.coordinator.muted
@property def is_volume_muted(self): return self.coordinator.muted<|docstring|>Boolean if volume is currently muted.<|endoftext|>
776d9e3fd3bf351a39ad4b072775a03f19fe1ecbd4db909d7a7bff57dd8697a1
@property def media_title(self): 'Title of current playing media.' return_value = None if (self.coordinator.channel_name is not None): return_value = self.coordinator.channel_name if (self.coordinator.program_name is not None): return_value = f'{return_value}: {self.coordinator.program_name}' return return_value
Title of current playing media.
homeassistant/components/braviatv/media_player.py
media_title
maurerle/core
5
python
@property def media_title(self): return_value = None if (self.coordinator.channel_name is not None): return_value = self.coordinator.channel_name if (self.coordinator.program_name is not None): return_value = f'{return_value}: {self.coordinator.program_name}' return return_value
@property def media_title(self): return_value = None if (self.coordinator.channel_name is not None): return_value = self.coordinator.channel_name if (self.coordinator.program_name is not None): return_value = f'{return_value}: {self.coordinator.program_name}' return return_value<|docstring|>Title of current playing media.<|endoftext|>
a56a86b5fdebeff1653c602fa58c14d7c60bccf6f789aeeeb81a07b63ea214cb
@property def media_content_id(self): 'Content ID of current playing media.' return self.coordinator.channel_name
Content ID of current playing media.
homeassistant/components/braviatv/media_player.py
media_content_id
maurerle/core
5
python
@property def media_content_id(self): return self.coordinator.channel_name
@property def media_content_id(self): return self.coordinator.channel_name<|docstring|>Content ID of current playing media.<|endoftext|>
402c48c8e7cca4069207a32e23c24054da6f2dc85e08e29f1e04b78a8c760823
@property def media_duration(self): 'Duration of current playing media in seconds.' return self.coordinator.duration
Duration of current playing media in seconds.
homeassistant/components/braviatv/media_player.py
media_duration
maurerle/core
5
python
@property def media_duration(self): return self.coordinator.duration
@property def media_duration(self): return self.coordinator.duration<|docstring|>Duration of current playing media in seconds.<|endoftext|>
301150404221e179cd2edc5b67708c1d89c133a6f4964f703fa492e13f643c8d
async def async_turn_on(self): 'Turn the device on.' (await self.coordinator.async_turn_on())
Turn the device on.
homeassistant/components/braviatv/media_player.py
async_turn_on
maurerle/core
5
python
async def async_turn_on(self): (await self.coordinator.async_turn_on())
async def async_turn_on(self): (await self.coordinator.async_turn_on())<|docstring|>Turn the device on.<|endoftext|>
4fc4317e914e5d234923a341ba06a291693bf9f7d46e616e01cc19a018d8763a
async def async_turn_off(self): 'Turn the device off.' (await self.coordinator.async_turn_off())
Turn the device off.
homeassistant/components/braviatv/media_player.py
async_turn_off
maurerle/core
5
python
async def async_turn_off(self): (await self.coordinator.async_turn_off())
async def async_turn_off(self): (await self.coordinator.async_turn_off())<|docstring|>Turn the device off.<|endoftext|>
ce1c3ac24c3379e1f5faa0f27038eafd1946f9420f0990ac62eebc0f444c49c8
async def async_set_volume_level(self, volume): 'Set volume level, range 0..1.' (await self.coordinator.async_set_volume_level(volume))
Set volume level, range 0..1.
homeassistant/components/braviatv/media_player.py
async_set_volume_level
maurerle/core
5
python
async def async_set_volume_level(self, volume): (await self.coordinator.async_set_volume_level(volume))
async def async_set_volume_level(self, volume): (await self.coordinator.async_set_volume_level(volume))<|docstring|>Set volume level, range 0..1.<|endoftext|>
c65deebb8fa426880a0026df4eb8b413ee97a72b3f58bd419832ee3b99fede36
async def async_volume_up(self): 'Send volume up command.' (await self.coordinator.async_volume_up())
Send volume up command.
homeassistant/components/braviatv/media_player.py
async_volume_up
maurerle/core
5
python
async def async_volume_up(self): (await self.coordinator.async_volume_up())
async def async_volume_up(self): (await self.coordinator.async_volume_up())<|docstring|>Send volume up command.<|endoftext|>
ccf8981de54cd3ad92771d226f33a74e4f12910ea87efd967c1bb18664ab44a8
async def async_volume_down(self): 'Send volume down command.' (await self.coordinator.async_volume_down())
Send volume down command.
homeassistant/components/braviatv/media_player.py
async_volume_down
maurerle/core
5
python
async def async_volume_down(self): (await self.coordinator.async_volume_down())
async def async_volume_down(self): (await self.coordinator.async_volume_down())<|docstring|>Send volume down command.<|endoftext|>
d74a05663c9c5d996b246e2bfd323b7e8b055e20408976f2fd89e44f51cee922
async def async_mute_volume(self, mute): 'Send mute command.' (await self.coordinator.async_volume_mute(mute))
Send mute command.
homeassistant/components/braviatv/media_player.py
async_mute_volume
maurerle/core
5
python
async def async_mute_volume(self, mute): (await self.coordinator.async_volume_mute(mute))
async def async_mute_volume(self, mute): (await self.coordinator.async_volume_mute(mute))<|docstring|>Send mute command.<|endoftext|>
1825d7c7aa309ffb7efa78cc7aa1da89a2abcb80fc98f7bc4fb9cea7e7f88c5b
async def async_select_source(self, source): 'Set the input source.' (await self.coordinator.async_select_source(source))
Set the input source.
homeassistant/components/braviatv/media_player.py
async_select_source
maurerle/core
5
python
async def async_select_source(self, source): (await self.coordinator.async_select_source(source))
async def async_select_source(self, source): (await self.coordinator.async_select_source(source))<|docstring|>Set the input source.<|endoftext|>
a866d75b87a267cc1cf1d507d273cf8f2811f1de0462b40863a055285a755374
async def async_media_play(self): 'Send play command.' (await self.coordinator.async_media_play())
Send play command.
homeassistant/components/braviatv/media_player.py
async_media_play
maurerle/core
5
python
async def async_media_play(self): (await self.coordinator.async_media_play())
async def async_media_play(self): (await self.coordinator.async_media_play())<|docstring|>Send play command.<|endoftext|>
8625d2b8486fc7604ac1ddb5c668323b4fa96c6d708f64be7654edcf8c4059cc
async def async_media_pause(self): 'Send pause command.' (await self.coordinator.async_media_pause())
Send pause command.
homeassistant/components/braviatv/media_player.py
async_media_pause
maurerle/core
5
python
async def async_media_pause(self): (await self.coordinator.async_media_pause())
async def async_media_pause(self): (await self.coordinator.async_media_pause())<|docstring|>Send pause command.<|endoftext|>
808fdaf28431bb72c987e99607e02d7491a6c9ce2e314236cb344898b6fe93b4
async def async_media_stop(self): 'Send media stop command to media player.' (await self.coordinator.async_media_stop())
Send media stop command to media player.
homeassistant/components/braviatv/media_player.py
async_media_stop
maurerle/core
5
python
async def async_media_stop(self): (await self.coordinator.async_media_stop())
async def async_media_stop(self): (await self.coordinator.async_media_stop())<|docstring|>Send media stop command to media player.<|endoftext|>
c2542d36e1fff3e1bc88042f2f1ce895ad529905fb57a763c48e5c0d40c46036
async def async_media_next_track(self): 'Send next track command.' (await self.coordinator.async_media_next_track())
Send next track command.
homeassistant/components/braviatv/media_player.py
async_media_next_track
maurerle/core
5
python
async def async_media_next_track(self): (await self.coordinator.async_media_next_track())
async def async_media_next_track(self): (await self.coordinator.async_media_next_track())<|docstring|>Send next track command.<|endoftext|>
c9357745e2b61f2fd6d2ff2f97521f891faa07bc45611248dc019d330606f3d1
async def async_media_previous_track(self): 'Send previous track command.' (await self.coordinator.async_media_previous_track())
Send previous track command.
homeassistant/components/braviatv/media_player.py
async_media_previous_track
maurerle/core
5
python
async def async_media_previous_track(self): (await self.coordinator.async_media_previous_track())
async def async_media_previous_track(self): (await self.coordinator.async_media_previous_track())<|docstring|>Send previous track command.<|endoftext|>
efe051ff99120a9965df08edfd39456a3d704ced1dd127d6820017a223cfee1c
def __call__(self, targets, logits, seq_length=None): '\n\t\tCompute the loss\n\n\t\tCreates the operation to compute the crossentropy multi loss\n\n\t\tArgs:\n\t\t\ttargets: a dictionary of [batch_size x ... x ...] tensor containing\n\t\t\t\tthe targets\n\t\t\tlogits: a dictionary of [batch_size x ... x ...] tensors containing the logits\n\n\t\tReturns:\n\t\t\tloss: a scalar value containing the loss\n\t\t\tnorm: a scalar value indicating how to normalize the loss\n\t\t' if (('av_anchors_time_flag' in self.lossconf) and (self.lossconf['av_anchors_time_flag'] in ['true', 'True'])): av_anchors_time_flag = True else: av_anchors_time_flag = False if (('resh_logits' in self.lossconf) and (self.lossconf['resh_logits'] in ['true', 'True'])): resh_logits = True else: resh_logits = False if (('allow_permutation' not in self.lossconf) or (self.lossconf['allow_permutation'] == 'True')): allow_permutation = True else: allow_permutation = False spkids = targets['spkids'] logits = logits['spkest'] if av_anchors_time_flag: logits = tf.reduce_mean(logits, 1) if resh_logits: nrS = spkids.get_shape()[1] logits = tf.reshape(logits, [self.batch_size, nrS, (- 1)]) (loss, norm) = ops.crossentropy_multi_loss(spkids, logits, self.batch_size, allow_permutation=allow_permutation) return (loss, norm)
Compute the loss Creates the operation to compute the crossentropy multi loss Args: targets: a dictionary of [batch_size x ... x ...] tensor containing the targets logits: a dictionary of [batch_size x ... x ...] tensors containing the logits Returns: loss: a scalar value containing the loss norm: a scalar value indicating how to normalize the loss
nabu/neuralnetworks/loss_computers/crossentropy_multi_loss.py
__call__
Darleen2019/Nabu-MSSS
18
python
def __call__(self, targets, logits, seq_length=None): '\n\t\tCompute the loss\n\n\t\tCreates the operation to compute the crossentropy multi loss\n\n\t\tArgs:\n\t\t\ttargets: a dictionary of [batch_size x ... x ...] tensor containing\n\t\t\t\tthe targets\n\t\t\tlogits: a dictionary of [batch_size x ... x ...] tensors containing the logits\n\n\t\tReturns:\n\t\t\tloss: a scalar value containing the loss\n\t\t\tnorm: a scalar value indicating how to normalize the loss\n\t\t' if (('av_anchors_time_flag' in self.lossconf) and (self.lossconf['av_anchors_time_flag'] in ['true', 'True'])): av_anchors_time_flag = True else: av_anchors_time_flag = False if (('resh_logits' in self.lossconf) and (self.lossconf['resh_logits'] in ['true', 'True'])): resh_logits = True else: resh_logits = False if (('allow_permutation' not in self.lossconf) or (self.lossconf['allow_permutation'] == 'True')): allow_permutation = True else: allow_permutation = False spkids = targets['spkids'] logits = logits['spkest'] if av_anchors_time_flag: logits = tf.reduce_mean(logits, 1) if resh_logits: nrS = spkids.get_shape()[1] logits = tf.reshape(logits, [self.batch_size, nrS, (- 1)]) (loss, norm) = ops.crossentropy_multi_loss(spkids, logits, self.batch_size, allow_permutation=allow_permutation) return (loss, norm)
def __call__(self, targets, logits, seq_length=None): '\n\t\tCompute the loss\n\n\t\tCreates the operation to compute the crossentropy multi loss\n\n\t\tArgs:\n\t\t\ttargets: a dictionary of [batch_size x ... x ...] tensor containing\n\t\t\t\tthe targets\n\t\t\tlogits: a dictionary of [batch_size x ... x ...] tensors containing the logits\n\n\t\tReturns:\n\t\t\tloss: a scalar value containing the loss\n\t\t\tnorm: a scalar value indicating how to normalize the loss\n\t\t' if (('av_anchors_time_flag' in self.lossconf) and (self.lossconf['av_anchors_time_flag'] in ['true', 'True'])): av_anchors_time_flag = True else: av_anchors_time_flag = False if (('resh_logits' in self.lossconf) and (self.lossconf['resh_logits'] in ['true', 'True'])): resh_logits = True else: resh_logits = False if (('allow_permutation' not in self.lossconf) or (self.lossconf['allow_permutation'] == 'True')): allow_permutation = True else: allow_permutation = False spkids = targets['spkids'] logits = logits['spkest'] if av_anchors_time_flag: logits = tf.reduce_mean(logits, 1) if resh_logits: nrS = spkids.get_shape()[1] logits = tf.reshape(logits, [self.batch_size, nrS, (- 1)]) (loss, norm) = ops.crossentropy_multi_loss(spkids, logits, self.batch_size, allow_permutation=allow_permutation) return (loss, norm)<|docstring|>Compute the loss Creates the operation to compute the crossentropy multi loss Args: targets: a dictionary of [batch_size x ... x ...] tensor containing the targets logits: a dictionary of [batch_size x ... x ...] tensors containing the logits Returns: loss: a scalar value containing the loss norm: a scalar value indicating how to normalize the loss<|endoftext|>
91fcbd304bb1d28ff6db07a5e82beaad22c5e6557724fc0efacefcfe1d4c2a37
def _get_file(genome_dir: str, fname: str, warn_missing: Optional[bool]=True): '\n Returns the filepath to a single (gzipped) file in the genome_dir with matching fname.\n ' fpath = os.path.join(genome_dir, fname) if os.path.exists(fpath): return fpath if os.path.exists(f'{fpath}.gz'): return f'{fpath}.gz' if warn_missing: logger.warning(f"Could not find '{fname}(.gz)' in directory {genome_dir}. Methods using this file won't work!") return
Returns the filepath to a single (gzipped) file in the genome_dir with matching fname.
genomepy/annotation/__init__.py
_get_file
vanheeringen-lab/genomepy
146
python
def _get_file(genome_dir: str, fname: str, warn_missing: Optional[bool]=True): fpath = os.path.join(genome_dir, fname) if os.path.exists(fpath): return fpath if os.path.exists(f'{fpath}.gz'): return f'{fpath}.gz' if warn_missing: logger.warning(f"Could not find '{fname}(.gz)' in directory {genome_dir}. Methods using this file won't work!") return
def _get_file(genome_dir: str, fname: str, warn_missing: Optional[bool]=True): fpath = os.path.join(genome_dir, fname) if os.path.exists(fpath): return fpath if os.path.exists(f'{fpath}.gz'): return f'{fpath}.gz' if warn_missing: logger.warning(f"Could not find '{fname}(.gz)' in directory {genome_dir}. Methods using this file won't work!") return<|docstring|>Returns the filepath to a single (gzipped) file in the genome_dir with matching fname.<|endoftext|>
ab592063a5436ead4f504a8c9ef410efa9b8fb3e251c8e6ae42367b23a00a96d
def filter_regex(df: pd.DataFrame, regex: str, invert_match: Optional[bool]=False, column: Union[(str, int)]=0) -> pd.DataFrame: '\n Filter a pandas dataframe by a column (default: 1st, contig name).\n\n Parameters\n ----------\n df: pd.Dataframe\n annotation to filter (a pandas dataframe)\n regex : str\n regex string to match\n invert_match : bool, optional\n keep contigs NOT matching the regex string\n column: str or int, optional\n column name or number to filter (default: 1st, contig name)\n\n Returns\n -------\n pd.DataFrame\n filtered dataframe\n ' if (column not in df.columns): if isinstance(column, int): column = df.columns[column] else: raise ValueError(f"Column '{column}' not found in annotation columns {list(df.columns)}") pattern = re.compile(regex) filter_func = df[column].map((lambda x: (bool(pattern.match(x)) is not invert_match))) return df[filter_func]
Filter a pandas dataframe by a column (default: 1st, contig name). Parameters ---------- df: pd.Dataframe annotation to filter (a pandas dataframe) regex : str regex string to match invert_match : bool, optional keep contigs NOT matching the regex string column: str or int, optional column name or number to filter (default: 1st, contig name) Returns ------- pd.DataFrame filtered dataframe
genomepy/annotation/__init__.py
filter_regex
vanheeringen-lab/genomepy
146
python
def filter_regex(df: pd.DataFrame, regex: str, invert_match: Optional[bool]=False, column: Union[(str, int)]=0) -> pd.DataFrame: '\n Filter a pandas dataframe by a column (default: 1st, contig name).\n\n Parameters\n ----------\n df: pd.Dataframe\n annotation to filter (a pandas dataframe)\n regex : str\n regex string to match\n invert_match : bool, optional\n keep contigs NOT matching the regex string\n column: str or int, optional\n column name or number to filter (default: 1st, contig name)\n\n Returns\n -------\n pd.DataFrame\n filtered dataframe\n ' if (column not in df.columns): if isinstance(column, int): column = df.columns[column] else: raise ValueError(f"Column '{column}' not found in annotation columns {list(df.columns)}") pattern = re.compile(regex) filter_func = df[column].map((lambda x: (bool(pattern.match(x)) is not invert_match))) return df[filter_func]
def filter_regex(df: pd.DataFrame, regex: str, invert_match: Optional[bool]=False, column: Union[(str, int)]=0) -> pd.DataFrame: '\n Filter a pandas dataframe by a column (default: 1st, contig name).\n\n Parameters\n ----------\n df: pd.Dataframe\n annotation to filter (a pandas dataframe)\n regex : str\n regex string to match\n invert_match : bool, optional\n keep contigs NOT matching the regex string\n column: str or int, optional\n column name or number to filter (default: 1st, contig name)\n\n Returns\n -------\n pd.DataFrame\n filtered dataframe\n ' if (column not in df.columns): if isinstance(column, int): column = df.columns[column] else: raise ValueError(f"Column '{column}' not found in annotation columns {list(df.columns)}") pattern = re.compile(regex) filter_func = df[column].map((lambda x: (bool(pattern.match(x)) is not invert_match))) return df[filter_func]<|docstring|>Filter a pandas dataframe by a column (default: 1st, contig name). Parameters ---------- df: pd.Dataframe annotation to filter (a pandas dataframe) regex : str regex string to match invert_match : bool, optional keep contigs NOT matching the regex string column: str or int, optional column name or number to filter (default: 1st, contig name) Returns ------- pd.DataFrame filtered dataframe<|endoftext|>
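A self-contained sketch of the module-level filter_regex above on a toy annotation dataframe; column names and values are illustrative:

import pandas as pd

bed = pd.DataFrame({'chrom': ['chr1', 'chr2', 'chrM', 'scaffold_12'], 'start': [100, 200, 1, 50], 'end': [500, 900, 300, 80]})

main = filter_regex(bed, regex=r'chr\d+', column='chrom')                     # keeps chr1 and chr2
rest = filter_regex(bed, regex=r'chr\d+', invert_match=True, column='chrom')  # keeps chrM and scaffold_12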
a952ce81102168fa95d229cc615702b2ac4dd08b62952ffce016c8383326202a
def genes(self, annot: str='bed') -> list: '\n Retrieve gene names from an annotation.\n\n For BED files, names are taken from the \'name\' columns.\n\n For GTF files, names are taken from the \'gene_name\' field\n in the attribute column, if available.\n\n Parameters\n ----------\n annot : str, optional\n Annotation file type: \'bed\' or \'gtf\' (default: "bed")\n\n Returns\n -------\n list\n gene names\n ' if (annot.lower() == 'bed'): return list(set(self.bed.name)) return list(set(self.named_gtf.index))
Retrieve gene names from an annotation. For BED files, names are taken from the 'name' columns. For GTF files, names are taken from the 'gene_name' field in the attribute column, if available. Parameters ---------- annot : str, optional Annotation file type: 'bed' or 'gtf' (default: "bed") Returns ------- list gene names
genomepy/annotation/__init__.py
genes
vanheeringen-lab/genomepy
146
python
def genes(self, annot: str='bed') -> list: '\n Retrieve gene names from an annotation.\n\n For BED files, names are taken from the \'name\' columns.\n\n For GTF files, names are taken from the \'gene_name\' field\n in the attribute column, if available.\n\n Parameters\n ----------\n annot : str, optional\n Annotation file type: \'bed\' or \'gtf\' (default: "bed")\n\n Returns\n -------\n list\n gene names\n ' if (annot.lower() == 'bed'): return list(set(self.bed.name)) return list(set(self.named_gtf.index))
def genes(self, annot: str='bed') -> list: '\n Retrieve gene names from an annotation.\n\n For BED files, names are taken from the \'name\' columns.\n\n For GTF files, names are taken from the \'gene_name\' field\n in the attribute column, if available.\n\n Parameters\n ----------\n annot : str, optional\n Annotation file type: \'bed\' or \'gtf\' (default: "bed")\n\n Returns\n -------\n list\n gene names\n ' if (annot.lower() == 'bed'): return list(set(self.bed.name)) return list(set(self.named_gtf.index))<|docstring|>Retrieve gene names from an annotation. For BED files, names are taken from the 'name' columns. For GTF files, names are taken from the 'gene_name' field in the attribute column, if available. Parameters ---------- annot : str, optional Annotation file type: 'bed' or 'gtf' (default: "bed") Returns ------- list gene names<|endoftext|>
81f2fc048cf633ac056e21640e320167248a10bb2170d3593997c9aeb916ec38
def gene_coords(self, genes: Iterable[str], annot: str='bed') -> pd.DataFrame: '\n Retrieve gene locations.\n\n Parameters\n ----------\n genes : Iterable\n List of gene names as found in the given annotation file type\n annot : str, optional\n Annotation file type: \'bed\' or \'gtf\' (default: "bed")\n\n Returns\n -------\n pandas.DataFrame\n gene annotation\n ' gene_list = list(genes) if (annot.lower() == 'bed'): df = self.bed.set_index('name') gene_info = df[['chrom', 'start', 'end', 'strand']] else: df = self.named_gtf df = df.groupby(['gene_name', 'seqname', 'strand']).agg({'start': np.min, 'end': np.max}).reset_index(level=['seqname', 'strand']) gene_info = df[['seqname', 'start', 'end', 'strand']] gene_info = gene_info.reindex(gene_list).dropna() pct = int(((100 * len(set(gene_info.index))) / len(gene_list))) if (pct < 90): logger.warning(((f'Only {pct}% of genes was found. ' if pct else 'No genes found. ') + 'A list of all gene names can be found with `Annotation.genes()`')) if (annot.lower() == 'bed'): return gene_info.reset_index()[['chrom', 'start', 'end', 'name', 'strand']] else: return gene_info.reset_index()[['seqname', 'start', 'end', 'gene_name', 'strand']]
Retrieve gene locations. Parameters ---------- genes : Iterable List of gene names as found in the given annotation file type annot : str, optional Annotation file type: 'bed' or 'gtf' (default: "bed") Returns ------- pandas.DataFrame gene annotation
genomepy/annotation/__init__.py
gene_coords
vanheeringen-lab/genomepy
146
python
def gene_coords(self, genes: Iterable[str], annot: str='bed') -> pd.DataFrame: '\n Retrieve gene locations.\n\n Parameters\n ----------\n genes : Iterable\n List of gene names as found in the given annotation file type\n annot : str, optional\n Annotation file type: \'bed\' or \'gtf\' (default: "bed")\n\n Returns\n -------\n pandas.DataFrame\n gene annotation\n ' gene_list = list(genes) if (annot.lower() == 'bed'): df = self.bed.set_index('name') gene_info = df[['chrom', 'start', 'end', 'strand']] else: df = self.named_gtf df = df.groupby(['gene_name', 'seqname', 'strand']).agg({'start': np.min, 'end': np.max}).reset_index(level=['seqname', 'strand']) gene_info = df[['seqname', 'start', 'end', 'strand']] gene_info = gene_info.reindex(gene_list).dropna() pct = int(((100 * len(set(gene_info.index))) / len(gene_list))) if (pct < 90): logger.warning(((f'Only {pct}% of genes was found. ' if pct else 'No genes found. ') + 'A list of all gene names can be found with `Annotation.genes()`')) if (annot.lower() == 'bed'): return gene_info.reset_index()[['chrom', 'start', 'end', 'name', 'strand']] else: return gene_info.reset_index()[['seqname', 'start', 'end', 'gene_name', 'strand']]
def gene_coords(self, genes: Iterable[str], annot: str='bed') -> pd.DataFrame: '\n Retrieve gene locations.\n\n Parameters\n ----------\n genes : Iterable\n List of gene names as found in the given annotation file type\n annot : str, optional\n Annotation file type: \'bed\' or \'gtf\' (default: "bed")\n\n Returns\n -------\n pandas.DataFrame\n gene annotation\n ' gene_list = list(genes) if (annot.lower() == 'bed'): df = self.bed.set_index('name') gene_info = df[['chrom', 'start', 'end', 'strand']] else: df = self.named_gtf df = df.groupby(['gene_name', 'seqname', 'strand']).agg({'start': np.min, 'end': np.max}).reset_index(level=['seqname', 'strand']) gene_info = df[['seqname', 'start', 'end', 'strand']] gene_info = gene_info.reindex(gene_list).dropna() pct = int(((100 * len(set(gene_info.index))) / len(gene_list))) if (pct < 90): logger.warning(((f'Only {pct}% of genes was found. ' if pct else 'No genes found. ') + 'A list of all gene names can be found with `Annotation.genes()`')) if (annot.lower() == 'bed'): return gene_info.reset_index()[['chrom', 'start', 'end', 'name', 'strand']] else: return gene_info.reset_index()[['seqname', 'start', 'end', 'gene_name', 'strand']]<|docstring|>Retrieve gene locations. Parameters ---------- genes : Iterable List of gene names as found in the given annotation file type annot : str, optional Annotation file type: 'bed' or 'gtf' (default: "bed") Returns ------- pandas.DataFrame gene annotation<|endoftext|>
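A hypothetical usage sketch for the gene_coords method above. It assumes a genomepy Annotation object for a genome that is already installed locally; the genome name and gene symbols are illustrative:

from genomepy import Annotation

ann = Annotation('hg38')  # assumes the hg38 annotation was downloaded beforehand
coords = ann.gene_coords(['TP53', 'BRCA1'], annot='bed')
print(coords)  # one row per gene found: chrom, start, end, name, strand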
0df2594904702dcf0b86237c6be22580f93a80950a218ea7aae5750ab848a617
def map_locations(self, annot: Union[(str, pd.DataFrame)], to: str, drop=True) -> Union[(None, pd.DataFrame)]: '\n Map chromosome locations from one assembly to another.\n\n Uses the NCBI assembly reports to find contigs.\n Drops missing contigs.\n\n Parameters\n ----------\n annot : str or pd.Dataframe\n annotation to map: "bed", "gtf" or a pandas dataframe.\n to: str\n target provider (UCSC, Ensembl or NCBI)\n drop: bool, optional\n if True, replace the chromosome column.\n If False, add a 2nd chromosome column.\n\n Returns\n -------\n pandas.DataFrame\n chromosome mapping.\n ' genomes_dir = os.path.dirname(self.genome_dir) mapping = map_locations(self.genome, to, genomes_dir) if (mapping is None): return df = _parse_annot(self, annot) index_name = df.index.name if (not (set(([index_name] + df.columns.to_list())) & {'chrom', 'seqname'})): raise ValueError("Location mapping requires a column named 'chrom' or 'seqname'.") is_indexed = (df.index.to_list() != list(range(df.shape[0]))) if is_indexed: df = df.reset_index(level=index_name) index_col = ('chrom' if ('chrom' in df.columns) else 'seqname') df = df.set_index(index_col) df = mapping.join(df, how='inner') df = df.reset_index(drop=drop) df.columns = ([index_col] + df.columns.to_list()[1:]) if is_indexed: df = df.set_index((index_name if index_name else 'index')) return df
Map chromosome locations from one assembly to another.

Uses the NCBI assembly reports to find contigs.
Drops missing contigs.

Parameters
----------
annot : str or pd.Dataframe
annotation to map: "bed", "gtf" or a pandas dataframe.
to: str
target provider (UCSC, Ensembl or NCBI)
drop: bool, optional
if True, replace the chromosome column.
If False, add a 2nd chromosome column.

Returns
-------
pandas.DataFrame
chromosome mapping.
genomepy/annotation/__init__.py
map_locations
vanheeringen-lab/genomepy
146
python
def map_locations(self, annot: Union[(str, pd.DataFrame)], to: str, drop=True) -> Union[(None, pd.DataFrame)]: '\n Map chromosome locations from one assembly to another.\n\n Uses the NCBI assembly reports to find contigs.\n Drops missing contigs.\n\n Parameters\n ----------\n annot : str or pd.Dataframe\n annotation to map: "bed", "gtf" or a pandas dataframe.\n to: str\n target provider (UCSC, Ensembl or NCBI)\n drop: bool, optional\n if True, replace the chromosome column.\n If False, add a 2nd chromosome column.\n\n Returns\n -------\n pandas.DataFrame\n chromosome mapping.\n ' genomes_dir = os.path.dirname(self.genome_dir) mapping = map_locations(self.genome, to, genomes_dir) if (mapping is None): return df = _parse_annot(self, annot) index_name = df.index.name if (not (set(([index_name] + df.columns.to_list())) & {'chrom', 'seqname'})): raise ValueError("Location mapping requires a column named 'chrom' or 'seqname'.") is_indexed = (df.index.to_list() != list(range(df.shape[0]))) if is_indexed: df = df.reset_index(level=index_name) index_col = ('chrom' if ('chrom' in df.columns) else 'seqname') df = df.set_index(index_col) df = mapping.join(df, how='inner') df = df.reset_index(drop=drop) df.columns = ([index_col] + df.columns.to_list()[1:]) if is_indexed: df = df.set_index((index_name if index_name else 'index')) return df
def map_locations(self, annot: Union[(str, pd.DataFrame)], to: str, drop=True) -> Union[(None, pd.DataFrame)]: '\n Map chromosome locations from one assembly to another.\n\n Uses the NCBI assembly reports to find contigs.\n Drops missing contigs.\n\n Parameters\n ----------\n annot : str or pd.Dataframe\n annotation to map: "bed", "gtf" or a pandas dataframe.\n to: str\n target provider (UCSC, Ensembl or NCBI)\n drop: bool, optional\n if True, replace the chromosome column.\n If False, add a 2nd chromosome column.\n\n Returns\n -------\n pandas.DataFrame\n chromosome mapping.\n ' genomes_dir = os.path.dirname(self.genome_dir) mapping = map_locations(self.genome, to, genomes_dir) if (mapping is None): return df = _parse_annot(self, annot) index_name = df.index.name if (not (set(([index_name] + df.columns.to_list())) & {'chrom', 'seqname'})): raise ValueError("Location mapping requires a column named 'chrom' or 'seqname'.") is_indexed = (df.index.to_list() != list(range(df.shape[0]))) if is_indexed: df = df.reset_index(level=index_name) index_col = ('chrom' if ('chrom' in df.columns) else 'seqname') df = df.set_index(index_col) df = mapping.join(df, how='inner') df = df.reset_index(drop=drop) df.columns = ([index_col] + df.columns.to_list()[1:]) if is_indexed: df = df.set_index((index_name if index_name else 'index')) return df<|docstring|>Map chromosome locations from one assembly to another.

Uses the NCBI assembly reports to find contigs.
Drops missing contigs.

Parameters
----------
annot : str or pd.Dataframe
annotation to map: "bed", "gtf" or a pandas dataframe.
to: str
target provider (UCSC, Ensembl or NCBI)
drop: bool, optional
if True, replace the chromosome column.
If False, add a 2nd chromosome column.

Returns
-------
pandas.DataFrame
chromosome mapping.<|endoftext|>
2dc52e74de8b214a6a899c8b52118e9e26d8705e5fd698a8fe110adc1234464a
def filter_regex(self, annot: Union[(str, pd.DataFrame)], regex: Optional[str]='.*', invert_match: Optional[bool]=False, column: Union[(str, int)]=0) -> pd.DataFrame: '\n Filter a dataframe by any column using regex.\n\n Parameters\n ----------\n annot : str or pd.Dataframe\n annotation to filter: "bed", "gtf" or a pandas dataframe\n regex : str\n regex string to match\n invert_match : bool, optional\n keep contigs NOT matching the regex string\n column: str or int, optional\n column name or number to filter (default: 1st, contig name)\n\n Returns\n -------\n pd.DataFrame\n filtered dataframe\n ' df = _parse_annot(self, annot) return filter_regex(df, regex, invert_match, column)
Filter a dataframe by any column using regex. Parameters ---------- annot : str or pd.Dataframe annotation to filter: "bed", "gtf" or a pandas dataframe regex : str regex string to match invert_match : bool, optional keep contigs NOT matching the regex string column: str or int, optional column name or number to filter (default: 1st, contig name) Returns ------- pd.DataFrame filtered dataframe
genomepy/annotation/__init__.py
filter_regex
vanheeringen-lab/genomepy
146
python
def filter_regex(self, annot: Union[(str, pd.DataFrame)], regex: Optional[str]='.*', invert_match: Optional[bool]=False, column: Union[(str, int)]=0) -> pd.DataFrame: '\n Filter a dataframe by any column using regex.\n\n Parameters\n ----------\n annot : str or pd.Dataframe\n annotation to filter: "bed", "gtf" or a pandas dataframe\n regex : str\n regex string to match\n invert_match : bool, optional\n keep contigs NOT matching the regex string\n column: str or int, optional\n column name or number to filter (default: 1st, contig name)\n\n Returns\n -------\n pd.DataFrame\n filtered dataframe\n ' df = _parse_annot(self, annot) return filter_regex(df, regex, invert_match, column)
def filter_regex(self, annot: Union[(str, pd.DataFrame)], regex: Optional[str]='.*', invert_match: Optional[bool]=False, column: Union[(str, int)]=0) -> pd.DataFrame: '\n Filter a dataframe by any column using regex.\n\n Parameters\n ----------\n annot : str or pd.Dataframe\n annotation to filter: "bed", "gtf" or a pandas dataframe\n regex : str\n regex string to match\n invert_match : bool, optional\n keep contigs NOT matching the regex string\n column: str or int, optional\n column name or number to filter (default: 1st, contig name)\n\n Returns\n -------\n pd.DataFrame\n filtered dataframe\n ' df = _parse_annot(self, annot) return filter_regex(df, regex, invert_match, column)<|docstring|>Filter a dataframe by any column using regex. Parameters ---------- annot : str or pd.Dataframe annotation to filter: "bed", "gtf" or a pandas dataframe regex : str regex string to match invert_match : bool, optional keep contigs NOT matching the regex string column: str or int, optional column name or number to filter (default: 1st, contig name) Returns ------- pd.DataFrame filtered dataframe<|endoftext|>
7e42c26584652ab6ada74d9670203cc31cc7db498aa7bca2820f68937fdefd91
def query_blastn(infile: str, query: str) -> list: '\n Checks a BLASTn file for matching the provided query string\n :param infile: Path to BLASTn file\n :param query: String containing query keywords\n :return: List of hits\n ' query = query.lower() query = query.split(' ') hits = [] with open(infile, 'r') as infile: for line in infile: line_flag = True for keyword in query: if (keyword not in line.lower()): line_flag = False if line_flag: hits.append(line.strip()) return hits
Checks a BLASTn file for matching the provided query string :param infile: Path to BLASTn file :param query: String containing query keywords :return: List of hits
BLASTn_Extract/blastn_extract.py
query_blastn
bfssi-forest-dussault/BLASTn_Extract
1
python
def query_blastn(infile: str, query: str) -> list: query = query.lower() query = query.split(' ') hits = [] with open(infile, 'r') as infile: for line in infile: line_flag = True for keyword in query: if (keyword not in line.lower()): line_flag = False if line_flag: hits.append(line.strip()) return hits
def query_blastn(infile: str, query: str) -> list: query = query.lower() query = query.split(' ') hits = [] with open(infile, 'r') as infile: for line in infile: line_flag = True for keyword in query: if (keyword not in line.lower()): line_flag = False if line_flag: hits.append(line.strip()) return hits<|docstring|>Checks a BLASTn file for matching the provided query string
:param infile: Path to BLASTn file
:param query: String containing query keywords
:return: List of hits<|endoftext|>
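A minimal usage sketch of query_blastn; 'results.blastn' is a hypothetical BLASTn output file and the keywords are illustrative:

hits = query_blastn('results.blastn', query='Salmonella plasmid')
for hit in hits:
    print(hit)  # every line containing all keywords, case-insensitively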
895a27a5a67f57574732309aeb65709a2402874fd93687d0ad53b2a67f45f977
def extract_contigs(contigs: str, node_dict: dict, outfile: str, delimiter: str) -> str: '\n Searches through a FASTA file and extracts only contigs that are in the provided node list\n :param contigs: FASTA file\n :param node_dict: Dictionary generated with extract_nodes()\n :param outfile: String path to output file\n :param delimiter: String delimiter to split BLASTn file on (defaults to tab)\n :return: String path to output FASTA\n ' outfile_ = open(outfile, 'w') write_flag = False with open(os.path.abspath(contigs), 'r') as infile: for line in infile: if line.startswith('>'): for (node, hit) in node_dict.items(): if (node in line): outfile_.write((((('>' + node.rsplit('_', 2)[0]) + ' ') + ''.join(hit.split(delimiter)[1:3])) + '\n')) write_flag = True break else: write_flag = False elif write_flag: outfile_.write(line) outfile_.close() return outfile
Searches through a FASTA file and extracts only contigs that are in the provided node list :param contigs: FASTA file :param node_dict: Dictionary generated with extract_nodes() :param outfile: String path to output file :param delimiter: String delimiter to split BLASTn file on (defaults to tab) :return: String path to output FASTA
BLASTn_Extract/blastn_extract.py
extract_contigs
bfssi-forest-dussault/BLASTn_Extract
1
python
def extract_contigs(contigs: str, node_dict: dict, outfile: str, delimiter: str) -> str: outfile_ = open(outfile, 'w') write_flag = False with open(os.path.abspath(contigs), 'r') as infile: for line in infile: if line.startswith('>'): for (node, hit) in node_dict.items(): if (node in line): outfile_.write((((('>' + node.rsplit('_', 2)[0]) + ' ') + ''.join(hit.split(delimiter)[1:3])) + '\n')) write_flag = True break else: write_flag = False elif write_flag: outfile_.write(line) outfile_.close() return outfile
def extract_contigs(contigs: str, node_dict: dict, outfile: str, delimiter: str) -> str: outfile_ = open(outfile, 'w') write_flag = False with open(os.path.abspath(contigs), 'r') as infile: for line in infile: if line.startswith('>'): for (node, hit) in node_dict.items(): if (node in line): outfile_.write((((('>' + node.rsplit('_', 2)[0]) + ' ') + ''.join(hit.split(delimiter)[1:3])) + '\n')) write_flag = True break else: write_flag = False elif write_flag: outfile_.write(line) outfile_.close() return outfile<|docstring|>Searches through a FASTA file and extracts only contigs that are in the provided node list
:param contigs: FASTA file
:param node_dict: Dictionary generated with extract_nodes()
:param outfile: String path to output file
:param delimiter: String delimiter to split BLASTn file on (defaults to tab)
:return: String path to output FASTA
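A hedged end-to-end sketch of extract_contigs. The extract_nodes() helper named in the docstring is not part of this record, so node_dict is built by hand here and all file names and values are illustrative:

node_dict = {'NODE_3_length_4121_cov_11.2': 'NODE_3\tIncFIB plasmid\t99.1'}
out = extract_contigs('assembly.fasta', node_dict, outfile='hits.fasta', delimiter='\t')
# hits.fasta now holds only the matching contig, with the BLASTn hit details in its header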
26d29c710004f5d9e045f6346608666c1d0aa463474b7dd22a8bc3f17f1eef55
def setup(): 'Sets up logging.' FORMAT = u'%(levelname)-8s [%(asctime)s] %(message)s ### %(filename)s[LINE:%(lineno)d]' logger = logging.getLogger() logger.addHandler(logging.StreamHandler()) logger.addHandler(logging.FileHandler(f'logs/{ctime()}.log')) coloredlogs.install(level='INFO', fmt=FORMAT, logger=logger) logging.getLogger('discord').setLevel(logging.WARNING)
Sets up logging.
core/logs.py
setup
A-Zalt/flasher-rewrite
0
python
def setup(): FORMAT = u'%(levelname)-8s [%(asctime)s] %(message)s ### %(filename)s[LINE:%(lineno)d]' logger = logging.getLogger() logger.addHandler(logging.StreamHandler()) logger.addHandler(logging.FileHandler(f'logs/{ctime()}.log')) coloredlogs.install(level='INFO', fmt=FORMAT, logger=logger) logging.getLogger('discord').setLevel(logging.WARNING)
def setup(): FORMAT = u'%(levelname)-8s [%(asctime)s] %(message)s ### %(filename)s[LINE:%(lineno)d]' logger = logging.getLogger() logger.addHandler(logging.StreamHandler()) logger.addHandler(logging.FileHandler(f'logs/{ctime()}.log')) coloredlogs.install(level='INFO', fmt=FORMAT, logger=logger) logging.getLogger('discord').setLevel(logging.WARNING)<|docstring|>Sets up logging.<|endoftext|>
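Two practical caveats with the `setup` record above: `logging.FileHandler` does not create the `logs/` directory, and `time.ctime()` returns a string containing colons and spaces, which is not a valid filename on Windows. A defensive variant, assuming the same `coloredlogs` API the original uses:

```python
import logging
import os
from datetime import datetime

import coloredlogs  # third-party package used by the original setup

FORMAT = '%(levelname)-8s [%(asctime)s] %(message)s ### %(filename)s[LINE:%(lineno)d]'

def setup():
    os.makedirs('logs', exist_ok=True)                    # FileHandler will not create it
    stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')  # filesystem-safe timestamp
    logger = logging.getLogger()
    logger.addHandler(logging.FileHandler(f'logs/{stamp}.log'))
    coloredlogs.install(level='INFO', fmt=FORMAT, logger=logger)
    logging.getLogger('discord').setLevel(logging.WARNING)
```

`coloredlogs.install` attaches its own console handler, so the explicit `StreamHandler` in the original can lead to duplicated console lines; the variant drops it.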
dd661e8daf26472b15e39006a1ca4339d93e14ae762efc8b7248827f8a37efd4
def max_index(X): 'Return the index of the maximum in a numpy array.\n\n    Parameters\n    ----------\n    X : ndarray of shape (n_samples, n_features)\n        The input array.\n\n    Returns\n    -------\n    i : int\n        The row index of the maximum.\n    j : int\n        The column index of the maximum.\n\n    Raises\n    ------\n    ValueError\n        If the input is not a numpy array or\n        if the shape is not 2D.\n    ' if ((X is None) or (type(X) is not np.ndarray)): raise ValueError('The input array X is None or not a numpy array.') if (len(X.shape) != 2): raise ValueError('The shape is not 2D.') s = X.shape[1] result = np.argmax(X) i = (result // s) j = (result % s) return (i, j)
Return the index of the maximum in a numpy array. Parameters ---------- X : ndarray of shape (n_samples, n_features) The input array. Returns ------- i : int The row index of the maximum. j : int The column index of the maximum. Raises ------ ValueError If the input is not a numpy array or if the shape is not 2D.
numpy_questions.py
max_index
OlivierBn/datacamp-assignment1
0
python
def max_index(X): 'Return the index of the maximum in a numpy array.\n\n    Parameters\n    ----------\n    X : ndarray of shape (n_samples, n_features)\n        The input array.\n\n    Returns\n    -------\n    i : int\n        The row index of the maximum.\n    j : int\n        The column index of the maximum.\n\n    Raises\n    ------\n    ValueError\n        If the input is not a numpy array or\n        if the shape is not 2D.\n    ' if ((X is None) or (type(X) is not np.ndarray)): raise ValueError('The input array X is None or not a numpy array.') if (len(X.shape) != 2): raise ValueError('The shape is not 2D.') s = X.shape[1] result = np.argmax(X) i = (result // s) j = (result % s) return (i, j)
def max_index(X): 'Return the index of the maximum in a numpy array.\n\n    Parameters\n    ----------\n    X : ndarray of shape (n_samples, n_features)\n        The input array.\n\n    Returns\n    -------\n    i : int\n        The row index of the maximum.\n    j : int\n        The column index of the maximum.\n\n    Raises\n    ------\n    ValueError\n        If the input is not a numpy array or\n        if the shape is not 2D.\n    ' if ((X is None) or (type(X) is not np.ndarray)): raise ValueError('The input array X is None or not a numpy array.') if (len(X.shape) != 2): raise ValueError('The shape is not 2D.') s = X.shape[1] result = np.argmax(X) i = (result // s) j = (result % s) return (i, j)<|docstring|>Return the index of the maximum in a numpy array. Parameters ---------- X : ndarray of shape (n_samples, n_features) The input array. Returns ------- i : int The row index of the maximum. j : int The column index of the maximum. Raises ------ ValueError If the input is not a numpy array or if the shape is not 2D.<|endoftext|>
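Recovering `(i, j)` from a flattened `np.argmax` via divmod only works when dividing by the number of columns (`X.shape[1]`, as fixed above; the original `X.shape[0]` is correct only for square arrays). NumPy's `np.unravel_index` does the same for any shape:

```python
import numpy as np

X = np.array([[0.1, 0.9, 0.3],
              [0.2, 0.4, 0.8]])       # 2 x 3, maximum at row 0, column 1

flat = np.argmax(X)                   # index into the C-order flattened array -> 1
i, j = divmod(flat, X.shape[1])       # divide by the number of columns
assert (i, j) == np.unravel_index(flat, X.shape) == (0, 1)
```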
3c89573b067da9ccff22291dca03066ed9a44c07c4b24ff51169613409adaa1c
def wallis_product(n_terms): 'Implement the Wallis product to compute an approximation of pi.\n\n See:\n https://en.wikipedia.org/wiki/Wallis_product\n\n ' x = 2 for n in range(1, (n_terms + 1)): x *= ((4 * (n ** 2)) / ((4 * (n ** 2)) - 1)) return x
Implement the Wallis product to compute an approximation of pi. See: https://en.wikipedia.org/wiki/Wallis_product
numpy_questions.py
wallis_product
OlivierBn/datacamp-assignment1
0
python
def wallis_product(n_terms): 'Implement the Wallis product to compute an approximation of pi.\n\n See:\n https://en.wikipedia.org/wiki/Wallis_product\n\n ' x = 2 for n in range(1, (n_terms + 1)): x *= ((4 * (n ** 2)) / ((4 * (n ** 2)) - 1)) return x
def wallis_product(n_terms): 'Implement the Wallis product to compute an approximation of pi.\n\n See:\n https://en.wikipedia.org/wiki/Wallis_product\n\n ' x = 2 for n in range(1, (n_terms + 1)): x *= ((4 * (n ** 2)) / ((4 * (n ** 2)) - 1)) return x<|docstring|>Implement the Wallis product to compute an approximation of pi. See: https://en.wikipedia.org/wiki/Wallis_product<|endoftext|>
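The Wallis product converges to pi only at roughly a pi/(4n) rate, so many terms are needed for modest accuracy. A quick convergence check (printed values rounded):

```python
import math

def wallis_product(n_terms):
    x = 2
    for n in range(1, n_terms + 1):
        x *= (4 * n**2) / (4 * n**2 - 1)
    return x

for n in (1, 10, 1000):
    print(n, round(wallis_product(n), 4), round(abs(wallis_product(n) - math.pi), 4))
# 1     2.6667  0.4749
# 10    3.0677  0.0739   -- error is roughly pi / (4 * n)
# 1000  3.1408  0.0008
```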
571e3f5108e89a4f49a33de2f67867489524b7fdb06c0e3739aa940b2ea78924
def __init__(self, **kwargs): "\n :param data: input DataFrame include ['user_id', 'item_id', 'timestamp']\n " pass
:param data: input DataFrame include ['user_id', 'item_id', 'timestamp']
build/lib/DMRecall/baseclass/recall.py
__init__
busesese/DMRecall
9
python
def __init__(self, **kwargs): "\n \n " pass
def __init__(self, **kwargs): "\n \n " pass<|docstring|>:param data: input DataFrame include ['user_id', 'item_id', 'timestamp']<|endoftext|>
7ef5ac1ea4ed36331310ad878407979f060ee20c8c54b3a2ea708fda48a2f6b2
def processData(self): '\n process data before train to get the right data format\n :return:\n ' pass
process data before train to get the right data format :return:
build/lib/DMRecall/baseclass/recall.py
processData
busesese/DMRecall
9
python
def processData(self): '\n process data before train to get the right data format\n :return:\n ' pass
def processData(self): '\n process data before train to get the right data format\n :return:\n ' pass<|docstring|>process data before train to get the right data format :return:<|endoftext|>
d4f2e2a2a19bb7f82e44e78146b6c12829e8cb96ec2fc3057b1612f69c456bd1
def train(self): '\n train the data\n :return:\n ' pass
train the data :return:
build/lib/DMRecall/baseclass/recall.py
train
busesese/DMRecall
9
python
def train(self): '\n train the data\n :return:\n ' pass
def train(self): '\n train the data\n :return:\n ' pass<|docstring|>train the data :return:<|endoftext|>
0ddac82ec73767cb03c6e6bb592eff8ff2da8207840da18e6abcd30eb24bdf5d
def predict(self, items, k=10): '\n        predict result for a given user\n        :param items: list, user recent behavior item list\n        :param k: predict top k result\n        :return: list, top k recommended items\n        ' result = dict() if isinstance(items, list): for item in items: if (item in self.item_similarity): for (i, val) in self.item_similarity[item].items(): if (i not in items): if (i not in result): result[i] = val else: result[i] += val return [i for (i, val) in sorted(result.items(), key=(lambda x: x[1]), reverse=True)[:k]] else: raise TypeError('Input parameter type is not list')
predict result for a given user :param items: list, user recent behavior item list :param k: predict top k result :return: list, top k recommended items
build/lib/DMRecall/baseclass/recall.py
predict
busesese/DMRecall
9
python
def predict(self, items, k=10): '\n        predict result for a given user\n        :param items: list, user recent behavior item list\n        :param k: predict top k result\n        :return: list, top k recommended items\n        ' result = dict() if isinstance(items, list): for item in items: if (item in self.item_similarity): for (i, val) in self.item_similarity[item].items(): if (i not in items): if (i not in result): result[i] = val else: result[i] += val return [i for (i, val) in sorted(result.items(), key=(lambda x: x[1]), reverse=True)[:k]] else: raise TypeError('Input parameter type is not list')
def predict(self, items, k=10): '\n        predict result for a given user\n        :param items: list, user recent behavior item list\n        :param k: predict top k result\n        :return: list, top k recommended items\n        ' result = dict() if isinstance(items, list): for item in items: if (item in self.item_similarity): for (i, val) in self.item_similarity[item].items(): if (i not in items): if (i not in result): result[i] = val else: result[i] += val return [i for (i, val) in sorted(result.items(), key=(lambda x: x[1]), reverse=True)[:k]] else: raise TypeError('Input parameter type is not list')<|docstring|>predict result for a given user :param items: list, user recent behavior item list :param k: predict top k result :return: list, top k recommended items<|endoftext|>
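`predict` accumulates the similarity scores of the neighbours of every item the user has touched, skips items already in the history, and returns the top-`k` keys. A condensed rerun of that logic with an invented similarity table:

```python
# Illustrative item-item similarity table (values invented for the sketch).
item_similarity = {
    'a': {'b': 0.9, 'c': 0.4},
    'b': {'a': 0.9, 'c': 0.7, 'd': 0.2},
}

def predict(items, k=10):
    result = {}
    for item in items:
        for i, val in item_similarity.get(item, {}).items():
            if i not in items:                      # never recommend what was already seen
                result[i] = result.get(i, 0) + val  # accumulate scores across seed items
    return [i for i, v in sorted(result.items(), key=lambda x: x[1], reverse=True)[:k]]

print(predict(['a', 'b'], k=2))   # ['c', 'd'] -- 'c' scores 0.4 + 0.7 = 1.1
```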
f0c0bc2266e0a4e4e9696b5104f20b7e1e8462c68fd94d18b25ab12f5920e7fc
def recommend(self, data_pre, k=10): '\n recommend result for given users\n :param data_pre: dict, include user id and user recent behavior item\n :param k: int predict top k result\n :return: dict, key = user and value = user result\n ' result = dict() for (uid, item_list) in data_pre.items(): pred = self.predict(item_list, k) result[uid] = pred return result
recommend result for given users :param data_pre: dict, include user id and user recent behavior item :param k: int predict top k result :return: dict, key = user and value = user result
build/lib/DMRecall/baseclass/recall.py
recommend
busesese/DMRecall
9
python
def recommend(self, data_pre, k=10): '\n recommend result for given users\n :param data_pre: dict, include user id and user recent behavior item\n :param k: int predict top k result\n :return: dict, key = user and value = user result\n ' result = dict() for (uid, item_list) in data_pre.items(): pred = self.predict(item_list, k) result[uid] = pred return result
def recommend(self, data_pre, k=10): '\n recommend result for given users\n :param data_pre: dict, include user id and user recent behavior item\n :param k: int predict top k result\n :return: dict, key = user and value = user result\n ' result = dict() for (uid, item_list) in data_pre.items(): pred = self.predict(item_list, k) result[uid] = pred return result<|docstring|>recommend result for given users :param data_pre: dict, include user id and user recent behavior item :param k: int predict top k result :return: dict, key = user and value = user result<|endoftext|>
659b8033daefb899d9076573225049ee9465d07bb0309b8a01d915eb0f519f41
def AddPageWithDefaultRunNavigate(self, page_url): ' Add a simple page whose url equals page_url and that contains only the default\n    RunNavigateSteps.\n    ' self.AddUserStory(page_module.Page(page_url, self, self.base_dir))
Add a simple page whose url equals page_url and that contains only the default RunNavigateSteps.
tools/telemetry/telemetry/page/page_set.py
AddPageWithDefaultRunNavigate
sunjc53yy/chromium
0
python
def AddPageWithDefaultRunNavigate(self, page_url): ' Add a simple page whose url equals page_url and that contains only the default\n    RunNavigateSteps.\n    ' self.AddUserStory(page_module.Page(page_url, self, self.base_dir))
def AddPageWithDefaultRunNavigate(self, page_url): ' Add a simple page whose url equals page_url and that contains only the default\n    RunNavigateSteps.\n    ' self.AddUserStory(page_module.Page(page_url, self, self.base_dir))<|docstring|>Add a simple page whose url equals page_url and that contains only the default RunNavigateSteps.<|endoftext|>
be25ce8a4f59095f42de122d1d8a3a894a9c4fd906313ea126990e0ae5372d1c
@property def wpr_archive_info(self): "Lazily constructs wpr_archive_info if it's not set and returns it." if (self.archive_data_file and (not self._wpr_archive_info)): self._wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(os.path.join(self.base_dir, self.archive_data_file)) return self._wpr_archive_info
Lazily constructs wpr_archive_info if it's not set and returns it.
tools/telemetry/telemetry/page/page_set.py
wpr_archive_info
sunjc53yy/chromium
0
python
@property def wpr_archive_info(self): if (self.archive_data_file and (not self._wpr_archive_info)): self._wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(os.path.join(self.base_dir, self.archive_data_file)) return self._wpr_archive_info
@property def wpr_archive_info(self): if (self.archive_data_file and (not self._wpr_archive_info)): self._wpr_archive_info = page_set_archive_info.PageSetArchiveInfo.FromFile(os.path.join(self.base_dir, self.archive_data_file)) return self._wpr_archive_info<|docstring|>Lazily constructs wpr_archive_info if it's not set and returns it.<|endoftext|>
457944cd6b4f3ecc07207fe7dc64aa93d7e1a2a08ff0faa77142d92be8cf2cb4
def ReorderPageSet(self, results_file): 'Reorders this page set based on the results of a past run.' page_set_dict = {} for page in self.user_stories: page_set_dict[page.url] = page user_stories = [] with open(results_file, 'rb') as csv_file: csv_reader = csv.reader(csv_file) csv_header = csv_reader.next() if ('url' not in csv_header): raise Exception('Unusable results_file.') url_index = csv_header.index('url') for csv_row in csv_reader: if (csv_row[url_index] in page_set_dict): self.AddPage(page_set_dict[csv_row[url_index]]) else: raise Exception('Unusable results_file.') return user_stories
Reorders this page set based on the results of a past run.
tools/telemetry/telemetry/page/page_set.py
ReorderPageSet
sunjc53yy/chromium
0
python
def ReorderPageSet(self, results_file): page_set_dict = {} for page in self.user_stories: page_set_dict[page.url] = page user_stories = [] with open(results_file, 'rb') as csv_file: csv_reader = csv.reader(csv_file) csv_header = csv_reader.next() if ('url' not in csv_header): raise Exception('Unusable results_file.') url_index = csv_header.index('url') for csv_row in csv_reader: if (csv_row[url_index] in page_set_dict): self.AddPage(page_set_dict[csv_row[url_index]]) else: raise Exception('Unusable results_file.') return user_stories
def ReorderPageSet(self, results_file): page_set_dict = {} for page in self.user_stories: page_set_dict[page.url] = page user_stories = [] with open(results_file, 'rb') as csv_file: csv_reader = csv.reader(csv_file) csv_header = csv_reader.next() if ('url' not in csv_header): raise Exception('Unusable results_file.') url_index = csv_header.index('url') for csv_row in csv_reader: if (csv_row[url_index] in page_set_dict): self.AddPage(page_set_dict[csv_row[url_index]]) else: raise Exception('Unusable results_file.') return user_stories<|docstring|>Reorders this page set based on the results of a past run.<|endoftext|>
2549015a0af542a8c7381c950b13af3ab634148415deb675f1f77f1969001ccb
@conan_command(group='Consumer', formatters={'cli': output_search_cli, 'json': output_search_json}) def search(conan_api, parser, *args, **kwargs): '\n    Searches for package recipes whose names contain <query> in a remote or in the local cache\n    ' parser.add_argument('query', help="Search query to find package recipe reference, e.g., 'boost', 'lib*'") exclusive_args = parser.add_mutually_exclusive_group() exclusive_args.add_argument('-r', '--remote', default=None, action=Extender, help='Remote to search. Accepts wildcards. To search in all remotes use *') exclusive_args.add_argument('-c', '--cache', action='store_true', help='Search in the local cache') args = parser.parse_args(*args) remotes = (args.remote or []) info = conan_api.search_recipes(args.query, remote_patterns=remotes, local_cache=args.cache) return info
Searches for package recipes whose names contain <query> in a remote or in the local cache
conans/cli/commands/search.py
search
photex/conan
6,205
python
@conan_command(group='Consumer', formatters={'cli': output_search_cli, 'json': output_search_json}) def search(conan_api, parser, *args, **kwargs): '\n \n ' parser.add_argument('query', help="Search query to find package recipe reference, e.g., 'boost', 'lib*'") exclusive_args = parser.add_mutually_exclusive_group() exclusive_args.add_argument('-r', '--remote', default=None, action=Extender, help='Remote to search. Accepts wildcards. To search in all remotes use *') exclusive_args.add_argument('-c', '--cache', action='store_true', help='Search in the local cache') args = parser.parse_args(*args) remotes = (args.remote or []) info = conan_api.search_recipes(args.query, remote_patterns=remotes, local_cache=args.cache) return info
@conan_command(group='Consumer', formatters={'cli': output_search_cli, 'json': output_search_json}) def search(conan_api, parser, *args, **kwargs): '\n    \n    ' parser.add_argument('query', help="Search query to find package recipe reference, e.g., 'boost', 'lib*'") exclusive_args = parser.add_mutually_exclusive_group() exclusive_args.add_argument('-r', '--remote', default=None, action=Extender, help='Remote to search. Accepts wildcards. To search in all remotes use *') exclusive_args.add_argument('-c', '--cache', action='store_true', help='Search in the local cache') args = parser.parse_args(*args) remotes = (args.remote or []) info = conan_api.search_recipes(args.query, remote_patterns=remotes, local_cache=args.cache) return info<|docstring|>Searches for package recipes whose names contain <query> in a remote or in the local cache<|endoftext|>
261ab643d4c25cd6cc0c08ae766d6711b8f3bc59d6ee6a8dbddbab2abe414aef
def create_sun(self): ' \n create a yellow sun, with coordinates (0,0,0) \n ' sun_source = vtkSphereSource() sun_source.SetRadius(0.2) sun_source.SetThetaResolution(20) sun_source.SetPhiResolution(20) sun_mapper = vtkPolyDataMapper() self.planets['sun'] = Planet('sun', sun_source, sun_mapper) self.planets['sun'].actor.GetProperty().SetColor(1, 1, 0) self.renderer.AddActor(self.planets['sun'].actor)
create a yellow sun, with coordinates (0,0,0)
sandbox/src1/TCSE3-3rd-examples/src/py/examples/canvas/vtkplanet.py
create_sun
sniemi/SamPy
5
python
def create_sun(self): ' \n \n ' sun_source = vtkSphereSource() sun_source.SetRadius(0.2) sun_source.SetThetaResolution(20) sun_source.SetPhiResolution(20) sun_mapper = vtkPolyDataMapper() self.planets['sun'] = Planet('sun', sun_source, sun_mapper) self.planets['sun'].actor.GetProperty().SetColor(1, 1, 0) self.renderer.AddActor(self.planets['sun'].actor)
def create_sun(self): ' \n \n ' sun_source = vtkSphereSource() sun_source.SetRadius(0.2) sun_source.SetThetaResolution(20) sun_source.SetPhiResolution(20) sun_mapper = vtkPolyDataMapper() self.planets['sun'] = Planet('sun', sun_source, sun_mapper) self.planets['sun'].actor.GetProperty().SetColor(1, 1, 0) self.renderer.AddActor(self.planets['sun'].actor)<|docstring|>create a yellow sun, with coordinates (0,0,0)<|endoftext|>
ae6c3a2d7abad33b741a193a1815e84bbd823c5314887f01e66b580f70756056
def create_planet(self, name, x, y, z): ' \n create a planet (with disk) at specified coordinates \n ' if ((self.__planet_source == None) and (self.__planet_mapper == None)): self.__planet_source = vtkSphereSource() self.__planet_source.SetRadius(0.1) self.__planet_source.SetThetaResolution(20) self.__planet_source.SetPhiResolution(20) self.__planet_mapper = vtkPolyDataMapper() self.planets[name] = Planet(name, self.__planet_source, self.__planet_mapper, x, y, z, with_disk=1) self.renderer.AddActor(self.planets[name].actor) self.renderer.AddActor(self.planets[name].disk_actor)
create a planet (with disk) at specified coordinates
sandbox/src1/TCSE3-3rd-examples/src/py/examples/canvas/vtkplanet.py
create_planet
sniemi/SamPy
5
python
def create_planet(self, name, x, y, z): ' \n \n ' if ((self.__planet_source == None) and (self.__planet_mapper == None)): self.__planet_source = vtkSphereSource() self.__planet_source.SetRadius(0.1) self.__planet_source.SetThetaResolution(20) self.__planet_source.SetPhiResolution(20) self.__planet_mapper = vtkPolyDataMapper() self.planets[name] = Planet(name, self.__planet_source, self.__planet_mapper, x, y, z, with_disk=1) self.renderer.AddActor(self.planets[name].actor) self.renderer.AddActor(self.planets[name].disk_actor)
def create_planet(self, name, x, y, z): ' \n \n ' if ((self.__planet_source == None) and (self.__planet_mapper == None)): self.__planet_source = vtkSphereSource() self.__planet_source.SetRadius(0.1) self.__planet_source.SetThetaResolution(20) self.__planet_source.SetPhiResolution(20) self.__planet_mapper = vtkPolyDataMapper() self.planets[name] = Planet(name, self.__planet_source, self.__planet_mapper, x, y, z, with_disk=1) self.renderer.AddActor(self.planets[name].actor) self.renderer.AddActor(self.planets[name].disk_actor)<|docstring|>create a planet (with disk) at specified coordinates<|endoftext|>
1e8d9c9c16c7d696167fcec3e73c443cec3b84330983d707180b3e0a2ef48659
def generate_random_password(length=9, letters=True, digits=True, punctuation=True, ignored_chars=''): '\n Generates a cryptographically secure random password\n :param length: Length of password\n :param letters: True to use letters in password\n :param digits: True to use digits in password\n :param punctuation: True to use punctuation in password\n :param ignored_chars: str containing all the characters that should be ignored during generation\n :return: A dictionary containing the password and entropy\n ' if (not (letters or digits or punctuation)): raise ParameterError('At least one set of characters must be selected for a password to be generated') char_pool = '' if letters: char_pool += string.ascii_letters if digits: char_pool += string.digits if punctuation: char_pool += string.punctuation char_list = [char for char in char_pool if (char not in ignored_chars)] result = '' for _ in range(length): result += secrets.choice(char_list) return {'password': result, 'entropy': __calc_entropy_password(result, len(char_list))}
Generates a cryptographically secure random password :param length: Length of password :param letters: True to use letters in password :param digits: True to use digits in password :param punctuation: True to use punctuation in password :param ignored_chars: str containing all the characters that should be ignored during generation :return: A dictionary containing the password and entropy
generators.py
generate_random_password
Tauag/SPass
2
python
def generate_random_password(length=9, letters=True, digits=True, punctuation=True, ignored_chars=''): '\n    Generates a cryptographically secure random password\n    :param length: Length of password\n    :param letters: True to use letters in password\n    :param digits: True to use digits in password\n    :param punctuation: True to use punctuation in password\n    :param ignored_chars: str containing all the characters that should be ignored during generation\n    :return: A dictionary containing the password and entropy\n    ' if (not (letters or digits or punctuation)): raise ParameterError('At least one set of characters must be selected for a password to be generated') char_pool = '' if letters: char_pool += string.ascii_letters if digits: char_pool += string.digits if punctuation: char_pool += string.punctuation char_list = [char for char in char_pool if (char not in ignored_chars)] result = '' for _ in range(length): result += secrets.choice(char_list) return {'password': result, 'entropy': __calc_entropy_password(result, len(char_list))}
def generate_random_password(length=9, letters=True, digits=True, punctuation=True, ignored_chars=''): '\n    Generates a cryptographically secure random password\n    :param length: Length of password\n    :param letters: True to use letters in password\n    :param digits: True to use digits in password\n    :param punctuation: True to use punctuation in password\n    :param ignored_chars: str containing all the characters that should be ignored during generation\n    :return: A dictionary containing the password and entropy\n    ' if (not (letters or digits or punctuation)): raise ParameterError('At least one set of characters must be selected for a password to be generated') char_pool = '' if letters: char_pool += string.ascii_letters if digits: char_pool += string.digits if punctuation: char_pool += string.punctuation char_list = [char for char in char_pool if (char not in ignored_chars)] result = '' for _ in range(length): result += secrets.choice(char_list) return {'password': result, 'entropy': __calc_entropy_password(result, len(char_list))}<|docstring|>Generates a cryptographically secure random password :param length: Length of password :param letters: True to use letters in password :param digits: True to use digits in password :param punctuation: True to use punctuation in password :param ignored_chars: str containing all the characters that should be ignored during generation :return: A dictionary containing the password and entropy<|endoftext|>
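With the empty-string literals restored above, the reported entropy is simply `length * log2(pool_size)`. For the default 9 characters over the full 94-symbol printable pool that is about 59 bits:

```python
import math
import secrets
import string

pool = string.ascii_letters + string.digits + string.punctuation  # 52 + 10 + 32 = 94 symbols
password = ''.join(secrets.choice(pool) for _ in range(9))
entropy = 9 * math.log2(len(pool))                                # log2(94**9) ~ 59.0 bits
print(password, round(entropy, 1))
```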
5a4a6c66790c5ca96b9c0e1c7ba5edf2120d41d699c19ff63af2d9febea62d09
def generate_passphrase(word_count=5, pad_length=0, digits=True, punctuation=True, ignored_symbols=''): '\n Generates a passphrase with the specified amount of padding\n :param word_count: Number of words in passphrase\n :param pad_length: The number of padding characters\n :param digits: True to use digits in padding\n :param punctuation: True to use punctuation in padding\n :param ignored_symbols: str containing all the symbols to ignore during padding generation\n :return: A dictionary containing the passphrase, entropy and the +- deviation of the entropy\n ' if (word_count < 2): raise ParameterError('You need at least two words to make a passphrase') with open('object/word_map.pickle', 'rb') as words: word_bank = pickle.load(words) (placements, pad_bank_size) = ({}, 0) if (pad_length > 0): if (not (digits or punctuation)): raise ParameterError('At least one set of characters must be selected for the padding') (placements, pad_bank_size) = __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols) (result, words_used, coin) = ('', [], [0, 1]) for i in range(word_count): if (i in placements): result += ''.join((sym for sym in placements[i])) word = secrets.choice(word_bank) if (secrets.choice(coin) == 0): word = (word[0].upper() + word[1:]) result += word words_used.append(word) if (word_count in placements): result += ''.join((sym for sym in placements[word_count])) (entropy, deviation) = __calc_entropy_passphrase(word_count, len(word_bank), pad_length, pad_bank_size) return {'password': result, 'entropy': entropy, 'deviation': deviation}
Generates a passphrase with the specified amount of padding :param word_count: Number of words in passphrase :param pad_length: The number of padding characters :param digits: True to use digits in padding :param punctuation: True to use punctuation in padding :param ignored_symbols: str containing all the symbols to ignore during padding generation :return: A dictionary containing the passphrase, entropy and the +- deviation of the entropy
generators.py
generate_passphrase
Tauag/SPass
2
python
def generate_passphrase(word_count=5, pad_length=0, digits=True, punctuation=True, ignored_symbols=''): '\n    Generates a passphrase with the specified amount of padding\n    :param word_count: Number of words in passphrase\n    :param pad_length: The number of padding characters\n    :param digits: True to use digits in padding\n    :param punctuation: True to use punctuation in padding\n    :param ignored_symbols: str containing all the symbols to ignore during padding generation\n    :return: A dictionary containing the passphrase, entropy and the +- deviation of the entropy\n    ' if (word_count < 2): raise ParameterError('You need at least two words to make a passphrase') with open('object/word_map.pickle', 'rb') as words: word_bank = pickle.load(words) (placements, pad_bank_size) = ({}, 0) if (pad_length > 0): if (not (digits or punctuation)): raise ParameterError('At least one set of characters must be selected for the padding') (placements, pad_bank_size) = __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols) (result, words_used, coin) = ('', [], [0, 1]) for i in range(word_count): if (i in placements): result += ''.join((sym for sym in placements[i])) word = secrets.choice(word_bank) if (secrets.choice(coin) == 0): word = (word[0].upper() + word[1:]) result += word words_used.append(word) if (word_count in placements): result += ''.join((sym for sym in placements[word_count])) (entropy, deviation) = __calc_entropy_passphrase(word_count, len(word_bank), pad_length, pad_bank_size) return {'password': result, 'entropy': entropy, 'deviation': deviation}
def generate_passphrase(word_count=5, pad_length=0, digits=True, punctuation=True, ignored_symbols=''): '\n    Generates a passphrase with the specified amount of padding\n    :param word_count: Number of words in passphrase\n    :param pad_length: The number of padding characters\n    :param digits: True to use digits in padding\n    :param punctuation: True to use punctuation in padding\n    :param ignored_symbols: str containing all the symbols to ignore during padding generation\n    :return: A dictionary containing the passphrase, entropy and the +- deviation of the entropy\n    ' if (word_count < 2): raise ParameterError('You need at least two words to make a passphrase') with open('object/word_map.pickle', 'rb') as words: word_bank = pickle.load(words) (placements, pad_bank_size) = ({}, 0) if (pad_length > 0): if (not (digits or punctuation)): raise ParameterError('At least one set of characters must be selected for the padding') (placements, pad_bank_size) = __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols) (result, words_used, coin) = ('', [], [0, 1]) for i in range(word_count): if (i in placements): result += ''.join((sym for sym in placements[i])) word = secrets.choice(word_bank) if (secrets.choice(coin) == 0): word = (word[0].upper() + word[1:]) result += word words_used.append(word) if (word_count in placements): result += ''.join((sym for sym in placements[word_count])) (entropy, deviation) = __calc_entropy_passphrase(word_count, len(word_bank), pad_length, pad_bank_size) return {'password': result, 'entropy': entropy, 'deviation': deviation}<|docstring|>Generates a passphrase with the specified amount of padding :param word_count: Number of words in passphrase :param pad_length: The number of padding characters :param digits: True to use digits in padding :param punctuation: True to use punctuation in padding :param ignored_symbols: str containing all the symbols to ignore during padding generation :return: A dictionary containing the passphrase, entropy and the +- deviation of the entropy<|endoftext|>
b5cc2412841b586fad0e2db9a40d2593a2cf131cdd4564299f347c095f86c0d9
def __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols): '\n Randomly decides where to add padding and which characters to use\n :param word_count: Number of words in passphrase\n :param pad_length: Number of characters to use for padding\n :param digits: True to use digits in padding\n :param punctuation: True to use punctuation in padding\n :param ignored_symbols: str containing all characters to ignore during padding generation\n :return: A tuple containing the padding placements and the size of the character pool used to pad\n ' char_pool = '' if digits: char_pool += string.digits if punctuation: char_pool += string.punctuation char_list = [char for char in char_pool if (char not in ignored_symbols)] indexes = [index for index in range((word_count + 1))] placements = {} for _ in range(pad_length): idx = secrets.choice(indexes) if (idx not in placements): placements.update({idx: [secrets.choice(char_list)]}) else: placements[idx].append(secrets.choice(char_list)) return (placements, len(char_list))
Randomly decides where to add padding and which characters to use :param word_count: Number of words in passphrase :param pad_length: Number of characters to use for padding :param digits: True to use digits in padding :param punctuation: True to use punctuation in padding :param ignored_symbols: str containing all characters to ignore during padding generation :return: A tuple containing the padding placements and the size of the character pool used to pad
generators.py
__scatter_padding
Tauag/SPass
2
python
def __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols): '\n    Randomly decides where to add padding and which characters to use\n    :param word_count: Number of words in passphrase\n    :param pad_length: Number of characters to use for padding\n    :param digits: True to use digits in padding\n    :param punctuation: True to use punctuation in padding\n    :param ignored_symbols: str containing all characters to ignore during padding generation\n    :return: A tuple containing the padding placements and the size of the character pool used to pad\n    ' char_pool = '' if digits: char_pool += string.digits if punctuation: char_pool += string.punctuation char_list = [char for char in char_pool if (char not in ignored_symbols)] indexes = [index for index in range((word_count + 1))] placements = {} for _ in range(pad_length): idx = secrets.choice(indexes) if (idx not in placements): placements.update({idx: [secrets.choice(char_list)]}) else: placements[idx].append(secrets.choice(char_list)) return (placements, len(char_list))
def __scatter_padding(word_count, pad_length, digits, punctuation, ignored_symbols): '\n    Randomly decides where to add padding and which characters to use\n    :param word_count: Number of words in passphrase\n    :param pad_length: Number of characters to use for padding\n    :param digits: True to use digits in padding\n    :param punctuation: True to use punctuation in padding\n    :param ignored_symbols: str containing all characters to ignore during padding generation\n    :return: A tuple containing the padding placements and the size of the character pool used to pad\n    ' char_pool = '' if digits: char_pool += string.digits if punctuation: char_pool += string.punctuation char_list = [char for char in char_pool if (char not in ignored_symbols)] indexes = [index for index in range((word_count + 1))] placements = {} for _ in range(pad_length): idx = secrets.choice(indexes) if (idx not in placements): placements.update({idx: [secrets.choice(char_list)]}) else: placements[idx].append(secrets.choice(char_list)) return (placements, len(char_list))<|docstring|>Randomly decides where to add padding and which characters to use :param word_count: Number of words in passphrase :param pad_length: Number of characters to use for padding :param digits: True to use digits in padding :param punctuation: True to use punctuation in padding :param ignored_symbols: str containing all characters to ignore during padding generation :return: A tuple containing the padding placements and the size of the character pool used to pad<|endoftext|>
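`__scatter_padding` draws, for each padding character, a gap index in `0..word_count` (gap 0 sits before the first word, gap `word_count` after the last) and buckets the characters by gap. A condensed stand-in using a seeded `random.Random` instead of `secrets`, purely so the demo is repeatable:

```python
import random

def scatter(word_count, pad_length, chars, rng):
    placements = {}
    for _ in range(pad_length):
        idx = rng.randrange(word_count + 1)          # which gap receives this character
        placements.setdefault(idx, []).append(rng.choice(chars))
    return placements

rng = random.Random(0)                               # deterministic stand-in for secrets
print(scatter(word_count=3, pad_length=4, chars='!@#0123456789', rng=rng))
# keys are gap indices (0..3); values are the padding characters assigned to that gap
```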
63282f342ceb9115663092c246dd8d46658d2c6adbfb3f55905d9f41a16950d6
def __calc_entropy_password(password, pool_size): '\n Calculates the entropy of a random password\n :param password: The password\n :param pool_size: The size of the character pool used to generate password\n :return: Entropy\n ' if ((not password) or (not pool_size)): return 0 inner = math.pow(pool_size, len(password)) return math.log(inner, 2)
Calculates the entropy of a random password :param password: The password :param pool_size: The size of the character pool used to generate password :return: Entropy
generators.py
__calc_entropy_password
Tauag/SPass
2
python
def __calc_entropy_password(password, pool_size): '\n Calculates the entropy of a random password\n :param password: The password\n :param pool_size: The size of the character pool used to generate password\n :return: Entropy\n ' if ((not password) or (not pool_size)): return 0 inner = math.pow(pool_size, len(password)) return math.log(inner, 2)
def __calc_entropy_password(password, pool_size): '\n Calculates the entropy of a random password\n :param password: The password\n :param pool_size: The size of the character pool used to generate password\n :return: Entropy\n ' if ((not password) or (not pool_size)): return 0 inner = math.pow(pool_size, len(password)) return math.log(inner, 2)<|docstring|>Calculates the entropy of a random password :param password: The password :param pool_size: The size of the character pool used to generate password :return: Entropy<|endoftext|>
a86f9ced928be27f3221768c2003f9c34a30976e12009eea0eb71ebfc90765ed
def __calc_entropy_passphrase(word_count, word_bank_size, pad_length, pad_bank_size): '\n Approximates the minimum entropy of the passphrase with its possible deviation\n :param word_count: Number of words in passphrase\n :param word_bank_size: Total number of words in the word bank\n :param pad_length: Number of characters used in padding\n :param pad_bank_size: The size of the character pool used to generate padding\n :return: A tuple containing the minimum entropy and deviation\n ' inner = math.pow((word_bank_size * 2), word_count) entropy = math.log(inner, 2) inner = math.pow(pad_bank_size, pad_length) deviation = math.log(inner, 2) return (entropy, deviation)
Approximates the minimum entropy of the passphrase with its possible deviation :param word_count: Number of words in passphrase :param word_bank_size: Total number of words in the word bank :param pad_length: Number of characters used in padding :param pad_bank_size: The size of the character pool used to generate padding :return: A tuple containing the minimum entropy and deviation
generators.py
__calc_entropy_passphrase
Tauag/SPass
2
python
def __calc_entropy_passphrase(word_count, word_bank_size, pad_length, pad_bank_size): '\n Approximates the minimum entropy of the passphrase with its possible deviation\n :param word_count: Number of words in passphrase\n :param word_bank_size: Total number of words in the word bank\n :param pad_length: Number of characters used in padding\n :param pad_bank_size: The size of the character pool used to generate padding\n :return: A tuple containing the minimum entropy and deviation\n ' inner = math.pow((word_bank_size * 2), word_count) entropy = math.log(inner, 2) inner = math.pow(pad_bank_size, pad_length) deviation = math.log(inner, 2) return (entropy, deviation)
def __calc_entropy_passphrase(word_count, word_bank_size, pad_length, pad_bank_size): '\n Approximates the minimum entropy of the passphrase with its possible deviation\n :param word_count: Number of words in passphrase\n :param word_bank_size: Total number of words in the word bank\n :param pad_length: Number of characters used in padding\n :param pad_bank_size: The size of the character pool used to generate padding\n :return: A tuple containing the minimum entropy and deviation\n ' inner = math.pow((word_bank_size * 2), word_count) entropy = math.log(inner, 2) inner = math.pow(pad_bank_size, pad_length) deviation = math.log(inner, 2) return (entropy, deviation)<|docstring|>Approximates the minimum entropy of the passphrase with its possible deviation :param word_count: Number of words in passphrase :param word_bank_size: Total number of words in the word bank :param pad_length: Number of characters used in padding :param pad_bank_size: The size of the character pool used to generate padding :return: A tuple containing the minimum entropy and deviation<|endoftext|>
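A worked instance of the two formulas above: the factor of 2 on `word_bank_size` accounts for the per-word capitalisation coin flip in `generate_passphrase`, and the deviation term is the padding's own entropy. The bank sizes below are illustrative, not taken from the package's word map:

```python
import math

def passphrase_entropy(word_count, word_bank_size, pad_length, pad_bank_size):
    entropy = math.log2((word_bank_size * 2) ** word_count)   # *2 for the capitalisation flip
    deviation = math.log2(pad_bank_size ** pad_length) if pad_length else 0.0
    return entropy, deviation

# 5 words from a hypothetical 7776-word bank, 2 pad characters from a 42-symbol pool:
e, d = passphrase_entropy(5, 7776, 2, 42)
print(round(e, 1), round(d, 1))   # 69.6 bits, +/- 10.8 bits
```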
f4b36ac5573e0af23bc86d4f97f4a29dc98cc8c1f2ed98a49db7a21d0bbfd33e
def __call__(self, fun, mean, var, *args, **kwargs): "\n        Compute the Gaussian Expectation of a function f:\n\n        X ~ N(mean, var)\n        E[f(X)] = ∫f(x, *args, **kwargs)p(x)dx\n\n        Using the formula:\n        E[f(X)] = sum_{i=1}^{N_quad_points} f(x_i) * w_i\n\n        where x_i, w_i must be provided by the inheriting class through self._build_X_W.\n        The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX].\n\n        :param fun: Callable or Iterable of Callables that operates elementwise, with\n            signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping:\n                X shape: [N_quad_points, b1, b2, ..., bX, d],\n                    usually [N_quad_points, N, d]\n                f(X) shape: [N_quad_points, b1, b2, ...., bX, d'],\n                    usually [N_quad_points, N, 1] or [N_quad_points, N, d]\n            In most cases, f should only operate over the last dimension of X\n        :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the mean of a d-Variate Gaussian distribution\n        :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the variance of a d-Variate Gaussian distribution\n        :param *args: Passed to fun\n        :param **kwargs: Passed to fun\n        :return: Array/Tensor with shape [b1, b2, ...., bX, d'],\n            usually [N, d] or [N, 1]\n        " (X, W) = self._build_X_W(mean, var) if isinstance(fun, Iterable): return [tf.reduce_sum((f(X, *args, **kwargs) * W), axis=0) for f in fun] return tf.reduce_sum((fun(X, *args, **kwargs) * W), axis=0)
Compute the Gaussian Expectation of a function f: X ~ N(mean, var) E[f(X)] = ∫f(x, *args, **kwargs)p(x)dx Using the formula: E[f(X)] = sum_{i=1}^{N_quad_points} f(x_i) * w_i where x_i, w_i must be provided by the inheriting class through self._build_X_W. The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX]. :param fun: Callable or Iterable of Callables that operates elementwise, with signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping: X shape: [N_quad_points, b1, b2, ..., bX, d], usually [N_quad_points, N, d] f(X) shape: [N_quad_points, b1, b2, ...., bX, d'], usually [N_quad_points, N, 1] or [N_quad_points, N, d] In most cases, f should only operate over the last dimension of X :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the mean of a d-Variate Gaussian distribution :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the variance of a d-Variate Gaussian distribution :param *args: Passed to fun :param **kwargs: Passed to fun :return: Array/Tensor with shape [b1, b2, ...., bX, d'], usually [N, d] or [N, 1]
gpflow/quadrature/base.py
__call__
HarrySpearing/GPflow
1,724
python
def __call__(self, fun, mean, var, *args, **kwargs): "\n        Compute the Gaussian Expectation of a function f:\n\n        X ~ N(mean, var)\n        E[f(X)] = ∫f(x, *args, **kwargs)p(x)dx\n\n        Using the formula:\n        E[f(X)] = sum_{i=1}^{N_quad_points} f(x_i) * w_i\n\n        where x_i, w_i must be provided by the inheriting class through self._build_X_W.\n        The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX].\n\n        :param fun: Callable or Iterable of Callables that operates elementwise, with\n            signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping:\n                X shape: [N_quad_points, b1, b2, ..., bX, d],\n                    usually [N_quad_points, N, d]\n                f(X) shape: [N_quad_points, b1, b2, ...., bX, d'],\n                    usually [N_quad_points, N, 1] or [N_quad_points, N, d]\n            In most cases, f should only operate over the last dimension of X\n        :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the mean of a d-Variate Gaussian distribution\n        :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the variance of a d-Variate Gaussian distribution\n        :param *args: Passed to fun\n        :param **kwargs: Passed to fun\n        :return: Array/Tensor with shape [b1, b2, ...., bX, d'],\n            usually [N, d] or [N, 1]\n        " (X, W) = self._build_X_W(mean, var) if isinstance(fun, Iterable): return [tf.reduce_sum((f(X, *args, **kwargs) * W), axis=0) for f in fun] return tf.reduce_sum((fun(X, *args, **kwargs) * W), axis=0)
def __call__(self, fun, mean, var, *args, **kwargs): "\n        Compute the Gaussian Expectation of a function f:\n\n        X ~ N(mean, var)\n        E[f(X)] = ∫f(x, *args, **kwargs)p(x)dx\n\n        Using the formula:\n        E[f(X)] = sum_{i=1}^{N_quad_points} f(x_i) * w_i\n\n        where x_i, w_i must be provided by the inheriting class through self._build_X_W.\n        The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX].\n\n        :param fun: Callable or Iterable of Callables that operates elementwise, with\n            signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping:\n                X shape: [N_quad_points, b1, b2, ..., bX, d],\n                    usually [N_quad_points, N, d]\n                f(X) shape: [N_quad_points, b1, b2, ...., bX, d'],\n                    usually [N_quad_points, N, 1] or [N_quad_points, N, d]\n            In most cases, f should only operate over the last dimension of X\n        :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the mean of a d-Variate Gaussian distribution\n        :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the variance of a d-Variate Gaussian distribution\n        :param *args: Passed to fun\n        :param **kwargs: Passed to fun\n        :return: Array/Tensor with shape [b1, b2, ...., bX, d'],\n            usually [N, d] or [N, 1]\n        " (X, W) = self._build_X_W(mean, var) if isinstance(fun, Iterable): return [tf.reduce_sum((f(X, *args, **kwargs) * W), axis=0) for f in fun] return tf.reduce_sum((fun(X, *args, **kwargs) * W), axis=0)<|docstring|>Compute the Gaussian Expectation of a function f: X ~ N(mean, var) E[f(X)] = ∫f(x, *args, **kwargs)p(x)dx Using the formula: E[f(X)] = sum_{i=1}^{N_quad_points} f(x_i) * w_i where x_i, w_i must be provided by the inheriting class through self._build_X_W. The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX]. :param fun: Callable or Iterable of Callables that operates elementwise, with signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping: X shape: [N_quad_points, b1, b2, ..., bX, d], usually [N_quad_points, N, d] f(X) shape: [N_quad_points, b1, b2, ...., bX, d'], usually [N_quad_points, N, 1] or [N_quad_points, N, d] In most cases, f should only operate over the last dimension of X :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the mean of a d-Variate Gaussian distribution :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the variance of a d-Variate Gaussian distribution :param *args: Passed to fun :param **kwargs: Passed to fun :return: Array/Tensor with shape [b1, b2, ...., bX, d'], usually [N, d] or [N, 1]<|endoftext|>
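The quadrature contract above can be checked against a closed form without GPflow: with Gauss-Hermite nodes and the usual change of variables x = mu + sqrt(2*var)*t, the weighted sum reproduces E[X^2] = mu^2 + var. A plain-NumPy sketch of what `_build_X_W` must supply in one dimension:

```python
import numpy as np

mu, var, n = 1.5, 0.25, 20
t, w = np.polynomial.hermite.hermgauss(n)   # nodes/weights for the e^{-t^2} weight function
x = mu + np.sqrt(2.0 * var) * t             # change of variables to N(mu, var)
weights = w / np.sqrt(np.pi)                # normalise: the weights now sum to 1

estimate = np.sum(weights * x**2)           # E[f(X)] with f(x) = x^2
assert np.isclose(estimate, mu**2 + var)    # closed form: 1.5**2 + 0.25 = 2.5
```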
8a48fa580430b7c8f4573df8e42afa506db2853fa8d5ad05db332f784ee62f5f
def logspace(self, fun: Union[(Callable, Iterable[Callable])], mean, var, *args, **kwargs): "\n        Compute the Gaussian log-Expectation of the exponential of a function f:\n\n        X ~ N(mean, var)\n        log E[exp[f(X)]] = log ∫exp[f(x, *args, **kwargs)]p(x)dx\n\n        Using the formula:\n        log E[exp[f(X)]] = log sum_{i=1}^{N_quad_points} exp[f(x_i) + log w_i]\n\n        where x_i, w_i must be provided by the inheriting class through self._build_X_W.\n        The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX].\n\n        :param fun: Callable or Iterable of Callables that operates elementwise, with\n            signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping:\n                X shape: [N_quad_points, b1, b2, ..., bX, d],\n                    usually [N_quad_points, N, d]\n                f(X) shape: [N_quad_points, b1, b2, ...., bX, d'],\n                    usually [N_quad_points, N, 1] or [N_quad_points, N, d]\n            In most cases, f should only operate over the last dimension of X\n        :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the mean of a d-Variate Gaussian distribution\n        :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the variance of a d-Variate Gaussian distribution\n        :param *args: Passed to fun\n        :param **kwargs: Passed to fun\n        :return: Array/Tensor with shape [b1, b2, ...., bX, d'],\n            usually [N, d] or [N, 1]\n        " (X, W) = self._build_X_W(mean, var) logW = tf.math.log(W) if isinstance(fun, Iterable): return [tf.reduce_logsumexp((f(X, *args, **kwargs) + logW), axis=0) for f in fun] return tf.reduce_logsumexp((fun(X, *args, **kwargs) + logW), axis=0)
Compute the Gaussian log-Expectation of the exponential of a function f: X ~ N(mean, var) log E[exp[f(X)]] = log ∫exp[f(x, *args, **kwargs)]p(x)dx Using the formula: log E[exp[f(X)]] = log sum_{i=1}^{N_quad_points} exp[f(x_i) + log w_i] where x_i, w_i must be provided by the inheriting class through self._build_X_W. The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX]. :param fun: Callable or Iterable of Callables that operates elementwise, with signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping: X shape: [N_quad_points, b1, b2, ..., bX, d], usually [N_quad_points, N, d] f(X) shape: [N_quad_points, b1, b2, ...., bX, d'], usually [N_quad_points, N, 1] or [N_quad_points, N, d] In most cases, f should only operate over the last dimension of X :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the mean of a d-Variate Gaussian distribution :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the variance of a d-Variate Gaussian distribution :param *args: Passed to fun :param **kwargs: Passed to fun :return: Array/Tensor with shape [b1, b2, ...., bX, d'], usually [N, d] or [N, 1]
gpflow/quadrature/base.py
logspace
HarrySpearing/GPflow
1,724
python
def logspace(self, fun: Union[(Callable, Iterable[Callable])], mean, var, *args, **kwargs): "\n        Compute the Gaussian log-Expectation of the exponential of a function f:\n\n        X ~ N(mean, var)\n        log E[exp[f(X)]] = log ∫exp[f(x, *args, **kwargs)]p(x)dx\n\n        Using the formula:\n        log E[exp[f(X)]] = log sum_{i=1}^{N_quad_points} exp[f(x_i) + log w_i]\n\n        where x_i, w_i must be provided by the inheriting class through self._build_X_W.\n        The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX].\n\n        :param fun: Callable or Iterable of Callables that operates elementwise, with\n            signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping:\n                X shape: [N_quad_points, b1, b2, ..., bX, d],\n                    usually [N_quad_points, N, d]\n                f(X) shape: [N_quad_points, b1, b2, ...., bX, d'],\n                    usually [N_quad_points, N, 1] or [N_quad_points, N, d]\n            In most cases, f should only operate over the last dimension of X\n        :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the mean of a d-Variate Gaussian distribution\n        :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the variance of a d-Variate Gaussian distribution\n        :param *args: Passed to fun\n        :param **kwargs: Passed to fun\n        :return: Array/Tensor with shape [b1, b2, ...., bX, d'],\n            usually [N, d] or [N, 1]\n        " (X, W) = self._build_X_W(mean, var) logW = tf.math.log(W) if isinstance(fun, Iterable): return [tf.reduce_logsumexp((f(X, *args, **kwargs) + logW), axis=0) for f in fun] return tf.reduce_logsumexp((fun(X, *args, **kwargs) + logW), axis=0)
def logspace(self, fun: Union[(Callable, Iterable[Callable])], mean, var, *args, **kwargs): "\n        Compute the Gaussian log-Expectation of the exponential of a function f:\n\n        X ~ N(mean, var)\n        log E[exp[f(X)]] = log ∫exp[f(x, *args, **kwargs)]p(x)dx\n\n        Using the formula:\n        log E[exp[f(X)]] = log sum_{i=1}^{N_quad_points} exp[f(x_i) + log w_i]\n\n        where x_i, w_i must be provided by the inheriting class through self._build_X_W.\n        The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX].\n\n        :param fun: Callable or Iterable of Callables that operates elementwise, with\n            signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping:\n                X shape: [N_quad_points, b1, b2, ..., bX, d],\n                    usually [N_quad_points, N, d]\n                f(X) shape: [N_quad_points, b1, b2, ...., bX, d'],\n                    usually [N_quad_points, N, 1] or [N_quad_points, N, d]\n            In most cases, f should only operate over the last dimension of X\n        :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the mean of a d-Variate Gaussian distribution\n        :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d],\n            representing the variance of a d-Variate Gaussian distribution\n        :param *args: Passed to fun\n        :param **kwargs: Passed to fun\n        :return: Array/Tensor with shape [b1, b2, ...., bX, d'],\n            usually [N, d] or [N, 1]\n        " (X, W) = self._build_X_W(mean, var) logW = tf.math.log(W) if isinstance(fun, Iterable): return [tf.reduce_logsumexp((f(X, *args, **kwargs) + logW), axis=0) for f in fun] return tf.reduce_logsumexp((fun(X, *args, **kwargs) + logW), axis=0)<|docstring|>Compute the Gaussian log-Expectation of the exponential of a function f: X ~ N(mean, var) log E[exp[f(X)]] = log ∫exp[f(x, *args, **kwargs)]p(x)dx Using the formula: log E[exp[f(X)]] = log sum_{i=1}^{N_quad_points} exp[f(x_i) + log w_i] where x_i, w_i must be provided by the inheriting class through self._build_X_W. The computations broadcast along batch-dimensions, represented by [b1, b2, ..., bX]. :param fun: Callable or Iterable of Callables that operates elementwise, with signature f(X, *args, **kwargs). Moreover, it must satisfy the shape-mapping: X shape: [N_quad_points, b1, b2, ..., bX, d], usually [N_quad_points, N, d] f(X) shape: [N_quad_points, b1, b2, ...., bX, d'], usually [N_quad_points, N, 1] or [N_quad_points, N, d] In most cases, f should only operate over the last dimension of X :param mean: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the mean of a d-Variate Gaussian distribution :param var: Array/Tensor with shape [b1, b2, ..., bX, d], usually [N, d], representing the variance of a d-Variate Gaussian distribution :param *args: Passed to fun :param **kwargs: Passed to fun :return: Array/Tensor with shape [b1, b2, ...., bX, d'], usually [N, d] or [N, 1]<|endoftext|>
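`logspace` applies `reduce_logsumexp` to `f(x_i) + log(w_i)` instead of exponentiating first; this matters numerically when `f` returns large-magnitude log-densities, where the naive path underflows to `log(0) = -inf`. A NumPy/SciPy illustration of the difference:

```python
import numpy as np
from scipy.special import logsumexp

logw = np.log(np.full(3, 1.0 / 3.0))        # three equally weighted quadrature points
f = np.array([-1000.0, -1001.0, -1002.0])   # e.g. per-point log-likelihoods

naive = np.log(np.sum(np.exp(f + logw)))    # exp underflows to 0 -> -inf (plus a warning)
stable = logsumexp(f + logw)                # ~ -1000.69, finite
print(naive, stable)
```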
e84ea1f81a084b99126a46c9a39cdc67820d7fd3df9a629073e9cb2e6c1977e7
@pytest.mark.skipif((not product_details.last_update), reason="We don't want to download product_details on travis") def test_spotcheck(): "Check a couple product-details files to make sure they're available." languages = product_details.languages assert (languages['el']['English'] == 'Greek') assert (languages['el']['native'] == u'Ελληνικά') assert (product_details.firefox_history_major_releases['1.0'] == '2004-11-09')
Check a couple product-details files to make sure they're available.
src/olympia/amo/tests/test_amo_utils.py
test_spotcheck
fdintino/nginxconf-2018-mozilla-addons-server
10
python
@pytest.mark.skipif((not product_details.last_update), reason="We don't want to download product_details on travis") def test_spotcheck(): languages = product_details.languages assert (languages['el']['English'] == 'Greek') assert (languages['el']['native'] == u'Ελληνικά') assert (product_details.firefox_history_major_releases['1.0'] == '2004-11-09')
@pytest.mark.skipif((not product_details.last_update), reason="We don't want to download product_details on travis") def test_spotcheck(): languages = product_details.languages assert (languages['el']['English'] == 'Greek') assert (languages['el']['native'] == u'Ελληνικά') assert (product_details.firefox_history_major_releases['1.0'] == '2004-11-09')<|docstring|>Check a couple product-details files to make sure they're available.<|endoftext|>
88a508e67775ac38c93433301f11b04426aaeb3739e6488802c787d06efb1489
def __args_default__(self) -> dict: 'Defines a Dictionary of Default Values for Keyword Arguments (or Attributes)' return {'unitGroup': 'metric', 'contentType': 'csv', 'aggregateHours': 24}
Defines a Dictionary of Default Values for Keyword Arguments (or Attributes)
VisualCrossing/_api.py
__args_default__
ayushsingh-07/VisualCrossing
3
python
def __args_default__(self) -> dict: return {'unitGroup': 'metric', 'contentType': 'csv', 'aggregateHours': 24}
def __args_default__(self) -> dict: return {'unitGroup': 'metric', 'contentType': 'csv', 'aggregateHours': 24}<|docstring|>Defines a Dictionary of Default Values for Keyword Arguments (or Attributes)<|endoftext|>
d9d5c39656ddd95ba4b52b3f6aea33fed1928c9a33f56aa8b0d016652f77e26d
def __get_args_default__(self, args: str): 'Get the Default Value associated with a Keyword Argument' return self.__args_default__().get(args, None)
Get the Default Value associated with a Keyword Argument
VisualCrossing/_api.py
__get_args_default__
ayushsingh-07/VisualCrossing
3
python
def __get_args_default__(self, args: str): return self.__args_default__().get(args, None)
def __get_args_default__(self, args: str): return self.__args_default__().get(args, None)<|docstring|>Get the Default Value associated with a Keyword Argument<|endoftext|>
2e1505d691970fd02e80255a0822d13213ddca98d93c8326037255f2592bbbab
@property def __optional_args__(self): 'Get List of all the Optional Keyword Arguments Accepted by the API' return self.__args_default__().keys()
Get List of all the Optional Keyword Arguments Accepted by the API
VisualCrossing/_api.py
__optional_args__
ayushsingh-07/VisualCrossing
3
python
@property def __optional_args__(self): return self.__args_default__().keys()
@property def __optional_args__(self): return self.__args_default__().keys()<|docstring|>Get List of all the Optional Keyword Arguments Accepted by the API<|endoftext|>
cf85fa0e70b2168eed2dc283f13198e4affc5b1a8dc9033f91f225365012a07e
def generate_config(self, defaultSettings: bool=True, fileName: str='config.json', overwrite: bool=False, keepBackup: bool=True, **kwargs) -> bool: 'Generate configuration file at `__homepath__` when executed\n\n        The configuration file can be generated with default settings as defined at\n        :func:`__args_default__` else, user is requested to pass all necessary settings\n        in a correct format (as required by API) to the function, setting `key` as the\n        attribute name, and `value` as the desired value. Users are also advised not to\n        save the `API_KEY` in the configuration file (for security purpose), and to use\n        :func:`_generate_key` to save the key file in an encrypted format.\n\n        :param defaultSettings: Should you wish to save the configuration file with\n                                the default settings. If set to `False` then user is\n                                requested to pass all necessary attributes (`key`) and\n                                their values. Defaults to `True`.\n\n        :param fileName: Output file name (with extension - `json`). Defaults to\n                         `config.json`.\n\n        :param overwrite: Overwrite existing configuration file, if exists (same filename).\n                          Defaults to `False`.\n\n        :param keepBackup: If same file name exists, then setting the parameter to `True` will\n                           create a backup of the file with the following format\n                           `<original-name>.<UUID>.json` where `UUID` is a randomly generated\n                           7-character long name. Defaults to `True`.\n\n        Accepts n-Keyword Arguments, which are all default settings that can be used to initialize\n        the API.\n        ' outfile = join(__homepath__, fileName) if defaultSettings: attrs = self.__args_default__() else: attrs = kwargs attrs = {'__header__': {'program': __name__, 'version': __version__, 'homepath': __homepath__}, 'platform': {'platform': platform.platform(), 'architecture': platform.machine(), 'version': platform.version(), 'system': platform.system(), 'processor': platform.processor(), 'uname': platform.uname()}, 'attributes': attrs, 'timestamp': ctime()} def write_json(kv: dict, file: str): with open(file, 'w') as f: json.dump(kv, f, sort_keys=False, indent=4, default=str) if Path(outfile).is_file(): warnings.warn(f'{outfile} already exists.', FileExists) if keepBackup: try: (name, extension) = fileName.split('.') except ValueError as err: name = fileName.split('.')[0] extension = 'json' warnings.warn(f'{fileName} is not of proper type. Setting name as: {name}', ImproperFileName) new_file = '.'.join([name, str(uuid4())[:7], extension]) print(f'Old configuration file is available at {new_file}') try: copy(outfile, join(__homepath__, new_file)) except TypeError: copy(str(outfile), str(join(__homepath__, new_file))) else: warnings.warn(f'{outfile} will be overwritten without a backup.', FileExists) if overwrite: write_json(attrs, outfile) else: raise ValueError(f'{outfile} already exists, and `overwrite` is set to `False`') else: write_json(attrs, outfile) return True
Generate configuration file at `__homepath__` when executed The configuration file can be generated with default settings as defined at :func:`__args_default__` else, user is requested to pass all necessary settings in a correct format (as required by API) to the function, setting `key` as the attribute name, and `value` as the desired value. Users are also advised not to save the `API_KEY` in the configuration file (for security purposes), and to use :func:`_generate_key` to save the key file in an encrypted format. :param defaultSettings: Should you wish to save the configuration file with the default settings. If set to `False` then user is requested to pass all necessary attributes (`key`) and their values. Defaults to `True`. :param fileName: Output file name (with extension - `json`). Defaults to `config.json`. :param overwrite: Overwrite existing configuration file, if exists (same filename). Defaults to `False`. :param keepBackup: If the same file name exists, then setting the parameter to `True` will create a backup of the file with the following format `<original-name>.<UUID>.json` where `UUID` is a randomly generated 7-character long name. Defaults to `True`. Accepts n-Keyword Arguments, which are all default settings that can be used to initialize the API.
VisualCrossing/_api.py
generate_config
ayushsingh-07/VisualCrossing
3
python
def generate_config(self, defaultSettings: bool=True, fileName: str='config.json', overwrite: bool=False, keepBackup: bool=True, **kwargs) -> bool: 'Generate configuration file at `__homepath__` when executed\n\n The configuration file can be generated with default settings as defined at\n :func:`__args_default__` else, user is requested to pass all necessary settings\n in a correct format (as required by API) to the function, setting `key` as the\n attribute name, and `value` as the desired value. Users are also advised not to\n save the `API_KEY` in the configuration file (for security purpose), and to use\n :func:`_generate_key` to save the key file in an encrypted format.\n\n :param defaultSettings: Should you wish to save the configuration file with\n the default settings. If set to `False` then user is\n requested to pass all necessary attributes (`key`) and\n their values. Defaults to `True`.\n\n :param fileName: Output file name (with extension - `json`). Defaults to\n `config.json`.\n\n :param overwrite: Overwrite existing configuration file, if exists (same filename).\n Defaults to `False`.\n\n :param keepBackup: If same file name exists, then setting the parameter to `True` will\n create a backup of the file with the following format\n `<original-name>.<UUID>.json` where `UUID` is a randomly generated\n 7-charecters long name. Defaults to `True`.\n\n Accepts n-Keyword Arguments, which are all default settings that can be used to initialize\n the API.\n ' outfile = join(__homepath__, fileName) if defaultSettings: attrs = self.__args_default__() else: attrs = kwargs attrs = {'__header__': {'program': __name__, 'version': __version__, 'homepath': __homepath__}, 'platform': {'platform': platform.platform(), 'architecture': platform.machine(), 'version': platform.version(), 'system': platform.system(), 'processor': platform.processor(), 'uname': platform.uname()}, 'attributes': attrs, 'timestamp': ctime()} def write_json(kv: dict, file: str): with open(file, 'w') as f: json.dump(kv, f, sort_keys=False, indent=4, default=str) if Path(outfile).is_file(): warnings.warn(f'{outfile} already exists.', FileExists) if keepBackup: try: (name, extension) = fileName.split('.') except ValueError as err: name = fileName.split('.')[0] extension = 'json' warnings.warn(f'{fileName} is not of proper type. Setting name as: {name}', ImproperFileName) new_file = '.'.join([name, str(uuid4())[:7], extension]) print(f'Old configuration file is available at {new_file}') try: copy(outfile, join(__homepath__, new_file)) except TypeError: copy(str(outfile), str(join(__homepath__, new_file))) else: warnings.warn(f'{outfile} will be overwritten without a backup.', FileExists) if overwrite: write_json(attrs, outfile) else: raise ValueError(f'{outfile} already exists, and `overwrite` is set to `False`') else: write_json(attrs, outfile) return True
def generate_config(self, defaultSettings: bool=True, fileName: str='config.json', overwrite: bool=False, keepBackup: bool=True, **kwargs) -> bool: 'Generate configuration file at `__homepath__` when executed\n\n The configuration file can be generated with default settings as defined at\n :func:`__args_default__` else, user is requested to pass all necessary settings\n in a correct format (as required by API) to the function, setting `key` as the\n attribute name, and `value` as the desired value. Users are also advised not to\n save the `API_KEY` in the configuration file (for security purposes), and to use\n :func:`_generate_key` to save the key file in an encrypted format.\n\n :param defaultSettings: Should you wish to save the configuration file with\n the default settings. If set to `False` then user is\n requested to pass all necessary attributes (`key`) and\n their values. Defaults to `True`.\n\n :param fileName: Output file name (with extension - `json`). Defaults to\n `config.json`.\n\n :param overwrite: Overwrite existing configuration file, if exists (same filename).\n Defaults to `False`.\n\n :param keepBackup: If the same file name exists, then setting the parameter to `True` will\n create a backup of the file with the following format\n `<original-name>.<UUID>.json` where `UUID` is a randomly generated\n 7-character long name. Defaults to `True`.\n\n Accepts n-Keyword Arguments, which are all default settings that can be used to initialize\n the API.\n ' outfile = join(__homepath__, fileName) if defaultSettings: attrs = self.__args_default__() else: attrs = kwargs attrs = {'__header__': {'program': __name__, 'version': __version__, 'homepath': __homepath__}, 'platform': {'platform': platform.platform(), 'architecture': platform.machine(), 'version': platform.version(), 'system': platform.system(), 'processor': platform.processor(), 'uname': platform.uname()}, 'attributes': attrs, 'timestamp': ctime()} def write_json(kv: dict, file: str): with open(file, 'w') as f: json.dump(kv, f, sort_keys=False, indent=4, default=str) if Path(outfile).is_file(): warnings.warn(f'{outfile} already exists.', FileExists) if keepBackup: try: (name, extension) = fileName.split('.') except ValueError as err: name = fileName.split('.')[0] extension = 'json' warnings.warn(f'{fileName} is not of proper type. Setting name as: {name}', ImproperFileName) new_file = '.'.join([name, str(uuid4())[:7], extension]) print(f'Old configuration file is available at {new_file}') try: copy(outfile, join(__homepath__, new_file)) except TypeError: copy(str(outfile), str(join(__homepath__, new_file))) else: warnings.warn(f'{outfile} will be overwritten without a backup.', FileExists) if overwrite: write_json(attrs, outfile) else: raise ValueError(f'{outfile} already exists, and `overwrite` is set to `False`') else: write_json(attrs, outfile) return True<|docstring|>Generate configuration file at `__homepath__` when executed The configuration file can be generated with default settings as defined at :func:`__args_default__` else, user is requested to pass all necessary settings in a correct format (as required by API) to the function, setting `key` as the attribute name, and `value` as the desired value. Users are also advised not to save the `API_KEY` in the configuration file (for security purposes), and to use :func:`_generate_key` to save the key file in an encrypted format. :param defaultSettings: Should you wish to save the configuration file with the default settings. If set to `False` then user is requested to pass all necessary attributes (`key`) and their values. Defaults to `True`. :param fileName: Output file name (with extension - `json`). Defaults to `config.json`. :param overwrite: Overwrite existing configuration file, if exists (same filename). Defaults to `False`. :param keepBackup: If the same file name exists, then setting the parameter to `True` will create a backup of the file with the following format `<original-name>.<UUID>.json` where `UUID` is a randomly generated 7-character long name. Defaults to `True`. Accepts n-Keyword Arguments, which are all default settings that can be used to initialize the API.<|endoftext|>
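A hypothetical usage sketch for the generate_config method above; the import path and constructor signature are assumptions for illustration, not taken from the record:

# Assumed entry point; the record only shows the method, not the class or module name.
from VisualCrossing import API

api = API()  # assumed no-argument construction, purely illustrative
# Writes config.json under __homepath__ with the default settings; if the file
# already exists it is first backed up as config.<7-char-uuid>.json, then overwritten.
api.generate_config(defaultSettings=True, fileName='config.json',
                    overwrite=True, keepBackup=True)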
34f23798ae001ffb7e170a119322d6ecd9f4762b1b3a80b818b65d45b275c6f5
def CreatePrivateKey() -> List[int]: 'Create private key.' lib.CreatePrivateKey.restype = BuffBytes return toBytes(lib.CreatePrivateKey())
Create private key.
wrappers/python/lirisi/library.py
CreatePrivateKey
noot/lirisi
0
python
def CreatePrivateKey() -> List[int]: lib.CreatePrivateKey.restype = BuffBytes return toBytes(lib.CreatePrivateKey())
def CreatePrivateKey() -> List[int]: lib.CreatePrivateKey.restype = BuffBytes return toBytes(lib.CreatePrivateKey())<|docstring|>Create private key.<|endoftext|>
9d5c5cd4b3909f22975e3397679bfde1bd42fdcb5c1f483db7627df5f59199f9
def ExtractPublicKey(privateKey: List[int]) -> List[int]: 'Extract public key from private key.' lib.ExtractPublicKey.argtypes = [GoSlice] lib.ExtractPublicKey.restype = BuffBytes return toBytes(lib.ExtractPublicKey(goSlice(privateKey)))
Extract public key from private key.
wrappers/python/lirisi/library.py
ExtractPublicKey
noot/lirisi
0
python
def ExtractPublicKey(privateKey: List[int]) -> List[int]: lib.ExtractPublicKey.argtypes = [GoSlice] lib.ExtractPublicKey.restype = BuffBytes return toBytes(lib.ExtractPublicKey(goSlice(privateKey)))
def ExtractPublicKey(privateKey: List[int]) -> List[int]: lib.ExtractPublicKey.argtypes = [GoSlice] lib.ExtractPublicKey.restype = BuffBytes return toBytes(lib.ExtractPublicKey(goSlice(privateKey)))<|docstring|>Extract public key from private key.<|endoftext|>
9482cf931b7a72b8396c10ad59fdca59720bc2c6bdbca23f637b204e06dd184a
def CreateRingOfPublicKeys(size: int) -> bytes: 'Create ring of public keys.' lib.CreateRingOfPublicKeys.argtypes = [ctypes.c_longlong] lib.CreateRingOfPublicKeys.restype = BuffBytes return toBytes(lib.CreateRingOfPublicKeys(size))
Create ring of public keys.
wrappers/python/lirisi/library.py
CreateRingOfPublicKeys
noot/lirisi
0
python
def CreateRingOfPublicKeys(size: int) -> bytes: lib.CreateRingOfPublicKeys.argtypes = [ctypes.c_longlong] lib.CreateRingOfPublicKeys.restype = BuffBytes return toBytes(lib.CreateRingOfPublicKeys(size))
def CreateRingOfPublicKeys(size: int) -> bytes: lib.CreateRingOfPublicKeys.argtypes = [ctypes.c_longlong] lib.CreateRingOfPublicKeys.restype = BuffBytes return toBytes(lib.CreateRingOfPublicKeys(size))<|docstring|>Create ring of public keys.<|endoftext|>
bc56fd96b07a37ee58f1e92169d2eb38b47fad110f14e748bc99ee8500e850bb
def CreateSignature(message: List[int], pubKeysRing: List[int], privateKey: List[int]) -> List[int]: 'Create ring signature from message, ring of public keys and private key.' lib.CreateSignature.argtypes = [GoSlice, GoSlice, GoSlice] lib.CreateSignature.restype = BuffBytes return toBytes(lib.CreateSignature(goSlice(message), goSlice(pubKeysRing), goSlice(privateKey)))
Create ring signature from message, ring of public keys and private key.
wrappers/python/lirisi/library.py
CreateSignature
noot/lirisi
0
python
def CreateSignature(message: List[int], pubKeysRing: List[int], privateKey: List[int]) -> List[int]: lib.CreateSignature.argtypes = [GoSlice, GoSlice, GoSlice] lib.CreateSignature.restype = BuffBytes return toBytes(lib.CreateSignature(goSlice(message), goSlice(pubKeysRing), goSlice(privateKey)))
def CreateSignature(message: List[int], pubKeysRing: List[int], privateKey: List[int]) -> List[int]: lib.CreateSignature.argtypes = [GoSlice, GoSlice, GoSlice] lib.CreateSignature.restype = BuffBytes return toBytes(lib.CreateSignature(goSlice(message), goSlice(pubKeysRing), goSlice(privateKey)))<|docstring|>Create ring signature from message, ring of public keys and private key.<|endoftext|>
04cab579ac82bc3df7183d759209698bad3950eb4c4b8c7a1a79f9256b3983fd
def VerifySignature(message: List[int], pubKeysRing: List[int], signature: List[int]) -> bool: 'Verify signature.' lib.VerifySignature.argtypes = [GoSlice, GoSlice, GoSlice] return bool(lib.VerifySignature(goSlice(message), goSlice(pubKeysRing), goSlice(signature)))
Verify signature.
wrappers/python/lirisi/library.py
VerifySignature
noot/lirisi
0
python
def VerifySignature(message: List[int], pubKeysRing: List[int], signature: List[int]) -> bool: lib.VerifySignature.argtypes = [GoSlice, GoSlice, GoSlice] return bool(lib.VerifySignature(goSlice(message), goSlice(pubKeysRing), goSlice(signature)))
def VerifySignature(message: List[int], pubKeysRing: List[int], signature: List[int]) -> bool: lib.VerifySignature.argtypes = [GoSlice, GoSlice, GoSlice] return bool(lib.VerifySignature(goSlice(message), goSlice(pubKeysRing), goSlice(signature)))<|docstring|>Verify signature.<|endoftext|>
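The five wrappers above compose into a full sign/verify round trip. A minimal sketch, assuming the shared library loads, that the wrappers return plain byte lists (as toBytes() below suggests), and that a ring is simply the concatenation of serialized public keys:

from lirisi.library import (CreatePrivateKey, ExtractPublicKey,
                            CreateRingOfPublicKeys, CreateSignature,
                            VerifySignature)

private_key = CreatePrivateKey()
public_key = ExtractPublicKey(private_key)

# Nine decoy keys plus our own; flat concatenation of byte lists is assumed
# to be the serialized-ring layout the library expects.
ring = list(CreateRingOfPublicKeys(9)) + list(public_key)

message = list(b'Hello, world!')
signature = CreateSignature(message, ring, private_key)
assert VerifySignature(message, ring, signature)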
61b773f575c157076bd4d906e15f75b1cfcabab221bd0bc3da7ffa9f58e12cf3
def SignToPEM(sign: List[int]) -> List[int]: 'Signature to PEM.' lib.SignToPEM.argtypes = [GoSlice] lib.SignToPEM.restype = BuffBytes return toBytes(lib.SignToPEM(goSlice(sign)))
Signature to PEM.
wrappers/python/lirisi/library.py
SignToPEM
noot/lirisi
0
python
def SignToPEM(sign: List[int]) -> List[int]: lib.SignToPEM.argtypes = [GoSlice] lib.SignToPEM.restype = BuffBytes return toBytes(lib.SignToPEM(goSlice(sign)))
def SignToPEM(sign: List[int]) -> List[int]: lib.SignToPEM.argtypes = [GoSlice] lib.SignToPEM.restype = BuffBytes return toBytes(lib.SignToPEM(goSlice(sign)))<|docstring|>Signature to PEM.<|endoftext|>
766d6f5b73275a7344af3dcf12e74ebb5dd6f28ce7f57f365f64fd5d7b22b19e
def PEMtoSign(sign: List[int]) -> List[int]: 'PEM to signature.' lib.PEMtoSign.argtypes = [GoSlice] lib.PEMtoSign.restype = BuffBytes return toBytes(lib.PEMtoSign(goSlice(sign)))
PEM to signature.
wrappers/python/lirisi/library.py
PEMtoSign
noot/lirisi
0
python
def PEMtoSign(sign: List[int]) -> List[int]: lib.PEMtoSign.argtypes = [GoSlice] lib.PEMtoSign.restype = BuffBytes return toBytes(lib.PEMtoSign(goSlice(sign)))
def PEMtoSign(sign: List[int]) -> List[int]: lib.PEMtoSign.argtypes = [GoSlice] lib.PEMtoSign.restype = BuffBytes return toBytes(lib.PEMtoSign(goSlice(sign)))<|docstring|>PEM to signature.<|endoftext|>
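SignToPEM and PEMtoSign are intended as inverses; continuing the sketch above (signature as produced by CreateSignature), the round trip is expected to be lossless:

from lirisi.library import SignToPEM, PEMtoSign

pem = SignToPEM(signature)                      # PEM-armored byte list
print(bytes(pem).decode())                      # -----BEGIN ...----- block (exact label assumed)
assert list(PEMtoSign(pem)) == list(signature)  # back to the raw signature bytes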
ff857af3e0e3470dac4574472ec33d70a61b1648d538a86f706c030a5db62871
def GetPubKeyBytesSize() -> int: 'Get size in bytes of a serialized public key.' return lib.GetPubKeyBytesSize()
Get size in bytes of a serialized public key.
wrappers/python/lirisi/library.py
GetPubKeyBytesSize
noot/lirisi
0
python
def GetPubKeyBytesSize() -> int: return lib.GetPubKeyBytesSize()
def GetPubKeyBytesSize() -> int: return lib.GetPubKeyBytesSize()<|docstring|>Get size in bytes of a serialized public key.<|endoftext|>
334139f55cc49861d5d0d883b2c49d7f10a012797524705649ba937c0264c00a
def GetKeyImage(sign: List[int]) -> List[int]: 'Get KeyImage from signature.' lib.GetKeyImage.argtypes = [GoSlice] lib.GetKeyImage.restype = BuffBytes return toBytes(lib.GetKeyImage(goSlice(sign)))
Get KeyImage from signature.
wrappers/python/lirisi/library.py
GetKeyImage
noot/lirisi
0
python
def GetKeyImage(sign: List[int]) -> List[int]: lib.GetKeyImage.argtypes = [GoSlice] lib.GetKeyImage.restype = BuffBytes return toBytes(lib.GetKeyImage(goSlice(sign)))
def GetKeyImage(sign: List[int]) -> List[int]: lib.GetKeyImage.argtypes = [GoSlice] lib.GetKeyImage.restype = BuffBytes return toBytes(lib.GetKeyImage(goSlice(sign)))<|docstring|>Get KeyImage from signature.<|endoftext|>
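GetKeyImage exposes the linkability tag of the scheme: two signatures made with the same private key share a key image even though the signer stays anonymous within the ring. A hedged sketch of double-signing detection (signature_a and signature_b are placeholders for two signatures over the same ring):

from lirisi.library import GetKeyImage, ToHex

seen = set()
for sig in (signature_a, signature_b):  # placeholder signatures
    tag = ToHex(GetKeyImage(sig))       # hex tag; ToHex is defined below
    if tag in seen:
        print('linked: same signer produced both signatures')
    seen.add(tag)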
f4c3942d4b90cb91930a3837e43291688207b76fa132e4e7716131fef370e44d
def goSlice(values: List[int]) -> GoSlice: 'Make GoSlice instance with array of bytes.' length = len(values) return GoSlice((ctypes.c_ubyte * length)(*values), length, length)
Make GoSlice instance with array of bytes.
wrappers/python/lirisi/library.py
goSlice
noot/lirisi
0
python
def goSlice(values: List[int]) -> GoSlice: length = len(values) return GoSlice((ctypes.c_ubyte * length)(*values), length, length)
def goSlice(values: List[int]) -> GoSlice: length = len(values) return GoSlice((ctypes.c_ubyte * length)(*values), length, length)<|docstring|>Make GoSlice instance with array of bytes.<|endoftext|>
3fe6e0e0541faa78c7f5e0f2b32dca4976a3495256da940eeadfbd3d873ab1ed
def toBytes(buff: BuffBytes) -> List[int]: 'Create array of bytes from buffer.' size = int.from_bytes(buff.data[:8], byteorder='little') return buff.data[:(size + 8)][8:]
Create array of bytes from buffer.
wrappers/python/lirisi/library.py
toBytes
noot/lirisi
0
python
def toBytes(buff: BuffBytes) -> List[int]: size = int.from_bytes(buff.data[:8], byteorder='little') return buff.data[:(size + 8)][8:]
def toBytes(buff: BuffBytes) -> List[int]: size = int.from_bytes(buff.data[:8], byteorder='little') return buff.data[:(size + 8)][8:]<|docstring|>Create array of bytes from buffer.<|endoftext|>
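toBytes() implies that every buffer coming back from the Go side is framed as an 8-byte little-endian length followed by the payload. A self-contained illustration of that framing, with no shared library involved:

# [8-byte little-endian size][payload...] -- the layout toBytes() unpacks.
payload = b'\x01\x02\x03'
framed = len(payload).to_bytes(8, byteorder='little') + payload

size = int.from_bytes(framed[:8], byteorder='little')
assert framed[8:8 + size] == payload  # same slice arithmetic as toBytes()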
3ccea5a23e4a55d0287902ac4cb9e6816934c6fc61a5e05d2cdb224e72f5fd9b
def ToHex(array: List[int]) -> bytes: 'Convert array of bytes to hex string.' return ''.join(['{:>02x}'.format(n) for n in array]).encode()
Convert array of bytes to hex string.
wrappers/python/lirisi/library.py
ToHex
noot/lirisi
0
python
def ToHex(array: List[int]) -> bytes: return ''.join(['{:>02x}'.format(n) for n in array]).encode()
def ToHex(array: List[int]) -> bytes: return ''.join(['{:>02x}'.format(n) for n in array]).encode()<|docstring|>Convert array of bytes to hex string.<|endoftext|>
b5c489c5f18280694658e16d34f47a6ba3072bfac22c500779654ba94e85fcfa
def ToBase64(array: List[int]) -> bytes: 'Convert array of bytes to base64 string.' return base64.standard_b64encode(bytes(array))
Convert array of bytes to base64 string.
wrappers/python/lirisi/library.py
ToBase64
noot/lirisi
0
python
def ToBase64(array: List[int]) -> bytes: return base64.standard_b64encode(bytes(array))
def ToBase64(array: List[int]) -> bytes: return base64.standard_b64encode(bytes(array))<|docstring|>Convert array of bytes to base64 string.<|endoftext|>
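Both encoders return bytes, since ToHex calls .encode() on the joined string and ToBase64 returns the b64encode result directly. On a small input:

from lirisi.library import ToHex, ToBase64

data = [0xDE, 0xAD, 0xBE, 0xEF]
print(ToHex(data))     # b'deadbeef'
print(ToBase64(data))  # b'3q2+7w=='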
bbe5a6e42128459842608f88e94270182dca0b8a773f87f65b788251bd547468
def create_glove_embedding_init(idx2word, glove_file): '\n Bilinear Attention Networks\n Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang\n https://github.com/jnhwkim/ban-vqa\n ' word2emb = {} with open(glove_file, 'r', encoding='utf-8') as f: entries = f.readlines() emb_dim = (len(entries[0].split(' ')) - 1) print(('embedding dim is %d' % emb_dim)) weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32) for entry in entries: vals = entry.split(' ') word = vals[0] vals = list(map(float, vals[1:])) word2emb[word] = np.array(vals) for idx in range(1, len(idx2word)): word = idx2word.get(str(idx)) if (word not in word2emb): continue weights[(idx - 1)] = word2emb[word] return weights
Bilinear Attention Networks Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang https://github.com/jnhwkim/ban-vqa
utils/utils.py
create_glove_embedding_init
gicheonkang/DAN-VisDial
35
python
def create_glove_embedding_init(idx2word, glove_file): word2emb = {} with open(glove_file, 'r', encoding='utf-8') as f: entries = f.readlines() emb_dim = (len(entries[0].split(' ')) - 1) print(('embedding dim is %d' % emb_dim)) weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32) for entry in entries: vals = entry.split(' ') word = vals[0] vals = list(map(float, vals[1:])) word2emb[word] = np.array(vals) for idx in range(1, len(idx2word)): word = idx2word.get(str(idx)) if (word not in word2emb): continue weights[(idx - 1)] = word2emb[word] return weights
def create_glove_embedding_init(idx2word, glove_file): '\n Bilinear Attention Networks\n Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang\n https://github.com/jnhwkim/ban-vqa\n ' word2emb = {} with open(glove_file, 'r', encoding='utf-8') as f: entries = f.readlines() emb_dim = (len(entries[0].split(' ')) - 1) print(('embedding dim is %d' % emb_dim)) weights = np.zeros((len(idx2word), emb_dim), dtype=np.float32) for entry in entries: vals = entry.split(' ') word = vals[0] vals = list(map(float, vals[1:])) word2emb[word] = np.array(vals) for idx in range(1, len(idx2word)): word = idx2word.get(str(idx)) if (word not in word2emb): continue weights[(idx - 1)] = word2emb[word] return weights<|docstring|>Bilinear Attention Networks Jin-Hwa Kim, Jaehyun Jun, Byoung-Tak Zhang https://github.com/jnhwkim/ban-vqa<|endoftext|>
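A usage sketch for the GloVe initializer above. Note that idx2word is keyed by string indices starting at 1 (the loop skips 0, presumably reserved for padding), and that the file paths and JSON key here are placeholders:

import json

# Placeholder paths; any GloVe text file and string-keyed vocabulary will do.
with open('data/visdial_params.json') as f:
    idx2word = json.load(f)['ind2word']  # e.g. {'1': 'yes', '2': 'no', ...}

weights = create_glove_embedding_init(idx2word, 'data/glove.6B.300d.txt')
print(weights.shape)  # (len(idx2word), 300) for the 300-dimensional GloVe file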
7cb6268a95ffeb1411c888bc1f548925a928be4eb9416299e7b7816d9e89a111
@mock.patch('datapackage_pipelines.lib.dump.dumper_base.ingest') @mock.patch('datapackage_pipelines.lib.dump.dumper_base.spew') def mock_dump_test(processor, ingest_tuple, mock_spew, mock_ingest): 'Helper function returns the `spew` for a given processor with a given\n `ingest` tuple.' mock_ingest.return_value = ingest_tuple file_path = processor module_name = '__main__' spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return mock_spew.call_args
Helper function returns the `spew` for a given processor with a given `ingest` tuple.
tests/test_dump_to_ckan.py
mock_dump_test
OriHoch/datapackage-pipelines-ckan
0
python
@mock.patch('datapackage_pipelines.lib.dump.dumper_base.ingest') @mock.patch('datapackage_pipelines.lib.dump.dumper_base.spew') def mock_dump_test(processor, ingest_tuple, mock_spew, mock_ingest): 'Helper function returns the `spew` for a given processor with a given\n `ingest` tuple.' mock_ingest.return_value = ingest_tuple file_path = processor module_name = '__main__' spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return mock_spew.call_args
@mock.patch('datapackage_pipelines.lib.dump.dumper_base.ingest') @mock.patch('datapackage_pipelines.lib.dump.dumper_base.spew') def mock_dump_test(processor, ingest_tuple, mock_spew, mock_ingest): 'Helper function returns the `spew` for a given processor with a given\n `ingest` tuple.' mock_ingest.return_value = ingest_tuple file_path = processor module_name = '__main__' spec = importlib.util.spec_from_file_location(module_name, file_path) module = importlib.util.module_from_spec(spec) spec.loader.exec_module(module) return mock_spew.call_args<|docstring|>Helper function returns the `spew` for a given processor with a given `ingest` tuple.<|endoftext|>
b20cd1370460e073819867bd1aa7260f51d4d69c14256fa93cc6de63a21a7ab6
@requests_mock.mock() def test_dump_to_ckan_package_create_error(self, mock_request): 'Create failed due to existing package, no overwrite so raise\n exception' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) mock_request.post(package_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['That URL is already in use.']}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': []} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key'} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] with self.assertRaises(Exception): list(spew_res_iter) requests = mock_request.request_history assert (len(requests) == 1) assert (requests[0].url == package_create_url)
Create failed due to existing package, no overwrite so raise exception
tests/test_dump_to_ckan.py
test_dump_to_ckan_package_create_error
OriHoch/datapackage-pipelines-ckan
0
python
@requests_mock.mock() def test_dump_to_ckan_package_create_error(self, mock_request): 'Create failed due to existing package, no overwrite so raise\n exception' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) mock_request.post(package_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['That URL is already in use.']}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': []} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key'} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] with self.assertRaises(Exception): list(spew_res_iter) requests = mock_request.request_history assert (len(requests) == 1) assert (requests[0].url == package_create_url)
@requests_mock.mock() def test_dump_to_ckan_package_create_error(self, mock_request): 'Create failed due to existing package, no overwrite so raise\n exception' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) mock_request.post(package_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['That URL is already in use.']}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': []} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key'} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] with self.assertRaises(Exception): list(spew_res_iter) requests = mock_request.request_history assert (len(requests) == 1) assert (requests[0].url == package_create_url)<|docstring|>Create failed due to existing package, no overwrite so raise exception<|endoftext|>
573fd7663a38669d305c36011a5e7e77bf6e0b7fb490f79acbbfc636fd15b139
@requests_mock.mock() def test_dump_to_ckan_package_create_error_overwrite(self, mock_request): 'Create failed due to existing package, overwrite so update existing\n package.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) package_update_url = '{}package_update'.format(base_url) mock_request.post(package_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['That URL is already in use.']}}) mock_request.post(package_update_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': []} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] assert (list(spew_res_iter) == []) requests = mock_request.request_history assert (len(requests) == 2) assert (requests[0].url == package_create_url) assert (requests[1].url == package_update_url)
Create failed due to existing package, overwrite so update existing package.
tests/test_dump_to_ckan.py
test_dump_to_ckan_package_create_error_overwrite
OriHoch/datapackage-pipelines-ckan
0
python
@requests_mock.mock() def test_dump_to_ckan_package_create_error_overwrite(self, mock_request): 'Create failed due to existing package, overwrite so update existing\n package.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) package_update_url = '{}package_update'.format(base_url) mock_request.post(package_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['That URL is already in use.']}}) mock_request.post(package_update_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': []} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] assert (list(spew_res_iter) == []) requests = mock_request.request_history assert (len(requests) == 2) assert (requests[0].url == package_create_url) assert (requests[1].url == package_update_url)
@requests_mock.mock() def test_dump_to_ckan_package_create_error_overwrite(self, mock_request): 'Create failed due to existing package, overwrite so update existing\n package.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) package_update_url = '{}package_update'.format(base_url) mock_request.post(package_create_url, json={'success': False, 'error': {'__type': 'Validation Error', 'name': ['That URL is already in use.']}}) mock_request.post(package_update_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': []} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] assert (list(spew_res_iter) == []) requests = mock_request.request_history assert (len(requests) == 2) assert (requests[0].url == package_create_url) assert (requests[1].url == package_update_url)<|docstring|>Create failed due to existing package, overwrite so update existing package.<|endoftext|>
063eb53499df3284b2659b1aeca0b91dc748fd72b13e6b851a164e96c0978d1f
@requests_mock.mock() def test_dump_to_ckan_package_create_resources(self, mock_request): 'Create package with non-streaming resources.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'name': 'resource_not_streamed', 'path': '.', 'format': 'csv'}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed_02', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] assert (list(spew_res_iter) == []) requests = mock_request.request_history assert (len(requests) == 3) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url)
Create package with non-streaming resources.
tests/test_dump_to_ckan.py
test_dump_to_ckan_package_create_resources
OriHoch/datapackage-pipelines-ckan
0
python
@requests_mock.mock() def test_dump_to_ckan_package_create_resources(self, mock_request): base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'name': 'resource_not_streamed', 'path': '.', 'format': 'csv'}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed_02', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] assert (list(spew_res_iter) == []) requests = mock_request.request_history assert (len(requests) == 3) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url)
@requests_mock.mock() def test_dump_to_ckan_package_create_resources(self, mock_request): base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'name': 'resource_not_streamed', 'path': '.', 'format': 'csv'}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed_02', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, [])) spew_res_iter = spew_args[1] assert (list(spew_res_iter) == []) requests = mock_request.request_history assert (len(requests) == 3) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url)<|docstring|>Create package with non-streaming resources.<|endoftext|>
0fdbc62f4eb3fef30676e3c901e6cb9d4f1644c0de2aed8aaf4795574bd74f79
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource(self, mock_request): 'Create package with streaming resource.' base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] for r in spew_res_iter: list(r) requests = mock_request.request_history assert (len(requests) == 3) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url)
Create package with streaming resource.
tests/test_dump_to_ckan.py
test_dump_to_ckan_package_create_streaming_resource
OriHoch/datapackage-pipelines-ckan
0
python
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource(self, mock_request): base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] for r in spew_res_iter: list(r) requests = mock_request.request_history assert (len(requests) == 3) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url)
@requests_mock.mock() def test_dump_to_ckan_package_create_streaming_resource(self, mock_request): base_url = 'https://demo.ckan.org/api/3/action/' package_create_url = '{}package_create'.format(base_url) resource_create_url = '{}resource_create'.format(base_url) mock_request.post(package_create_url, json={'success': True, 'result': {'id': 'ckan-package-id'}}) mock_request.post(resource_create_url, json={'success': True, 'result': {'id': 'ckan-resource-id'}}) datapackage = {'name': 'my-datapackage', 'project': 'my-project', 'resources': [{'dpp:streamedFrom': 'https://example.com/file.csv', 'dpp:streaming': True, 'name': 'resource_streamed.csv', 'path': 'data/file.csv', 'schema': {'fields': [{'name': 'first', 'type': 'string'}, {'name': 'last', 'type': 'string'}]}}, {'dpp:streamedFrom': 'https://example.com/file_02.csv', 'name': 'resource_not_streamed.csv', 'path': '.'}]} params = {'ckan-host': 'https://demo.ckan.org', 'ckan-api-key': 'my-api-key', 'overwrite_existing': True, 'force-format': True} processor_dir = os.path.dirname(datapackage_pipelines_ckan.processors.__file__) processor_path = os.path.join(processor_dir, 'dump/to_ckan.py') json_file = {'first': 'Fred', 'last': 'Smith'} json_file = json.dumps(json_file) (spew_args, _) = mock_dump_test(processor_path, (params, datapackage, iter([ResourceIterator(io.StringIO(json_file), datapackage['resources'][0], {'schema': {'fields': []}})]))) spew_res_iter = spew_args[1] for r in spew_res_iter: list(r) requests = mock_request.request_history assert (len(requests) == 3) assert (requests[0].url == package_create_url) assert (requests[1].url == resource_create_url) assert (requests[2].url == resource_create_url)<|docstring|>Create package with streaming resource.<|endoftext|>