body_hash (string, len 64) | body (string, len 23-109k) | docstring (string, len 1-57k) | path (string, len 4-198) | name (string, len 1-115) | repository_name (string, len 7-111) | repository_stars (float64, 0-191k) | lang (string, 1 class) | body_without_docstring (string, len 14-108k) | unified (string, len 45-133k) |
---|---|---|---|---|---|---|---|---|---|
7a339fc6311f8b392412a46f8e57b0a854195c8db17de5048b039b86d3862c54
|
def powerset(lst):
'returns the power set of the list - the set of all subsets of the list'
if (lst == []):
return [[]]
lose_it = powerset(lst[1:])
use_it = map((lambda subset: ([lst[0]] + subset)), lose_it)
return (lose_it + use_it)
|
returns the power set of the list - the set of all subsets of the list
|
use_it_or_lose_it.py
|
powerset
|
jschmidtnj/CS115
| 0 |
python
|
def powerset(lst):
if (lst == []):
return [[]]
lose_it = powerset(lst[1:])
use_it = map((lambda subset: ([lst[0]] + subset)), lose_it)
return (lose_it + use_it)
|
def powerset(lst):
if (lst == []):
return [[]]
lose_it = powerset(lst[1:])
use_it = map((lambda subset: ([lst[0]] + subset)), lose_it)
return (lose_it + use_it)<|docstring|>returns the power set of the list - the set of all subsets of the list<|endoftext|>
|
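Illustrative usage for the powerset record above (not part of the dataset row): in Python 3, map returns an iterator, so the recorded body's `lose_it + use_it` concatenation raises a TypeError; the sketch below assumes Python 3 and wraps the map call in list().

def powerset(lst):
    # power set of lst: the list of all subsets of lst
    if lst == []:
        return [[]]
    lose_it = powerset(lst[1:])                                    # subsets that skip lst[0]
    use_it = list(map(lambda subset: [lst[0]] + subset, lose_it))  # subsets that keep lst[0]
    return lose_it + use_it

print(powerset([1, 2]))  # [[], [2], [1], [1, 2]]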
257b4f313c5f30370c50cf47a804da8fd504f7a0a4b473f22827338d1f938ca1
|
def subset(target, lst):
'determines whether or not it is possible to create target sum using the\n values in the list. Values in teh list can be positive, negative, or zero.'
if (target == 0):
return True
if (lst == []):
return False
'and and or are short-cut operators in python. THe second operand is not evaluated\n when the overall result can be deduced by evaluating the second operand'
return (subset((target - lst[0]), lst[1:]) or subset(target, lst[1:]))
|
determines whether or not it is possible to create target sum using the
values in the list. Values in teh list can be positive, negative, or zero.
|
use_it_or_lose_it.py
|
subset
|
jschmidtnj/CS115
| 0 |
python
|
def subset(target, lst):
'determines whether or not it is possible to create target sum using the\n values in the list. Values in teh list can be positive, negative, or zero.'
if (target == 0):
return True
if (lst == []):
return False
'and and or are short-cut operators in python. THe second operand is not evaluated\n when the overall result can be deduced by evaluating the second operand'
return (subset((target - lst[0]), lst[1:]) or subset(target, lst[1:]))
|
def subset(target, lst):
'determines whether or not it is possible to create target sum using the\n values in the list. Values in teh list can be positive, negative, or zero.'
if (target == 0):
return True
if (lst == []):
return False
'and and or are short-cut operators in python. THe second operand is not evaluated\n when the overall result can be deduced by evaluating the second operand'
return (subset((target - lst[0]), lst[1:]) or subset(target, lst[1:]))<|docstring|>determines whether or not it is possible to create target sum using the
values in the list. Values in teh list can be positive, negative, or zero.<|endoftext|>
|
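A minimal check of the subset record above (illustrative only): it relies on the function exactly as recorded and on Python's short-circuiting `or`.

def subset(target, lst):
    # True if some subset of lst sums to target
    if target == 0:
        return True
    if lst == []:
        return False
    # 'or' short-circuits: the lose-it call runs only when the use-it call returns False
    return subset(target - lst[0], lst[1:]) or subset(target, lst[1:])

print(subset(5, [1, -2, 4, 3]))  # True, e.g. 1 + 4 == 5
print(subset(10, [1, 2, 3]))     # False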
e5e49f26aec371324d72fd1089feb485b3addb2839d16eb174d981fbcf0e1500
|
def subset_with_values(target, lst):
'Determines whether or not it is possible to create the target sum using\n values in the list. Values in the list can be positive, negative, or zero.\n The function returns a tuple of exactly two items. The first is a boolean,\n that indicates true if the sum is possible and false if it is not. The second\n element in the tuple is a list of all values that add up to make the target sum.'
if (target == 0):
return (True, [])
if (lst == []):
return (False, [])
use_it = subset_with_values((target - lst[0]), lst[1:])
if use_it[0]:
return (True, ([lst[0]] + use_it[1]))
return subset_with_values(target, lst[1:])
|
Determines whether or not it is possible to create the target sum using
values in the list. Values in the list can be positive, negative, or zero.
The function returns a tuple of exactly two items. The first is a boolean,
that indicates true if the sum is possible and false if it is not. The second
element in the tuple is a list of all values that add up to make the target sum.
|
use_it_or_lose_it.py
|
subset_with_values
|
jschmidtnj/CS115
| 0 |
python
|
def subset_with_values(target, lst):
'Determines whether or not it is possible to create the target sum using\n values in the list. Values in the list can be positive, negative, or zero.\n The function returns a tuple of exactly two items. The first is a boolean,\n that indicates true if the sum is possible and false if it is not. The second\n element in the tuple is a list of all values that add up to make the target sum.'
if (target == 0):
return (True, [])
if (lst == []):
return (False, [])
use_it = subset_with_values((target - lst[0]), lst[1:])
if use_it[0]:
return (True, ([lst[0]] + use_it[1]))
return subset_with_values(target, lst[1:])
|
def subset_with_values(target, lst):
'Determines whether or not it is possible to create the target sum using\n values in the list. Values in the list can be positive, negative, or zero.\n The function returns a tuple of exactly two items. The first is a boolean,\n that indicates true if the sum is possible and false if it is not. The second\n element in the tuple is a list of all values that add up to make the target sum.'
if (target == 0):
return (True, [])
if (lst == []):
return (False, [])
use_it = subset_with_values((target - lst[0]), lst[1:])
if use_it[0]:
return (True, ([lst[0]] + use_it[1]))
return subset_with_values(target, lst[1:])<|docstring|>Determines whether or not it is possible to create the target sum using
values in the list. Values in the list can be positive, negative, or zero.
The function returns a tuple of exactly two items. The first is a boolean,
that indicates true if the sum is possible and false if it is not. The second
element in the tuple is a list of all values that add up to make the target sum.<|endoftext|>
|
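An illustrative call to the subset_with_values record above (not part of the row); the returned list is one witness subset, built by prepending lst[0] whenever the use-it branch succeeds.

def subset_with_values(target, lst):
    # (True, values) when some subset of lst sums to target, else (False, [])
    if target == 0:
        return (True, [])
    if lst == []:
        return (False, [])
    use_it = subset_with_values(target - lst[0], lst[1:])
    if use_it[0]:
        return (True, [lst[0]] + use_it[1])
    return subset_with_values(target, lst[1:])

print(subset_with_values(7, [3, 5, 2, 4]))  # (True, [3, 4])
print(subset_with_values(1, [2, 4]))        # (False, [])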
1180aef177e00a7195b28a252cbc0d5dfcf793d862a5056a21f57cfa9db9dce4
|
def LCSWithValues(S1, S2):
'returns the longest common string'
if ((S1 == '') or (S2 == '')):
return (0, '')
if (S1[0] == S2[0]):
result = LCSWithValues(S1[1:], S2[1:])
return ((1 + result[0]), (S1[0] + result[1]))
useS1 = LCSWithValues(S1, S2[1:])
useS2 = LCSWithValues(S1[1:], S2)
if (useS1[0] > useS2[0]):
return useS1
return useS2
|
returns the longest common string
|
use_it_or_lose_it.py
|
LCSWithValues
|
jschmidtnj/CS115
| 0 |
python
|
def LCSWithValues(S1, S2):
    if ((S1 == '') or (S2 == '')):
        return (0, '')
if (S1[0] == S2[0]):
result = LCSWithValues(S1[1:], S2[1:])
return ((1 + result[0]), (S1[0] + result[1]))
useS1 = LCSWithValues(S1, S2[1:])
useS2 = LCSWithValues(S1[1:], S2)
if (useS1[0] > useS2[0]):
return useS1
return useS2
|
def LCSWithValues(S1, S2):
    if ((S1 == '') or (S2 == '')):
        return (0, '')
if (S1[0] == S2[0]):
result = LCSWithValues(S1[1:], S2[1:])
return ((1 + result[0]), (S1[0] + result[1]))
useS1 = LCSWithValues(S1, S2[1:])
useS2 = LCSWithValues(S1[1:], S2)
if (useS1[0] > useS2[0]):
return useS1
return useS2<|docstring|>returns the longest common string<|endoftext|>
|
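Illustrative usage for the LCSWithValues record above (not part of the row): despite the docstring saying "longest common string", the recursion computes the longest common subsequence of the two strings and its length.

def LCSWithValues(S1, S2):
    # (length, characters) of the longest common subsequence of S1 and S2
    if S1 == '' or S2 == '':
        return (0, '')
    if S1[0] == S2[0]:
        result = LCSWithValues(S1[1:], S2[1:])
        return (1 + result[0], S1[0] + result[1])
    useS1 = LCSWithValues(S1, S2[1:])
    useS2 = LCSWithValues(S1[1:], S2)
    if useS1[0] > useS2[0]:
        return useS1
    return useS2

print(LCSWithValues('abcde', 'ace'))  # (3, 'ace')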
a8cc646fceff27f6807c527fcc5de9e4cc0225b42d31a8472307206b12926af3
|
def _get_all_query_string(self, changelist):
"\n If there's a default value set the all parameter needs to be provided\n however, if a default is not set the all parameter is not required.\n "
if self.default_filter_value:
return changelist.get_query_string({self.parameter_name: self.show_all_param_value})
return changelist.get_query_string(remove=[self.parameter_name])
|
If there's a default value set the all parameter needs to be provided
however, if a default is not set the all parameter is not required.
|
djangocms_content_expiry/filters.py
|
_get_all_query_string
|
Aiky30/djangocms-content-expiry
| 0 |
python
|
def _get_all_query_string(self, changelist):
"\n If there's a default value set the all parameter needs to be provided\n however, if a default is not set the all parameter is not required.\n "
if self.default_filter_value:
return changelist.get_query_string({self.parameter_name: self.show_all_param_value})
return changelist.get_query_string(remove=[self.parameter_name])
|
def _get_all_query_string(self, changelist):
"\n If there's a default value set the all parameter needs to be provided\n however, if a default is not set the all parameter is not required.\n "
if self.default_filter_value:
return changelist.get_query_string({self.parameter_name: self.show_all_param_value})
return changelist.get_query_string(remove=[self.parameter_name])<|docstring|>If there's a default value set the all parameter needs to be provided
however, if a default is not set the all parameter is not required.<|endoftext|>
|
c9a9b792051ccf98b58fe3e340309a60dddae0809d3298f406372c96fd4945d0
|
def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString}
response_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString}
method_implementations = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages)}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
|
The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
beta_create_ImageAnnotator_server
|
maheshgurav/google-cloud-python
| 2 |
python
|
def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString}
response_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString}
method_implementations = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages)}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)
|
def beta_create_ImageAnnotator_server(servicer, pool=None, pool_size=None, default_timeout=None, maximum_timeout=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.FromString}
response_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.SerializeToString}
method_implementations = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): face_utilities.unary_unary_inline(servicer.BatchAnnotateImages)}
server_options = beta_implementations.server_options(request_deserializers=request_deserializers, response_serializers=response_serializers, thread_pool=pool, thread_pool_size=pool_size, default_timeout=default_timeout, maximum_timeout=maximum_timeout)
return beta_implementations.server(method_implementations, options=server_options)<|docstring|>The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0<|endoftext|>
|
370c1936ef489eb7ee1b51965491fe537a9911ba2a33d3a3330134554a24997c
|
def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString}
response_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString}
cardinalities = {'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options)
|
The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
beta_create_ImageAnnotator_stub
|
maheshgurav/google-cloud-python
| 2 |
python
|
def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString}
response_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString}
cardinalities = {'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options)
|
def beta_create_ImageAnnotator_stub(channel, host=None, metadata_transformer=None, pool=None, pool_size=None):
'The Beta API is deprecated for 0.15.0 and later.\n\n It is recommended to use the GA API (classes and functions in this\n file not marked beta) for all further purposes. This function was\n generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0'
request_serializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesRequest.SerializeToString}
response_deserializers = {('google.cloud.vision.v1p1beta1.ImageAnnotator', 'BatchAnnotateImages'): BatchAnnotateImagesResponse.FromString}
cardinalities = {'BatchAnnotateImages': cardinality.Cardinality.UNARY_UNARY}
stub_options = beta_implementations.stub_options(host=host, metadata_transformer=metadata_transformer, request_serializers=request_serializers, response_deserializers=response_deserializers, thread_pool=pool, thread_pool_size=pool_size)
return beta_implementations.dynamic_stub(channel, 'google.cloud.vision.v1p1beta1.ImageAnnotator', cardinalities, options=stub_options)<|docstring|>The Beta API is deprecated for 0.15.0 and later.
It is recommended to use the GA API (classes and functions in this
file not marked beta) for all further purposes. This function was
generated only to ease transition from grpcio<0.15.0 to grpcio>=0.15.0<|endoftext|>
|
a2da290fda118c851c1d28cf948bc178738790ba0bc9027eda999f069bc99794
|
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.BatchAnnotateImages = channel.unary_unary('/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages', request_serializer=BatchAnnotateImagesRequest.SerializeToString, response_deserializer=BatchAnnotateImagesResponse.FromString)
|
Constructor.
Args:
channel: A grpc.Channel.
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
__init__
|
maheshgurav/google-cloud-python
| 2 |
python
|
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.BatchAnnotateImages = channel.unary_unary('/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages', request_serializer=BatchAnnotateImagesRequest.SerializeToString, response_deserializer=BatchAnnotateImagesResponse.FromString)
|
def __init__(self, channel):
'Constructor.\n\n Args:\n channel: A grpc.Channel.\n '
self.BatchAnnotateImages = channel.unary_unary('/google.cloud.vision.v1p1beta1.ImageAnnotator/BatchAnnotateImages', request_serializer=BatchAnnotateImagesRequest.SerializeToString, response_deserializer=BatchAnnotateImagesResponse.FromString)<|docstring|>Constructor.
Args:
channel: A grpc.Channel.<|endoftext|>
|
687d56eb0494d3fc806bb634bc9463d25c10413281d39682fde3a365ef582b92
|
def BatchAnnotateImages(self, request, context):
'Run image detection and annotation for a batch of images.\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
Run image detection and annotation for a batch of images.
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
BatchAnnotateImages
|
maheshgurav/google-cloud-python
| 2 |
python
|
def BatchAnnotateImages(self, request, context):
'\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
|
def BatchAnnotateImages(self, request, context):
'\n '
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')<|docstring|>Run image detection and annotation for a batch of images.<|endoftext|>
|
8e238283f42b7c6507b7c2685cd7fb595fe1bc5630818a592e0b3e85929974bb
|
def BatchAnnotateImages(self, request, context):
'Run image detection and annotation for a batch of images.\n '
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
|
Run image detection and annotation for a batch of images.
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
BatchAnnotateImages
|
maheshgurav/google-cloud-python
| 2 |
python
|
def BatchAnnotateImages(self, request, context):
'\n '
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)
|
def BatchAnnotateImages(self, request, context):
'\n '
context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)<|docstring|>Run image detection and annotation for a batch of images.<|endoftext|>
|
28c1177cafbf93b16b8fb11a735ddc9c2b716213068ec5907cc4a138da2dc75d
|
def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
'Run image detection and annotation for a batch of images.\n '
raise NotImplementedError()
|
Run image detection and annotation for a batch of images.
|
vision/google/cloud/vision_v1p1beta1/proto/image_annotator_pb2.py
|
BatchAnnotateImages
|
maheshgurav/google-cloud-python
| 2 |
python
|
def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
'\n '
raise NotImplementedError()
|
def BatchAnnotateImages(self, request, timeout, metadata=None, with_call=False, protocol_options=None):
'\n '
raise NotImplementedError()<|docstring|>Run image detection and annotation for a batch of images.<|endoftext|>
|
0a6bd40143cf35e10584ddbc9dd4dfc514dd7cbb0320dac4de8b715ba26a52e1
|
def group_policies_gen(flat_policies, config):
'Filter policies using the following steps:\n 1. Apply prioritization among the policies that are sharing the same policy type and resource type\n 2. Remove redundant policies that may applicable across different types of resource\n 3. Filter policies based on type and return\n :param flat_policies: list of flat policies\n :return: Filtered policies\n '
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]['type']]
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [(x if isinstance(x, list) else [x]) for x in attrs]
attributes = [(list_flatten(x) if isinstance(x, list) else x) for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
prioritized_policy = aggregated_policies[key][0]
if (list(prioritized_policy.keys())[0] not in policy_name):
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies
|
Filter policies using the following steps:
1. Apply prioritization among the policies that are sharing the same policy type and resource type
2. Remove redundant policies that may applicable across different types of resource
3. Filter policies based on type and return
:param flat_policies: list of flat policies
:return: Filtered policies
|
osdf/adapters/policy/utils.py
|
group_policies_gen
|
onap/optf-osdf
| 3 |
python
|
def group_policies_gen(flat_policies, config):
'Filter policies using the following steps:\n 1. Apply prioritization among the policies that are sharing the same policy type and resource type\n 2. Remove redundant policies that may applicable across different types of resource\n 3. Filter policies based on type and return\n :param flat_policies: list of flat policies\n :return: Filtered policies\n '
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]['type']]
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [(x if isinstance(x, list) else [x]) for x in attrs]
attributes = [(list_flatten(x) if isinstance(x, list) else x) for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
prioritized_policy = aggregated_policies[key][0]
if (list(prioritized_policy.keys())[0] not in policy_name):
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies
|
def group_policies_gen(flat_policies, config):
'Filter policies using the following steps:\n 1. Apply prioritization among the policies that are sharing the same policy type and resource type\n 2. Remove redundant policies that may applicable across different types of resource\n 3. Filter policies based on type and return\n :param flat_policies: list of flat policies\n :return: Filtered policies\n '
filtered_policies = defaultdict(list)
policy_name = []
policies = [x for x in flat_policies if x[list(x.keys())[0]]['type']]
priority = config.get('policy_info', {}).get('prioritization_attributes', {})
aggregated_policies = dict()
for plc in policies:
attrs = [dot_notation(plc[list(plc.keys())[0]], dot_path) for key in priority.keys() for dot_path in priority[key]]
attrs_list = [(x if isinstance(x, list) else [x]) for x in attrs]
attributes = [(list_flatten(x) if isinstance(x, list) else x) for x in attrs_list]
for y in itertools.product(*attributes):
aggregated_policies.setdefault(y, [])
aggregated_policies[y].append(plc)
for key in aggregated_policies.keys():
prioritized_policy = aggregated_policies[key][0]
if (list(prioritized_policy.keys())[0] not in policy_name):
filtered_policies[prioritized_policy[list(prioritized_policy.keys())[0]]['type']].append(prioritized_policy)
policy_name.append(list(prioritized_policy.keys())[0])
return filtered_policies<|docstring|>Filter policies using the following steps:
1. Apply prioritization among the policies that are sharing the same policy type and resource type
2. Remove redundant policies that may applicable across different types of resource
3. Filter policies based on type and return
:param flat_policies: list of flat policies
:return: Filtered policies<|endoftext|>
|
062d19282643ea28b229b8ee01f69b653f3f0df8a5fa9f5a87da2ab7e4b90f85
|
def policy_name_as_regex(policy_name):
'Get the correct policy name as a regex\n (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml\n So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)\n :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy\n :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*\n '
p = policy_name.partition('.')
return ((((p[0] + p[1]) + '.*') + p[2]) + '.*')
|
Get the correct policy name as a regex
(e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml
So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)
:param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
:return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*
|
osdf/adapters/policy/utils.py
|
policy_name_as_regex
|
onap/optf-osdf
| 3 |
python
|
def policy_name_as_regex(policy_name):
'Get the correct policy name as a regex\n (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml\n So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)\n :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy\n :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*\n '
p = policy_name.partition('.')
return ((((p[0] + p[1]) + '.*') + p[2]) + '.*')
|
def policy_name_as_regex(policy_name):
'Get the correct policy name as a regex\n (e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml\n So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)\n :param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy\n :return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*\n '
p = policy_name.partition('.')
return ((((p[0] + p[1]) + '.*') + p[2]) + '.*')<|docstring|>Get the correct policy name as a regex
(e.g. OOF_HAS_vCPE.cloudAttributePolicy ends up in policy as OOF_HAS_vCPE.Config_MS_cloudAttributePolicy.1.xml
So, for now, we query it as OOF_HAS_vCPE..*aicAttributePolicy.*)
:param policy_name: Example: OOF_HAS_vCPE.aicAttributePolicy
:return: regexp for policy: Example: OOF_HAS_vCPE..*aicAttributePolicy.*<|endoftext|>
|
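A quick, self-contained run of the policy_name_as_regex record above (illustrative, not part of the row), producing the regex described in its docstring.

def policy_name_as_regex(policy_name):
    # e.g. OOF_HAS_vCPE.aicAttributePolicy -> OOF_HAS_vCPE..*aicAttributePolicy.*
    p = policy_name.partition('.')
    return p[0] + p[1] + '.*' + p[2] + '.*'

print(policy_name_as_regex('OOF_HAS_vCPE.aicAttributePolicy'))
# OOF_HAS_vCPE..*aicAttributePolicy.*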
bdf5bed08bce51cf67b9e5afdadec72394e895e47f85cf2789da6ec905b351bd
|
def retrieve_node(req_json, reference):
'\n Get the child node(s) from the dot-notation [reference] and parent [req_json].\n For placement and other requests, there are encoded JSONs inside the request or policy,\n so we need to expand it and then do a search over the parent plus expanded JSON.\n '
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return (list_flatten(info) if isinstance(info, list) else info)
|
Get the child node(s) from the dot-notation [reference] and parent [req_json].
For placement and other requests, there are encoded JSONs inside the request or policy,
so we need to expand it and then do a search over the parent plus expanded JSON.
|
osdf/adapters/policy/utils.py
|
retrieve_node
|
onap/optf-osdf
| 3 |
python
|
def retrieve_node(req_json, reference):
'\n Get the child node(s) from the dot-notation [reference] and parent [req_json].\n For placement and other requests, there are encoded JSONs inside the request or policy,\n so we need to expand it and then do a search over the parent plus expanded JSON.\n '
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return (list_flatten(info) if isinstance(info, list) else info)
|
def retrieve_node(req_json, reference):
'\n Get the child node(s) from the dot-notation [reference] and parent [req_json].\n For placement and other requests, there are encoded JSONs inside the request or policy,\n so we need to expand it and then do a search over the parent plus expanded JSON.\n '
req_json_copy = copy.deepcopy(req_json)
info = dot_notation(req_json_copy, reference)
return (list_flatten(info) if isinstance(info, list) else info)<|docstring|>Get the child node(s) from the dot-notation [reference] and parent [req_json].
For placement and other requests, there are encoded JSONs inside the request or policy,
so we need to expand it and then do a search over the parent plus expanded JSON.<|endoftext|>
|
452e00bf3fad0eed21edb9818f1d1abb74fd449c0b15ae7f4948e630fca94431
|
def reroot(root: expression.Expression, source_path: path.Path) -> expression.Expression:
'Reroot to a new path, maintaining a input proto index.\n\n Similar to root.get_descendant_or_error(source_path): however, this\n method retains the ability to get a map to the original index.\n\n Args:\n root: the original root.\n source_path: the path to the new root.\n\n Returns:\n the new root.\n '
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
|
Reroot to a new path, maintaining a input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.
|
struct2tensor/expression_impl/reroot.py
|
reroot
|
rtg0795/struct2tensor
| 30 |
python
|
def reroot(root: expression.Expression, source_path: path.Path) -> expression.Expression:
'Reroot to a new path, maintaining a input proto index.\n\n Similar to root.get_descendant_or_error(source_path): however, this\n method retains the ability to get a map to the original index.\n\n Args:\n root: the original root.\n source_path: the path to the new root.\n\n Returns:\n the new root.\n '
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root
|
def reroot(root: expression.Expression, source_path: path.Path) -> expression.Expression:
'Reroot to a new path, maintaining a input proto index.\n\n Similar to root.get_descendant_or_error(source_path): however, this\n method retains the ability to get a map to the original index.\n\n Args:\n root: the original root.\n source_path: the path to the new root.\n\n Returns:\n the new root.\n '
new_root = root
for step in source_path.field_list:
new_root = _RerootExpression(new_root, step)
return new_root<|docstring|>Reroot to a new path, maintaining a input proto index.
Similar to root.get_descendant_or_error(source_path): however, this
method retains the ability to get a map to the original index.
Args:
root: the original root.
source_path: the path to the new root.
Returns:
the new root.<|endoftext|>
|
60cc06c617833833ea0b4eb9eb5ed3abfaf66e6fe3d72959027696478e3cc3fb
|
def __init__(self, root: expression.Expression):
'Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n '
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root
|
Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.
|
struct2tensor/expression_impl/reroot.py
|
__init__
|
rtg0795/struct2tensor
| 30 |
python
|
def __init__(self, root: expression.Expression):
'Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n '
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root
|
def __init__(self, root: expression.Expression):
'Constructor for proto index expression.\n\n Args:\n root: an expression that must return a RootNodeTensor.\n '
super().__init__(is_repeated=False, my_type=tf.int64)
self._root = root<|docstring|>Constructor for proto index expression.
Args:
root: an expression that must return a RootNodeTensor.<|endoftext|>
|
ffa937f5db47b155553aeaea89fc591426c962d98036f068aeac9dd960177503
|
def _prediction_loop(self, dataloader: DataLoader, description: str, task_name: str, mode: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by `evaluate()` and `predict()`.\n Works both with or without labels.\n '
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.prediction_loss_only)
model = self.model
if (self.args.n_gpu > 1):
model = torch.nn.DataParallel(model)
else:
model = self.model
batch_size = dataloader.batch_size
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', self.num_examples(dataloader))
logger.info(' Batch size = %d', batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(((inputs.get(k) is not None) for k in ['labels', 'lm_labels', 'masked_lm_labels']))
for (k, v) in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
(step_eval_loss, logits) = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if (not prediction_loss_only):
if (preds is None):
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if (inputs.get('labels') is not None):
if (label_ids is None):
label_ids = inputs['labels'].detach()
else:
label_ids = torch.cat((label_ids, inputs['labels'].detach()), dim=0)
if (self.args.local_rank != (- 1)):
if (preds is not None):
preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
if (label_ids is not None):
label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
if (preds is not None):
preds = xm.mesh_reduce('eval_preds', preds, torch.cat)
if (label_ids is not None):
label_ids = xm.mesh_reduce('eval_label_ids', label_ids, torch.cat)
if (preds is not None):
preds = preds.cpu().numpy()
if (label_ids is not None):
label_ids = label_ids.cpu().numpy()
if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if (len(eval_losses) > 0):
metrics[f'{task_name}_{mode}_loss'] = np.mean(eval_losses)
for key in list(metrics.keys()):
if (not key.startswith(f'{task_name}_{mode}_')):
metrics[f'{task_name}_{mode}_{key}'] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
|
Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.
|
src/mtl_trainer.py
|
_prediction_loop
|
Daupler/CA-MTL
| 0 |
python
|
def _prediction_loop(self, dataloader: DataLoader, description: str, task_name: str, mode: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by `evaluate()` and `predict()`.\n Works both with or without labels.\n '
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.prediction_loss_only)
model = self.model
if (self.args.n_gpu > 1):
model = torch.nn.DataParallel(model)
else:
model = self.model
batch_size = dataloader.batch_size
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', self.num_examples(dataloader))
logger.info(' Batch size = %d', batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(((inputs.get(k) is not None) for k in ['labels', 'lm_labels', 'masked_lm_labels']))
for (k, v) in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
(step_eval_loss, logits) = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if (not prediction_loss_only):
if (preds is None):
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if (inputs.get('labels') is not None):
if (label_ids is None):
label_ids = inputs['labels'].detach()
else:
label_ids = torch.cat((label_ids, inputs['labels'].detach()), dim=0)
if (self.args.local_rank != (- 1)):
if (preds is not None):
preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
if (label_ids is not None):
label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
if (preds is not None):
preds = xm.mesh_reduce('eval_preds', preds, torch.cat)
if (label_ids is not None):
label_ids = xm.mesh_reduce('eval_label_ids', label_ids, torch.cat)
if (preds is not None):
preds = preds.cpu().numpy()
if (label_ids is not None):
label_ids = label_ids.cpu().numpy()
if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if (len(eval_losses) > 0):
metrics[f'{task_name}_{mode}_loss'] = np.mean(eval_losses)
for key in list(metrics.keys()):
if (not key.startswith(f'{task_name}_{mode}_')):
metrics[f'{task_name}_{mode}_{key}'] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
|
def _prediction_loop(self, dataloader: DataLoader, description: str, task_name: str, mode: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
'\n Prediction/evaluation loop, shared by `evaluate()` and `predict()`.\n Works both with or without labels.\n '
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.prediction_loss_only)
model = self.model
if (self.args.n_gpu > 1):
model = torch.nn.DataParallel(model)
else:
model = self.model
batch_size = dataloader.batch_size
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', self.num_examples(dataloader))
logger.info(' Batch size = %d', batch_size)
eval_losses: List[float] = []
preds: torch.Tensor = None
label_ids: torch.Tensor = None
model.eval()
if is_tpu_available():
dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
for inputs in tqdm(dataloader, desc=description):
has_labels = any(((inputs.get(k) is not None) for k in ['labels', 'lm_labels', 'masked_lm_labels']))
for (k, v) in inputs.items():
inputs[k] = v.to(self.args.device)
with torch.no_grad():
outputs = model(**inputs)
if has_labels:
(step_eval_loss, logits) = outputs[:2]
eval_losses += [step_eval_loss.mean().item()]
else:
logits = outputs[0]
if (not prediction_loss_only):
if (preds is None):
preds = logits.detach()
else:
preds = torch.cat((preds, logits.detach()), dim=0)
if (inputs.get('labels') is not None):
if (label_ids is None):
label_ids = inputs['labels'].detach()
else:
label_ids = torch.cat((label_ids, inputs['labels'].detach()), dim=0)
if (self.args.local_rank != (- 1)):
if (preds is not None):
preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
if (label_ids is not None):
label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
elif is_tpu_available():
if (preds is not None):
preds = xm.mesh_reduce('eval_preds', preds, torch.cat)
if (label_ids is not None):
label_ids = xm.mesh_reduce('eval_label_ids', label_ids, torch.cat)
if (preds is not None):
preds = preds.cpu().numpy()
if (label_ids is not None):
label_ids = label_ids.cpu().numpy()
if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
else:
metrics = {}
if (len(eval_losses) > 0):
metrics[f'{task_name}_{mode}_loss'] = np.mean(eval_losses)
for key in list(metrics.keys()):
if (not key.startswith(f'{task_name}_{mode}_')):
metrics[f'{task_name}_{mode}_{key}'] = metrics.pop(key)
return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)<|docstring|>Prediction/evaluation loop, shared by `evaluate()` and `predict()`.
Works both with or without labels.<|endoftext|>
|
8d6780803731c1dfc1d2514dceff663cab2c40b380de5fe317f37d08577f0374
|
def fold_split(self, random_seed=None):
'\n Splitting the folds.\n\n Args:\n random_seed: Random seed for reproducibility\n\n Returns:\n tensor containing indices for folds, where dim=0 is the fold number\n\n '
if (random_seed is not None):
torch.manual_seed(random_seed)
fold_idx = torch.randperm(self.dataset.__len__())
fold_idx = fold_idx[:self.folded_size].view((- 1), self.fold_size)
return fold_idx
|
Splitting the folds.
Args:
random_seed: Random seed for reproducibility
Returns:
tensor containing indices for folds, where dim=0 is the fold number
|
pymatch/utils/KFold.py
|
fold_split
|
raharth/PyMatch
| 10 |
python
|
def fold_split(self, random_seed=None):
'\n Splitting the folds.\n\n Args:\n random_seed: Random seed for reproducibility\n\n Returns:\n tensor containing indices for folds, where dim=0 is the fold number\n\n '
if (random_seed is not None):
torch.manual_seed(random_seed)
fold_idx = torch.randperm(self.dataset.__len__())
fold_idx = fold_idx[:self.folded_size].view((- 1), self.fold_size)
return fold_idx
|
def fold_split(self, random_seed=None):
'\n Splitting the folds.\n\n Args:\n random_seed: Random seed for reproducibility\n\n Returns:\n tensor containing indices for folds, where dim=0 is the fold number\n\n '
if (random_seed is not None):
torch.manual_seed(random_seed)
fold_idx = torch.randperm(self.dataset.__len__())
fold_idx = fold_idx[:self.folded_size].view((- 1), self.fold_size)
return fold_idx<|docstring|>Splitting the folds.
Args:
random_seed: Random seed for reproducibility
Returns:
tensor containing indices for folds, where dim=0 is the fold number<|endoftext|>
|
7cd01ceaeeec303ca22d300b40f1d155861871c9b36368f9fb7c58bb4f4d5d86
|
def fold_loaders(self, fold=(- 1)):
'\n Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of\n the original data set.\n\n Args:\n fold: fold number to return\n\n Returns:\n (train data loader, test data loader)\n\n '
if (fold == (- 1)):
fold = self.fold
test_fold_idx = self.fold_idx[fold]
train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if (i != fold)]].view((- 1))
train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
test_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))
self.fold = ((self.fold + 1) % self.n_fold)
return (train_loader, test_loader)
|
Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of
the original data set.
Args:
fold: fold number to return
Returns:
(train data loader, test data loader)
|
pymatch/utils/KFold.py
|
fold_loaders
|
raharth/PyMatch
| 10 |
python
|
def fold_loaders(self, fold=(- 1)):
'\n Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of\n the original data set.\n\n Args:\n fold: fold number to return\n\n Returns:\n (train data loader, test data loader)\n\n '
if (fold == (- 1)):
fold = self.fold
test_fold_idx = self.fold_idx[fold]
train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if (i != fold)]].view((- 1))
train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
test_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))
self.fold = ((self.fold + 1) % self.n_fold)
return (train_loader, test_loader)
|
def fold_loaders(self, fold=(- 1)):
'\n Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of\n the original data set.\n\n Args:\n fold: fold number to return\n\n Returns:\n (train data loader, test data loader)\n\n '
if (fold == (- 1)):
fold = self.fold
test_fold_idx = self.fold_idx[fold]
train_fold_idx = self.fold_idx[[i for i in range(self.n_fold) if (i != fold)]].view((- 1))
train_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(train_fold_idx))
test_loader = torch.utils.data.DataLoader(self.dataset, batch_size=self.batch_size, num_workers=self.num_workers, pin_memory=self.pin_memory, sampler=torch.utils.data.SubsetRandomSampler(test_fold_idx))
self.fold = ((self.fold + 1) % self.n_fold)
return (train_loader, test_loader)<|docstring|>Loading a specific fold as train and test data loader. If no fold number is provided it returns the next fold. It returns a randomly sampled subset of
the original data set.
Args:
fold: fold number to return
Returns:
(train data loader, test data loader)<|endoftext|>
|
2f860cad73f0910b2ad7d19c85cb15a4a0cfc03df20516a5404f1555108798fe
|
def __init__(self, mnemonic, numberOfChannels=4, numberOfRois=32, pv=None, dxpType='mca', responseTimeout=15, output='out'):
' Constructor\n responseTimeout : how much time to wait dxp answer\n '
super().__init__(mnemonic, NUMPOINTS, output, dxpType)
self.acquiring = False
self.rois = numberOfRois
|
Constructor
responseTimeout : how much time to wait dxp answer
|
py4syn/epics/DxpFakeClass.py
|
__init__
|
gabrielpreviato/py4syn
| 12 |
python
|
def __init__(self, mnemonic, numberOfChannels=4, numberOfRois=32, pv=None, dxpType='mca', responseTimeout=15, output='out'):
' Constructor\n responseTimeout : how much time to wait dxp answer\n '
super().__init__(mnemonic, NUMPOINTS, output, dxpType)
self.acquiring = False
self.rois = numberOfRois
|
def __init__(self, mnemonic, numberOfChannels=4, numberOfRois=32, pv=None, dxpType='mca', responseTimeout=15, output='out'):
' Constructor\n responseTimeout : how much time to wait dxp answer\n '
super().__init__(mnemonic, NUMPOINTS, output, dxpType)
self.acquiring = False
self.rois = numberOfRois<|docstring|>Constructor
responseTimeout : how much time to wait dxp answer<|endoftext|>
|
36079239ded266b6ee2793a61ddf976dfa3c42a7d9c03f900c2168c48f14f188
|
def statusChange(self, value, **kw):
'\n Helper callback used to wait for the end of the acquisition.\n '
pass
|
Helper callback used to wait for the end of the acquisition.
|
py4syn/epics/DxpFakeClass.py
|
statusChange
|
gabrielpreviato/py4syn
| 12 |
python
|
def statusChange(self, value, **kw):
'\n \n '
pass
|
def statusChange(self, value, **kw):
'\n \n '
pass<|docstring|>Helper callback used to wait for the end of the acquisition.<|endoftext|>
|
33e6729f22113aa0c5351c0bb328a1d685eb81c3ac13305e250dbd84ef44cfe0
|
def setCountTime(self, time):
'\n Method to set the count time of a scaler device.\n\n Parameters\n ----------\n time : `float`\n Count time to set to scaler device .\n\n Returns\n -------\n out : None\n '
pass
|
Method to set the count time of a scaler device.
Parameters
----------
time : `float`
Count time to set to scaler device .
Returns
-------
out : None
|
py4syn/epics/DxpFakeClass.py
|
setCountTime
|
gabrielpreviato/py4syn
| 12 |
python
|
def setCountTime(self, time):
'\n Method to set the count time of a scaler device.\n\n Parameters\n ----------\n time : `float`\n Count time to set to scaler device .\n\n Returns\n -------\n out : None\n '
pass
|
def setCountTime(self, time):
'\n Method to set the count time of a scaler device.\n\n Parameters\n ----------\n time : `float`\n Count time to set to scaler device .\n\n Returns\n -------\n out : None\n '
pass<|docstring|>Method to set the count time of a scaler device.
Parameters
----------
time : `float`
Count time to set to scaler device .
Returns
-------
out : None<|endoftext|>
|
213bfc08499a961ce84bf0fb42a6001aea6abbebbefa7052ac2d92e72a80fc7c
|
def getValueChannel(self, **kwargs):
'Return intensity\n channel is on format mcaC.Rr, where C is the channel and\n r is the ROI'
channel = kwargs['channel']
c = (int(channel[CHANNELPOSITION]) - 1)
if (len(channel) > ROIPOSITION):
return np.random.rand()
else:
self.saveSpectrum(c, **kwargs)
return 1.0
|
Return intensity
channel is on format mcaC.Rr, where C is the channel and
r is the ROI
|
py4syn/epics/DxpFakeClass.py
|
getValueChannel
|
gabrielpreviato/py4syn
| 12 |
python
|
def getValueChannel(self, **kwargs):
'Return intensity\n channel is on format mcaC.Rr, where C is the channel and\n r is the ROI'
channel = kwargs['channel']
c = (int(channel[CHANNELPOSITION]) - 1)
if (len(channel) > ROIPOSITION):
return np.random.rand()
else:
self.saveSpectrum(c, **kwargs)
return 1.0
|
def getValueChannel(self, **kwargs):
'Return intensity\n channel is on format mcaC.Rr, where C is the channel and\n r is the ROI'
channel = kwargs['channel']
c = (int(channel[CHANNELPOSITION]) - 1)
if (len(channel) > ROIPOSITION):
return np.random.rand()
else:
self.saveSpectrum(c, **kwargs)
return 1.0<|docstring|>Return intensity
channel is on format mcaC.Rr, where C is the channel and
r is the ROI<|endoftext|>
|
ddf7b05da17d94bb255653ea9a9f215ef063a2d7fff292247340fd369b9052c2
|
def wait(self):
'\n Blocks until the acquisition completes.\n '
pass
|
Blocks until the acquisition completes.
|
py4syn/epics/DxpFakeClass.py
|
wait
|
gabrielpreviato/py4syn
| 12 |
python
|
def wait(self):
'\n \n '
pass
|
def wait(self):
'\n \n '
pass<|docstring|>Blocks until the acquisition completes.<|endoftext|>
|
8042b99321e5532535d4fc32314987b47c4b3a7d820da843c28475be105e5ea5
|
def canMonitor(self):
' Returns false indcating Dxp cannot be use as a counter monitor'
return False
|
Returns false indcating Dxp cannot be use as a counter monitor
|
py4syn/epics/DxpFakeClass.py
|
canMonitor
|
gabrielpreviato/py4syn
| 12 |
python
|
def canMonitor(self):
' '
return False
|
def canMonitor(self):
' '
return False<|docstring|>Returns false indcating Dxp cannot be use as a counter monitor<|endoftext|>
|
248387e3b85e17985a1f7493ff7fef9ae680edf64a694b2c0ee395e1b11c4046
|
def canStopCount(self):
'\n Returns true indicating that Dxp has a stop command.\n '
return True
|
Returns true indicating that Dxp has a stop command.
|
py4syn/epics/DxpFakeClass.py
|
canStopCount
|
gabrielpreviato/py4syn
| 12 |
python
|
def canStopCount(self):
'\n \n '
return True
|
def canStopCount(self):
'\n \n '
return True<|docstring|>Returns true indicating that Dxp has a stop command.<|endoftext|>
|
1ba1be472c1cc572f99d5733ce86ed30cb277172da5f0ac497592484c8b14596
|
def getValue(self, **kwargs):
'\n This is a dummy method that always returns zero, which is part of the\n :class:`py4syn.epics.ICountable` interface. Dxp does not return\n a value while scanning. Instead, it stores a mca file with result .\n '
if kwargs:
return self.getValueChannel(**kwargs)
return self.getValueChannel()
|
This is a dummy method that always returns zero, which is part of the
:class:`py4syn.epics.ICountable` interface. Dxp does not return
a value while scanning. Instead, it stores a mca file with result .
|
py4syn/epics/DxpFakeClass.py
|
getValue
|
gabrielpreviato/py4syn
| 12 |
python
|
def getValue(self, **kwargs):
'\n This is a dummy method that always returns zero, which is part of the\n :class:`py4syn.epics.ICountable` interface. Dxp does not return\n a value while scanning. Instead, it stores a mca file with result .\n '
if kwargs:
return self.getValueChannel(**kwargs)
return self.getValueChannel()
|
def getValue(self, **kwargs):
'\n This is a dummy method that always returns zero, which is part of the\n :class:`py4syn.epics.ICountable` interface. Dxp does not return\n a value while scanning. Instead, it stores a mca file with result .\n '
if kwargs:
return self.getValueChannel(**kwargs)
return self.getValueChannel()<|docstring|>This is a dummy method that always returns zero, which is part of the
:class:`py4syn.epics.ICountable` interface. Dxp does not return
a value while scanning. Instead, it stores a mca file with result .<|endoftext|>
|
da3b1411332c1f386f993425f20da0f19dec6f29582fd6dffb7394ea9f5a2d10
|
def setPresetValue(self, channel, val):
'Dummy method'
pass
|
Dummy method
|
py4syn/epics/DxpFakeClass.py
|
setPresetValue
|
gabrielpreviato/py4syn
| 12 |
python
|
def setPresetValue(self, channel, val):
pass
|
def setPresetValue(self, channel, val):
pass<|docstring|>Dummy method<|endoftext|>
|
411bce91c93d90a2d37f544e8465bfad6911eb21ed67377ad521617d65c6d9aa
|
def startCollectImage(self, rows=0, cols=0):
'Start to collect an image\n When collect an image, the points will be saved on a hdf file'
super().startCollectImage('int32', rows, cols)
|
Start to collect an image
When collect an image, the points will be saved on a hdf file
|
py4syn/epics/DxpFakeClass.py
|
startCollectImage
|
gabrielpreviato/py4syn
| 12 |
python
|
def startCollectImage(self, rows=0, cols=0):
'Start to collect an image\n When collect an image, the points will be saved on a hdf file'
super().startCollectImage('int32', rows, cols)
|
def startCollectImage(self, rows=0, cols=0):
'Start to collect an image\n When collect an image, the points will be saved on a hdf file'
super().startCollectImage('int32', rows, cols)<|docstring|>Start to collect an image
When collect an image, the points will be saved on a hdf file<|endoftext|>
|
85255cc15290a9c0a64016787afdf19a2db3d83664171abf0328b54548d30123
|
def format_cfg(cfg):
'Format experiment config for friendly display'
def list2str(cfg):
for (key, value) in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if ((len(value) == 0) or isinstance(value[0], (int, float))):
cfg[key] = str(value)
else:
for (i, item) in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split('\n')
json_str = [re.sub('(\\"|(!\\],$)|\\s$)', '', line) for line in json_str]
cfg_str = '\n'.join([line.rstrip() for line in json_str if line.strip()])
return cfg_str
|
Format experiment config for friendly display
|
up/utils/general/cfg_helper.py
|
format_cfg
|
ModelTC/EOD
| 196 |
python
|
def format_cfg(cfg):
def list2str(cfg):
for (key, value) in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if ((len(value) == 0) or isinstance(value[0], (int, float))):
cfg[key] = str(value)
else:
for (i, item) in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split('\n')
json_str = [re.sub('(\\"|(!\\],$)|\\s$)', '', line) for line in json_str]
cfg_str = '\n'.join([line.rstrip() for line in json_str if line.strip()])
return cfg_str
|
def format_cfg(cfg):
def list2str(cfg):
for (key, value) in cfg.items():
if isinstance(value, dict):
cfg[key] = list2str(value)
elif isinstance(value, list):
if ((len(value) == 0) or isinstance(value[0], (int, float))):
cfg[key] = str(value)
else:
for (i, item) in enumerate(value):
if isinstance(item, dict):
value[i] = list2str(item)
cfg[key] = value
return cfg
cfg = list2str(copy.deepcopy(cfg))
json_str = json.dumps(cfg, indent=2, ensure_ascii=False).split('\n')
json_str = [re.sub('(\\"|(!\\],$)|\\s$)', '', line) for line in json_str]
cfg_str = '\n'.join([line.rstrip() for line in json_str if line.strip()])
return cfg_str<|docstring|>Format experiment config for friendly display<|endoftext|>
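A minimal usage sketch for the format_cfg record above (not part of the dataset record): it assumes the function and its copy/json/re imports are in scope, and the example config is invented for illustration.

import copy, json, re

cfg = {'net': {'depth': 50, 'strides': [4, 8, 16, 32]},
       'data': [{'type': 'train'}, {'type': 'val'}]}
print(format_cfg(cfg))
# Numeric lists such as 'strides' are collapsed to single-line strings and the
# surrounding JSON quotes are stripped, giving a compact, readable dump.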
|
39261bfd99da9ab2c70cb93b4e0a55465e722a2e06ab70e610691e0bb0429610
|
def try_decode(val):
'bool, int, float, or str'
if (val.upper() == 'FALSE'):
return False
elif (val.upper() == 'TRUE'):
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val
|
bool, int, float, or str
|
up/utils/general/cfg_helper.py
|
try_decode
|
ModelTC/EOD
| 196 |
python
|
def try_decode(val):
if (val.upper() == 'FALSE'):
return False
elif (val.upper() == 'TRUE'):
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val
|
def try_decode(val):
if (val.upper() == 'FALSE'):
return False
elif (val.upper() == 'TRUE'):
return True
if val.isdigit():
return int(val)
if is_number(val):
return float(val)
return val<|docstring|>bool, int, float, or str<|endoftext|>
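A minimal usage sketch for try_decode (not part of the dataset record). The helper is_number is not shown in the record, so this stand-in is an assumption about its behaviour.

def is_number(val):
    # hypothetical stand-in: true if the string parses as a float
    try:
        float(val)
        return True
    except ValueError:
        return False

assert try_decode('TRUE') is True        # case-insensitive booleans
assert try_decode('false') is False
assert try_decode('42') == 42            # pure digits -> int
assert try_decode('3.14') == 3.14        # other numerics -> float
assert try_decode('hello') == 'hello'    # everything else stays a str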
|
8bf722e453297e58b39ab6a2f0dc0fdc915a8115512a20eb1d93a126811c079d
|
def daml_compile(name, srcs, version=_default_project_version, target=None, **kwargs):
'Build a DAML project, with a generated daml.yaml.'
if (len(srcs) == 0):
fail("daml_compile: Expected `srcs' to be non-empty.")
daml_yaml = (name + '.yaml')
_daml_configure(name=(name + '.configure'), project_name=name, project_version=version, daml_yaml=daml_yaml, target=target, **kwargs)
_daml_build(name=(name + '.build'), daml_yaml=daml_yaml, srcs=srcs, dar_dict={}, dar=(name + '.dar'), **kwargs)
_inspect_dar(base=name)
|
Build a DAML project, with a generated daml.yaml.
|
rules_daml/daml.bzl
|
daml_compile
|
FlashSheridan/daml
| 0 |
python
|
def daml_compile(name, srcs, version=_default_project_version, target=None, **kwargs):
if (len(srcs) == 0):
fail("daml_compile: Expected `srcs' to be non-empty.")
daml_yaml = (name + '.yaml')
_daml_configure(name=(name + '.configure'), project_name=name, project_version=version, daml_yaml=daml_yaml, target=target, **kwargs)
_daml_build(name=(name + '.build'), daml_yaml=daml_yaml, srcs=srcs, dar_dict={}, dar=(name + '.dar'), **kwargs)
_inspect_dar(base=name)
|
def daml_compile(name, srcs, version=_default_project_version, target=None, **kwargs):
if (len(srcs) == 0):
fail("daml_compile: Expected `srcs' to be non-empty.")
daml_yaml = (name + '.yaml')
_daml_configure(name=(name + '.configure'), project_name=name, project_version=version, daml_yaml=daml_yaml, target=target, **kwargs)
_daml_build(name=(name + '.build'), daml_yaml=daml_yaml, srcs=srcs, dar_dict={}, dar=(name + '.dar'), **kwargs)
_inspect_dar(base=name)<|docstring|>Build a DAML project, with a generated daml.yaml.<|endoftext|>
|
2c750a17a9a5a68593e45116511157fed3fd4cd2cfd3b0f6c80c382fd8e24032
|
def daml_compile_with_dalf(name, version=_default_project_version, **kwargs):
'Build a DAML project, with a generated daml.yaml, and extract the main DALF.'
daml_compile(name=name, version=version, **kwargs)
_extract_main_dalf(name=(name + '.extract'), project_name=name, project_version=version, dar=(name + '.dar'), dalf=(name + '.dalf'))
|
Build a DAML project, with a generated daml.yaml, and extract the main DALF.
|
rules_daml/daml.bzl
|
daml_compile_with_dalf
|
FlashSheridan/daml
| 0 |
python
|
def daml_compile_with_dalf(name, version=_default_project_version, **kwargs):
daml_compile(name=name, version=version, **kwargs)
_extract_main_dalf(name=(name + '.extract'), project_name=name, project_version=version, dar=(name + '.dar'), dalf=(name + '.dalf'))
|
def daml_compile_with_dalf(name, version=_default_project_version, **kwargs):
daml_compile(name=name, version=version, **kwargs)
_extract_main_dalf(name=(name + '.extract'), project_name=name, project_version=version, dar=(name + '.dar'), dalf=(name + '.dalf'))<|docstring|>Build a DAML project, with a generated daml.yaml, and extract the main DALF.<|endoftext|>
|
4ac30c42e0b73a1a1db7daf59fba30b14fdedf314d8b232c39bbbfd5e60664f6
|
def daml_build_test(name, project_dir, daml_config_basename='daml.yaml', daml_subdir_basename='daml', dar_dict={}, **kwargs):
'Build a DAML project and validate the resulting .dar file.'
daml_yaml = ((project_dir + '/') + daml_config_basename)
srcs = native.glob([(((project_dir + '/') + daml_subdir_basename) + '/**/*.daml')])
_daml_build(name=name, daml_yaml=daml_yaml, srcs=srcs, dar_dict=dar_dict, dar=(name + '.dar'), **kwargs)
_daml_validate_test(name=(name + '.test'), dar=(name + '.dar'))
|
Build a DAML project and validate the resulting .dar file.
|
rules_daml/daml.bzl
|
daml_build_test
|
FlashSheridan/daml
| 0 |
python
|
def daml_build_test(name, project_dir, daml_config_basename='daml.yaml', daml_subdir_basename='daml', dar_dict={}, **kwargs):
daml_yaml = ((project_dir + '/') + daml_config_basename)
srcs = native.glob([(((project_dir + '/') + daml_subdir_basename) + '/**/*.daml')])
_daml_build(name=name, daml_yaml=daml_yaml, srcs=srcs, dar_dict=dar_dict, dar=(name + '.dar'), **kwargs)
_daml_validate_test(name=(name + '.test'), dar=(name + '.dar'))
|
def daml_build_test(name, project_dir, daml_config_basename='daml.yaml', daml_subdir_basename='daml', dar_dict={}, **kwargs):
daml_yaml = ((project_dir + '/') + daml_config_basename)
srcs = native.glob([(((project_dir + '/') + daml_subdir_basename) + '/**/*.daml')])
_daml_build(name=name, daml_yaml=daml_yaml, srcs=srcs, dar_dict=dar_dict, dar=(name + '.dar'), **kwargs)
_daml_validate_test(name=(name + '.test'), dar=(name + '.dar'))<|docstring|>Build a DAML project and validate the resulting .dar file.<|endoftext|>
|
8d4eba1abf56ff012c29b05113db4429cc4da91014b0add6e4df1f11c5529f5e
|
def convert(self):
'Perform the conversion from datapackage to destination format\n '
handle = self._header()
logger.debug(self.default_values)
for (name, df) in self.package.items():
logger.debug(name)
if df.empty:
columns = [x['name'] for x in df._metadata['schema']['fields']]
df = pd.DataFrame(columns=columns)
df = df.reset_index()
if ('index' in df.columns):
df = df.drop(columns='index')
logger.debug('Number of columns: %s, %s', len(df.columns), df.columns)
if (len(df.columns) > 1):
default_value = self.default_values[name]
self._write_parameter(df, name, handle, default=default_value)
else:
self._write_set(df, name, handle)
self._footer(handle)
handle.close()
|
Perform the conversion from datapackage to destination format
|
src/otoole/preprocess/narrow_to_datafile.py
|
convert
|
chrwm/otoole
| 0 |
python
|
def convert(self):
'\n '
handle = self._header()
logger.debug(self.default_values)
for (name, df) in self.package.items():
logger.debug(name)
if df.empty:
columns = [x['name'] for x in df._metadata['schema']['fields']]
df = pd.DataFrame(columns=columns)
df = df.reset_index()
if ('index' in df.columns):
df = df.drop(columns='index')
logger.debug('Number of columns: %s, %s', len(df.columns), df.columns)
if (len(df.columns) > 1):
default_value = self.default_values[name]
self._write_parameter(df, name, handle, default=default_value)
else:
self._write_set(df, name, handle)
self._footer(handle)
handle.close()
|
def convert(self):
'\n '
handle = self._header()
logger.debug(self.default_values)
for (name, df) in self.package.items():
logger.debug(name)
if df.empty:
columns = [x['name'] for x in df._metadata['schema']['fields']]
df = pd.DataFrame(columns=columns)
df = df.reset_index()
if ('index' in df.columns):
df = df.drop(columns='index')
logger.debug('Number of columns: %s, %s', len(df.columns), df.columns)
if (len(df.columns) > 1):
default_value = self.default_values[name]
self._write_parameter(df, name, handle, default=default_value)
else:
self._write_set(df, name, handle)
self._footer(handle)
handle.close()<|docstring|>Perform the conversion from datapackage to destination format<|endoftext|>
|
c3c29b3e0b0c30c409263fa9f7eb4e961d38eb5c1a971daa72ab759765c10b09
|
@abstractmethod
def _write_parameter(self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float) -> pd.DataFrame:
'Write parameter data'
raise NotImplementedError()
|
Write parameter data
|
src/otoole/preprocess/narrow_to_datafile.py
|
_write_parameter
|
chrwm/otoole
| 0 |
python
|
@abstractmethod
def _write_parameter(self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float) -> pd.DataFrame:
raise NotImplementedError()
|
@abstractmethod
def _write_parameter(self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float) -> pd.DataFrame:
raise NotImplementedError()<|docstring|>Write parameter data<|endoftext|>
|
3061ea07aa5f3adbd772933102f916ed00101f2135a1ccf35080485944ee38aa
|
@abstractmethod
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO) -> pd.DataFrame:
'Write set data'
raise NotImplementedError()
|
Write set data
|
src/otoole/preprocess/narrow_to_datafile.py
|
_write_set
|
chrwm/otoole
| 0 |
python
|
@abstractmethod
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO) -> pd.DataFrame:
raise NotImplementedError()
|
@abstractmethod
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO) -> pd.DataFrame:
raise NotImplementedError()<|docstring|>Write set data<|endoftext|>
|
b2c852ce075b62ca80b575bdaf71fdca7bea99b026e9a6062285a8667b9039e1
|
def _write_parameter(self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float):
'Write parameter data to a csv file, omitting data which matches the default value\n\n Arguments\n ---------\n filepath : StreamIO\n df : pandas.DataFrame\n parameter_name : str\n handle: TextIO\n default : int\n '
df = self._form_parameter(df, default)
handle.write('param default {} : {} :=\n'.format(default, parameter_name))
df.to_csv(path_or_buf=handle, sep=' ', header=False, index=False)
handle.write(';\n')
|
Write parameter data to a csv file, omitting data which matches the default value
Arguments
---------
filepath : StreamIO
df : pandas.DataFrame
parameter_name : str
handle: TextIO
default : int
|
src/otoole/preprocess/narrow_to_datafile.py
|
_write_parameter
|
chrwm/otoole
| 0 |
python
|
def _write_parameter(self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float):
'Write parameter data to a csv file, omitting data which matches the default value\n\n Arguments\n ---------\n filepath : StreamIO\n df : pandas.DataFrame\n parameter_name : str\n handle: TextIO\n default : int\n '
df = self._form_parameter(df, default)
handle.write('param default {} : {} :=\n'.format(default, parameter_name))
df.to_csv(path_or_buf=handle, sep=' ', header=False, index=False)
handle.write(';\n')
|
def _write_parameter(self, df: pd.DataFrame, parameter_name: str, handle: TextIO, default: float):
'Write parameter data to a csv file, omitting data which matches the default value\n\n Arguments\n ---------\n filepath : StreamIO\n df : pandas.DataFrame\n parameter_name : str\n handle: TextIO\n default : int\n '
df = self._form_parameter(df, default)
handle.write('param default {} : {} :=\n'.format(default, parameter_name))
df.to_csv(path_or_buf=handle, sep=' ', header=False, index=False)
handle.write(';\n')<|docstring|>Write parameter data to a csv file, omitting data which matches the default value
Arguments
---------
filepath : StreamIO
df : pandas.DataFrame
parameter_name : str
handle: TextIO
default : int<|endoftext|>
|
209e49f6336df00933fde9a1756200afb546feecbb8c0def2c66a9f7e5b6f438
|
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO):
'\n\n Arguments\n ---------\n df : pandas.DataFrame\n set_name : str\n handle: TextIO\n '
handle.write('set {} :=\n'.format(set_name))
df.to_csv(path_or_buf=handle, sep=' ', header=False, index=False)
handle.write(';\n')
|
Arguments
---------
df : pandas.DataFrame
set_name : str
handle: TextIO
|
src/otoole/preprocess/narrow_to_datafile.py
|
_write_set
|
chrwm/otoole
| 0 |
python
|
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO):
'\n\n Arguments\n ---------\n df : pandas.DataFrame\n set_name : str\n handle: TextIO\n '
handle.write('set {} :=\n'.format(set_name))
df.to_csv(path_or_buf=handle, sep=' ', header=False, index=False)
handle.write(';\n')
|
def _write_set(self, df: pd.DataFrame, set_name, handle: TextIO):
'\n\n Arguments\n ---------\n df : pandas.DataFrame\n set_name : str\n handle: TextIO\n '
handle.write('set {} :=\n'.format(set_name))
df.to_csv(path_or_buf=handle, sep=' ', header=False, index=False)
handle.write(';\n')<|docstring|>Arguments
---------
df : pandas.DataFrame
set_name : str
handle: TextIO<|endoftext|>
|
bb3ed0469149fa92f7528787c3dd431cc3e882299a2057475ac9c83c81984a8c
|
def _form_parameter(self, df: pd.DataFrame, parameter_name: str, default: float) -> pd.DataFrame:
'Converts data into wide format\n\n Arguments\n ---------\n df: pd.DataFrame\n parameter_name: str\n default: float\n\n Returns\n -------\n pandas.DataFrame\n '
if (not df.empty):
names = df.columns.to_list()
if (len(names) > 2):
logger.debug('More than 2 columns for {}: {}'.format(parameter_name, names))
rows = names[0:(- 2)]
columns = names[(- 2)]
values = names[(- 1)]
logger.debug('Rows: {}; columns: {}; values: {}', rows, columns, values)
logger.debug('dtypes: {}'.format(df.dtypes))
pivot = pd.pivot_table(df, index=rows, columns=columns, values=values, fill_value=default)
elif (len(names) == 2):
logger.debug('Two columns for {}: {}'.format(parameter_name, names))
values = names[(- 1)]
rows = names[0:(- 2)]
logger.debug('Rows: {}; values: {}', rows, values)
pivot = pd.pivot_table(df, index=rows, values=values, fill_value=default)
else:
logger.debug('One column for {}: {}'.format(parameter_name, names))
pivot = df.copy()
pivot = pivot.reset_index(drop=True)
else:
logger.debug('Dataframe {} is empty'.format(parameter_name))
pivot = df.copy()
return pivot
|
Converts data into wide format
Arguments
---------
df: pd.DataFrame
parameter_name: str
default: float
Returns
-------
pandas.DataFrame
|
src/otoole/preprocess/narrow_to_datafile.py
|
_form_parameter
|
chrwm/otoole
| 0 |
python
|
def _form_parameter(self, df: pd.DataFrame, parameter_name: str, default: float) -> pd.DataFrame:
'Converts data into wide format\n\n Arguments\n ---------\n df: pd.DataFrame\n parameter_name: str\n default: float\n\n Returns\n -------\n pandas.DataFrame\n '
if (not df.empty):
names = df.columns.to_list()
if (len(names) > 2):
logger.debug('More than 2 columns for {}: {}'.format(parameter_name, names))
rows = names[0:(- 2)]
columns = names[(- 2)]
values = names[(- 1)]
logger.debug('Rows: {}; columns: {}; values: {}', rows, columns, values)
logger.debug('dtypes: {}'.format(df.dtypes))
pivot = pd.pivot_table(df, index=rows, columns=columns, values=values, fill_value=default)
elif (len(names) == 2):
logger.debug('Two columns for {}: {}'.format(parameter_name, names))
values = names[(- 1)]
rows = names[0:(- 2)]
logger.debug('Rows: {}; values: {}', rows, values)
pivot = pd.pivot_table(df, index=rows, values=values, fill_value=default)
else:
logger.debug('One column for {}: {}'.format(parameter_name, names))
pivot = df.copy()
pivot = pivot.reset_index(drop=True)
else:
logger.debug('Dataframe {} is empty'.format(parameter_name))
pivot = df.copy()
return pivot
|
def _form_parameter(self, df: pd.DataFrame, parameter_name: str, default: float) -> pd.DataFrame:
'Converts data into wide format\n\n Arguments\n ---------\n df: pd.DataFrame\n parameter_name: str\n default: float\n\n Returns\n -------\n pandas.DataFrame\n '
if (not df.empty):
names = df.columns.to_list()
if (len(names) > 2):
logger.debug('More than 2 columns for {}: {}'.format(parameter_name, names))
rows = names[0:(- 2)]
columns = names[(- 2)]
values = names[(- 1)]
logger.debug('Rows: {}; columns: {}; values: {}', rows, columns, values)
logger.debug('dtypes: {}'.format(df.dtypes))
pivot = pd.pivot_table(df, index=rows, columns=columns, values=values, fill_value=default)
elif (len(names) == 2):
logger.debug('Two columns for {}: {}'.format(parameter_name, names))
values = names[(- 1)]
rows = names[0:(- 2)]
logger.debug('Rows: {}; values: {}', rows, values)
pivot = pd.pivot_table(df, index=rows, values=values, fill_value=default)
else:
logger.debug('One column for {}: {}'.format(parameter_name, names))
pivot = df.copy()
pivot = pivot.reset_index(drop=True)
else:
logger.debug('Dataframe {} is empty'.format(parameter_name))
pivot = df.copy()
return pivot<|docstring|>Converts data into wide format
Arguments
---------
df: pd.DataFrame
parameter_name: str
default: float
Returns
-------
pandas.DataFrame<|endoftext|>
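The wide-format pivot that _form_parameter performs can be sketched standalone; this simplified illustration uses an invented three-column frame and is not the converter class itself.

import pandas as pd

df = pd.DataFrame({'REGION': ['R1', 'R1', 'R2'],
                   'YEAR': [2020, 2021, 2020],
                   'VALUE': [1.0, 2.0, 3.0]})
# With more than two columns: rows = all but the last two columns, columns =
# the penultimate column, values = the last column, gaps filled with a default.
wide = pd.pivot_table(df, index=['REGION'], columns='YEAR',
                      values='VALUE', fill_value=0)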
|
6b23e19f6a83498188e5ab1f64c5c4b5d450d30dddea586bbd8158e14d1d4c6f
|
def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001):
'Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts.'
shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1)
shifted_qs = (np.tile(q_values_input, (n_variants, 1)) + shift)
interpolated_curves = np.zeros((n_variants, len(q_values_prediction)))
for i in range(n_variants):
interpolated_curves[i] = interp_reflectivity(q_values_prediction, shifted_qs[i], corrected_reflectivity)
return (interpolated_curves, shift)
|
Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts.
|
mlreflect/curve_fitter/minimizer.py
|
q_shift_variants
|
schreiber-lab/mlreflect
| 0 |
python
|
def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001):
shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1)
shifted_qs = (np.tile(q_values_input, (n_variants, 1)) + shift)
interpolated_curves = np.zeros((n_variants, len(q_values_prediction)))
for i in range(n_variants):
interpolated_curves[i] = interp_reflectivity(q_values_prediction, shifted_qs[i], corrected_reflectivity)
return (interpolated_curves, shift)
|
def q_shift_variants(q_values_prediction, q_values_input, corrected_reflectivity, n_variants, scale=0.001):
shift = np.random.normal(loc=0, size=n_variants, scale=scale).reshape(n_variants, 1)
shifted_qs = (np.tile(q_values_input, (n_variants, 1)) + shift)
interpolated_curves = np.zeros((n_variants, len(q_values_prediction)))
for i in range(n_variants):
interpolated_curves[i] = interp_reflectivity(q_values_prediction, shifted_qs[i], corrected_reflectivity)
return (interpolated_curves, shift)<|docstring|>Create ``n_variants`` interpolated reflectivity curve variants with randomly distributed q shifts.<|endoftext|>
|
df7f6d0fcb3a08cf6d0128dc26b5f286bae3edf65fdf201c0812370a301e86f1
|
def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1):
'Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors.'
scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1)
scaled_curves = np.zeros((n_variants, len(corrected_reflectivity)))
for i in range(n_variants):
scaled_curves[i] = (corrected_reflectivity.copy() * scalings[i])
return (scaled_curves, scalings)
|
Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors.
|
mlreflect/curve_fitter/minimizer.py
|
curve_scaling_variants
|
schreiber-lab/mlreflect
| 0 |
python
|
def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1):
scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1)
scaled_curves = np.zeros((n_variants, len(corrected_reflectivity)))
for i in range(n_variants):
scaled_curves[i] = (corrected_reflectivity.copy() * scalings[i])
return (scaled_curves, scalings)
|
def curve_scaling_variants(corrected_reflectivity, n_variants, scale=0.1):
scalings = np.random.normal(loc=1, size=n_variants, scale=scale).reshape(n_variants, 1)
scaled_curves = np.zeros((n_variants, len(corrected_reflectivity)))
for i in range(n_variants):
scaled_curves[i] = (corrected_reflectivity.copy() * scalings[i])
return (scaled_curves, scalings)<|docstring|>Create ``n_variants`` reflectivity curve variants with randomly distributed scaling factors.<|endoftext|>
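A minimal usage sketch for curve_scaling_variants (illustrative values only; numpy assumed in scope as np).

import numpy as np

np.random.seed(0)                       # for a reproducible illustration
refl = np.array([1.0, 0.5, 0.25])
curves, scalings = curve_scaling_variants(refl, n_variants=3, scale=0.1)
# curves has shape (3, 3): each row is refl multiplied by one factor drawn
# from a normal distribution centred on 1 with standard deviation 0.1.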
|
7807491f7361509fd05e6fb724a2b07d8ae604f44bb29924de39e09fe4f496f8
|
def curve_variant_log_mse(curve, variant_curves):
'Calculate the log MSE of a curve and a :class:`ndarray` of curves'
errors = (np.log10(curve) - np.log10(variant_curves))
return np.mean((errors ** 2), axis=1)
|
Calculate the log MSE of a curve and a :class:`ndarray` of curves
|
mlreflect/curve_fitter/minimizer.py
|
curve_variant_log_mse
|
schreiber-lab/mlreflect
| 0 |
python
|
def curve_variant_log_mse(curve, variant_curves):
errors = (np.log10(curve) - np.log10(variant_curves))
return np.mean((errors ** 2), axis=1)
|
def curve_variant_log_mse(curve, variant_curves):
errors = (np.log10(curve) - np.log10(variant_curves))
return np.mean((errors ** 2), axis=1)<|docstring|>Calculate the log MSE of a curve and a :class:`ndarray` of curves<|endoftext|>
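A small, self-contained check of curve_variant_log_mse (illustrative values only; numpy assumed in scope as np).

import numpy as np

curve = np.array([1e-1, 1e-2, 1e-3])
variants = np.array([[1e-1, 1e-2, 1e-3],
                     [1e-2, 1e-3, 1e-4]])
curve_variant_log_mse(curve, variants)
# -> array([0., 1.]): the second variant is off by one decade everywhere,
#    so its mean squared log10 error is 1.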
|
53458e5131541f826420031210711069c9c8c9fc3eb047547a7c1aa983f71421
|
def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1)):
'Fits the data with a model curve with ``scipy.optimize.curve_fit`` using ``predicted_labels`` as start values.'
prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0]
start_values = np.array(prep_labels)[0]
bounds = ([(val - (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)], [(val + (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)])
fit_result = curve_fit(fitting_model(q_values, sample, output_preprocessor), q_values, np.log10(data), p0=start_values, bounds=bounds)
return output_preprocessor.restore_labels(np.atleast_2d(fit_result[0]))
|
Fits the data with a model curve with ``scipy.optimize.curve_fit`` using ``predicted_labels`` as start values.
|
mlreflect/curve_fitter/minimizer.py
|
least_log_mean_squares_fit
|
schreiber-lab/mlreflect
| 0 |
python
|
def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1)):
prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0]
start_values = np.array(prep_labels)[0]
bounds = ([(val - (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)], [(val + (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)])
fit_result = curve_fit(fitting_model(q_values, sample, output_preprocessor), q_values, np.log10(data), p0=start_values, bounds=bounds)
return output_preprocessor.restore_labels(np.atleast_2d(fit_result[0]))
|
def least_log_mean_squares_fit(q_values, data, predicted_labels, sample, output_preprocessor, fraction_bounds=(0.5, 0.5, 0.1)):
prep_labels = output_preprocessor.apply_preprocessing(predicted_labels)[0]
start_values = np.array(prep_labels)[0]
bounds = ([(val - (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)], [(val + (bound * abs(val))) for (val, bound) in zip(start_values, fraction_bounds)])
fit_result = curve_fit(fitting_model(q_values, sample, output_preprocessor), q_values, np.log10(data), p0=start_values, bounds=bounds)
return output_preprocessor.restore_labels(np.atleast_2d(fit_result[0]))<|docstring|>Fits the data with a model curve with ``scipy.optimize.curve_fit`` using ``predicted_labels`` as start values.<|endoftext|>
|
4514244894552e762c7520148fb4cb38b3750fbc5e02b65903f7e0e88aae8088
|
def log_mse_loss(prep_labels, data, generator, output_preprocessor):
'MSE loss between a reflectivity curve and a model curve generated with the given normalized labels.'
restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))
model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0]
loss = mean_squared_error(np.log10(data), np.log10(model))
return loss
|
MSE loss between a reflectivity curve and a model curve generated with the given normalized labels.
|
mlreflect/curve_fitter/minimizer.py
|
log_mse_loss
|
schreiber-lab/mlreflect
| 0 |
python
|
def log_mse_loss(prep_labels, data, generator, output_preprocessor):
restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))
model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0]
loss = mean_squared_error(np.log10(data), np.log10(model))
return loss
|
def log_mse_loss(prep_labels, data, generator, output_preprocessor):
restored_labels = output_preprocessor.restore_labels(np.atleast_2d(prep_labels))
model = generator.simulate_reflectivity(restored_labels, progress_bar=False)[0]
loss = mean_squared_error(np.log10(data), np.log10(model))
return loss<|docstring|>MSE loss between a reflectivity curve and a model curve generated with the given normalized labels.<|endoftext|>
|
43000381cbc4350cc361004a69cd451365dce77e13f724550b8eb5a697dc2e45
|
def mean_squared_error(array1, array2):
'Returns element-wise mean squared error between two arrays.'
if (len(array1) != len(array2)):
raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})')
else:
error = (np.asarray(array1) - np.asarray(array2))
return np.mean(np.atleast_2d((error ** 2)), axis=1)
|
Returns element-wise mean squared error between two arrays.
|
mlreflect/curve_fitter/minimizer.py
|
mean_squared_error
|
schreiber-lab/mlreflect
| 0 |
python
|
def mean_squared_error(array1, array2):
if (len(array1) != len(array2)):
raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})')
else:
error = (np.asarray(array1) - np.asarray(array2))
return np.mean(np.atleast_2d((error ** 2)), axis=1)
|
def mean_squared_error(array1, array2):
if (len(array1) != len(array2)):
raise ValueError(f'array1 and array2 must be of same length ({len(array1)} != {len(array2)})')
else:
error = (np.asarray(array1) - np.asarray(array2))
return np.mean(np.atleast_2d((error ** 2)), axis=1)<|docstring|>Returns element-wise mean squared error between two arrays.<|endoftext|>
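A minimal usage sketch for mean_squared_error (values invented for illustration).

import numpy as np

mean_squared_error([1.0, 2.0, 3.0], [1.0, 2.0, 4.0])
# -> array([0.333...]): the mean of the element-wise squared differences.
# Mismatched lengths raise a ValueError instead of broadcasting silently.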
|
2fa46e64284c96ff50f403feb94abfddf7ce7bf1d3e1f814273bce0bb9b75c24
|
@bp_rack.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_search.require(http_exception=403)
def lists():
'\n 货架列表\n :return:\n '
template_name = 'rack/lists.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack lists')
form = RackSearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
search_condition = [(Rack.status_delete == STATUS_DEL_NO)]
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Search Failure'), 'danger')
if (hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors):
map((lambda x: flash(x, 'danger')), form.csrf_token.errors)
else:
if (form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION):
search_condition.append((Rack.warehouse_id == form.warehouse_id.data))
if form.name.data:
search_condition.append((Rack.name == form.name.data))
if (form.op.data == OPERATION_EXPORT):
if (not permission_rack_section_export.can()):
abort(403)
column_names = Rack.__table__.columns.keys()
query_sets = get_rack_rows(*search_condition)
return excel.make_response_from_query_sets(query_sets=query_sets, column_names=column_names, file_type='csv', file_name=('%s.csv' % _('rack lists')))
if (form.op.data == OPERATION_DELETE):
if (not permission_rack_section_del.can()):
abort(403)
rack_ids = request.form.getlist('rack_id')
permitted = True
for rack_id in rack_ids:
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
permitted = False
break
if permitted:
result_total = True
for rack_id in rack_ids:
current_time = datetime.utcnow()
rack_data = {'status_delete': STATUS_DEL_OK, 'delete_time': current_time, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
result_total = (result_total and result)
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
pagination = get_rack_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
return render_template(template_name, form=form, pagination=pagination, **document_info)
|
货架列表
:return:
|
app_backend/views/rack.py
|
lists
|
zhanghe06/bearing_project
| 1 |
python
|
@bp_rack.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_search.require(http_exception=403)
def lists():
'\n 货架列表\n :return:\n '
template_name = 'rack/lists.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack lists')
form = RackSearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
search_condition = [(Rack.status_delete == STATUS_DEL_NO)]
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Search Failure'), 'danger')
if (hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors):
map((lambda x: flash(x, 'danger')), form.csrf_token.errors)
else:
if (form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION):
search_condition.append((Rack.warehouse_id == form.warehouse_id.data))
if form.name.data:
search_condition.append((Rack.name == form.name.data))
if (form.op.data == OPERATION_EXPORT):
if (not permission_rack_section_export.can()):
abort(403)
column_names = Rack.__table__.columns.keys()
query_sets = get_rack_rows(*search_condition)
return excel.make_response_from_query_sets(query_sets=query_sets, column_names=column_names, file_type='csv', file_name=('%s.csv' % _('rack lists')))
if (form.op.data == OPERATION_DELETE):
if (not permission_rack_section_del.can()):
abort(403)
rack_ids = request.form.getlist('rack_id')
permitted = True
for rack_id in rack_ids:
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
permitted = False
break
if permitted:
result_total = True
for rack_id in rack_ids:
current_time = datetime.utcnow()
rack_data = {'status_delete': STATUS_DEL_OK, 'delete_time': current_time, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
result_total = (result_total and result)
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
pagination = get_rack_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
return render_template(template_name, form=form, pagination=pagination, **document_info)
|
@bp_rack.route('/lists.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_search.require(http_exception=403)
def lists():
'\n 货架列表\n :return:\n '
template_name = 'rack/lists.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack lists')
form = RackSearchForm(request.form)
form.warehouse_id.choices = get_warehouse_choices()
search_condition = [(Rack.status_delete == STATUS_DEL_NO)]
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Search Failure'), 'danger')
if (hasattr(form, 'csrf_token') and getattr(form, 'csrf_token').errors):
map((lambda x: flash(x, 'danger')), form.csrf_token.errors)
else:
if (form.warehouse_id.data != DEFAULT_SEARCH_CHOICES_INT_OPTION):
search_condition.append((Rack.warehouse_id == form.warehouse_id.data))
if form.name.data:
search_condition.append((Rack.name == form.name.data))
if (form.op.data == OPERATION_EXPORT):
if (not permission_rack_section_export.can()):
abort(403)
column_names = Rack.__table__.columns.keys()
query_sets = get_rack_rows(*search_condition)
return excel.make_response_from_query_sets(query_sets=query_sets, column_names=column_names, file_type='csv', file_name=('%s.csv' % _('rack lists')))
if (form.op.data == OPERATION_DELETE):
if (not permission_rack_section_del.can()):
abort(403)
rack_ids = request.form.getlist('rack_id')
permitted = True
for rack_id in rack_ids:
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
flash(_('Del Failure, %(ext_msg)s', ext_msg=ext_msg), 'danger')
permitted = False
break
if permitted:
result_total = True
for rack_id in rack_ids:
current_time = datetime.utcnow()
rack_data = {'status_delete': STATUS_DEL_OK, 'delete_time': current_time, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
result_total = (result_total and result)
if result_total:
flash(_('Del Success'), 'success')
else:
flash(_('Del Failure'), 'danger')
pagination = get_rack_pagination(form.page.data, PER_PAGE_BACKEND, *search_condition)
return render_template(template_name, form=form, pagination=pagination, **document_info)<|docstring|>货架列表
:return:<|endoftext|>
|
28b26d883b49c2619e777b051610ddf248e9e2e11d22cc239363ea57e519b4b5
|
@bp_rack.route('/<int:rack_id>/info.html')
@login_required
@permission_rack_section_get.require(http_exception=403)
def info(rack_id):
'\n 货架详情\n :param rack_id:\n :return:\n '
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
abort(404)
if (rack_info.status_delete == STATUS_DEL_OK):
abort(410)
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack info')
return render_template('rack/info.html', rack_info=rack_info, **document_info)
|
货架详情
:param rack_id:
:return:
|
app_backend/views/rack.py
|
info
|
zhanghe06/bearing_project
| 1 |
python
|
@bp_rack.route('/<int:rack_id>/info.html')
@login_required
@permission_rack_section_get.require(http_exception=403)
def info(rack_id):
'\n 货架详情\n :param rack_id:\n :return:\n '
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
abort(404)
if (rack_info.status_delete == STATUS_DEL_OK):
abort(410)
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack info')
return render_template('rack/info.html', rack_info=rack_info, **document_info)
|
@bp_rack.route('/<int:rack_id>/info.html')
@login_required
@permission_rack_section_get.require(http_exception=403)
def info(rack_id):
'\n 货架详情\n :param rack_id:\n :return:\n '
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
abort(404)
if (rack_info.status_delete == STATUS_DEL_OK):
abort(410)
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack info')
return render_template('rack/info.html', rack_info=rack_info, **document_info)<|docstring|>货架详情
:param rack_id:
:return:<|endoftext|>
|
c2160a72c585d476d075353509b682c11624229baaca4a3f3c0cd4337fcbc075
|
@bp_rack.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_add.require(http_exception=403)
def add():
'\n 创建货架\n :return:\n '
template_name = 'rack/add.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack add')
form = RackAddForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='create')
if (request.method == 'GET'):
return render_template(template_name, form=form, **document_info)
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Add Failure'), 'danger')
return render_template(template_name, form=form, **document_info)
current_time = datetime.utcnow()
rack_data = {'warehouse_id': form.warehouse_id.data, 'name': form.name.data, 'create_time': current_time, 'update_time': current_time}
result = add_rack(rack_data)
if result:
flash(_('Add Success'), 'success')
return redirect((request.args.get('next') or url_for('rack.lists')))
else:
flash(_('Add Failure'), 'danger')
return render_template(template_name, form=form, **document_info)
|
创建货架
:return:
|
app_backend/views/rack.py
|
add
|
zhanghe06/bearing_project
| 1 |
python
|
@bp_rack.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_add.require(http_exception=403)
def add():
'\n 创建货架\n :return:\n '
template_name = 'rack/add.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack add')
form = RackAddForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='create')
if (request.method == 'GET'):
return render_template(template_name, form=form, **document_info)
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Add Failure'), 'danger')
return render_template(template_name, form=form, **document_info)
current_time = datetime.utcnow()
rack_data = {'warehouse_id': form.warehouse_id.data, 'name': form.name.data, 'create_time': current_time, 'update_time': current_time}
result = add_rack(rack_data)
if result:
flash(_('Add Success'), 'success')
return redirect((request.args.get('next') or url_for('rack.lists')))
else:
flash(_('Add Failure'), 'danger')
return render_template(template_name, form=form, **document_info)
|
@bp_rack.route('/add.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_add.require(http_exception=403)
def add():
'\n 创建货架\n :return:\n '
template_name = 'rack/add.html'
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack add')
form = RackAddForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='create')
if (request.method == 'GET'):
return render_template(template_name, form=form, **document_info)
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Add Failure'), 'danger')
return render_template(template_name, form=form, **document_info)
current_time = datetime.utcnow()
rack_data = {'warehouse_id': form.warehouse_id.data, 'name': form.name.data, 'create_time': current_time, 'update_time': current_time}
result = add_rack(rack_data)
if result:
flash(_('Add Success'), 'success')
return redirect((request.args.get('next') or url_for('rack.lists')))
else:
flash(_('Add Failure'), 'danger')
return render_template(template_name, form=form, **document_info)<|docstring|>创建货架
:return:<|endoftext|>
|
3ba5cc860502d45d5c35222e90d257a6916de5604450aba1b3b55d95ce22ce86
|
@bp_rack.route('/<int:rack_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_edit.require(http_exception=403)
def edit(rack_id):
'\n 货架编辑\n '
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
abort(404)
if (rack_info.status_delete == STATUS_DEL_OK):
abort(410)
template_name = 'rack/edit.html'
form = RackEditForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='update')
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack edit')
if (request.method == 'GET'):
form.warehouse_id.data = rack_info.warehouse_id
form.name.data = rack_info.name
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Edit Failure'), 'danger')
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
current_time = datetime.utcnow()
rack_data = {'warehouse_id': form.warehouse_id.data, 'name': form.name.data, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
if result:
flash(_('Edit Success'), 'success')
return redirect((request.args.get('next') or url_for('rack.lists')))
else:
flash(_('Edit Failure'), 'danger')
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
|
货架编辑
|
app_backend/views/rack.py
|
edit
|
zhanghe06/bearing_project
| 1 |
python
|
@bp_rack.route('/<int:rack_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_edit.require(http_exception=403)
def edit(rack_id):
'\n \n '
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
abort(404)
if (rack_info.status_delete == STATUS_DEL_OK):
abort(410)
template_name = 'rack/edit.html'
form = RackEditForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='update')
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack edit')
if (request.method == 'GET'):
form.warehouse_id.data = rack_info.warehouse_id
form.name.data = rack_info.name
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Edit Failure'), 'danger')
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
current_time = datetime.utcnow()
rack_data = {'warehouse_id': form.warehouse_id.data, 'name': form.name.data, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
if result:
flash(_('Edit Success'), 'success')
return redirect((request.args.get('next') or url_for('rack.lists')))
else:
flash(_('Edit Failure'), 'danger')
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
|
@bp_rack.route('/<int:rack_id>/edit.html', methods=['GET', 'POST'])
@login_required
@permission_rack_section_edit.require(http_exception=403)
def edit(rack_id):
'\n \n '
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
abort(404)
if (rack_info.status_delete == STATUS_DEL_OK):
abort(410)
template_name = 'rack/edit.html'
form = RackEditForm(request.form)
form.warehouse_id.choices = get_warehouse_choices(option_type='update')
document_info = DOCUMENT_INFO.copy()
document_info['TITLE'] = _('rack edit')
if (request.method == 'GET'):
form.warehouse_id.data = rack_info.warehouse_id
form.name.data = rack_info.name
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
if (request.method == 'POST'):
if (not form.validate_on_submit()):
flash(_('Edit Failure'), 'danger')
return render_template(template_name, rack_id=rack_id, form=form, **document_info)
current_time = datetime.utcnow()
rack_data = {'warehouse_id': form.warehouse_id.data, 'name': form.name.data, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
if result:
flash(_('Edit Success'), 'success')
return redirect((request.args.get('next') or url_for('rack.lists')))
else:
flash(_('Edit Failure'), 'danger')
return render_template(template_name, rack_id=rack_id, form=form, **document_info)<|docstring|>货架编辑<|endoftext|>
|
7bf8f0174c53cfc537a98e406ce4e40ca9a5159496160bc1195a3cba22c8877d
|
@bp_rack.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
'\n 货架删除\n :return:\n '
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
if (not permission_rack_section_del.can()):
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if (not ((request.method == 'GET') and request.is_xhr)):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_id = request.args.get('rack_id', 0, type=int)
if (not rack_id):
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if (rack_info.status_delete == STATUS_DEL_OK):
ext_msg = _('Already deleted')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
current_time = datetime.utcnow()
rack_data = {'status_delete': STATUS_DEL_OK, 'delete_time': current_time, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
if result:
ajax_success_msg['msg'] = _('Del Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Del Failure')
return jsonify(ajax_failure_msg)
|
货架删除
:return:
|
app_backend/views/rack.py
|
ajax_delete
|
zhanghe06/bearing_project
| 1 |
python
|
@bp_rack.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
'\n 货架删除\n :return:\n '
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
if (not permission_rack_section_del.can()):
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if (not ((request.method == 'GET') and request.is_xhr)):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_id = request.args.get('rack_id', 0, type=int)
if (not rack_id):
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if (rack_info.status_delete == STATUS_DEL_OK):
ext_msg = _('Already deleted')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
current_time = datetime.utcnow()
rack_data = {'status_delete': STATUS_DEL_OK, 'delete_time': current_time, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
if result:
ajax_success_msg['msg'] = _('Del Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Del Failure')
return jsonify(ajax_failure_msg)
|
@bp_rack.route('/ajax/del', methods=['GET', 'POST'])
@login_required
def ajax_delete():
'\n 货架删除\n :return:\n '
ajax_success_msg = AJAX_SUCCESS_MSG.copy()
ajax_failure_msg = AJAX_FAILURE_MSG.copy()
if (not permission_rack_section_del.can()):
ext_msg = _('Permission Denied')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if (not ((request.method == 'GET') and request.is_xhr)):
ext_msg = _('Method Not Allowed')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_id = request.args.get('rack_id', 0, type=int)
if (not rack_id):
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
rack_info = get_rack_row_by_id(rack_id)
if (not rack_info):
ext_msg = _('ID does not exist')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if (rack_info.status_delete == STATUS_DEL_OK):
ext_msg = _('Already deleted')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
if count_inventory(**{'rack_id': rack_id, 'status_delete': STATUS_DEL_NO}):
ext_msg = _('Currently In Use')
ajax_failure_msg['msg'] = _('Del Failure, %(ext_msg)s', ext_msg=ext_msg)
return jsonify(ajax_failure_msg)
current_time = datetime.utcnow()
rack_data = {'status_delete': STATUS_DEL_OK, 'delete_time': current_time, 'update_time': current_time}
result = edit_rack(rack_id, rack_data)
if result:
ajax_success_msg['msg'] = _('Del Success')
return jsonify(ajax_success_msg)
else:
ajax_failure_msg['msg'] = _('Del Failure')
return jsonify(ajax_failure_msg)<|docstring|>货架删除
:return:<|endoftext|>
|
be5992e7395bec46163f712c933a24b33bf924a8db2c1d5a9abe33ee80467c58
|
@bp_rack.route('/ajax/get_rack_choices', methods=['GET', 'POST'])
@login_required
def ajax_get_rack_choices():
'\n 货架选项\n :return:\n '
warehouse_id = request.args.get('warehouse_id', 0, type=int)
rack_choices = get_rack_choices(warehouse_id)
return jsonify(rack_choices)
|
货架选项
:return:
|
app_backend/views/rack.py
|
ajax_get_rack_choices
|
zhanghe06/bearing_project
| 1 |
python
|
@bp_rack.route('/ajax/get_rack_choices', methods=['GET', 'POST'])
@login_required
def ajax_get_rack_choices():
'\n 货架选项\n :return:\n '
warehouse_id = request.args.get('warehouse_id', 0, type=int)
rack_choices = get_rack_choices(warehouse_id)
return jsonify(rack_choices)
|
@bp_rack.route('/ajax/get_rack_choices', methods=['GET', 'POST'])
@login_required
def ajax_get_rack_choices():
'\n 货架选项\n :return:\n '
warehouse_id = request.args.get('warehouse_id', 0, type=int)
rack_choices = get_rack_choices(warehouse_id)
return jsonify(rack_choices)<|docstring|>货架选项
:return:<|endoftext|>
|
b7091bb3636ad3ac80800993c25a6133dc1247b4050d3ad5a75c7f00ecc93083
|
def channel_split_naive(r, channel_ranges):
'Slower but simpler implementation of straxen.split_channel_ranges'
results = []
for (left, right) in channel_ranges:
results.append(r[np.in1d(r['channel'], np.arange(left, (right + 1)))])
return results
|
Slower but simpler implementation of straxen.split_channel_ranges
|
tests/test_channel_split.py
|
channel_split_naive
|
zhut19/straxen
| 14 |
python
|
def channel_split_naive(r, channel_ranges):
results = []
for (left, right) in channel_ranges:
results.append(r[np.in1d(r['channel'], np.arange(left, (right + 1)))])
return results
|
def channel_split_naive(r, channel_ranges):
results = []
for (left, right) in channel_ranges:
results.append(r[np.in1d(r['channel'], np.arange(left, (right + 1)))])
return results<|docstring|>Slower but simpler implementation of straxen.split_channel_ranges<|endoftext|>
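A minimal sketch of the naive splitter with an invented structured array (numpy assumed in scope as np).

import numpy as np

r = np.zeros(6, dtype=[('channel', np.int16), ('time', np.int64)])
r['channel'] = [0, 1, 2, 5, 6, 9]
parts = channel_split_naive(r, [(0, 2), (3, 6), (7, 9)])
[len(p) for p in parts]   # [3, 2, 1]: each (left, right) range is inclusive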
|
ff2dd90e5277c03c11843c2e0b3c64f7dd14952bd8369e669f154f1d40e2516a
|
def make_sqlx(conn, schema, tables):
'Make sqlx lookup function for given tables'
table_func_map = {}
for table in tables:
ntRec = namedtuple(table, tables[table].columns.keys())
table_func_map[table] = SqlX(conn, table, schema, ntRec)
def sqlx(expr) -> SqlX:
obj = jmespath.search(expr, table_func_map)
if (not obj):
raise Exception('sqlx: Cannot find "{}"'.format(expr))
return obj
return sqlx
|
Make sqlx lookup function for given tables
|
xutil/database/base.py
|
make_sqlx
|
flarco/n1slutil
| 1 |
python
|
def make_sqlx(conn, schema, tables):
table_func_map = {}
for table in tables:
ntRec = namedtuple(table, tables[table].columns.keys())
table_func_map[table] = SqlX(conn, table, schema, ntRec)
def sqlx(expr) -> SqlX:
obj = jmespath.search(expr, table_func_map)
if (not obj):
raise Exception('sqlx: Cannot find "{}"'.format(expr))
return obj
return sqlx
|
def make_sqlx(conn, schema, tables):
table_func_map = {}
for table in tables:
ntRec = namedtuple(table, tables[table].columns.keys())
table_func_map[table] = SqlX(conn, table, schema, ntRec)
def sqlx(expr) -> SqlX:
obj = jmespath.search(expr, table_func_map)
if (not obj):
raise Exception('sqlx: Cannot find "{}"'.format(expr))
return obj
return sqlx<|docstring|>Make sqlx lookup function for given tables<|endoftext|>
|
64c5377787ab0499d3ad00c2d6abe0c0a02cfa1a71106d94f62a3b2145d1a28d
|
def get_sql_sources(sql_text, echo=False):
'Obtain the source tables of a query\n '
import sqlparse
sql_text = re.sub('as\\(', 'as (', sql_text, 0, (re.MULTILINE | re.IGNORECASE))
statements = sqlparse.parse(sql_text)
cte_aliases = set()
sql_sources = {}
def get_sources(statement):
sources_dict = {}
last_kw_from = False
last_kw_join = False
cte_mode = False
last_tok = None
done = False
while (not done):
for tok in statement.tokens:
if tok.is_group:
if (cte_mode and isinstance(tok, sqlparse.sql.IdentifierList)):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier):
for tok3 in tok2.tokens:
if isinstance(tok3, sqlparse.sql.Parenthesis):
cte_aliases.add(tok3.parent.normalized.lower())
sources_dict2 = get_sources(tok3)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok, sqlparse.sql.Parenthesis):
sources_dict2 = get_sources(tok)
sources_dict = {**sources_dict, **sources_dict2}
else:
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Parenthesis):
cte_aliases.add(tok2.parent.normalized.lower())
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
if ((last_kw_from or last_kw_join) and last_tok.is_whitespace):
if isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if (isinstance(tok2, sqlparse.sql.Identifier) and ('(' in tok2.value)):
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
elif (isinstance(tok2, sqlparse.sql.Identifier) and (tok2.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok2.normalized.lower()))
sources_dict[tok2.normalized.lower()] = tok.parent
elif (isinstance(tok, sqlparse.sql.Identifier) and (tok.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok.normalized.lower()))
sources_dict[tok.normalized.lower()] = tok.parent
last_kw_join = False
if (tok.is_keyword and (tok.normalized == 'WITH')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'GROUP')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'WHERE')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'ORDER')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'CREATE')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'SELECT')):
cte_mode = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'FROM')):
last_kw_from = True
elif (tok.is_keyword and ('JOIN' in tok.normalized)):
last_kw_join = True
last_tok = tok
done = True
return sources_dict
for (s, statement) in enumerate(statements):
has_from = False
last_kw_create = False
last_kw_create_table = False
create_table = None
for tok in statement.tokens:
if (isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table):
create_table = tok.normalized
last_kw_create_table = False
last_kw_create = False
if echo:
log(('-CREATE TABLE ' + create_table))
if (tok.is_keyword and (tok.normalized == 'TABLE') and last_kw_create):
last_kw_create_table = True
if (tok.is_keyword and (tok.normalized == 'CREATE')):
last_kw_create = True
if (tok.is_keyword and (tok.normalized == 'FROM')):
has_from = True
last_tok = tok
if has_from:
sources_dict = get_sources(statement)
if create_table:
sql_sources[create_table] = sorted(sources_dict)
else:
sql_sources[s] = sorted(sources_dict)
return sql_sources
|
Obtain the source tables of a query
|
xutil/database/base.py
|
get_sql_sources
|
flarco/n1slutil
| 1 |
python
|
def get_sql_sources(sql_text, echo=False):
'\n '
import sqlparse
sql_text = re.sub('as\\(', 'as (', sql_text, 0, (re.MULTILINE | re.IGNORECASE))
statements = sqlparse.parse(sql_text)
cte_aliases = set()
sql_sources = {}
def get_sources(statement):
sources_dict = {}
last_kw_from = False
last_kw_join = False
cte_mode = False
last_tok = None
done = False
while (not done):
for tok in statement.tokens:
if tok.is_group:
if (cte_mode and isinstance(tok, sqlparse.sql.IdentifierList)):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier):
for tok3 in tok2.tokens:
if isinstance(tok3, sqlparse.sql.Parenthesis):
cte_aliases.add(tok3.parent.normalized.lower())
sources_dict2 = get_sources(tok3)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok, sqlparse.sql.Parenthesis):
sources_dict2 = get_sources(tok)
sources_dict = {**sources_dict, **sources_dict2}
else:
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Parenthesis):
cte_aliases.add(tok2.parent.normalized.lower())
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
if ((last_kw_from or last_kw_join) and last_tok.is_whitespace):
if isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if (isinstance(tok2, sqlparse.sql.Identifier) and ('(' in tok2.value)):
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
elif (isinstance(tok2, sqlparse.sql.Identifier) and (tok2.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok2.normalized.lower()))
sources_dict[tok2.normalized.lower()] = tok.parent
elif (isinstance(tok, sqlparse.sql.Identifier) and (tok.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok.normalized.lower()))
sources_dict[tok.normalized.lower()] = tok.parent
last_kw_join = False
if (tok.is_keyword and (tok.normalized == 'WITH')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'GROUP')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'WHERE')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'ORDER')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'CREATE')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'SELECT')):
cte_mode = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'FROM')):
last_kw_from = True
elif (tok.is_keyword and ('JOIN' in tok.normalized)):
last_kw_join = True
last_tok = tok
done = True
return sources_dict
for (s, statement) in enumerate(statements):
has_from = False
last_kw_create = False
last_kw_create_table = False
create_table = None
for tok in statement.tokens:
if (isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table):
create_table = tok.normalized
last_kw_create_table = False
last_kw_create = False
if echo:
log(('-CREATE TABLE ' + create_table))
if (tok.is_keyword and (tok.normalized == 'TABLE') and last_kw_create):
last_kw_create_table = True
if (tok.is_keyword and (tok.normalized == 'CREATE')):
last_kw_create = True
if (tok.is_keyword and (tok.normalized == 'FROM')):
has_from = True
last_tok = tok
if has_from:
sources_dict = get_sources(statement)
if create_table:
sql_sources[create_table] = sorted(sources_dict)
else:
sql_sources[s] = sorted(sources_dict)
return sql_sources
|
def get_sql_sources(sql_text, echo=False):
'\n '
import sqlparse
sql_text = re.sub('as\\(', 'as (', sql_text, 0, (re.MULTILINE | re.IGNORECASE))
statements = sqlparse.parse(sql_text)
cte_aliases = set()
sql_sources = {}
def get_sources(statement):
sources_dict = {}
last_kw_from = False
last_kw_join = False
cte_mode = False
last_tok = None
done = False
while (not done):
for tok in statement.tokens:
if tok.is_group:
if (cte_mode and isinstance(tok, sqlparse.sql.IdentifierList)):
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Identifier):
for tok3 in tok2.tokens:
if isinstance(tok3, sqlparse.sql.Parenthesis):
cte_aliases.add(tok3.parent.normalized.lower())
sources_dict2 = get_sources(tok3)
sources_dict = {**sources_dict, **sources_dict2}
elif isinstance(tok, sqlparse.sql.Parenthesis):
sources_dict2 = get_sources(tok)
sources_dict = {**sources_dict, **sources_dict2}
else:
for tok2 in tok.tokens:
if isinstance(tok2, sqlparse.sql.Parenthesis):
cte_aliases.add(tok2.parent.normalized.lower())
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
if ((last_kw_from or last_kw_join) and last_tok.is_whitespace):
if isinstance(tok, sqlparse.sql.IdentifierList):
for tok2 in tok.tokens:
if (isinstance(tok2, sqlparse.sql.Identifier) and ('(' in tok2.value)):
sources_dict2 = get_sources(tok2)
sources_dict = {**sources_dict, **sources_dict2}
elif (isinstance(tok2, sqlparse.sql.Identifier) and (tok2.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok2.normalized.lower()))
sources_dict[tok2.normalized.lower()] = tok.parent
elif (isinstance(tok, sqlparse.sql.Identifier) and (tok.normalized.lower() not in cte_aliases)):
if echo:
log(('+Table = ' + tok.normalized.lower()))
sources_dict[tok.normalized.lower()] = tok.parent
last_kw_join = False
if (tok.is_keyword and (tok.normalized == 'WITH')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'GROUP')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'WHERE')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'ORDER')):
last_kw_join = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'CREATE')):
cte_mode = True
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'SELECT')):
cte_mode = False
last_kw_from = False
elif (tok.is_keyword and (tok.normalized == 'FROM')):
last_kw_from = True
elif (tok.is_keyword and ('JOIN' in tok.normalized)):
last_kw_join = True
last_tok = tok
done = True
return sources_dict
for (s, statement) in enumerate(statements):
has_from = False
last_kw_create = False
last_kw_create_table = False
create_table = None
for tok in statement.tokens:
if (isinstance(tok, sqlparse.sql.Identifier) and last_kw_create_table):
create_table = tok.normalized
last_kw_create_table = False
last_kw_create = False
if echo:
log(('-CREATE TABLE ' + create_table))
if (tok.is_keyword and (tok.normalized == 'TABLE') and last_kw_create):
last_kw_create_table = True
if (tok.is_keyword and (tok.normalized == 'CREATE')):
last_kw_create = True
if (tok.is_keyword and (tok.normalized == 'FROM')):
has_from = True
last_tok = tok
if has_from:
sources_dict = get_sources(statement)
if create_table:
sql_sources[create_table] = sorted(sources_dict)
else:
sql_sources[s] = sorted(sources_dict)
return sql_sources<|docstring|>Obtain the source tables of a query<|endoftext|>
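A short usage sketch for the function above; the SQL text is hypothetical and the exact identifier strings in the result depend on how sqlparse normalizes them:
# Illustrative call: the result is a dict keyed by the created table (or the statement
# index) mapping to the sorted source identifiers found after FROM / JOIN keywords.
sql = '''
create table analytics.daily_orders as
select o.id, c.name
from sales.orders o
join crm.customers c on c.id = o.customer_id
'''
print(get_sql_sources(sql))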
|
0ec58c5143beeaafd865bb0102e03a458921c49c87cc94e374e594a8551037e7
|
def __init__(self, conn_dict, profile=None, echo=False):
'Initiate connection'
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
template_base_path = '{}/database/templates/base.yaml'.format(get_dir_path())
self.template_dict = read_yaml(template_base_path)
template_path = '{}/database/templates/{}.yaml'.format(get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
if isinstance(temp_dict[key1], dict):
if (key1 not in self.template_dict):
self.template_dict[key1] = temp_dict[key1]
for key2 in temp_dict[key1]:
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log('Connected to {} as {}'.format(self._cred.name, self._cred.user))
|
Initiate connection
|
xutil/database/base.py
|
__init__
|
flarco/n1slutil
| 1 |
python
|
def __init__(self, conn_dict, profile=None, echo=False):
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
template_base_path = '{}/database/templates/base.yaml'.format(get_dir_path())
self.template_dict = read_yaml(template_base_path)
template_path = '{}/database/templates/{}.yaml'.format(get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
if isinstance(temp_dict[key1], dict):
if (key1 not in self.template_dict):
self.template_dict[key1] = temp_dict[key1]
for key2 in temp_dict[key1]:
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log('Connected to {} as {}'.format(self._cred.name, self._cred.user))
|
def __init__(self, conn_dict, profile=None, echo=False):
self._cred = struct(conn_dict)
self._cred.kwargs = conn_dict.get('kwargs', {})
self.name = self._cred.get('name', None)
self.username = self._cred.get('username', None)
self.type = self._cred.type
self.engine = None
self._cursor_description = None
self.profile = profile
self.batch_size = 10000
self.fetch_size = 20000
self.echo = echo
self.connect()
self.last_connect = now()
template_base_path = '{}/database/templates/base.yaml'.format(get_dir_path())
self.template_dict = read_yaml(template_base_path)
template_path = '{}/database/templates/{}.yaml'.format(get_dir_path(), self.type)
temp_dict = read_yaml(template_path)
for key1 in temp_dict:
if isinstance(temp_dict[key1], dict):
if (key1 not in self.template_dict):
self.template_dict[key1] = temp_dict[key1]
for key2 in temp_dict[key1]:
self.template_dict[key1][key2] = temp_dict[key1][key2]
else:
self.template_dict[key1] = temp_dict[key1]
self.variables = self._template('variables')
if os.getenv('PROFILE_YAML'):
other_vars = get_variables()
for key in other_vars:
self.variables[key] = other_vars[key]
self.tmp_folder = self.variables['tmp_folder']
self.set_variables()
if echo:
log('Connected to {} as {}'.format(self._cred.name, self._cred.user))<|docstring|>Initiate connection<|endoftext|>
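A hedged construction sketch. Only name, username and type are read directly here; the remaining keys and the class name DatabaseConn are placeholders for whatever the concrete subclass and its get_engine expect:
# Hypothetical credentials dict and subclass name -- adjust to the real implementation.
conn_dict = dict(
    name='analytics_db', type='postgresql',
    username='analyst', user='analyst', password='...',
    host='db.internal', port=5432, database='analytics')
conn = DatabaseConn(conn_dict, echo=True)  # DatabaseConn stands in for a concrete subclass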
|
1128817e75719e6942d23199debded4d09051357656ec0ed36989dcc06ef0970
|
def connect(self):
'Connect to Database'
self.engine = self.get_engine()
self.connection = self.engine.connect()
|
Connect to Database
|
xutil/database/base.py
|
connect
|
flarco/n1slutil
| 1 |
python
|
def connect(self):
self.engine = self.get_engine()
self.connection = self.engine.connect()
|
def connect(self):
self.engine = self.get_engine()
self.connection = self.engine.connect()<|docstring|>Connect to Database<|endoftext|>
|
a6642b8380d2022398ea26c102b764eb7e61938824d686263802da8a4fdeb599
|
def close(self):
'Close database connection'
self.conn.connection.close()
|
Close database connection
|
xutil/database/base.py
|
close
|
flarco/n1slutil
| 1 |
python
|
def close(self):
self.conn.connection.close()
|
def close(self):
self.conn.connection.close()<|docstring|>Close database connection<|endoftext|>
|
64fa3fd960a8a3a62275dd152299121d6cbb1c756a5b79f182ffe16f6b428ec5
|
def reconnect(self, min_tresh=0):
'Re-Connect to Database if minute threshold reached'
if ((now() - self.last_connect).total_seconds() > (min_tresh * 60)):
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()
|
Re-Connect to Database if minute threshold reached
|
xutil/database/base.py
|
reconnect
|
flarco/n1slutil
| 1 |
python
|
def reconnect(self, min_tresh=0):
if ((now() - self.last_connect).total_seconds() > (min_tresh * 60)):
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()
|
def reconnect(self, min_tresh=0):
if ((now() - self.last_connect).total_seconds() > (min_tresh * 60)):
log('Reconnecting to {}...'.format(self.name))
self.connect()
self.last_connect = now()<|docstring|>Re-Connect to Database if minute threshold reached<|endoftext|>
|
1ad79b6097319a4d6e7d2a45677b12ae18e9ab1e02ca7d3d239ec7d70b0bce23
|
def set_variables(self):
'Set custom variables'
raise Exception("Method 'set_variables' is not implemented!")
|
Set custom variables
|
xutil/database/base.py
|
set_variables
|
flarco/n1slutil
| 1 |
python
|
def set_variables(self):
raise Exception("Method 'set_variables' is not implemented!")
|
def set_variables(self):
raise Exception("Method 'set_variables' is not implemented!")<|docstring|>Set custom variables<|endoftext|>
|
b92600ce4820daadd26ce8d31fa79cb7faf57d3a58351cc5db9bcb4ff91167b1
|
def get_dialect(self, echo=False):
'SQLAlchemy dialect'
raise Exception("Method 'get_dialect' is not implemented!")
|
SQLAlchemy dialect
|
xutil/database/base.py
|
get_dialect
|
flarco/n1slutil
| 1 |
python
|
def get_dialect(self, echo=False):
raise Exception("Method 'get_dialect' is not implemented!")
|
def get_dialect(self, echo=False):
raise Exception("Method 'get_dialect' is not implemented!")<|docstring|>SQLAlchemy dialect<|endoftext|>
|
92528a6f62fb091d56979b41b2e2a375a4a870f19e1b3345af7f0a76cca64f97
|
def check_pk(self, table, fields):
'Check Primary key to ensure there are not duplicates'
if ('where' in fields.lower()):
(fields, where_clause) = fields.lower().split('where')
where_clause = ('where ' + where_clause)
else:
where_clause = ''
sql = "\n select\n '{table}' as table,\n case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result\n from {table}\n {where_clause}\n ".format(table=table, fields=fields, where_clause=where_clause)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if (data[0].pk_result == 'FAIL'):
raise Exception('PK Test failed for table "{}" with fields "{}"'.format(table, fields))
|
Check Primary key to ensure there are not duplicates
|
xutil/database/base.py
|
check_pk
|
flarco/n1slutil
| 1 |
python
|
def check_pk(self, table, fields):
if ('where' in fields.lower()):
(fields, where_clause) = fields.lower().split('where')
where_clause = ('where ' + where_clause)
else:
where_clause = ''
sql = "\n select\n '{table}' as table,\n case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result\n from {table}\n {where_clause}\n ".format(table=table, fields=fields, where_clause=where_clause)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if (data[0].pk_result == 'FAIL'):
raise Exception('PK Test failed for table "{}" with fields "{}"'.format(table, fields))
|
def check_pk(self, table, fields):
if ('where' in fields.lower()):
(fields, where_clause) = fields.lower().split('where')
where_clause = ('where ' + where_clause)
else:
where_clause = ''
sql = "\n select\n '{table}' as table,\n case when count(1) = count({fields}) then 'PASS' else 'FAIL' end as pk_result\n from {table}\n {where_clause}\n ".format(table=table, fields=fields, where_clause=where_clause)
data = self.query(sql, echo=False)
headers = self._fields
print(ptable(headers, data))
if (data[0].pk_result == 'FAIL'):
raise Exception('PK Test failed for table "{}" with fields "{}"'.format(table, fields))<|docstring|>Check Primary key to ensure there are not duplicates<|endoftext|>
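A usage sketch; conn is assumed to be an instance of this connection class and the table/field names are hypothetical:
conn.check_pk('hr.employees', 'employee_id')
# A filter can ride along in the fields string; everything after 'where' becomes the WHERE clause.
conn.check_pk('hr.employees', "employee_id where status = 'active'")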
|
eea4807c757c78fc45421f3dc7f6921b0e4982a59fbea728639b2b6dfe4a4b5d
|
def execute_multi(self, sql, dtype='namedtuple', limit=None, echo=True, query_name='Record', log=log):
"\n Execute multiple SQL statements separtated by ';'. Returns a generator.\n Example:\n for fields, rows in conn.execute(sql):\n print(fields)\n print(len(rows))\n "
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sqls = sql.split(';')
for sql in sqls:
if (not sql.strip()):
continue
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
cursor.callproc(procedure, args)
continue
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
(yield (fields, rows))
|
Execute multiple SQL statements separated by ';'. Returns a generator.
Example:
for fields, rows in conn.execute(sql):
print(fields)
print(len(rows))
|
xutil/database/base.py
|
execute_multi
|
flarco/n1slutil
| 1 |
python
|
def execute_multi(self, sql, dtype='namedtuple', limit=None, echo=True, query_name='Record', log=log):
"\n Execute multiple SQL statements separtated by ';'. Returns a generator.\n Example:\n for fields, rows in conn.execute(sql):\n print(fields)\n print(len(rows))\n "
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sqls = sql.split(';')
for sql in sqls:
if (not sql.strip()):
continue
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
cursor.callproc(procedure, args)
continue
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
(yield (fields, rows))
|
def execute_multi(self, sql, dtype='namedtuple', limit=None, echo=True, query_name='Record', log=log):
"\n Execute multiple SQL statements separtated by ';'. Returns a generator.\n Example:\n for fields, rows in conn.execute(sql):\n print(fields)\n print(len(rows))\n "
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sqls = sql.split(';')
for sql in sqls:
if (not sql.strip()):
continue
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
cursor.callproc(procedure, args)
continue
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
(yield (fields, rows))<|docstring|>Execute multiple SQL statements separated by ';'. Returns a generator.
Example:
for fields, rows in conn.execute(sql):
print(fields)
print(len(rows))<|endoftext|>
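A usage sketch mirroring the docstring example; conn and the table names are placeholders:
script = 'truncate table stage.orders; insert into stage.orders select * from sales.orders'
for fields, rows in conn.execute_multi(script, echo=True):
    print(fields, len(rows))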
|
ded5534da2e201c609a0021a59707a6a690e54d07b117a26622d3cd569fa243e
|
def execute(self, sql, dtype='tuple', limit=None, echo=True, query_name='Record', log=log):
'Execute SQL, return last result'
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
return (fields, rows)
finally:
connection.close()
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
return (fields, rows)
|
Execute SQL, return last result
|
xutil/database/base.py
|
execute
|
flarco/n1slutil
| 1 |
python
|
def execute(self, sql, dtype='tuple', limit=None, echo=True, query_name='Record', log=log):
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
return (fields, rows)
finally:
connection.close()
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
return (fields, rows)
|
def execute(self, sql, dtype='tuple', limit=None, echo=True, query_name='Record', log=log):
self.reconnect(min_tresh=10)
data = None
fields = None
rows = []
message_mapping = {'drop ': 'Dropping {}.', 'truncate ': 'Truncating {}.', 'select ': 'Selecting {}.', 'create ': 'Creating {}.', 'insert ': 'Inserting {}.', 'alter ': 'Altering {}.', 'update ': 'Updating {}.', 'delete ': 'Deleting {}.', 'exec ': 'Calling Procedure {}.', 'grant ': 'Granting {}.'}
sql_ = sql.strip().lower()
for (word, message) in message_mapping.items():
if sql_.startswith(word):
if echo:
log(message.format(' '.join(sql_.splitlines()[0].split()[1:3]).upper()))
break
if sql_.startswith('exec '):
procedure = sql_[5:].split('(')[0]
args = sql_[5:].split('(')[1][:(- 1)].replace("'", '').split(',')
args = [a.strip() for a in args]
connection = self.engine.raw_connection()
try:
cursor = connection.cursor()
cursor.callproc(procedure, args)
self._fields = self._get_cursor_fields(cursor_desc=cursor.description)
rows = list(cursor.fetchall())
cursor.close()
connection.commit()
return (fields, rows)
finally:
connection.close()
try:
self._fields = []
rows = self.query(sql, rec_name=query_name, dtype=dtype, limit=limit, echo=echo, log=log)
fields = self._fields
if (('-- pk_test:' in sql.lower()) and sql_.startswith('create')):
sql_lines = sql_.splitlines()
regexp = 'create\\s+table\\s+(\\S*)[\\sa-zA-Z\\d]+ as'
table = re.findall(regexp, sql_lines[0])[0]
line = [l for l in sql_lines if l.strip().lower().startswith('-- pk_test:')][0]
fields = line.split(':')[(- 1)]
self.check_pk(table, fields)
except Exception as E:
message = get_exception_message().lower()
if (sql_.startswith('drop ') and (self.error_msg['table_not_exist'] in message)):
log('WARNING: Table already dropped.')
else:
raise E
if (not fields):
fields = []
return (fields, rows)<|docstring|>Execute SQL, return last result<|endoftext|>
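For a single statement, execute returns the last result directly; a minimal sketch with a hypothetical table:
fields, rows = conn.execute('select * from sales.orders', dtype='namedtuple', limit=100)
print(fields)
print(len(rows))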
|
991ccf259d42090a2fa899a6b949ca562fa3b6d8016742062d777f178eecdc53
|
def insert(self, table, data, echo=False):
'Insert records of namedtuple or dicts'
raise Exception('insert not implemented')
|
Insert records of namedtuple or dicts
|
xutil/database/base.py
|
insert
|
flarco/n1slutil
| 1 |
python
|
def insert(self, table, data, echo=False):
raise Exception('insert not implemented')
|
def insert(self, table, data, echo=False):
raise Exception('insert not implemented')<|docstring|>Insert records of namedtuple or dicts<|endoftext|>
|
74d5d7ae69215fb12faeae182262b906bbabb712c3f63ab5f245499e477d6e13
|
def drop_table(self, table, log=log):
'Drop table'
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if (self._template('error_filter.table_not_exist') in message):
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E
|
Drop table
|
xutil/database/base.py
|
drop_table
|
flarco/n1slutil
| 1 |
python
|
def drop_table(self, table, log=log):
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if (self._template('error_filter.table_not_exist') in message):
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E
|
def drop_table(self, table, log=log):
try:
sql = self._template('core.drop_table').format(table)
self._do_execute(sql)
except Exception as E:
message = get_exception_message().lower()
if (self._template('error_filter.table_not_exist') in message):
if self.echo:
log('Table "{}" already dropped.'.format(table))
else:
raise E<|docstring|>Drop table<|endoftext|>
|
11592db5d01eb56d78c4a9c5ab75c4d1d1e893d65dd5cd5afe7e6189e9ab667d
|
def create_table(self, table, field_types, drop=False, log=log):
'Create table'
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
(ftype, max_len, dec_len) = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace('()', suff)
field_types_str = ', \n'.join([((self._fix_f_name(field) + ' ') + new_ftypes[field]) for field in new_ftypes])
sql = self._template('core.create_table').format(table=table, col_types=field_types_str)
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))
|
Create table
|
xutil/database/base.py
|
create_table
|
flarco/n1slutil
| 1 |
python
|
def create_table(self, table, field_types, drop=False, log=log):
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
(ftype, max_len, dec_len) = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace('()', suff)
field_types_str = ', \n'.join([((self._fix_f_name(field) + ' ') + new_ftypes[field]) for field in new_ftypes])
sql = self._template('core.create_table').format(table=table, col_types=field_types_str)
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))
|
def create_table(self, table, field_types, drop=False, log=log):
if drop:
self.drop_table(table, log=log)
new_ftypes = OrderedDict()
for f in field_types:
(ftype, max_len, dec_len) = field_types[f]
if dec_len:
suff = '({},{})'.format(max_len, dec_len)
elif max_len:
suff = '({})'.format(max_len)
else:
suff = ''
new_ftypes[f] = self._template('general_type_map')[ftype].replace('()', suff)
field_types_str = ', \n'.join([((self._fix_f_name(field) + ' ') + new_ftypes[field]) for field in new_ftypes])
sql = self._template('core.create_table').format(table=table, col_types=field_types_str)
try:
self._do_execute(sql)
except Exception as e:
raise e
log('Created table "{}"'.format(table))<|docstring|>Create table<|endoftext|>
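A sketch of the expected field_types shape; the general type names used here ('integer', 'string', 'double') are assumptions and must exist as keys in the connection's general_type_map template:
from collections import OrderedDict

# column -> (general type, max length, decimal scale); None where not applicable
field_types = OrderedDict(
    id=('integer', None, None),
    name=('string', 100, None),
    amount=('double', 14, 2))
conn.create_table('stage.orders_copy', field_types, drop=True)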
|
8552087d57da4d6950a288e8d18e8fce31d23a0c98b4df2a6b18c49675356ae9
|
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
'Get fields of active Select cursor'
fields = OrderedDict()
cursor_desc = (cursor_desc if cursor_desc else self._cursor_description)
if (cursor_desc == None):
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
if ('cx_Oracle.NUMBER' in str(f[1])):
if (f[4] and (f[4] > 11)):
f_type = 'long'
if (f[5] and (f[5] > 0)):
f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())
|
Get fields of active Select cursor
|
xutil/database/base.py
|
_get_cursor_fields
|
flarco/n1slutil
| 1 |
python
|
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
fields = OrderedDict()
cursor_desc = (cursor_desc if cursor_desc else self._cursor_description)
if (cursor_desc == None):
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
if ('cx_Oracle.NUMBER' in str(f[1])):
if (f[4] and (f[4] > 11)):
f_type = 'long'
if (f[5] and (f[5] > 0)):
f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())
|
def _get_cursor_fields(self, as_dict=False, native_type=True, cursor_desc=None):
fields = OrderedDict()
cursor_desc = (cursor_desc if cursor_desc else self._cursor_description)
if (cursor_desc == None):
return []
for f in cursor_desc:
f_name = f[0].lower()
if as_dict:
if native_type:
f_type = f[1]
else:
f_type = self.reverse_data_map[f[1]]
if ('cx_Oracle.NUMBER' in str(f[1])):
if (f[4] and (f[4] > 11)):
f_type = 'long'
if (f[5] and (f[5] > 0)):
f_type = 'double'
fields[f_name] = f_type
else:
fields[f_name] = None
if as_dict:
return fields
else:
return list(fields.keys())<|docstring|>Get fields of active Select cursor<|endoftext|>
|
e784c53226e41a4b36798921ca6b8fa97368dd50f2884affd7604e929b17fd4a
|
def stream(self, sql, rec_name='Record', dtype='namedtuple', yield_chuncks=False, chunk_size=None, limit=None, echo=True):
'Stream Select from SQL, yield records as they come in'
self.reconnect(min_tresh=10)
if echo:
log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = (limit if limit else self.fetch_size)
fetch_size = (chunk_size if chunk_size else fetch_size)
try:
self._do_execute(sql)
except Exception as e:
raise e
if (dtype == 'tuple'):
make_rec = (lambda row: row)
make_batch = (lambda rows: rows)
elif (dtype == 'dataframe'):
yield_chuncks = True
make_batch = (lambda rows: pandas.DataFrame(rows, columns=self._fields))
else:
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), self._fields)
make_rec = (lambda row: Record(*row))
make_batch = (lambda rows: [make_rec(r) for r in rows])
self._stream_counter = 0
while True:
if (not self._fields):
break
rows = self.result.fetchmany(fetch_size)
if rows:
if yield_chuncks:
batch = make_batch(rows)
self._stream_counter += len(batch)
if len(batch):
(yield batch)
else:
for row in rows:
self._stream_counter += 1
(yield make_rec(row))
else:
break
if limit:
break
|
Stream Select from SQL, yield records as they come in
|
xutil/database/base.py
|
stream
|
flarco/n1slutil
| 1 |
python
|
def stream(self, sql, rec_name='Record', dtype='namedtuple', yield_chuncks=False, chunk_size=None, limit=None, echo=True):
self.reconnect(min_tresh=10)
if echo:
log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = (limit if limit else self.fetch_size)
fetch_size = (chunk_size if chunk_size else fetch_size)
try:
self._do_execute(sql)
except Exception as e:
raise e
if (dtype == 'tuple'):
make_rec = (lambda row: row)
make_batch = (lambda rows: rows)
elif (dtype == 'dataframe'):
yield_chuncks = True
make_batch = (lambda rows: pandas.DataFrame(rows, columns=self._fields))
else:
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), self._fields)
make_rec = (lambda row: Record(*row))
make_batch = (lambda rows: [make_rec(r) for r in rows])
self._stream_counter = 0
while True:
if (not self._fields):
break
rows = self.result.fetchmany(fetch_size)
if rows:
if yield_chuncks:
batch = make_batch(rows)
self._stream_counter += len(batch)
if len(batch):
(yield batch)
else:
for row in rows:
self._stream_counter += 1
(yield make_rec(row))
else:
break
if limit:
break
|
def stream(self, sql, rec_name='Record', dtype='namedtuple', yield_chuncks=False, chunk_size=None, limit=None, echo=True):
self.reconnect(min_tresh=10)
if echo:
log("Streaming SQL for '{}'.".format(rec_name))
fetch_size = (limit if limit else self.fetch_size)
fetch_size = (chunk_size if chunk_size else fetch_size)
try:
self._do_execute(sql)
except Exception as e:
raise e
if (dtype == 'tuple'):
make_rec = (lambda row: row)
make_batch = (lambda rows: rows)
elif (dtype == 'dataframe'):
yield_chuncks = True
make_batch = (lambda rows: pandas.DataFrame(rows, columns=self._fields))
else:
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), self._fields)
make_rec = (lambda row: Record(*row))
make_batch = (lambda rows: [make_rec(r) for r in rows])
self._stream_counter = 0
while True:
if (not self._fields):
break
rows = self.result.fetchmany(fetch_size)
if rows:
if yield_chuncks:
batch = make_batch(rows)
self._stream_counter += len(batch)
if len(batch):
(yield batch)
else:
for row in rows:
self._stream_counter += 1
(yield make_rec(row))
else:
break
if limit:
break<|docstring|>Stream Select from SQL, yield records as they come in<|endoftext|>
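Two usage patterns for the generator above; conn and the query are placeholders:
# Row by row (namedtuples by default)
for rec in conn.stream('select * from sales.orders'):
    pass  # handle each record

# Chunked pandas DataFrames (dtype='dataframe' forces chunked yields)
for df in conn.stream('select * from sales.orders', dtype='dataframe', chunk_size=50000):
    print(len(df))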
|
c4dac5cb0402429b4f38e1b3f11763a51bd6b4c63d4e032776a9dcec6e5a374b
|
def query(self, sql, rec_name='Record', dtype='namedtuple', limit=None, echo=True, retrying=False, log=log):
'Select from SQL, return list of namedtuples'
self.reconnect(min_tresh=10)
s_t = datetime.datetime.now()
_data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))
if (not self.result.closed):
self.result.close()
fields = self._fields
if (not fields):
return []
if (dtype == 'namedtuple'):
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
if limit:
data = [Record(*row) for row in _data]
else:
data = [Record(*row) for row in _data]
elif (dtype == 'tuple'):
if limit:
data = [tuple(row) for row in _data]
else:
data = [tuple(row) for row in _data]
elif (dtype == 'dataframe'):
if limit:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
raise Exception('{} is not recognized.'.format(dtype))
secs = (datetime.datetime.now() - s_t).total_seconds()
rate = round((len(data) / secs), 1)
if echo:
log(' >>> Got {} rows in {} secs [{} r/s].'.format(len(data), secs, rate))
return data
|
Select from SQL, return list of namedtuples
|
xutil/database/base.py
|
query
|
flarco/n1slutil
| 1 |
python
|
def query(self, sql, rec_name='Record', dtype='namedtuple', limit=None, echo=True, retrying=False, log=log):
self.reconnect(min_tresh=10)
s_t = datetime.datetime.now()
_data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))
if (not self.result.closed):
self.result.close()
fields = self._fields
if (not fields):
return []
if (dtype == 'namedtuple'):
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
if limit:
data = [Record(*row) for row in _data]
else:
data = [Record(*row) for row in _data]
elif (dtype == 'tuple'):
if limit:
data = [tuple(row) for row in _data]
else:
data = [tuple(row) for row in _data]
elif (dtype == 'dataframe'):
if limit:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
raise Exception('{} is not recognized.'.format(dtype))
secs = (datetime.datetime.now() - s_t).total_seconds()
rate = round((len(data) / secs), 1)
if echo:
log(' >>> Got {} rows in {} secs [{} r/s].'.format(len(data), secs, rate))
return data
|
def query(self, sql, rec_name='Record', dtype='namedtuple', limit=None, echo=True, retrying=False, log=log):
self.reconnect(min_tresh=10)
s_t = datetime.datetime.now()
_data = list(self.stream(sql, dtype=dtype, echo=False, limit=limit))
if (not self.result.closed):
self.result.close()
fields = self._fields
if (not fields):
return []
if (dtype == 'namedtuple'):
Record = namedtuple(rec_name.replace(' ', '_').replace('.', '_'), fields)
if limit:
data = [Record(*row) for row in _data]
else:
data = [Record(*row) for row in _data]
elif (dtype == 'tuple'):
if limit:
data = [tuple(row) for row in _data]
else:
data = [tuple(row) for row in _data]
elif (dtype == 'dataframe'):
if limit:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
data = pandas.DataFrame([row for row in _data], columns=fields)
else:
raise Exception('{} is not recognized.'.format(dtype))
secs = (datetime.datetime.now() - s_t).total_seconds()
rate = round((len(data) / secs), 1)
if echo:
log(' >>> Got {} rows in {} secs [{} r/s].'.format(len(data), secs, rate))
return data<|docstring|>Select from SQL, return list of namedtuples<|endoftext|>
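A quick sketch; dtype selects the container for the fetched rows:
recs = conn.query('select * from sales.orders', limit=10)           # list of namedtuples
df = conn.query('select * from sales.orders', dtype='dataframe')    # pandas DataFrame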
|
98c610c71ed3352ebed2a9de5753d48e2ab6be08be1f9c58f034790b3453cabb
|
def get_schemas(self, echo=True):
'Get list of schemas.'
Rec = namedtuple('Schemas', 'schema')
self._fields = Rec._fields
sql_tmpl = self._template('metadata.schemas')
if sql_tmpl:
schemas = [r[0] for r in self.query(sql_tmpl)]
else:
self.get_engine(echo=echo)
schemas = self.engine_inspect.get_schema_names()
rows = [Rec(s) for s in schemas]
return rows
|
Get list of schemas.
|
xutil/database/base.py
|
get_schemas
|
flarco/n1slutil
| 1 |
python
|
def get_schemas(self, echo=True):
Rec = namedtuple('Schemas', 'schema')
self._fields = Rec._fields
sql_tmpl = self._template('metadata.schemas')
if sql_tmpl:
schemas = [r[0] for r in self.query(sql_tmpl)]
else:
self.get_engine(echo=echo)
schemas = self.engine_inspect.get_schema_names()
rows = [Rec(s) for s in schemas]
return rows
|
def get_schemas(self, echo=True):
Rec = namedtuple('Schemas', 'schema')
self._fields = Rec._fields
sql_tmpl = self._template('metadata.schemas')
if sql_tmpl:
schemas = [r[0] for r in self.query(sql_tmpl)]
else:
self.get_engine(echo=echo)
schemas = self.engine_inspect.get_schema_names()
rows = [Rec(s) for s in schemas]
return rows<|docstring|>Get list of schemas.<|endoftext|>
|
d4f2d411f364ed8b6bc70dba4314b92cacbbd69e20242a5a314522dbb48c8ad3
|
def get_objects(self, schema, object_type='all', echo=True):
"Get metadata for objects. object_type in 'all', 'table', 'view'"
Rec = namedtuple('Table', 'schema object_name object_type')
self._fields = Rec._fields
def get_rec(object_name, object_type):
r_dict = dict(schema=schema, object_name=object_name, object_type=object_type)
return Rec(**r_dict)
if (object_type == 'all'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
elif (object_type == 'table'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
elif (object_type == 'view'):
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
else:
raise Exception('Object type "{}" not supported!'.format(object_type))
return rows
|
Get metadata for objects. object_type in 'all', 'table', 'view'
|
xutil/database/base.py
|
get_objects
|
flarco/n1slutil
| 1 |
python
|
def get_objects(self, schema, object_type='all', echo=True):
Rec = namedtuple('Table', 'schema object_name object_type')
self._fields = Rec._fields
def get_rec(object_name, object_type):
r_dict = dict(schema=schema, object_name=object_name, object_type=object_type)
return Rec(**r_dict)
if (object_type == 'all'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
elif (object_type == 'table'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
elif (object_type == 'view'):
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
else:
raise Exception('Object type "{}" not supported!'.format(object_type))
return rows
|
def get_objects(self, schema, object_type='all', echo=True):
Rec = namedtuple('Table', 'schema object_name object_type')
self._fields = Rec._fields
def get_rec(object_name, object_type):
r_dict = dict(schema=schema, object_name=object_name, object_type=object_type)
return Rec(**r_dict)
if (object_type == 'all'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
elif (object_type == 'table'):
table_rows = self.get_tables(schema)
rows = [get_rec(r.table, 'table') for r in sorted(table_rows)]
elif (object_type == 'view'):
view_rows = self.get_views(schema)
rows += [get_rec(r.view, 'view') for r in sorted(view_rows)]
else:
raise Exception('Object type "{}" not supported!'.format(object_type))
return rows<|docstring|>Get metadata for objects. object_type in 'all', 'table', 'view'<|endoftext|>
|
baa2c1769bd5f16e7b518d6708472367f68cfcc2857cdfc7dd87d5a20e05171d
|
def get_tables(self, schema, echo=True):
'Get metadata for tables.'
schemas = (schema if isinstance(schema, list) else [schema])
def get_tables_for(schema):
def get_rec(table):
self._fields = ['schema', 'table']
return tuple([schema, table])
Rec = namedtuple('Table', 'schema table')
self._fields = Rec._fields
r_dict = dict(schema=schema, table=table)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.tables')
if sql_tmpl:
tables = self.query(sql_tmpl.format(schema=schema))
if hasattr(self, '_std_get_tables'):
tables = self._std_get_tables(schema, tables)
else:
self.get_engine(echo=echo)
tables = self.engine_inspect.get_table_names(schema)
return [get_rec(v) for v in sorted(tables)]
rows = []
for schema in schemas:
for row in get_tables_for(schema):
rows.append(row)
return rows
|
Get metadata for tables.
|
xutil/database/base.py
|
get_tables
|
flarco/n1slutil
| 1 |
python
|
def get_tables(self, schema, echo=True):
schemas = (schema if isinstance(schema, list) else [schema])
def get_tables_for(schema):
def get_rec(table):
self._fields = ['schema', 'table']
return tuple([schema, table])
Rec = namedtuple('Table', 'schema table')
self._fields = Rec._fields
r_dict = dict(schema=schema, table=table)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.tables')
if sql_tmpl:
tables = self.query(sql_tmpl.format(schema=schema))
if hasattr(self, '_std_get_tables'):
tables = self._std_get_tables(schema, tables)
else:
self.get_engine(echo=echo)
tables = self.engine_inspect.get_table_names(schema)
return [get_rec(v) for v in sorted(tables)]
rows = []
for schema in schemas:
for row in get_tables_for(schema):
rows.append(row)
return rows
|
def get_tables(self, schema, echo=True):
schemas = (schema if isinstance(schema, list) else [schema])
def get_tables_for(schema):
def get_rec(table):
self._fields = ['schema', 'table']
return tuple([schema, table])
Rec = namedtuple('Table', 'schema table')
self._fields = Rec._fields
r_dict = dict(schema=schema, table=table)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.tables')
if sql_tmpl:
tables = self.query(sql_tmpl.format(schema=schema))
if hasattr(self, '_std_get_tables'):
tables = self._std_get_tables(schema, tables)
else:
self.get_engine(echo=echo)
tables = self.engine_inspect.get_table_names(schema)
return [get_rec(v) for v in sorted(tables)]
rows = []
for schema in schemas:
for row in get_tables_for(schema):
rows.append(row)
return rows<|docstring|>Get metadata for tables.<|endoftext|>
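Usage sketch; note that the inner get_rec returns plain (schema, table) tuples, so unpack rather than rely on attribute access:
for sch, tbl in conn.get_tables(['sales', 'crm']):
    print(sch, tbl)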
|
5dc0405053af132ade21aa57d849581cf706426ae3f9397f73046b45a1737eb0
|
def get_views(self, schema, echo=True):
'Get metadata for views.'
schemas = (schema if isinstance(schema, list) else [schema])
def get_views_for(schema):
def get_rec(view):
self._fields = ['schema', 'view']
return tuple([schema, view])
Rec = namedtuple('View', 'schema view')
self._fields = Rec._fields
r_dict = dict(schema=schema, view=view)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.views')
if sql_tmpl:
views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
else:
self.get_engine(echo=echo)
views = self.engine_inspect.get_view_names(schema)
return [get_rec(v) for v in sorted(views)]
rows = []
for schema in schemas:
for row in get_views_for(schema):
rows.append(row)
return rows
|
Get metadata for views.
|
xutil/database/base.py
|
get_views
|
flarco/n1slutil
| 1 |
python
|
def get_views(self, schema, echo=True):
schemas = (schema if isinstance(schema, list) else [schema])
def get_views_for(schema):
def get_rec(view):
self._fields = ['schema', 'view']
return tuple([schema, view])
Rec = namedtuple('View', 'schema view')
self._fields = Rec._fields
r_dict = dict(schema=schema, view=view)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.views')
if sql_tmpl:
views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
else:
self.get_engine(echo=echo)
views = self.engine_inspect.get_view_names(schema)
return [get_rec(v) for v in sorted(views)]
rows = []
for schema in schemas:
for row in get_views_for(schema):
rows.append(row)
return rows
|
def get_views(self, schema, echo=True):
schemas = (schema if isinstance(schema, list) else [schema])
def get_views_for(schema):
def get_rec(view):
self._fields = ['schema', 'view']
return tuple([schema, view])
Rec = namedtuple('View', 'schema view')
self._fields = Rec._fields
r_dict = dict(schema=schema, view=view)
return Rec(**r_dict)
sql_tmpl = self._template('metadata.views')
if sql_tmpl:
views = [r[0] for r in self.query(sql_tmpl.format(schema=schema))]
else:
self.get_engine(echo=echo)
views = self.engine_inspect.get_view_names(schema)
return [get_rec(v) for v in sorted(views)]
rows = []
for schema in schemas:
for row in get_views_for(schema):
rows.append(row)
return rows<|docstring|>Get metadata for views.<|endoftext|>
|
3b807a9ecd936cfd89e933b388fbdc6dc286e05e4e401ddeaf25aad8f3827bcf
|
def get_columns(self, table_name, object_type=None, echo=False, include_schema_table=True, native_type=True):
'Get column metadata for table'
if include_schema_table:
headers = 'schema table id column_name type nullable default autoincrement'
else:
headers = 'id column_name type nullable default autoincrement'
Rec = namedtuple('Columns', headers)
self._fields = Rec._fields
all_rows = []
table_names = (table_name if isinstance(table_name, list) else [table_name])
for table_name in table_names:
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict, column_order):
if include_schema_table:
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['column_name'] = r_dict['name']
r_dict['type'] = str(r_dict['type'])
if (not native_type):
r_dict['type'] = r_dict['type'].lower()
r_dict['type'] = (r_dict['type'].split('(')[0] if ('(' in r_dict['type']) else r_dict['type'])
native_type_map = self._template('native_type_map')
if (not (r_dict['type'] in native_type_map)):
raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
r_dict['type'] = native_type_map[r_dict['type']]
r_dict['id'] = column_order
for k in list(r_dict):
if (k not in headers.split()):
del r_dict[k]
if ('(' in r_dict['type']):
r_dict['type'] = r_dict['type'].split('(')[0]
return Rec(**r_dict)
sql_tmpl = self._template('metadata.columns')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
if hasattr(self, '_std_get_columns'):
rows = self._std_get_columns(schema, table, rows)
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_columns(table, schema=schema)
all_rows += [get_rec(r_dict, (i + 1)) for (i, r_dict) in enumerate(rows)]
self._fields = Rec._fields
return all_rows
|
Get column metadata for table
|
xutil/database/base.py
|
get_columns
|
flarco/n1slutil
| 1 |
python
|
def get_columns(self, table_name, object_type=None, echo=False, include_schema_table=True, native_type=True):
if include_schema_table:
headers = 'schema table id column_name type nullable default autoincrement'
else:
headers = 'id column_name type nullable default autoincrement'
Rec = namedtuple('Columns', headers)
self._fields = Rec._fields
all_rows = []
table_names = (table_name if isinstance(table_name, list) else [table_name])
for table_name in table_names:
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict, column_order):
if include_schema_table:
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['column_name'] = r_dict['name']
r_dict['type'] = str(r_dict['type'])
if (not native_type):
r_dict['type'] = r_dict['type'].lower()
r_dict['type'] = (r_dict['type'].split('(')[0] if ('(' in r_dict['type']) else r_dict['type'])
native_type_map = self._template('native_type_map')
if (not (r_dict['type'] in native_type_map)):
raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
r_dict['type'] = native_type_map[r_dict['type']]
r_dict['id'] = column_order
for k in list(r_dict):
if (k not in headers.split()):
del r_dict[k]
if ('(' in r_dict['type']):
r_dict['type'] = r_dict['type'].split('(')[0]
return Rec(**r_dict)
sql_tmpl = self._template('metadata.columns')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
if hasattr(self, '_std_get_columns'):
rows = self._std_get_columns(schema, table, rows)
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_columns(table, schema=schema)
all_rows += [get_rec(r_dict, (i + 1)) for (i, r_dict) in enumerate(rows)]
self._fields = Rec._fields
return all_rows
|
def get_columns(self, table_name, object_type=None, echo=False, include_schema_table=True, native_type=True):
if include_schema_table:
headers = 'schema table id column_name type nullable default autoincrement'
else:
headers = 'id column_name type nullable default autoincrement'
Rec = namedtuple('Columns', headers)
self._fields = Rec._fields
all_rows = []
table_names = (table_name if isinstance(table_name, list) else [table_name])
for table_name in table_names:
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict, column_order):
if include_schema_table:
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['column_name'] = r_dict['name']
r_dict['type'] = str(r_dict['type'])
if (not native_type):
r_dict['type'] = r_dict['type'].lower()
r_dict['type'] = (r_dict['type'].split('(')[0] if ('(' in r_dict['type']) else r_dict['type'])
native_type_map = self._template('native_type_map')
if (not (r_dict['type'] in native_type_map)):
raise Exception('Field type "{}" not in native_type_map for {}'.format(r_dict['type'], self.type))
r_dict['type'] = native_type_map[r_dict['type']]
r_dict['id'] = column_order
for k in list(r_dict):
if (k not in headers.split()):
del r_dict[k]
if ('(' in r_dict['type']):
r_dict['type'] = r_dict['type'].split('(')[0]
return Rec(**r_dict)
sql_tmpl = self._template('metadata.columns')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
if hasattr(self, '_std_get_columns'):
rows = self._std_get_columns(schema, table, rows)
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_columns(table, schema=schema)
all_rows += [get_rec(r_dict, (i + 1)) for (i, r_dict) in enumerate(rows)]
self._fields = Rec._fields
return all_rows<|docstring|>Get column metadata for table<|endoftext|>
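Usage sketch with hypothetical schema-qualified names; native_type=False maps engine types through the native_type_map template:
for col in conn.get_columns('sales.orders'):
    print(col.id, col.column_name, col.type, col.nullable)
cols = conn.get_columns(['sales.orders', 'crm.customers'], native_type=False)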
|
17bce1b78848abad11517ba21ea99047937c33033fc7fcf8c49b6f602ff544e0
|
def get_primary_keys(self, table_name, echo=False):
'Get PK metadata for table'
Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(col, pk_name, column_order):
r_dict = {}
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['pk_name'] = pk_name
r_dict['column_name'] = col
r_dict['column_order'] = column_order
return Rec(**r_dict)
sql_tmpl = self._template('metadata.primary_keys')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
rows = [get_rec(col, r_dict['name'], (i + 1)) for (i, col) in enumerate(r_dict['constrained_columns'])]
return rows
|
Get PK metadata for table
|
xutil/database/base.py
|
get_primary_keys
|
flarco/n1slutil
| 1 |
python
|
def get_primary_keys(self, table_name, echo=False):
Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(col, pk_name, column_order):
r_dict = {}
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['pk_name'] = pk_name
r_dict['column_name'] = col
r_dict['column_order'] = column_order
return Rec(**r_dict)
sql_tmpl = self._template('metadata.primary_keys')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
rows = [get_rec(col, r_dict['name'], (i + 1)) for (i, col) in enumerate(r_dict['constrained_columns'])]
return rows
|
def get_primary_keys(self, table_name, echo=False):
Rec = namedtuple('PKs', 'schema table pk_name column_name column_order')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(col, pk_name, column_order):
r_dict = {}
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['pk_name'] = pk_name
r_dict['column_name'] = col
r_dict['column_order'] = column_order
return Rec(**r_dict)
sql_tmpl = self._template('metadata.primary_keys')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
r_dict = self.engine_inspect.get_pk_constraint(table, schema=schema)
rows = [get_rec(col, r_dict['name'], (i + 1)) for (i, col) in enumerate(r_dict['constrained_columns'])]
return rows<|docstring|>Get PK metadata for table<|endoftext|>
|
0d95e8c477a7ded1541044aba5fc36beeb11bc2d1a239db15b50350628a1509b
|
def get_indexes(self, table_name, echo=False):
'Get indexes metadata for table'
Rec = namedtuple('Indexes', 'schema table index_name column_name column_order unique')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict):
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['index_name'] = r_dict['name']
r_dict['unique'] = str(r_dict['unique'])
del r_dict['name']
for (i, col) in enumerate(r_dict['column_names']):
r_dict['column_name'] = col
r_dict['column_order'] = (i + 1)
(yield Rec(**r_dict))
sql_tmpl = self._template('metadata.indexes')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_indexes(table, schema=schema)
rows = [get_rec(r_dict) for r_dict in rows]
return rows
|
Get indexes metadata for table
|
xutil/database/base.py
|
get_indexes
|
flarco/n1slutil
| 1 |
python
|
def get_indexes(self, table_name, echo=False):
Rec = namedtuple('Indexes', 'schema table index_name column_name column_order unique')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict):
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['index_name'] = r_dict['name']
r_dict['unique'] = str(r_dict['unique'])
del r_dict['name']
for (i, col) in enumerate(r_dict['column_names']):
r_dict['column_name'] = col
r_dict['column_order'] = (i + 1)
(yield Rec(**r_dict))
sql_tmpl = self._template('metadata.indexes')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_indexes(table, schema=schema)
rows = [get_rec(r_dict) for r_dict in rows]
return rows
|
def get_indexes(self, table_name, echo=False):
Rec = namedtuple('Indexes', 'schema table index_name column_name column_order unique')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
def get_rec(r_dict):
r_dict['schema'] = schema
r_dict['table'] = table
r_dict['index_name'] = r_dict['name']
r_dict['unique'] = str(r_dict['unique'])
del r_dict['name']
for (i, col) in enumerate(r_dict['column_names']):
r_dict['column_name'] = col
r_dict['column_order'] = (i + 1)
(yield Rec(**r_dict))
sql_tmpl = self._template('metadata.indexes')
if sql_tmpl:
rows = self.query(sql_tmpl.format(table=table, schema=schema))
else:
self.get_engine(echo=echo)
rows = self.engine_inspect.get_indexes(table, schema=schema)
rows = [get_rec(r_dict) for r_dict in rows]
return rows<|docstring|>Get indexes metadata for table<|endoftext|>
|
9a0ed2162f0d01cf8331ddc4f0d94850bee8d082bcbaebb469838fa23f64b0a3
|
def get_ddl(self, table_name, object_type=None, echo=True):
'Get ddl for table'
Rec = namedtuple('DDL', 'ddl')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
sql_tmpl = self._template('metadata.ddl')
if sql_tmpl:
rows = self.query(sql_tmpl.format(schema=schema, table=table, obj_type=object_type))
else:
self.get_engine(echo=echo)
ddl = self.engine_inspect.get_view_definition(table, schema=schema)
rows = ([Rec(ddl)] if ddl else [])
self._fields = Rec._fields
return rows
|
Get ddl for table
|
xutil/database/base.py
|
get_ddl
|
flarco/n1slutil
| 1 |
python
|
def get_ddl(self, table_name, object_type=None, echo=True):
Rec = namedtuple('DDL', 'ddl')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
sql_tmpl = self._template('metadata.ddl')
if sql_tmpl:
rows = self.query(sql_tmpl.format(schema=schema, table=table, obj_type=object_type))
else:
self.get_engine(echo=echo)
ddl = self.engine_inspect.get_view_definition(table, schema=schema)
rows = ([Rec(ddl)] if ddl else [])
self._fields = Rec._fields
return rows
|
def get_ddl(self, table_name, object_type=None, echo=True):
Rec = namedtuple('DDL', 'ddl')
self._fields = Rec._fields
(schema, table) = self._split_schema_table(table_name)
sql_tmpl = self._template('metadata.ddl')
if sql_tmpl:
rows = self.query(sql_tmpl.format(schema=schema, table=table, obj_type=object_type))
else:
self.get_engine(echo=echo)
ddl = self.engine_inspect.get_view_definition(table, schema=schema)
rows = ([Rec(ddl)] if ddl else [])
self._fields = Rec._fields
return rows<|docstring|>Get ddl for table<|endoftext|>
|
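A hedged sketch of calling get_ddl from the row above; `conn`, the view name, and the object_type value are placeholders, not values taken from the source repository:

# Hypothetical connection instance; object_type is passed to the metadata.ddl
# template when one exists, otherwise the SQLAlchemy inspector's view definition is used.
ddl_rows = conn.get_ddl('analytics.daily_sales_v', object_type='VIEW')
if ddl_rows:
    print(ddl_rows[0].ddl)  # Rec is namedtuple('DDL', 'ddl'): a single text field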
e8c0beb21952202faabaa462699032b5930d50fd2c0b89a3a1921b582455fe6e
|
def get_all_columns(self):
'Get all columns for all tables / views'
sql_tmpl = self._template('metadata.all_columns')
if (not sql_tmpl):
raise Exception('get_all_columns not implemented for {}'.format(self.type))
rows = self.query(sql_tmpl)
return rows
|
Get all columns for all tables / views
|
xutil/database/base.py
|
get_all_columns
|
flarco/n1slutil
| 1 |
python
|
def get_all_columns(self):
sql_tmpl = self._template('metadata.all_columns')
if (not sql_tmpl):
raise Exception('get_all_columns not implemented for {}'.format(self.type))
rows = self.query(sql_tmpl)
return rows
|
def get_all_columns(self):
sql_tmpl = self._template('metadata.all_columns')
if (not sql_tmpl):
raise Exception('get_all_columns not implemented for {}'.format(self.type))
rows = self.query(sql_tmpl)
return rows<|docstring|>Get all columns for all tables / views<|endoftext|>
|
3de54e1433e8d5a6df6d14e4f53646ee88882ad2085a6fe30e0c5bb2b5a8928f
|
def get_all_tables(self, filter, as_sql=False):
'Get all tables / views'
sql_tmpl = self._template('metadata.all_tables')
if (not sql_tmpl):
raise Exception('get_all_tables not implemented for {}'.format(self.type))
sql = sql_tmpl.format(filter=filter)
return (sql if as_sql else self.query(sql, echo=False))
|
Get all tables / views
|
xutil/database/base.py
|
get_all_tables
|
flarco/n1slutil
| 1 |
python
|
def get_all_tables(self, filter, as_sql=False):
sql_tmpl = self._template('metadata.all_tables')
if (not sql_tmpl):
raise Exception('get_all_tables not implemented for {}'.format(self.type))
sql = sql_tmpl.format(filter=filter)
return (sql if as_sql else self.query(sql, echo=False))
|
def get_all_tables(self, filter, as_sql=False):
sql_tmpl = self._template('metadata.all_tables')
if (not sql_tmpl):
raise Exception('get_all_tables not implemented for {}'.format(self.type))
sql = sql_tmpl.format(filter=filter)
return (sql if as_sql else self.query(sql, echo=False))<|docstring|>Get all tables / views<|endoftext|>
|
f09b6282662a437fbcb281a37e77a5765bf00a20add3201c6fbc2f33cedfcaf1
|
def analyze_fields(self, analysis, table_name, fields=[], as_sql=False, union=True, expr_func_map={}, **kwargs):
'Base function for field level analysis\n expr_func_map: contains mapping for expression to SQL function to all fields\n '
if ('.' not in table_name):
raise Exception("table_name must have schema and name in it with a '.'")
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
(schema, table) = self._split_schema_table(table_name)
field_rows = self.get_columns(table_name)
field_type = {r.column_name.lower(): r.type for r in field_rows}
if (not fields):
fields = [r.column_name for r in field_rows]
for expr in list(expr_func_map):
tmpl_path = ('function.' + expr_func_map[expr])
expr_func_map[expr] = ',\n'.join([self._template(tmpl_path).format(field=field) for field in [r.column_name for r in field_rows]])
sep = (' \nunion all\n' if union else ' \n ;\n')
sql = sep.join([self._template(('analysis.' + analysis)).format(schema=schema, field=field, table=table, type=(field_type[field.lower()] if field else ''), **expr_func_map, **kwargs) for field in fields])
return (sql if as_sql else self.query(sql, analysis, echo=False))
|
Base function for field level analysis
expr_func_map: contains mapping for expression to SQL function to all fields
|
xutil/database/base.py
|
analyze_fields
|
flarco/n1slutil
| 1 |
python
|
def analyze_fields(self, analysis, table_name, fields=[], as_sql=False, union=True, expr_func_map={}, **kwargs):
'Base function for field level analysis\n expr_func_map: contains mapping for expression to SQL function to all fields\n '
if ('.' not in table_name):
raise Exception("table_name must have schema and name in it with a '.'")
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
(schema, table) = self._split_schema_table(table_name)
field_rows = self.get_columns(table_name)
field_type = {r.column_name.lower(): r.type for r in field_rows}
if (not fields):
fields = [r.column_name for r in field_rows]
for expr in list(expr_func_map):
tmpl_path = ('function.' + expr_func_map[expr])
expr_func_map[expr] = ',\n'.join([self._template(tmpl_path).format(field=field) for field in [r.column_name for r in field_rows]])
sep = (' \nunion all\n' if union else ' \n ;\n')
    sql = sep.join([self._template(('analysis.' + analysis)).format(schema=schema, field=field, table=table, type=(field_type[field.lower()] if field else ''), **expr_func_map, **kwargs) for field in fields])
return (sql if as_sql else self.query(sql, analysis, echo=False))
|
def analyze_fields(self, analysis, table_name, fields=[], as_sql=False, union=True, expr_func_map={}, **kwargs):
'Base function for field level analysis\n expr_func_map: contains mapping for expression to SQL function to all fields\n '
if ('.' not in table_name):
raise Exception("table_name must have schema and name in it with a '.'")
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
(schema, table) = self._split_schema_table(table_name)
field_rows = self.get_columns(table_name)
field_type = {r.column_name.lower(): r.type for r in field_rows}
if (not fields):
fields = [r.column_name for r in field_rows]
for expr in list(expr_func_map):
tmpl_path = ('function.' + expr_func_map[expr])
expr_func_map[expr] = ',\n'.join([self._template(tmpl_path).format(field=field) for field in [r.column_name for r in field_rows]])
sep = (' \nunion all\n' if union else ' \n ;\n')
    sql = sep.join([self._template(('analysis.' + analysis)).format(schema=schema, field=field, table=table, type=(field_type[field.lower()] if field else ''), **expr_func_map, **kwargs) for field in fields])
return (sql if as_sql else self.query(sql, analysis, echo=False))<|docstring|>Base function for field level analysis
expr_func_map: contains mapping for expression to SQL function to all fields<|endoftext|>
|
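A hedged sketch of analyze_fields above; the analysis name, table, and field list are placeholders, and the analysis key must exist under the connection's template_dict['analysis']:

# Hypothetical: 'field_stat' is assumed to be defined in template_dict['analysis']
# for this connection type, and the table must be qualified as schema.table.
sql = conn.analyze_fields('field_stat', 'sales.orders',
                          fields=['order_id', 'amount'], as_sql=True)
print(sql)  # with as_sql=False the generated SQL would be executed instead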
8317f34a3dbfb77308cd32ae8fde365408551b967d532b998dba93cf354577dc
|
def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
'Base function for table level analysis'
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
if ((not tables) and ('schema' in kwargs)):
rows = self.get_schemas(kwargs['schema'])
crt_obj = (lambda r: struct(dict(schema=r.schema, table=r.object_name)))
objs = [crt_obj(r) for r in rows]
else:
crt_obj = (lambda schema, table: struct(dict(schema=schema, table=table)))
objs = [crt_obj(*self._split_schema_table(t)) for t in tables]
sql = ' \nunion all\n'.join([self._template(('analysis.' + analysis)).format(schema=obj.schema, table=obj.table, **kwargs) for obj in objs])
return (sql if as_sql else self.query(sql, analysis, echo=False))
|
Base function for table level analysis
|
xutil/database/base.py
|
analyze_tables
|
flarco/n1slutil
| 1 |
python
|
def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
if ((not tables) and ('schema' in kwargs)):
rows = self.get_schemas(kwargs['schema'])
crt_obj = (lambda r: struct(dict(schema=r.schema, table=r.object_name)))
objs = [crt_obj(r) for r in rows]
else:
crt_obj = (lambda schema, table: struct(dict(schema=schema, table=table)))
objs = [crt_obj(*self._split_schema_table(t)) for t in tables]
sql = ' \nunion all\n'.join([self._template(('analysis.' + analysis)).format(schema=obj.schema, table=obj.table, **kwargs) for obj in objs])
return (sql if as_sql else self.query(sql, analysis, echo=False))
|
def analyze_tables(self, analysis, tables=[], as_sql=False, **kwargs):
if (analysis not in self.template_dict['analysis']):
raise Exception("'{}' not found in template for '{}'.".format(analysis, self.type))
if ((not tables) and ('schema' in kwargs)):
rows = self.get_schemas(kwargs['schema'])
crt_obj = (lambda r: struct(dict(schema=r.schema, table=r.object_name)))
objs = [crt_obj(r) for r in rows]
else:
crt_obj = (lambda schema, table: struct(dict(schema=schema, table=table)))
objs = [crt_obj(*self._split_schema_table(t)) for t in tables]
sql = ' \nunion all\n'.join([self._template(('analysis.' + analysis)).format(schema=obj.schema, table=obj.table, **kwargs) for obj in objs])
return (sql if as_sql else self.query(sql, analysis, echo=False))<|docstring|>Base function for table level analysis<|endoftext|>
|
a260aa7a8739bdb61974c40199ebb8772a0a7d2fc5f930a0e28dbd3b861f8a20
|
def create_or_get_cache_dir(self, module=''):
'create (if not exists) or return cache dir path for module'
cache_dir = '{}/{}'.format(self.__cache_dir, module)
if (not os.path.exists(cache_dir)):
os.makedirs(cache_dir)
return cache_dir
|
create (if not exists) or return cache dir path for module
|
ods/ods.py
|
create_or_get_cache_dir
|
open-datastudio/ods
| 6 |
python
|
def create_or_get_cache_dir(self, module=''):
cache_dir = '{}/{}'.format(self.__cache_dir, module)
if (not os.path.exists(cache_dir)):
os.makedirs(cache_dir)
return cache_dir
|
def create_or_get_cache_dir(self, module=''):
cache_dir = '{}/{}'.format(self.__cache_dir, module)
if (not os.path.exists(cache_dir)):
os.makedirs(cache_dir)
return cache_dir<|docstring|>create (if not exists) or return cache dir path for module<|endoftext|>
|
d859c845eb6f19e9d2955cf014752105af3425ddbe86cf9dd1eaab726106e560
|
def __init__(self, backup_policy=None):
'SetBackupPolicyRequestBody - a model defined in huaweicloud sdk'
self._backup_policy = None
self.discriminator = None
self.backup_policy = backup_policy
|
SetBackupPolicyRequestBody - a model defined in huaweicloud sdk
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__init__
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
def __init__(self, backup_policy=None):
self._backup_policy = None
self.discriminator = None
self.backup_policy = backup_policy
|
def __init__(self, backup_policy=None):
self._backup_policy = None
self.discriminator = None
self.backup_policy = backup_policy<|docstring|>SetBackupPolicyRequestBody - a model defined in huaweicloud sdk<|endoftext|>
|
ffe91840b6c38b35603932e289c3a98ce26743f1bd0391f98e1df272b79afbcf
|
@property
def backup_policy(self):
'Gets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :return: The backup_policy of this SetBackupPolicyRequestBody.\n :rtype: BackupPolicy\n '
return self._backup_policy
|
Gets the backup_policy of this SetBackupPolicyRequestBody.
:return: The backup_policy of this SetBackupPolicyRequestBody.
:rtype: BackupPolicy
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
backup_policy
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
@property
def backup_policy(self):
'Gets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :return: The backup_policy of this SetBackupPolicyRequestBody.\n :rtype: BackupPolicy\n '
return self._backup_policy
|
@property
def backup_policy(self):
'Gets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :return: The backup_policy of this SetBackupPolicyRequestBody.\n :rtype: BackupPolicy\n '
return self._backup_policy<|docstring|>Gets the backup_policy of this SetBackupPolicyRequestBody.
:return: The backup_policy of this SetBackupPolicyRequestBody.
:rtype: BackupPolicy<|endoftext|>
|
aed68daf82e9fb5f79e8e9543446f7f966de92a0609b77c9db45687446557ebd
|
@backup_policy.setter
def backup_policy(self, backup_policy):
'Sets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.\n :type: BackupPolicy\n '
self._backup_policy = backup_policy
|
Sets the backup_policy of this SetBackupPolicyRequestBody.
:param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.
:type: BackupPolicy
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
backup_policy
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
@backup_policy.setter
def backup_policy(self, backup_policy):
'Sets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.\n :type: BackupPolicy\n '
self._backup_policy = backup_policy
|
@backup_policy.setter
def backup_policy(self, backup_policy):
'Sets the backup_policy of this SetBackupPolicyRequestBody.\n\n\n :param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.\n :type: BackupPolicy\n '
self._backup_policy = backup_policy<|docstring|>Sets the backup_policy of this SetBackupPolicyRequestBody.
:param backup_policy: The backup_policy of this SetBackupPolicyRequestBody.
:type: BackupPolicy<|endoftext|>
|
23795442a46e2cd10dec98fded44ed9172a29971e98983a30ad89baa6c9c0a03
|
def to_dict(self):
'Returns the model properties as a dict'
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
|
Returns the model properties as a dict
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
to_dict
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result
|
def to_dict(self):
result = {}
for (attr, _) in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
elif (attr in self.sensitive_list):
result[attr] = '****'
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|>
|
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
|
def to_str(self):
'Returns the string representation of the model'
return pprint.pformat(self.to_dict())
|
Returns the string representation of the model
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
to_str
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
def to_str(self):
return pprint.pformat(self.to_dict())
|
def to_str(self):
return pprint.pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|>
|
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
|
def __repr__(self):
'For `print` and `pprint`'
return self.to_str()
|
For `print` and `pprint`
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__repr__
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
def __repr__(self):
return self.to_str()
|
def __repr__(self):
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|>
|
9c5bb75197376d0792d8d84beb70ed61c4b1366198ca62bd2dcf4d79c52078b2
|
def __eq__(self, other):
'Returns true if both objects are equal'
if (not isinstance(other, SetBackupPolicyRequestBody)):
return False
return (self.__dict__ == other.__dict__)
|
Returns true if both objects are equal
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__eq__
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
def __eq__(self, other):
if (not isinstance(other, SetBackupPolicyRequestBody)):
return False
return (self.__dict__ == other.__dict__)
|
def __eq__(self, other):
if (not isinstance(other, SetBackupPolicyRequestBody)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|>
|
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
|
def __ne__(self, other):
'Returns true if both objects are not equal'
return (not (self == other))
|
Returns true if both objects are not equal
|
huaweicloud-sdk-dds/huaweicloudsdkdds/v3/model/set_backup_policy_request_body.py
|
__ne__
|
githubmilesma/huaweicloud-sdk-python-v3
| 1 |
python
|
def __ne__(self, other):
return (not (self == other))
|
def __ne__(self, other):
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|>
|
9e07857f3269477dccc6c433756f9f9588b8241d31741ca2732848f7fa1a9212
|
def main() -> None:
'\n Entry point of this test project.\n '
ap.Stage(background_color='#333', stage_width=1000, stage_height=500)
sprite: ap.Sprite = ap.Sprite()
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
sprite.graphics.move_to(x=50, y=30)
sprite.graphics.line_to(x=450, y=30)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=20))
sprite.graphics.move_to(x=50, y=60)
sprite.graphics.line_to(x=450, y=60)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=20, space_size=0))
sprite.graphics.move_to(x=50, y=90)
sprite.graphics.line_to(x=450, y=90)
sprite.graphics.line_style(color='#0af', thickness=3)
sprite.graphics.move_to(x=40, y=120)
sprite.graphics.line_to(x=460, y=120)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
polyline: ap.Polyline = sprite.graphics.move_to(x=50, y=150)
sprite.graphics.line_to(x=450, y=150)
sprite.graphics.line_to(x=700, y=250)
sprite.graphics.line_to(x=700, y=150)
polyline.click(on_polyline_click)
ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
|
Entry point of this test project.
|
test_projects/line_round_dot_setting/main.py
|
main
|
ynsnf/apysc
| 16 |
python
|
def main() -> None:
'\n \n '
ap.Stage(background_color='#333', stage_width=1000, stage_height=500)
sprite: ap.Sprite = ap.Sprite()
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
sprite.graphics.move_to(x=50, y=30)
sprite.graphics.line_to(x=450, y=30)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=20))
sprite.graphics.move_to(x=50, y=60)
sprite.graphics.line_to(x=450, y=60)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=20, space_size=0))
sprite.graphics.move_to(x=50, y=90)
sprite.graphics.line_to(x=450, y=90)
sprite.graphics.line_style(color='#0af', thickness=3)
sprite.graphics.move_to(x=40, y=120)
sprite.graphics.line_to(x=460, y=120)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
polyline: ap.Polyline = sprite.graphics.move_to(x=50, y=150)
sprite.graphics.line_to(x=450, y=150)
sprite.graphics.line_to(x=700, y=250)
sprite.graphics.line_to(x=700, y=150)
polyline.click(on_polyline_click)
ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)
|
def main() -> None:
'\n \n '
ap.Stage(background_color='#333', stage_width=1000, stage_height=500)
sprite: ap.Sprite = ap.Sprite()
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
sprite.graphics.move_to(x=50, y=30)
sprite.graphics.line_to(x=450, y=30)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=20))
sprite.graphics.move_to(x=50, y=60)
sprite.graphics.line_to(x=450, y=60)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=20, space_size=0))
sprite.graphics.move_to(x=50, y=90)
sprite.graphics.line_to(x=450, y=90)
sprite.graphics.line_style(color='#0af', thickness=3)
sprite.graphics.move_to(x=40, y=120)
sprite.graphics.line_to(x=460, y=120)
sprite.graphics.line_style(color='#0af', round_dot_setting=ap.LineRoundDotSetting(round_size=10, space_size=10))
polyline: ap.Polyline = sprite.graphics.move_to(x=50, y=150)
sprite.graphics.line_to(x=450, y=150)
sprite.graphics.line_to(x=700, y=250)
sprite.graphics.line_to(x=700, y=150)
polyline.click(on_polyline_click)
ap.save_overall_html(dest_dir_path=_DEST_DIR_PATH)<|docstring|>Entry point of this test project.<|endoftext|>
|
2c1b782605df9260aec00e1754eacee1e1948b56b27fd18d6d0b42eb26a22c3f
|
def on_polyline_click(e: ap.MouseEvent[ap.Polyline], options: dict) -> None:
'\n Handler that called when polyline is clicked.\n\n Parameters\n ----------\n e : MouseEvent\n Created MouseEvent instance.\n options : dict\n Optional parameters.\n '
polyline: ap.Polyline = e.this
polyline.line_round_dot_setting = None
|
Handler that called when polyline is clicked.
Parameters
----------
e : MouseEvent
Created MouseEvent instance.
options : dict
Optional parameters.
|
test_projects/line_round_dot_setting/main.py
|
on_polyline_click
|
ynsnf/apysc
| 16 |
python
|
def on_polyline_click(e: ap.MouseEvent[ap.Polyline], options: dict) -> None:
'\n Handler that called when polyline is clicked.\n\n Parameters\n ----------\n e : MouseEvent\n Created MouseEvent instance.\n options : dict\n Optional parameters.\n '
polyline: ap.Polyline = e.this
polyline.line_round_dot_setting = None
|
def on_polyline_click(e: ap.MouseEvent[ap.Polyline], options: dict) -> None:
'\n Handler that called when polyline is clicked.\n\n Parameters\n ----------\n e : MouseEvent\n Created MouseEvent instance.\n options : dict\n Optional parameters.\n '
polyline: ap.Polyline = e.this
polyline.line_round_dot_setting = None<|docstring|>Handler that called when polyline is clicked.
Parameters
----------
e : MouseEvent
Created MouseEvent instance.
options : dict
Optional parameters.<|endoftext|>
|
18e9fb5fe461d7abb0b2f9cf7cde507b46370644f7d1f078e422ef9435f136c3
|
def log(self, message):
'\n Logs a message for analysis of model training.\n '
self._logger.log(message)
|
Logs a message for analysis of model training.
|
rafiki/model/log.py
|
log
|
Yirui-Wang/rafiki
| 1 |
python
|
def log(self, message):
'\n \n '
self._logger.log(message)
|
def log(self, message):
'\n \n '
self._logger.log(message)<|docstring|>Logs a message for analysis of model training.<|endoftext|>
|
e55efd90c01ca289ac0b9eedccb73a2908d9937c43c6f44f1c4ee030c9aeb67f
|
def define_loss_plot(self):
'\n Convenience method of defining a plot of ``loss`` against ``epoch``.\n To be used with ``log_loss_metric()``.\n '
self.define_plot('Loss Over Epochs', ['loss'], x_axis='epoch')
|
Convenience method of defining a plot of ``loss`` against ``epoch``.
To be used with ``log_loss_metric()``.
|
rafiki/model/log.py
|
define_loss_plot
|
Yirui-Wang/rafiki
| 1 |
python
|
def define_loss_plot(self):
'\n Convenience method of defining a plot of ``loss`` against ``epoch``.\n To be used with ``log_loss_metric()``.\n '
self.define_plot('Loss Over Epochs', ['loss'], x_axis='epoch')
|
def define_loss_plot(self):
'\n Convenience method of defining a plot of ``loss`` against ``epoch``.\n To be used with ``log_loss_metric()``.\n '
self.define_plot('Loss Over Epochs', ['loss'], x_axis='epoch')<|docstring|>Convenience method of defining a plot of ``loss`` against ``epoch``.
To be used with ``log_loss_metric()``.<|endoftext|>
|
624a76a79ff7b38efccea6f1bd42b258cab3b0daf7d1b642bba07cdc635e389e
|
def log_loss_metric(self, loss, epoch):
'\n Convenience method for logging `loss` against `epoch`.\n To be used with ``define_loss_plot()``.\n '
self.log_metrics(loss=loss, epoch=epoch)
|
Convenience method for logging `loss` against `epoch`.
To be used with ``define_loss_plot()``.
|
rafiki/model/log.py
|
log_loss_metric
|
Yirui-Wang/rafiki
| 1 |
python
|
def log_loss_metric(self, loss, epoch):
'\n Convenience method for logging `loss` against `epoch`.\n To be used with ``define_loss_plot()``.\n '
self.log_metrics(loss=loss, epoch=epoch)
|
def log_loss_metric(self, loss, epoch):
'\n Convenience method for logging `loss` against `epoch`.\n To be used with ``define_loss_plot()``.\n '
self.log_metrics(loss=loss, epoch=epoch)<|docstring|>Convenience method for logging `loss` against `epoch`.
To be used with ``define_loss_plot()``.<|endoftext|>
|
34b06862e88a7ff2d9a93a95375c875508ad3219f6e61d1e597928c7fbff9c90
|
def define_plot(self, title, metrics, x_axis=None):
'\n Defines a plot for a set of metrics for analysis of model training.\n By default, metrics will be plotted against time.\n '
self._logger.define_plot(title, metrics, x_axis)
|
Defines a plot for a set of metrics for analysis of model training.
By default, metrics will be plotted against time.
|
rafiki/model/log.py
|
define_plot
|
Yirui-Wang/rafiki
| 1 |
python
|
def define_plot(self, title, metrics, x_axis=None):
'\n Defines a plot for a set of metrics for analysis of model training.\n By default, metrics will be plotted against time.\n '
self._logger.define_plot(title, metrics, x_axis)
|
def define_plot(self, title, metrics, x_axis=None):
'\n Defines a plot for a set of metrics for analysis of model training.\n By default, metrics will be plotted against time.\n '
self._logger.define_plot(title, metrics, x_axis)<|docstring|>Defines a plot for a set of metrics for analysis of model training.
By default, metrics will be plotted against time.<|endoftext|>
|
365392fcb18608671432af15e629a5e0a6ec4b5a2553527d04403090b5bfa5f9
|
def log_metrics(self, **kwargs):
'\n Logs metrics for a single point in time { <metric>: <value> }.\n <value> should be a number.\n '
self._logger.log_metrics(**kwargs)
|
Logs metrics for a single point in time { <metric>: <value> }.
<value> should be a number.
|
rafiki/model/log.py
|
log_metrics
|
Yirui-Wang/rafiki
| 1 |
python
|
def log_metrics(self, **kwargs):
'\n Logs metrics for a single point in time { <metric>: <value> }.\n <value> should be a number.\n '
self._logger.log_metrics(**kwargs)
|
def log_metrics(self, **kwargs):
'\n Logs metrics for a single point in time { <metric>: <value> }.\n <value> should be a number.\n '
self._logger.log_metrics(**kwargs)<|docstring|>Logs metrics for a single point in time { <metric>: <value> }.
<value> should be a number.<|endoftext|>
|
aad6d434a880a23e02d1c825102d3b786f960dae1342bbd1686c303dd4391e95
|
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
'\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n '
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
        cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
print('Normalized confusion matrix')
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = (cm.max() / 2.0)
for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black'))
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
|
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
|
TrainValue/multiclass_svm.py
|
plot_confusion_matrix
|
xuanthuong/DOU-SI
| 0 |
python
|
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
'\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n '
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
        cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
print('Normalized confusion matrix')
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = (cm.max() / 2.0)
for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black'))
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
|
def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):
'\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n '
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
if normalize:
        cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis])
print('Normalized confusion matrix')
else:
print('Confusion matrix, without normalization')
print(cm)
thresh = (cm.max() / 2.0)
for (i, j) in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, cm[(i, j)], horizontalalignment='center', color=('white' if (cm[(i, j)] > thresh) else 'black'))
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')<|docstring|>This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.<|endoftext|>
|
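A small, hedged example of driving plot_confusion_matrix above with a toy confusion matrix; the labels and predictions are made up, and scikit-learn is assumed to be available (the surrounding module already imports numpy, itertools, and matplotlib.pyplot as plt):

from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt

y_true = ['cat', 'dog', 'cat', 'dog', 'dog']
y_pred = ['cat', 'cat', 'cat', 'dog', 'dog']
cm = confusion_matrix(y_true, y_pred, labels=['cat', 'dog'])
plot_confusion_matrix(cm, classes=['cat', 'dog'], normalize=True)
plt.show()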
f9c04a07ca203621acf60f2b6d7fd185ff62290499797d71389366c347178b63
|
def GetHumanReadable(size, precision=2):
'Takes a byte sized input and computes the closest\n human readable format, e.g., in megabytes etc.'
suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
suffixIndex = 0
while ((size > 1024) and (suffixIndex < 4)):
suffixIndex += 1
size = (size / 1024)
return ('%.*f%s' % (precision, size, suffixes[suffixIndex]))
|
Takes a byte sized input and computes the closest
human readable format, e.g., in megabytes etc.
|
exercise_05/exercise_code/networks/compute_network_size.py
|
GetHumanReadable
|
Sihifu/i2dl
| 0 |
python
|
def GetHumanReadable(size, precision=2):
'Takes a byte sized input and computes the closest\n human readable format, e.g., in megabytes etc.'
suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
suffixIndex = 0
while ((size > 1024) and (suffixIndex < 4)):
suffixIndex += 1
size = (size / 1024)
return ('%.*f%s' % (precision, size, suffixes[suffixIndex]))
|
def GetHumanReadable(size, precision=2):
'Takes a byte sized input and computes the closest\n human readable format, e.g., in megabytes etc.'
suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
suffixIndex = 0
while ((size > 1024) and (suffixIndex < 4)):
suffixIndex += 1
size = (size / 1024)
return ('%.*f%s' % (precision, size, suffixes[suffixIndex]))<|docstring|>Takes a byte sized input and computes the closest
human readable format, e.g., in megabytes etc.<|endoftext|>
|
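Two worked calls to GetHumanReadable above, tracing the loop as written (values chosen only for illustration):

print(GetHumanReadable(2048))           # one division: 2.0 -> '2.00KB'
print(GetHumanReadable(5 * 1024 ** 3))  # three divisions land on gigabytes: '5.00GB'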