Dataset schema (one record per source file; ⌀ marks a nullable column):

| column | dtype | observed range / classes |
|---|---|---|
| blob_id | string | lengths 40 to 40 |
| directory_id | string | lengths 40 to 40 |
| path | string | lengths 3 to 616 |
| content_id | string | lengths 40 to 40 |
| detected_licenses | list | lengths 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | lengths 5 to 115 |
| snapshot_id | string | lengths 40 to 40 |
| revision_id | string | lengths 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, ⌀ |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, ⌀ |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | lengths 3 to 10.2M |
| authors | list | lengths 1 to 1 |
| author_id | string | lengths 1 to 132 |

The data rows below follow this column order:

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
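
A dump with this schema is far easier to filter programmatically than to read flat. A minimal sketch, assuming the rows come from a Hugging Face dataset loadable with the `datasets` library (the dataset path here is a placeholder, not something stated in this file):

```python
from datasets import load_dataset

# Placeholder path -- substitute the actual source of this dump.
ds = load_dataset("org/source-code-dump", split="train", streaming=True)

for row in ds.take(3):
    # Each record carries the columns listed in the schema above.
    print(row["repo_name"], row["path"], row["length_bytes"])
```
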
e8493a1b631c82cd20a805041411a0ddabce63d0 | 5e5b8a66d35be6b86d3754069613fe49108a700d | /scripts/webquestions-preprocessing/paraphrase_rules.py | b8ff50290c2c4c922205a06b3aa5feddffdd7d10 | [
"CC-BY-4.0"
]
| permissive | saraswat/graph-parser | e77f9880f38d1d23cf5aebb149be997d9c715745 | da8800503174dce0590a55b817cd024354e41d9e | refs/heads/master | 2021-01-11T09:01:50.414615 | 2016-12-23T13:17:59 | 2016-12-23T13:17:59 | 77,409,845 | 1 | 0 | null | 2016-12-26T22:26:28 | 2016-12-26T22:26:28 | null | UTF-8 | Python | false | false | 3,122 | py | '''
Created on 26 May 2014
@author: siva
'''
import json
import re
import sys
for line in sys.stdin:
line = json.loads(line)
# print line
# sentence = line['sentence']
sentence = " ".join([word["word"] for word in line["words"]])
if re.search(" do \?$", sentence):
# what did Einstein do?
# sentence = re.sub(" do\?$", " serve as\?", sentence)
words = line['words']
words.pop(-1)
words.pop(-1)
word = { "word" : "profession", "ner" : "0"}
words.append(word)
word = { "word" : "?", "ner" : "0"}
words.append(word)
for word in words:
if word['word'] == 'did' or word['word'] == 'do' or word['word'] == 'does':
word['word'] = 'is'
if re.search("Where ((is)|(was)) .* from \?$", sentence):
# where is Obama from ?
#sentence = re.sub(" from\?$", " born in ?", sentence)
words = line['words']
entities = line['entities']
check = False
for entity in entities:
if entity["index"] == len(words) - 3:
check = True
if check:
words.pop(-1)
words.pop(-1)
word = { "word" : "born", "ner" : "0"}
words.append(word)
word = { "word" : "in", "ner" : "0"}
words.append(word)
word = { "word" : "?", "ner" : "0"}
words.append(word)
'''if re.search("((name)|(type)|(kind))", sentence):
# What is the name of the president of US
#sentence = re.sub(" the ((name[s]?)|(type[s]?)|(kind[s]?)) of", "", sentence)
#sentence = re.sub(" ((name[s]?)|(type[s]?)|(kind[s]?)) of", "", sentence)
#sentence = re.sub(" ((name[s]?)|(type[s]?)|(kind[s]?))", "", sentence)
words = line['words']
entities = line['entities']
for i, word in enumerate(words):
if re.match("((name)|(kind)|(type))", word['word']):
if len(words) > i + 1 and words[i + 1]["word"] == "of":
words.pop(i)
words.pop(i)
for entity in entities:
if entity["index"] > i:
entity["index"] += -2
else:
words.pop(i)
if words[i - 1]["word"] == "the" or words[i - 1]["word"] == "a":
words.pop(i - 1)
for entity in entities:
if entity["index"] > i - 1:
entity["index"] += -1
break'''
sentence_mod = " ".join([word["word"] for word in line["words"]])
# print sentence_mod
if re.match("((What)|(Who)) ((is)|(was)) [^\s]+ \?", sentence_mod):
words = line["words"]
words[0] = {"word" : "What", "ner" : "0"}
words[1] = {"word" : "is", "ner" : "0"}
words[3] = {"word" : "'s", "ner" : "0"}
words.append({"word" : "profession", "ner" : "0"})
words.append({"word" : "?", "ner" : "0"})
print json.dumps(line) | [
"[email protected]"
]
| |
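
An aside on the record above: the " do ?" rewrite in paraphrase_rules.py is easiest to follow on a concrete record. Below is a self-contained Python 3 sketch with a hypothetical input shaped like the ones the script reads (this is not an official sample from the dataset):

```python
import json
import re

# One hypothetical parse record: "what did Einstein do ?"
line = {"words": [{"word": w, "ner": "0"}
                  for w in ["what", "did", "Einstein", "do", "?"]]}
sentence = " ".join(w["word"] for w in line["words"])

if re.search(r" do \?$", sentence):
    words = line["words"]
    words.pop(-1)   # drop "?"
    words.pop(-1)   # drop "do"
    words += [{"word": "profession", "ner": "0"}, {"word": "?", "ner": "0"}]
    for w in words:
        if w["word"] in ("did", "do", "does"):
            w["word"] = "is"

print(json.dumps(line))  # tokens now read: what is Einstein profession ?
```
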
e1a70889e373ca860d381781148acddcf9e13a57 | d1ddb9e9e75d42986eba239550364cff3d8f5203 | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/cloudiot/v1beta1/cloudiot_v1beta1_client.py | b2ff82c30ed6816f1ea9058a0ee4fe9536f38a48 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
]
| permissive | bopopescu/searchparty | 8ecd702af0d610a7ad3a8df9c4d448f76f46c450 | afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6 | refs/heads/master | 2022-11-19T14:44:55.421926 | 2017-07-28T14:55:43 | 2017-07-28T14:55:43 | 282,495,798 | 0 | 0 | Apache-2.0 | 2020-07-25T17:48:53 | 2020-07-25T17:48:52 | null | UTF-8 | Python | false | false | 21,034 | py | """Generated client library for cloudiot version v1beta1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.py import base_api
from googlecloudsdk.third_party.apis.cloudiot.v1beta1 import cloudiot_v1beta1_messages as messages
class CloudiotV1beta1(base_api.BaseApiClient):
"""Generated client library for service cloudiot version v1beta1."""
MESSAGES_MODULE = messages
BASE_URL = u'https://cloudiot.googleapis.com/'
_PACKAGE = u'cloudiot'
_SCOPES = [u'https://www.googleapis.com/auth/cloud-platform', u'https://www.googleapis.com/auth/cloudiot']
_VERSION = u'v1beta1'
_CLIENT_ID = '1042881264118.apps.googleusercontent.com'
_CLIENT_SECRET = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_USER_AGENT = 'x_Tw5K8nnjoRAqULM9PFAC2b'
_CLIENT_CLASS_NAME = u'CloudiotV1beta1'
_URL_VERSION = u'v1beta1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None):
"""Create a new cloudiot handle."""
url = url or self.BASE_URL
super(CloudiotV1beta1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers)
self.projects_locations_registries_devices_configVersions = self.ProjectsLocationsRegistriesDevicesConfigVersionsService(self)
self.projects_locations_registries_devices = self.ProjectsLocationsRegistriesDevicesService(self)
self.projects_locations_registries = self.ProjectsLocationsRegistriesService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsRegistriesDevicesConfigVersionsService(base_api.BaseApiService):
"""Service class for the projects_locations_registries_devices_configVersions resource."""
_NAME = u'projects_locations_registries_devices_configVersions'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesDevicesConfigVersionsService, self).__init__(client)
self._upload_configs = {
}
def List(self, request, global_params=None):
"""Lists the last few versions of the device configuration in descending.
order (i.e.: newest first).
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesConfigVersionsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDeviceConfigVersionsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}/configVersions',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.configVersions.list',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'numVersions'],
relative_path=u'v1beta1/{+name}/configVersions',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesConfigVersionsListRequest',
response_type_name=u'ListDeviceConfigVersionsResponse',
supports_download=False,
)
class ProjectsLocationsRegistriesDevicesService(base_api.BaseApiService):
"""Service class for the projects_locations_registries_devices resource."""
_NAME = u'projects_locations_registries_devices'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesDevicesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a device in a device registry.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.devices.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}/devices',
request_field=u'device',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesCreateRequest',
response_type_name=u'Device',
supports_download=False,
)
def Delete(self, request, global_params=None):
"""Deletes a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'DELETE',
method_id=u'cloudiot.projects.locations.registries.devices.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Gets details about a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesGetRequest',
response_type_name=u'Device',
supports_download=False,
)
def List(self, request, global_params=None):
"""List devices in a device registry.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDevicesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.devices.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'deviceIds', u'deviceNumIds', u'fieldMask', u'pageSize', u'pageToken'],
relative_path=u'v1beta1/{+parent}/devices',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesListRequest',
response_type_name=u'ListDevicesResponse',
supports_download=False,
)
def ModifyCloudToDeviceConfig(self, request, global_params=None):
"""Modifies the configuration for the device, which is eventually sent from.
the Cloud IoT servers. Returns the modified configuration version and its
meta-data.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceConfig) The response message.
"""
config = self.GetMethodConfig('ModifyCloudToDeviceConfig')
return self._RunMethod(
config, request, global_params=global_params)
ModifyCloudToDeviceConfig.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}:modifyCloudToDeviceConfig',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.devices.modifyCloudToDeviceConfig',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}:modifyCloudToDeviceConfig',
request_field=u'modifyCloudToDeviceConfigRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesModifyCloudToDeviceConfigRequest',
response_type_name=u'DeviceConfig',
supports_download=False,
)
def Patch(self, request, global_params=None):
"""Updates a device.
Args:
request: (CloudiotProjectsLocationsRegistriesDevicesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Device) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}/devices/{devicesId}',
http_method=u'PATCH',
method_id=u'cloudiot.projects.locations.registries.devices.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'updateMask'],
relative_path=u'v1beta1/{+name}',
request_field=u'device',
request_type_name=u'CloudiotProjectsLocationsRegistriesDevicesPatchRequest',
response_type_name=u'Device',
supports_download=False,
)
class ProjectsLocationsRegistriesService(base_api.BaseApiService):
"""Service class for the projects_locations_registries resource."""
_NAME = u'projects_locations_registries'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsRegistriesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
"""Creates a device registry that contains devices.
Args:
request: (CloudiotProjectsLocationsRegistriesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.create',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[],
relative_path=u'v1beta1/{+parent}/registries',
request_field=u'deviceRegistry',
request_type_name=u'CloudiotProjectsLocationsRegistriesCreateRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def Delete(self, request, global_params=None):
"""Deletes a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'DELETE',
method_id=u'cloudiot.projects.locations.registries.delete',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesDeleteRequest',
response_type_name=u'Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
"""Gets a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.get',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[],
relative_path=u'v1beta1/{+name}',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesGetRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def GetIamPolicy(self, request, global_params=None):
"""Gets the access control policy for a resource.
Returns an empty policy if the resource exists and does not have a policy
set.
Args:
request: (CloudiotProjectsLocationsRegistriesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:getIamPolicy',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.getIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:getIamPolicy',
request_field=u'getIamPolicyRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesGetIamPolicyRequest',
response_type_name=u'Policy',
supports_download=False,
)
def List(self, request, global_params=None):
"""Lists device registries.
Args:
request: (CloudiotProjectsLocationsRegistriesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListDeviceRegistriesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries',
http_method=u'GET',
method_id=u'cloudiot.projects.locations.registries.list',
ordered_params=[u'parent'],
path_params=[u'parent'],
query_params=[u'pageSize', u'pageToken'],
relative_path=u'v1beta1/{+parent}/registries',
request_field='',
request_type_name=u'CloudiotProjectsLocationsRegistriesListRequest',
response_type_name=u'ListDeviceRegistriesResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
"""Updates a device registry configuration.
Args:
request: (CloudiotProjectsLocationsRegistriesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(DeviceRegistry) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}',
http_method=u'PATCH',
method_id=u'cloudiot.projects.locations.registries.patch',
ordered_params=[u'name'],
path_params=[u'name'],
query_params=[u'updateMask'],
relative_path=u'v1beta1/{+name}',
request_field=u'deviceRegistry',
request_type_name=u'CloudiotProjectsLocationsRegistriesPatchRequest',
response_type_name=u'DeviceRegistry',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
"""Sets the access control policy on the specified resource. Replaces any.
existing policy.
Args:
request: (CloudiotProjectsLocationsRegistriesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:setIamPolicy',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.setIamPolicy',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:setIamPolicy',
request_field=u'setIamPolicyRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesSetIamPolicyRequest',
response_type_name=u'Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
"""Returns permissions that a caller has on the specified resource.
If the resource does not exist, this will return an empty set of
permissions, not a NOT_FOUND error.
Args:
request: (CloudiotProjectsLocationsRegistriesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path=u'v1beta1/projects/{projectsId}/locations/{locationsId}/registries/{registriesId}:testIamPermissions',
http_method=u'POST',
method_id=u'cloudiot.projects.locations.registries.testIamPermissions',
ordered_params=[u'resource'],
path_params=[u'resource'],
query_params=[],
relative_path=u'v1beta1/{+resource}:testIamPermissions',
request_field=u'testIamPermissionsRequest',
request_type_name=u'CloudiotProjectsLocationsRegistriesTestIamPermissionsRequest',
response_type_name=u'TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = u'projects_locations'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = u'projects'
def __init__(self, client):
super(CloudiotV1beta1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
| [
"[email protected]"
]
| |
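
A minimal usage sketch for the generated client above. This assumes the apitools runtime and the generated messages module are importable, and it skips real credentials, so it only illustrates the call shape (the project and location names are placeholders):

```python
from googlecloudsdk.third_party.apis.cloudiot.v1beta1 import (
    cloudiot_v1beta1_client as cloudiot_client,
)
from googlecloudsdk.third_party.apis.cloudiot.v1beta1 import (
    cloudiot_v1beta1_messages as messages,
)

# get_credentials=False: skip the auth flow, illustration only.
client = cloudiot_client.CloudiotV1beta1(get_credentials=False)
request = messages.CloudiotProjectsLocationsRegistriesListRequest(
    parent='projects/my-project/locations/us-central1')
response = client.projects_locations_registries.List(request)
for registry in response.deviceRegistries:  # field name per the v1beta1 messages
    print(registry.id)
```
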
e0f9e0cc67afaf29f291926c9c6aa95c05deb166 | 5792baf9e18ad91816cc42f4725b099a4dce7b7b | /HackerRank/Strings/Python sWap cASE.py | 9e524564145bac64f1ed70970b832d5b588f495a | []
| no_license | deepakorantak/Python | 83b6782db0b5428d47fbc29193076e8ed5f5e285 | 9781133ce5a5c6f87efb5d4aa132a63ba1290f76 | refs/heads/master | 2020-03-23T19:55:30.075700 | 2019-02-19T06:24:42 | 2019-02-19T06:24:42 | 142,010,440 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py | def swap_case(s):
    return s.swapcase()  # equivalent to str.swapcase(s); the method form is idiomatic
if __name__ == '__main__':
s = input()
if len(s) > 0 and len(s) <= 1000:
result = swap_case(s)
print(result) | [
"[email protected]"
]
| |
d226826efc7925a38771ffa80e803b71f8684253 | 288a00d2ab34cba6c389b8c2444455aee55a8a95 | /tests/test_overwrites.py | 6be0434f5d59a65c73dba6e837e5662c22636de7 | [
"BSD-2-Clause"
]
| permissive | JohannesBuchner/pystrict3 | ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb | 18b0dd369082422f9bf0f89c72e7acb53a49849c | refs/heads/master | 2023-08-14T06:37:37.954880 | 2023-07-13T11:16:38 | 2023-07-13T11:16:38 | 268,571,175 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | import ast
from hypothesis import given
from hypothesis.strategies import text
from pystrict3lib import assert_unknown, preknown
def test_assert_unknown():
node = ast.parse("print('hello world')").body[0]
known = {}
assert_unknown("name", known, node, "filename")
def test_assert_known():
node = ast.parse("print('hello world')").body[0]
known = {}
assert_unknown("name", known, node, "filename")
| [
"[email protected]"
]
| |
50509f1fcaee6a8db649657d24ee5a29044b19e6 | 6932a9ae700a623f16a3aef417d0598cf6d4f389 | /karasu_speak.py | c8c028b30786e6c5b67abc979a0d40f60e63f06a | [
"MIT"
]
| permissive | MuAuan/hirakegoma | 9f1a252d913749a2c16ae5bd7a8870550048d26d | 861879af1016c25b7a14bcabe543bfba47fd57f3 | refs/heads/master | 2020-04-27T20:12:25.315594 | 2019-03-24T12:38:30 | 2019-03-24T12:38:30 | 174,649,241 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,935 | py | # -*- coding: utf-8 -*-
import cv2
import pyaudio
import sys
import time
import wave
import pydub
from pydub import AudioSegment
import moviepy.editor as mp
import datetime
import os
from vgg16_like import model_family_cnn
from keras.preprocessing import image
import matplotlib.pyplot as plt
import keras
import numpy as np
def prediction(imgSrc,model):
#np.random.seed(1337) # for reproducibility
img_rows,img_cols=128, 128
img = np.array(imgSrc)
img = img.reshape(1, img_rows,img_cols,3)
img = img.astype('float32')
img /= 255
t0=time.time()
y_pred = model.predict(img)
return y_pred
def karasu_responder(model,path,img_rows,img_cols):
imgSrc=[]
#for j in range(0,100000,1):
# j += 1
imgSrc = image.load_img(path, target_size=(img_rows,img_cols))
#plt.imshow(imgSrc)
#plt.pause(1)
#plt.close()
pred = prediction(imgSrc,model)
#print(pred[0])
if pred[0][0]>=0.5:
filename = "karasu-miyama_out1.wav"
print("angry")
elif pred[0][1]>=0.5:
#filename = "karasu_kero_out3.wav"
filename = "karasu-normal_out1.wav"
print("normal")
elif pred[0][2]>=0.5:
#filename = "karasu_kero_out1.wav"
filename = "karasu-others_out1.wav" #karasu-hageshii_out.wav
print("others")
return filename
num_classes = 3
img_rows,img_cols=128, 128
input_shape = (img_rows,img_cols,3)
model = model_family_cnn(input_shape, num_classes = num_classes)
# load the weights from the last epoch
model.load_weights('params_karasu-0angry-1normal-2others.hdf5', by_name=True)
print('Model loaded.')
path = "./out_test/figure.jpg"
img_rows,img_cols=128,128
s=0
while True:
if os.path.exists(path)==True:
s += 1
for j in range(0,50000000,1):
j += 1
"""
if s%3 == 0:
path="./out_test/figure_angry.jpg"
elif s%3 == 1:
path="./out_test/figure_normal.jpg"
else:
path="./out_test/figure_others.jpg"
"""
filename=karasu_responder(model,path,img_rows,img_cols)
        wf = wave.open(filename, "rb")
        # number of frames to read per chunk
        CHUNK1 = 1024
        #filename = "hirakegoma.wav"
        # create a PyAudio instance
        p1 = pyaudio.PyAudio()
        # open an output stream matching the wav file's parameters
        stream1 = p1.open(format=p1.get_format_from_width(wf.getsampwidth()),
                          channels=wf.getnchannels(),
                          rate=wf.getframerate(),
                          output=True)
        # read 1024 frames at a time
        input1 = wf.readframes(CHUNK1)
        # play until the file is exhausted
while stream1.is_active():
output = stream1.write(input1)
input1 = wf.readframes(CHUNK1)
if input1==b'':
os.remove(path)
break
| [
"[email protected]"
]
| |
fb94fc1597debf5a7a51e313349f8349d6bfb26d | 0cc4eb3cb54f8394c127ace62d3108fdb5230c85 | /.spack-env/view/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/_codecs.pyi | cc46a5a2b0b4513b177439dd5c53dfa3f3058b1e | []
| no_license | jacobmerson/spack-develop-env | 5b2d76f58c0b64ae97c64f77a3c4d33a770c71c8 | 5fca20ca343b1a76f05fc635c87f94ed25417d94 | refs/heads/master | 2022-07-04T02:22:50.264727 | 2020-05-06T05:13:50 | 2020-05-06T05:13:50 | 261,657,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | pyi | /lore/mersoj/spack/spack/opt/spack/linux-rhel7-x86_64/gcc-7.3.0/py-jedi-0.17.0-zugnvpgjfmuk5x4rfhhxlsknl2g226yt/lib/python3.7/site-packages/jedi/third_party/typeshed/stdlib/2and3/_codecs.pyi | [
"[email protected]"
]
| |
be9cf6de41337a706ff9fa46d7816b99d1f552a0 | b306aab9dcea2dd83dda700bc9f7b9f1a32cff3a | /CAIL2021/slsb/main.py | f67c06674df00f1d0948662b5528d9c5174dd6c3 | [
"Apache-2.0"
]
| permissive | Tulpen/CAIL | d6ca9981c7ea2603ae61675ba330a9614cd9398d | c4cfa98ab4ecedbce34a7a5a186830486047540c | refs/heads/master | 2023-04-23T20:07:56.774530 | 2021-04-16T13:18:36 | 2021-04-16T13:18:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,066 | py | """Test model for SMP-CAIL2020-Argmine.
Author: Tsinghuaboy [email protected]
Usage:
python main.py --model_config 'config/bert_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'bert-submission-test-1.csv'
python main.py --model_config 'config/rnn_config.json' \
--in_file 'data/SMP-CAIL2020-test1.csv' \
--out_file 'rnn-submission-test-1.csv'
"""
import argparse
import itertools
import json
import os
import re
from types import SimpleNamespace
import fire
import pandas as pd
import torch
from torch.utils.data import DataLoader
from data import Data
from evaluate import evaluate, handy_tool, calculate_accuracy_f1
from model import RnnForSentencePairClassification, BertYForClassification, NERNet,NERWNet
from utils import load_torch_model
LABELS = ['1', '2', '3', '4', '5']
MODEL_MAP = {
'bert': BertYForClassification,
'rnn': NERNet,
'rnnkv': NERWNet
}
all_types = ['LAK', 'OTH', 'HYD', 'ORG', 'LOC', 'RIV', 'RES', 'TER', 'DAM', 'PER']
def result_to_json(string, tags):
item = {"string": string, "entities": []}
entity_name = ""
entity_start = 0
idx = 0
i = -1
zipped = zip(string, tags)
listzip = list(zipped)
    last = len(listzip) - 1  # index of the final character (len(listzip) would never match i)
for char, tag in listzip:
i += 1
if tag == 0:
item["entities"].append({"word": char, "start": idx, "end": idx+1, "type":'s'})
elif (tag % 3) == 1:
entity_name += char
entity_start = idx
elif (tag % 3) == 2:
type_index = (tag-1) // 3
if (entity_name != "") and (i == last):
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
entity_name = ""
else:
entity_name += char
        elif tag % 3 == 0:  # end-of-entity tag; equivalent to the original "(tag % 3)+3 == 3"
type_index = (tag-1) // 3
entity_name += char
item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": all_types[type_index]})
entity_name = ""
else:
entity_name = ""
entity_start = idx
idx += 1
return item
def remove(text):
cleanr = re.compile(r"[ !#\$%&'\(\)*\+,-./:;<=>?@\^_`{|}~“”?!【】()、’‘…¥·]*")
cleantext = re.sub(cleanr, '', text)
return cleantext
def main(out_file='output/result.json',
model_config='config/rnn_config.json'):
"""Test model for given test set on 1 GPU or CPU.
Args:
in_file: file to be tested
out_file: output file
model_config: config file
"""
# 0. Load config
with open(model_config) as fin:
config = json.load(fin, object_hook=lambda d: SimpleNamespace(**d))
if torch.cuda.is_available():
device = torch.device('cuda')
# device = torch.device('cpu')
else:
device = torch.device('cpu')
#0. preprocess file
# id_list = []
# with open(in_file, 'r', encoding='utf-8') as fin:
# for line in fin:
# sents = json.loads(line.strip())
# id = sents['id']
# id_list.append(id)
# id_dict = dict(zip(range(len(id_list)), id_list))
# 1. Load data
data = Data(vocab_file=os.path.join(config.model_path, 'vocab.txt'),
max_seq_len=config.max_seq_len,
model_type=config.model_type, config=config)
test_set, sc_list, label_list = data.load_file(config.test_file_path, train=False)
token_list = []
for line in sc_list:
tokens = data.tokenizer.convert_ids_to_tokens(line)
token_list.append(tokens)
data_loader_test = DataLoader(
test_set, batch_size=config.batch_size, shuffle=False)
# 2. Load model
model = MODEL_MAP[config.model_type](config)
model = load_torch_model(
model, model_path=os.path.join(config.model_path, 'model.bin'))
model.to(device)
# 3. Evaluate
answer_list, length_list = evaluate(model, data_loader_test, device, isTest=True)
def flatten(ll):
return list(itertools.chain(*ll))
# train_answers = handy_tool(label_list, length_list) #gold
# #answer_list = handy_tool(answer_list, length_list) #prediction
# train_answers = flatten(train_answers)
# train_predictions = flatten(answer_list)
#
# train_acc, train_f1 = calculate_accuracy_f1(
# train_answers, train_predictions)
# print(train_acc, train_f1)
test_json = json.load(open(config.test_file_path, 'r', encoding='utf-8'))
id_list = [item['id'] for item in test_json]
mod_tokens_list = handy_tool(token_list, length_list)
result = [result_to_json(t, s) for t,s in zip(mod_tokens_list, answer_list)]
# 4. Write answers to file
with open(out_file, 'w', encoding='utf8') as fout:
result_list = []
for id, item in zip(id_list,result):
entities = item['entities']
words = [d['word']+"-"+d['type'] for d in entities if d['type'] !='s']
unique_words = []
for w in words:
if w not in unique_words:
unique_words.append(w)
item = {}
item['id'] = id
item['entities'] = unique_words
result_list.append(item)
json.dump(result_list,fout,ensure_ascii=False, indent=4)
#fout.write(" ".join(words) + "\n")
# para_list = pd.read_csv(temp_file)['para'].to_list()
# summary_dict = dict(zip(id_dict.values(), [""] * len(id_dict)))
#
# result = zip(para_list, token_list)
# for id, summary in result:
# summary_dict[id_dict[id]] += remove(summary).replace(" ","")
#
# with open(out_file, 'w', encoding='utf8') as fout:
# for id, sumamry in summary_dict.items():
# fout.write(json.dumps({'id':id,'summary':sumamry}, ensure_ascii=False) + '\n')
if __name__ == '__main__':
fire.Fire(main)
| [
"[email protected]"
]
| |
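
The tag scheme consumed by result_to_json in the record above is easiest to see on a toy input. The sketch below mirrors the branch structure of that function (0 = single non-entity character, tag % 3 == 1 begins an entity, == 2 continues one, == 0 with tag > 0 ends one, and (tag - 1) // 3 indexes all_types); the tag values here are invented for the demo:

```python
all_types = ['LAK', 'OTH', 'HYD', 'ORG', 'LOC', 'RIV', 'RES', 'TER', 'DAM', 'PER']

def decode(chars, tags):
    """Minimal re-implementation of the span logic, for illustration."""
    entities, name, start = [], "", 0
    for i, (ch, tag) in enumerate(zip(chars, tags)):
        if tag == 0:
            continue                      # single, non-entity character
        if tag % 3 == 1:                  # begin
            name, start = ch, i
        elif tag % 3 == 2:                # inside
            name += ch
        else:                             # end (tag % 3 == 0, tag > 0)
            name += ch
            entities.append((name, start, i + 1, all_types[(tag - 1) // 3]))
            name = ""
    return entities

# "RIV" is type index 5, so its begin tag is 16 and its end tag is 18.
print(decode("长江流域", [16, 18, 0, 0]))   # [('长江', 0, 2, 'RIV')]
```
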
10c5cd8101cff8672ef60125ceffa4769b4d7c27 | 21682f70ff130169d8800a06b1a6d8bf7f46e45a | /functions/decoraters/variablelengthargument/demo.py | 15d8a2f86e7c77c3241c3e719adc2bc1555f7f55 | []
| no_license | Aravind2595/MarchPythonProject | 03c3aeee40f5ff2c635861ac29f31a7633499d51 | 9aa9241632b7f96e7e1cb33b3adb7b8def36f1f8 | refs/heads/master | 2023-05-05T04:16:45.556737 | 2021-05-20T08:43:52 | 2021-05-20T08:43:52 | 368,791,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 252 | py | def add(*args): #muttiple argument passing ;it will also accept 0 argument
    res=0 #the asterisk is what matters, not the name 'args' (e.g. *nums also works)
    for num in args:#the arguments arrive packed in a tuple
res+=num
return res
print(add(10,20,30,40)) | [
"[email protected]"
]
| |
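
For completeness, the keyword counterpart of the record above packs arguments into a dict rather than a tuple; a short sketch (the names are illustrative):

```python
def describe(**kwargs):  # arbitrary keyword arguments arrive packed in a dict
    for key, value in kwargs.items():
        print(f"{key} = {value}")

describe(a=1, b=2)  # prints: a = 1, then b = 2
describe()          # zero keyword arguments is also fine
```
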
d0bbe41af3a825c8397a6ae8f3261c2be29c4625 | 15960f0aa40915ddc93cd5c8a840a4abfb167cf1 | /groups/models.py | 0fae1fa57df655ee1187afa7e7aea983641ef30c | []
| no_license | phouse512/piper | 74d815fd443482abc80418dbed678b1431e17eb9 | 70f651db8af4edb625f6ba249556d3c2d04a350b | refs/heads/master | 2022-05-04T20:36:56.354336 | 2018-12-19T04:28:23 | 2018-12-19T04:28:23 | 40,972,739 | 0 | 0 | null | 2018-11-17T16:56:21 | 2015-08-18T13:13:15 | Python | UTF-8 | Python | false | false | 399 | py | from django.db import models
from users.models import User
class Group(models.Model):
id = models.AutoField(primary_key=True)
name = models.CharField(max_length=30)
class Meta:
db_table = 'groups'
class GroupMembership(models.Model):
id = models.AutoField(primary_key=True)
user = models.ForeignKey(User)
group = models.ForeignKey(Group)
class Meta:
db_table = 'groups_membership'
| [
"[email protected]"
]
| |
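
One caveat on the models in the record above: they rely on pre-Django-2.0 defaults. Since Django 2.0 the on_delete argument to ForeignKey is required, so on a modern Django the same membership model would need something like this sketch:

```python
from django.db import models
from users.models import User

class GroupMembership(models.Model):
    id = models.AutoField(primary_key=True)
    # on_delete is mandatory from Django 2.0 onward; CASCADE mirrors the old default
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    group = models.ForeignKey('Group', on_delete=models.CASCADE)

    class Meta:
        db_table = 'groups_membership'
```
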
62d885e2dfc1f0c0f22c2711cb0bcfabeb0641b3 | 0942ec9cdda81f754d05ae9893605769ed5c1111 | /flask-video-streaming/camera_pi.py | f94a07a7875d2c5f6778403d01ea02da3986608e | [
"MIT"
]
| permissive | sourceperl/rpi.webcam.pi3 | f9fa061bc05bab9720c9e372c96f65e431ad5673 | ea8559ca93f771250961a63fbe0f7acc3a7a2338 | refs/heads/master | 2020-12-25T14:38:24.234521 | 2016-07-21T14:56:01 | 2016-07-21T14:56:01 | 63,687,773 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | import time
import io
import threading
import picamera
class Camera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
def initialize(self):
if Camera.thread is None:
# start background frame thread
Camera.thread = threading.Thread(target=self._thread)
Camera.thread.daemon = True
Camera.thread.start()
# wait until frames start to be available
while self.frame is None:
time.sleep(0)
def get_frame(self):
Camera.last_access = time.time()
self.initialize()
return self.frame
@classmethod
def _thread(cls):
with picamera.PiCamera() as camera:
# camera setup
camera.resolution = (640, 480)
camera.hflip = True
camera.vflip = True
stream = io.BytesIO()
for foo in camera.capture_continuous(stream, 'jpeg',
use_video_port=True):
# store frame
stream.seek(0)
cls.frame = stream.read()
# reset stream for next frame
stream.seek(0)
stream.truncate()
# if there hasn't been any clients asking for frames in
# the last 10 seconds stop the thread
if time.time() - cls.last_access > 10:
break
cls.thread = None
| [
"[email protected]"
]
| |
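
The Camera class in the record above is one half of the familiar Flask MJPEG-streaming pattern; here is a sketch of the serving half, assuming a Flask app sits next to it (the route and function names are illustrative, not taken from this repo):

```python
from flask import Flask, Response
from camera_pi import Camera

app = Flask(__name__)

def gen(camera):
    while True:
        frame = camera.get_frame()  # JPEG bytes from the background thread
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed')
def video_feed():
    return Response(gen(Camera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
```
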
34179ff136b9b68223fd42cb9f5fbe54e95a88de | af0dcf80a36da4ac6894dc517ad1870f702c3122 | /azure-mgmt-web/azure/mgmt/web/models/csm_publishing_profile_options.py | 99b9542ab7d50b0a1d29b9d31f8743561ff5afa3 | [
"Apache-2.0"
]
| permissive | FlavioAlexander/azure-sdk-for-python | 4c6151ca17886f9e4d47e1ccc469859abdedca5a | 8c7416749f9a5697e0311bc9af8fe5c0d524ca03 | refs/heads/master | 2021-01-24T02:34:37.194767 | 2016-07-03T23:47:23 | 2016-07-03T23:47:23 | 62,738,173 | 0 | 1 | null | 2016-07-06T16:54:12 | 2016-07-06T16:54:10 | null | UTF-8 | Python | false | false | 1,346 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft and contributors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CsmPublishingProfileOptions(Model):
"""Publishing options for requested profile.
:param format: Name of the format. Valid values are:
FileZilla3
WebDeploy -- default
Ftp
:type format: str
"""
_attribute_map = {
'format': {'key': 'format', 'type': 'str'},
}
def __init__(self, format=None):
self.format = format
| [
"[email protected]"
]
| |
a560d10713bc976b978431314f53a75111c1555a | 3aa8222bb2edc93c9202ccbcf6f331cdf73cd5a2 | /FundRatingNSDL/nsdl_extraction/setup.py | ac0e1546b51b54b85c28ad5f48c2c3952b296cc5 | []
| no_license | pavithra-ft/ft-automation | a977809823e587efd596b02e3a8286f887d12116 | 946e1c35b785bfc3ea31d5903e021d4bc99fe302 | refs/heads/main | 2023-04-24T19:54:28.478577 | 2021-05-11T17:53:08 | 2021-05-11T17:53:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | # Automatically created by: scrapyd-deploy
from setuptools import setup, find_packages
setup(
name='project',
version='1.0',
packages=find_packages(),
entry_points={'scrapy': ['settings = nsdl_extraction.settings']},
)
| [
"[email protected]"
]
| |
460f676c069089996fb607db849fb892c0b4ab8a | c2e16633921d1efe584d93d769eaa7892a2fd8f3 | /list,advanced/Messaging.py | aa33649a6ed9732f8fa8110516c633e59d131daa | []
| no_license | yosifnandrov/softuni-stuff | bd53d418fe143ea4633a5488c1f80648da0b9ef7 | 2a76e5aee2029edf901634750d28cf153d73ece3 | refs/heads/main | 2023-04-17T19:53:30.254790 | 2021-05-06T11:33:39 | 2021-05-06T11:33:39 | 364,884,782 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | number = input().split()
message = input()
def get_sum(n):
    total = 0  # renamed from 'sum' to avoid shadowing the built-in
    for digit in n:
        total += int(digit)
    return total
for i in number:
summary = get_sum(i)
for l in range(len(message)):
if l == summary:
print(message[l], end="")
message = message[0:l:] + message[l + 1::]
break
elif l == len(message) - 1:
l = summary - len(message)
print(message[l], end="")
message = message[0:l:] + message[l + 1::]
| [
"[email protected]"
]
| |
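
The index arithmetic in the record above is driven entirely by digit sums; a quick standalone check of that helper (the inputs are invented):

```python
def get_sum(n):
    return sum(int(digit) for digit in n)

print(get_sum("234"))  # 9 -> would select the character at index 9
print(get_sum("70"))   # 7
```
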
c5420358fb87484239026919e290e881a7b4c6c4 | 2ce0c37ac7d9beeac23db688f97a1f502b92d13a | /store/models/store.py | 878b20d11d588de233e55c8908f1c894374734b0 | []
| no_license | AmrElsayedEG/inventory-system | 0cdb0634b33117b13bfcae8642f979448d831369 | d4bc483612c3b721918d75f24ab0d7fa29b78ce3 | refs/heads/main | 2023-08-20T22:32:25.113740 | 2021-10-04T08:55:44 | 2021-10-04T08:55:44 | 413,344,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | py | from django.db import models
class Store(models.Model):
name = models.CharField(max_length=100)
address = models.CharField(max_length=200)
coordinates = models.JSONField(blank=True, null=True)
def __str__(self) -> str:
return self.name | [
"[email protected]"
]
| |
1e4fc17bed5f3bca085566203de7580dbe427874 | b5187b5ffd53a2cdc8ec6ed94effc39702c1ea31 | /loyalty_app/loyalty/doctype/sales_list/sales_list.py | 3f630555c69f95a1f081ba33f89e1bac9d77c915 | [
"MIT"
]
| permissive | vignesharumainayagam/engagex-loyalty_app-backup- | 946a7f75c5ae5cce33313142a0b4e6ba29d67cb6 | 4c326c5f7b22572146f0b946d6498e85ac22a143 | refs/heads/master | 2020-03-11T18:00:14.106005 | 2018-04-19T05:36:06 | 2018-04-19T05:36:06 | 130,163,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 254 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Loyalty and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class Sales_list(Document):
pass
| [
"[email protected]"
]
| |
53b2af0868849bff57dbd8b705257e3f2690e172 | a88d9c0176f5e4c0d0bd9664270e000ebb5edbd9 | /component/tile/sensor_tile.py | 9b549723c7bbb0854467b7bcc1072e972e246aa2 | [
"MIT"
]
| permissive | sandroklippel/fcdm | fb81c73fc6bd1cf296f9301272923c3627474d3f | 5a54e6352bb574ba409be38882ff0d13b3473b7a | refs/heads/master | 2023-08-19T22:05:52.055545 | 2021-08-24T11:23:40 | 2021-08-24T11:23:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,828 | py | from datetime import datetime as dt
from sepal_ui import sepalwidgets as sw
import ipyvuetify as v
from component import parameter as cp
from component.message import cm
class SensorTile(sw.Tile):
def __init__(self, model):
# create adjustable variables end and start
self.end = dt.now().year
self.start = 1950 # prior to any sats
# create the widgets
self.sensors_select = v.Select(label=cm.input_lbl.sensor, items=[], v_model=[], multiple=True, chips=True, deletable_chips=True)
landsat_7_switch = v.Switch(label=cm.input_lbl.do_threshold, v_model =model.improve_L7)
landsat_7_slider = v.Slider(class_='mt-5', label=cm.input_lbl.threshold, min=0, max=.3, step=.001, v_model=model.improve_threshold, thumb_label='always')
cloud_buffer = v.Slider(class_='mt-5', label=cm.input_lbl.cloud_buffer, min=0, max =2500, step=10, v_model=model.cloud_buffer, thumb_label='always')
# bind them to io
model \
.bind(self.sensors_select, 'sensors',) \
.bind(landsat_7_switch, 'improve_L7',) \
.bind(landsat_7_slider, 'improve_threshold',) \
.bind(cloud_buffer, 'cloud_buffer',)
super().__init__(
'nested_widget',
cm.tile.sensor,
inputs = [self.sensors_select, landsat_7_switch, landsat_7_slider, cloud_buffer],
alert = sw.Alert()
)
# add js behaviour
self.sensors_select.observe(self._check_sensor, 'v_model')
model.observe(self._change_start, 'reference_start')
model.observe(self._change_end, 'analysis_end')
def _check_sensor(self, change):
"""
        prevent users from mixing Landsat and Sentinel 2 sensors;
        displays a warning message to explain why
        """
        # exit if it's a removal
if len(change['new']) < len(change['old']):
self.alert.reset()
return self
        # use the position in the list as a boolean value
sensors = ['landsat', 'sentinel']
# guess the new input
new_value = list(set(change['new']) - set(change['old']))[0]
id_ = next(i for i, s in enumerate(sensors) if s in new_value)
if sensors[id_] in new_value:
if any(sensors[not id_] in s for s in change['old']):
change['owner'].v_model = [new_value]
self.alert.add_live_msg(cm.no_mix, 'warning')
else:
self.alert.reset()
return self
def _change_end(self, change):
self.end = int(change['new'][:4]) if change['new'] else dt.now().year
self._check_sensor_availability()
return self
def _change_start(self, change):
self.start = int(change['new'][:4]) if change['new'] else 1950
self._check_sensor_availability()
return self
def _check_sensor_availability(self):
"""reduce the number of available satellites based on the dates selected by the user"""
# reset current values
self.sensors_select.items = []
self.sensors_select.v_model = []
# check every satellite availability
years = range(self.start, self.end + 1)
sensors = []
for s in cp.sensors:
if any(e in years for e in [cp.sensors[s]['start'], cp.sensors[s]['end']]):
sensors.append(s)
elif cp.sensors[s]['start'] < self.start and cp.sensors[s]['end'] > self.end:
sensors.append(s)
self.sensors_select.items = sensors
return self
| [
"[email protected]"
]
| |
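
_check_sensor_availability in the record above reduces to a range-overlap test; the same predicate as a standalone function, with the cp.sensors metadata dict replaced by plain start/end years for the demo:

```python
def sensor_available(sensor_start, sensor_end, start, end):
    """True if the sensor's lifetime intersects the selected [start, end] window."""
    years = range(start, end + 1)
    return (sensor_start in years or sensor_end in years
            or (sensor_start < start and sensor_end > end))

print(sensor_available(1999, 2003, 2001, 2010))  # True: the sensor ends inside the window
print(sensor_available(2015, 2022, 1990, 1995))  # False: no overlap
```
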
320687db2d1fc6caf127a9b4f5b1f96927e80f57 | 2acd3d3616ab6ae6a56602f8c28c9cb2d6fd6db0 | /config/settings.py | 13c13dee96727b13a815724aa918b12547a4d6e4 | []
| no_license | EgorovM/hb_maker | 45f3d7375d3aa5c06127f5bfc7cab2bff88192da | d0524cc81e5c5cb85b4de1a397219876d44daf42 | refs/heads/main | 2023-09-03T05:09:39.297948 | 2021-11-11T20:43:47 | 2021-11-11T20:43:47 | 352,812,794 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,274 | py | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-cu6ykde21-!@=582-#1mgj$5xhph@hxybo1qqtsut8uas^w@b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'creator',
'viewer',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static',)
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media',)
MEDIA_URL = '/media/'
| [
"[email protected]"
]
| |
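
Two values in the settings record above (the hard-coded SECRET_KEY and DEBUG = True) are fine for local development but should not ship. A common pattern, sketched with illustrative environment-variable names:

```python
import os

SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', 'dev-only-fallback')
DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
```
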
ea02622ccae8492548b091136b268bf259b5cebd | 23ec6adce704bff40d04cd6fc0ba446375405b68 | /Non Leetcode Solutions/linked_list_py.py | 378d3a34b439b5b394c573f968a35ed1cc2897d6 | []
| no_license | amoghrajesh/Coding | 1845be9ea8df2d13d2a21ebef9ee6de750c8831d | a7dc41a4963f97dfb62ee4b1cab5ed80043cfdef | refs/heads/master | 2023-08-31T10:10:48.948129 | 2023-08-30T15:04:02 | 2023-08-30T15:04:02 | 267,779,618 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,952 | py | class Node(object):
def __init__(self,data,next_node=None):
self.data=data
self.next_node=next_node
def get_next(self):
return self.next_node
def set_next(self,next_node):
self.next_node=next_node
def get_data(self):
return self.data
def set_data(self,data):
self.data=data
def has_next(self):
if self.get_next() is None:
return False
return True
def toString(self):
return str(self.get_data())
class LinkedList(object):
def __init__(self,r=None):
self.root=r
self.size=0
def get_size(self):
return self.size
def add(self,d):#add at beginning
new_node=Node(d,self.root)
self.root=new_node
self.size+=1
def remove(self,data):
this_node=self.root
prev_node=None
while this_node is not None:
if this_node.get_data() == data:
if prev_node is not None:
prev_node.set_next(this_node.get_next())
else:
self.root=this_node.get_next()
self.size-=1
return True
else:
prev_node=this_node
this_node=this_node.get_next()
return False
def find(self,data):
this_node=self.root
while this_node is not None:
if this_node.get_data() == data:
return True
this_node=this_node.get_next()
return False
def print_list(self):
this_node=self.root
        while this_node is not None:  # has_next() here would skip printing the last node
print(this_node.toString())
this_node=this_node.get_next()
myList=LinkedList()
myList.add(1)
myList.add(4)
myList.add(6)
myList.add(2)
print("size:",myList.get_size())
'''myList.remove(6)
print("size:",myList.get_size())
print("Is 2 present?",myList.find(-2))'''
myList.print_list()
| [
"[email protected]"
]
| |
c4be81c83c88067b9cf207fdeb2ab275f44e2c08 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /F4iemEeFfsaFoMpAF_4.py | 786cba909da3a937ac21071a5cc1d90693d4e336 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 776 | py | """
This is a list of single characters with an unwanted character at the end:
["H", "e", "l", "l", "o", "!", "\0"]
You could also just type "Hello!" when initializing a variable, creating the
string "Hello!"
Create a function that will return a string by combining the given character
list, not including the unwanted final character.
### Examples
cpp_txt(["H", "i", "!", "\0"]) ➞ "Hi!"
cpp_txt(["H", "e", "l", "l", "o", "!", "\0"]) ➞ "Hello!"
cpp_txt(["J", "A", "V", "a", "\0"]) ➞ "JAVa"
### Notes
This is a translation of a C++ challenge and is trivial in Python, but perhaps
it will be helpful to someone out there. (No challenge is trivial until you
know how to solve it :)
"""
def cpp_txt(lst):
return ''.join(lst[:-1])
| [
"[email protected]"
]
| |
6068e0dfbaa8b3e02df630a1f8f2d8551b444403 | 2eaecdb1ed42170463993b8b2285296c5ef2231d | /apps/ciudad/admin.py | d7e080b95887458bf100d3a8e00e6edfdc8c6041 | []
| no_license | ivanfdaza/tribunaleclesiasticoIIS | 9639fc66a2c99baa45b8276f4a1e035bdf294e2e | acb164ab8464b71d0461acf03bdd5e3386b57893 | refs/heads/master | 2022-11-21T10:32:14.925326 | 2020-07-23T16:21:26 | 2020-07-23T16:21:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 177 | py | from django.contrib import admin
# Register your models here.
from apps.ciudad.models import Ciudad, Departamento
admin.site.register(Ciudad)
admin.site.register(Departamento) | [
"[email protected]"
]
| |
88be1a8dbca36a3704310ed5d08336575231773d | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/cirq_new/cirq_program/startCirq_pragma263.py | bc6b11402b4ca6da63e2f0bf7495b508300d9153 | [
"BSD-3-Clause"
]
| permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,624 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=15
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
class Opty(cirq.PointOptimizer):
def optimization_at(
self,
circuit: 'cirq.Circuit',
index: int,
op: 'cirq.Operation'
) -> Optional[cirq.PointOptimizationSummary]:
if (isinstance(op, cirq.ops.GateOperation) and isinstance(op.gate, cirq.CZPowGate)):
return cirq.PointOptimizationSummary(
clear_span=1,
clear_qubits=op.qubits,
new_operations=[
cirq.CZ(*op.qubits),
cirq.X.on_each(*op.qubits),
cirq.X.on_each(*op.qubits),
]
)
#thatsNoCode
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[1])) # number=7
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[0])) # number=12
c.append(cirq.CZ.on(input_qubit[3],input_qubit[0])) # number=13
c.append(cirq.H.on(input_qubit[0])) # number=14
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=8
c.append(cirq.SWAP.on(input_qubit[1],input_qubit[0])) # number=9
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=10
c.append(cirq.SWAP.on(input_qubit[3],input_qubit[0])) # number=11
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2820
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq_pragma263.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | [
"[email protected]"
]
| |
0efe388f3e3a95551a15c6e5f3c3ac7d3ae444c5 | b9062ed0431544160161a270fe669858c3ca9633 | /blog/migrations/0003_auto_20191101_2319.py | f9b453d4abd8102f08dc12a51a8acc1e12851805 | []
| no_license | sd8917/LearnDjango | 350f73ed7077d0b3ac9aa2f1e0fd7d05f67faf05 | 87a9c6c5932f685a01ad6125faf81ac94a1fac5d | refs/heads/master | 2022-12-03T18:18:13.770896 | 2019-11-05T06:35:32 | 2019-11-05T06:35:32 | 219,081,219 | 1 | 0 | null | 2022-11-22T04:36:24 | 2019-11-02T00:14:05 | Python | UTF-8 | Python | false | false | 443 | py | # Generated by Django 2.2.6 on 2019-11-01 17:49
import ckeditor_uploader.fields
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0002_auto_20191101_1435'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='content',
field=ckeditor_uploader.fields.RichTextUploadingField(),
),
]
| [
"[email protected]"
]
| |
d45b2df2ceb71ae350e9d6a317ee4e09741e503e | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_207/507.py | 3881fca5b62b736452cde9286a5ba5618161c3b5 | []
| no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,981 | py | def validate(s):
pass
def solver(line):
n,r,o,y,g,b,v = line
t1 = b - o
t2 = y - v
t3 = r - g
if t1 < 0 or t2 < 0 or t3 < 0:
return "IMPOSSIBLE"
if 0 in [t1,t2,t3]:
if line[1:].count(0) == 4:
L = [(r,'R'),(o,'O'),(y,'Y'),(g,'G'),(b,'B'),(v,'V')]
L.sort(key = lambda x: -x[0])
if L[0][0] == L[1][0]:
return (L[0][1] + L[1][1]) * L[0][0]
else:
return "IMPOSSIBLE"
else:
return "IMPOSSIBLE"
L = [t1,t2,t3]
if sum(L) < 2 * max(L):
return "IMPOSSIBLE"
else:
L = [[t1,'B'],[t2,'Y'],[t3,'R']]
s = '_'
while sum(i[0] for i in L) > 3:
#error: haven't enforced start != end
L.sort(key = lambda x: -x[0])
if L[0][1] != s[-1]:
s += L[0][1]
L[0][0] -= 1
else:
s += L[1][1]
L[1][0] -= 1
if L[1][0] < 0:
print "bad stuff"
s = s[1:]
if s:
t = s[0] + s[-1]
else:
t = 'RR'
d = {'RR' : 'BRY',
'RY' : 'BRY',
'RB' : 'YRB',
'YR' : 'BYR',
'YY' : 'BYR',
'YB' : 'RYB',
'BR' : 'YBR',
'BY' : 'RBY',
'BB' : 'RBY'}
s += d[t]
s = s.replace('B','BO' * o + 'B', 1)
s = s.replace('Y','YV' * v + 'Y', 1)
s = s.replace('R','RG' * g + 'R', 1)
return s
#case testing needs to happen
fout = open('out.txt','w')
f = open('in.txt')
T = int(f.readline())
for case in range(1,T+1):
line = f.readline()
line = line.split()
line = [int(i) for i in line]
ans = solver(line)
str = "Case #%d: %s\n" % (case, ans)
print str,
fout.write(str)
f.close()
fout.close()
| [
"[email protected]"
]
| |
3a206f6d8e955b15bbd61988b40ea1f668583f18 | 8ef5a09d76a11c56963f18e6a08474a1a8bafe3c | /algorithm/dp_subset_sum.py | 44580f16c302081909155ac156cefc69cf012378 | []
| no_license | roiei/algo | 32c4677649c7666db148f6183fbfbf66c8b1969f | ae8bb8bf4ae4026ccaf1dce323b4098547dd35ec | refs/heads/master | 2022-04-01T19:21:27.768675 | 2022-02-19T06:15:29 | 2022-02-19T06:15:29 | 169,021,154 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,898 | py |
nums = [34, 4, 12, 5, 2]
target = 9
#target = 40
def dfs(nums, depth, n, target):
if target == 0:
return True
if depth == n or target < 0:
return False
res = dfs(nums, depth + 1, n, target - nums[depth]),
res += dfs(nums, depth + 1, n, target),
return any(res)
mem = {}
def dfs_dp(nums, depth, n, target):
    # memoize on (depth, target): the answer depends on both, so keying
    # on depth alone would return a value cached for a different target
    if (depth, target) in mem:
        return mem[(depth, target)]
    if target == 0:
        return True
    if depth == n or target < 0:
        return False
    res = dfs_dp(nums, depth + 1, n, target - nums[depth]),
    res += dfs_dp(nums, depth + 1, n, target),
    mem[(depth, target)] = any(res)
    return mem[(depth, target)]
def isSubsetSum(nums, n, target):
subset = ([[False for i in range(target+1)] for i in range(n+1)])
for i in range(n+1):
subset[i][0] = True
for i in range(1, target+1):
subset[0][i] = False
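    # Recurrence: subset[i][j] is True iff some subset of the first i
    # numbers sums to j -- either skip nums[i-1] (subset[i-1][j]) or
    # take it (subset[i-1][j - nums[i-1]]).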
for i in range(1, n+1):
for j in range(1, target+1):
if j < nums[i-1]:
subset[i][j] = subset[i-1][j]
else:
subset[i][j] = (subset[i-1][j] or
subset[i-1][j-nums[i-1]])
return subset[n][target]
def is_subset_sum(nums, n, target):
    # bottom-up 0/1 subset-sum on a 1-D table; i runs downwards so each
    # num contributes at most once per pass
    dp = [False] * (target + 1)
    dp[0] = True
    for num in nums:
        if num > target:
            continue
        print(f'num = {num}')
        for i in range(target, num - 1, -1):
            if dp[i - num]:
                dp[i] = True
    return dp[target]
# print(dfs(nums, 0, len(nums), target))
# print(dfs_dp(nums, 0, len(nums), target))
print(isSubsetSum(nums, len(nums), target))
print(is_subset_sum(nums, len(nums), target))
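# Sanity check: for nums = [34, 4, 12, 5, 2] and target = 9, the variants
# here evaluate to True, since 4 + 5 = 9.  (The commented-out target = 40
# is also reachable, via 34 + 4 + 2.)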
| [
"[email protected]"
]
| |
832974b9068a90cd72f7987a17131faae3924d37 | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_cell3.py | c1150112a0c843de3db8f2c0d137662bf75a7671 | []
| no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,148 | py | from xcp2k.inputsection import InputSection
from _cell_ref1 import _cell_ref1
class _cell3(InputSection):
def __init__(self):
InputSection.__init__(self)
self.A = None
self.B = None
self.C = None
self.Abc = None
self.Alpha_beta_gamma = None
self.Cell_file_name = None
self.Cell_file_format = None
self.Periodic = None
self.Multiple_unit_cell = None
self.Symmetry = None
self.CELL_REF = _cell_ref1()
self._name = "CELL"
self._keywords = {'A': 'A', 'Cell_file_format': 'CELL_FILE_FORMAT', 'C': 'C', 'B': 'B', 'Symmetry': 'SYMMETRY', 'Alpha_beta_gamma': 'ALPHA_BETA_GAMMA', 'Multiple_unit_cell': 'MULTIPLE_UNIT_CELL', 'Periodic': 'PERIODIC', 'Abc': 'ABC', 'Cell_file_name': 'CELL_FILE_NAME'}
self._subsections = {'CELL_REF': 'CELL_REF'}
self._aliases = {'Angles': 'Alpha_beta_gamma'}
@property
def Angles(self):
"""
See documentation for Alpha_beta_gamma
"""
return self.Alpha_beta_gamma
@Angles.setter
def Angles(self, value):
self.Alpha_beta_gamma = value
| [
"[email protected]"
]
| |
9aff07ad32360b10ae281e93532a2f1af7a967f5 | 7826681647933249c8949c00238392a0128b4a18 | /cosypose/simulator/__init__.py | 6242dfa1c761870f2a85f43957247c13b7b53277 | [
"MIT"
]
| permissive | imankgoyal/cosypose | b35678a32a6491bb15d645bc867f4b2e49bee6d2 | fa494447d72777f1d3bd5bd134d79e5db0526009 | refs/heads/master | 2022-12-09T11:18:23.188868 | 2020-08-31T15:34:02 | 2020-08-31T15:34:02 | 291,834,596 | 2 | 0 | MIT | 2020-08-31T22:06:12 | 2020-08-31T22:06:11 | null | UTF-8 | Python | false | false | 173 | py | from .body import Body
from .camera import Camera
from .base_scene import BaseScene
from .caching import BodyCache, TextureCache
from .textures import apply_random_textures
| [
"[email protected]"
]
| |
49254eb20c5905f20020b227a913eea9b9007358 | 457c673c8c8d704ec150322e4eeee2fde4f827ca | /Programming Basic/First_Steps_in_coding_lab/07_Projects_Creation.py | ccf04b1496f502d612f560496cd25f03a08b4d0e | []
| no_license | xMrShadyx/SoftUni | 13c08d56108bf8b1ff56d17bb2a4b804381e0d4e | ce4adcd6e8425134d138fd8f4b6101d4eb1c520b | refs/heads/master | 2023-08-02T03:10:16.205251 | 2021-06-20T05:52:15 | 2021-06-20T05:52:15 | 276,562,926 | 5 | 1 | null | 2021-09-22T19:35:25 | 2020-07-02T06:07:35 | Python | UTF-8 | Python | false | false | 191 | py | architect = input()
amount_projects = int(input())
total_time = amount_projects * 3
print(f'The architect {architect} will need {total_time} hours to complete {amount_projects} project/s.') | [
"[email protected]"
]
| |
98e60fc6389398e16d76d7de6e665ef79eac8947 | dcc36a7744d657e15385803fcd13335685a595af | /quantdigger/demo/test_backtest.py | 9500f1a4557b50deac1f9ee32fcf107612863f33 | []
| no_license | timedcy/quantdigger | 777c28ba96d7dba1cb491a634f46e3968f3232bb | bc492811c796caaad3801d379bb485c1986d4619 | refs/heads/master | 2021-01-22T01:27:52.102183 | 2015-11-08T04:35:37 | 2015-11-08T04:35:37 | 45,767,058 | 1 | 0 | null | 2015-11-08T04:36:13 | 2015-11-08T04:36:13 | null | UTF-8 | Python | false | false | 2,362 | py | # -*- coding: utf-8 -*-
from quantdigger.engine.execute_unit import ExecuteUnit
from quantdigger.indicators.common import MA, BOLL
from quantdigger.engine.strategy import TradingStrategy
from quantdigger.util import pcontract, stock
from quantdigger.digger import deals
import plotting
#def average(series, n):
#""" 一个可选的平均线函数 """
### @todo plot element
#sum_ = 0
#for i in range(0, n):
#sum_ += series[i]
#return sum_ / n
class DemoStrategy(TradingStrategy):
""" 策略实例 """
def __init__(self, exe):
super(DemoStrategy, self).__init__(exe)
print 'start: ', self.datetime[0]
self.ma20 = MA(self, self.close, 20,'ma20', 'b', '1')
self.ma10 = MA(self, self.close, 10,'ma10', 'y', '1')
self.b_upper, self.b_middler, self.b_lower = BOLL(self, self.close, 10,'boll10', 'y', '1')
#self.ma2 = NumberSeries(self)
def on_bar(self):
""" 策略函数,对每根Bar运行一次。"""
#self.ma2.update(average(self.open, 10))
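        # Classic moving-average crossover: open a long when the 10-bar
        # MA crosses above the 20-bar MA on this bar, and close the long
        # on the opposite cross while a position is held.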
if self.ma10[1] < self.ma20[1] and self.ma10 > self.ma20:
self.buy('long', self.open, 1, contract = 'IF000.SHFE')
elif self.position() > 0 and self.ma10[1] > self.ma20[1] and self.ma10 < self.ma20:
self.sell('long', self.open, 1)
        # cross-instrument data reference
#print self.position(), self.cash()
#print self.datetime, self.b_upper, self.b_middler, self.b_lower
#print self.datetime[0]
if __name__ == '__main__':
try:
pcon = pcontract('BB.SHFE', '1.Minute')
#begin_dt, end_dt = '2015-05-25', '2015-06-01'
        #pcon = stock('600848','10.Minute') # download stock data via tushare
simulator = ExecuteUnit([pcon, pcon])
algo = DemoStrategy(simulator)
#algo1 = DemoStrategy(simulator)
#algo2 = DemoStrategy(simulator)
simulator.run()
        # display backtest results
from quantdigger.datastruct import TradeSide
ping = 0
kai = 0
for t in algo.blotter.transactions:
if t.side == TradeSide.PING:
ping += t.quantity
elif t.side == TradeSide.KAI:
kai += t.quantity
else:
raise "error"
print "ping: ", ping
print "kai: ", kai
assert kai >= ping
| [
"[email protected]"
]
| |
03dde1a263827b35b7aaa86f9f7835c933b700cc | 48f73b5b78da81c388d76d685ec47bb6387eefdd | /scrapeHackerrankCode/codes/find-point.py | fc6157b3e72153e83bf8e16e66b00a8c13227285 | []
| no_license | abidkhan484/hacerrankScraping | ad0ceda6c86d321d98768b169d63ea1ee7ccd861 | 487bbf115117bd5c293298e77f15ae810a50b82d | refs/heads/master | 2021-09-18T19:27:52.173164 | 2018-07-18T12:12:51 | 2018-07-18T12:12:51 | 111,005,462 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | # Accepted
# Python 3
def find_point(x1, y1, x2, y2):
print((2*x2-x1), (2*y2-y1))
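# Reflection of (x1, y1) through (x2, y2): if (x2, y2) is the midpoint of
# (x1, y1) and the answer (x', y'), then x2 = (x1 + x') / 2, hence
# x' = 2*x2 - x1 and y' = 2*y2 - y1, which is what find_point prints.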
for _ in range(int(input().strip())):
x1, y1, x2, y2 = input().split()
x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)
find_point(x1, y1, x2, y2)
| [
"[email protected]"
]
| |
0d2b450afa4215b6663f3662b252963b05493a13 | 7e2d802a17e42d50974af29e4c9b658d5da6471b | /IC/21-Missing-drone.py | addde9d1910b7c9628616aee6fecb212e90f64fb | []
| no_license | siddharthadtt1/Leet | a46290bacdf569f69d523413c0129676727cb20e | 1d8b96257f94e16d0c1ccf8d8e8cd3cbd9bdabce | refs/heads/master | 2020-06-20T16:21:15.915761 | 2017-05-15T22:35:42 | 2017-05-15T22:35:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 28 | py | # see Leet 136 single number | [
"[email protected]"
]
| |
a056eed4886e7ab503371a8658a267813fb8a067 | dc7632da4f04385142ea86e4a63c8537eaa2edeb | /django_app/config/urls.py | a7a6a32944ca6639e025c365d8c4507f1ca55a53 | []
| no_license | jmnghn/0621_test | bf4ecd4cc4ce56e06f4f07922d53e4dfb7471bab | 9cb3930d24644897074a336c998759a6a86b656a | refs/heads/master | 2022-10-11T17:53:34.715991 | 2017-06-22T05:03:39 | 2017-06-22T05:03:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 819 | py | """test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^post/', include('post.urls'))
]
| [
"[email protected]"
]
| |
6924472770c9b64625e91f5425599c76f151c774 | e3946d91dc5fe71989c2f4b6390232865fcb5d1b | /fjord/flags/spicedham_utils.py | 2e87b27ba33d4e350010c6253dd663f313cda103 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
]
| permissive | zeusintuivo/fjord | 61b632fd6df0e1b3508e628fe4f682a937cc0244 | 3bd227004d369df1fdc39f06acff12ebc8f0fe34 | refs/heads/master | 2021-01-16T18:28:52.564638 | 2014-09-24T21:02:51 | 2014-09-24T21:02:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,166 | py | import json
import os
import re
import threading
from spicedham import Spicedham
from spicedham.backend import BaseBackend
from fjord.flags.models import Store
class FjordBackend(BaseBackend):
def __init__(self, config):
pass
def reset(self):
Store.objects.all().delete()
def get_key(self, classifier, key, default=None):
try:
obj = Store.objects.filter(classifier=classifier, key=key)[0]
value = json.loads(obj.value)
except (IndexError, Store.DoesNotExist):
value = default
return value
def set_key(self, classifier, key, value):
value = json.dumps(value)
try:
obj = Store.objects.filter(classifier=classifier, key=key)[0]
obj.value = value
except (IndexError, Store.DoesNotExist):
obj = Store.objects.create(
classifier=classifier, key=key, value=value)
obj.save()
def set_key_list(self, classifier, key_value_tuples):
for key, value in key_value_tuples:
self.set_key(classifier, key, value)
TOKEN_RE = re.compile(r'\W')
def tokenize(text):
"""Takes a piece of text and tokenizes it into train/classify tokens"""
# FIXME: This is a shite tokenizer and doesn't handle urls
# well. (We should handle urls well.)
tokens = TOKEN_RE.split(text)
return [token.lower() for token in tokens if token]
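# e.g. tokenize("Hello, world!") -> ['hello', 'world'].  Splitting on \W
# also shreds URLs into fragments, which is what the FIXME above means by
# not handling urls well.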
_cached_spicedham = threading.local()
def get_spicedham():
"""Retrieve a Spicedham object
These objects are cached threadlocal.
"""
sham = getattr(_cached_spicedham, 'sham', None)
if sham is None:
config = {
'backend': 'FjordBackend'
}
sham = Spicedham(config)
_cached_spicedham.sham = sham
return sham
def train_cmd(path, classification):
"""Recreates training data using datafiles in path"""
path = os.path.abspath(path)
if not os.path.exists(path):
raise ValueError('path "%s" does not exist' % path)
sham = get_spicedham()
# Wipe existing training data.
print 'Wiping existing data...'
sham.backend.reset()
# Load all data for when classifier=True
true_path = os.path.join(path, classification)
print 'Loading classifier=True data from %s...' % true_path
files = [os.path.join(true_path, fn)
for fn in os.listdir(true_path) if fn.endswith('.json')]
print ' %s records...' % len(files)
for fn in files:
print ' - ' + fn
with open(fn, 'r') as fp:
data = json.load(fp)
sham.train(tokenize(data['description']), match=True)
# Load all data for when classifier=False
false_path = os.path.join(path, 'not_' + classification)
print 'Loading classifier=False data from %s...' % false_path
files = [os.path.join(false_path, fn)
for fn in os.listdir(false_path) if fn.endswith('.json')]
print ' %s records...' % len(files)
for fn in files:
print ' - ' + fn
with open(fn, 'r') as fp:
data = json.load(fp)
sham.train(tokenize(data['description']), match=False)
print 'Done!'
| [
"[email protected]"
]
| |
e95625894d5cba62471ce44e67b02160ea805c8f | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/shangtai/codejamC.py | be3aad06742fb467f6074f6d0a440327d6d7dc65 | []
| no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 999 | py | T = int(raw_input())
N, J = map(int, raw_input().split())
def is_prime(n):
if n == 2 or n == 3: return True
if n < 2 or n%2 == 0: return False
if n < 9: return True
if n%3 == 0: return False
r = int(n**0.5)
f = 5
while f <= r:
if n%f == 0: return False
if n%(f+2) == 0: return False
f +=6
return True
def primefactors(x):
loop=2
while loop<=x:
if x%loop==0:
x/=loop
return loop
else:
loop+=1
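# primefactors returns the *smallest* divisor >= 2 of x (necessarily
# prime), which is all the search below needs: one nontrivial divisor per
# base as a witness that the interpreted number is composite.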
print "Case #1:"
j=0
for candidate in xrange(2**(N-2)):
candidate=candidate<<1
candidate+=(1+(1<<(N-1)))
candidate="{0:b}".format(candidate)
factorlist=[candidate]
for base in xrange(2,11):
candidatebase=int(candidate,base)
if is_prime(candidatebase):
break
else:
factorlist.append(primefactors(candidatebase))
if len(factorlist)==10:
j+=1
for i in factorlist:
print i,
print
if j==J:
break
| [
"[[email protected]]"
]
| |
ae5b5e91cf43266b95ffaeb5f1795e03a00655ff | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part003803.py | 26d534cc630c79581554130b81c7f37de6f38777 | []
| no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,094 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher62345(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({0: 1}), [
(VariableWithCount('i2.2.2.0', 1, 1, S(0)), Add)
]),
1: (1, Multiset({1: 1}), [
(VariableWithCount('i2.2.3.0', 1, 1, S(0)), Add)
]),
2: (2, Multiset({2: 1}), [
(VariableWithCount('i2.2.1.2.0', 1, 1, S(0)), Add)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Add
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher62345._instance is None:
CommutativeMatcher62345._instance = CommutativeMatcher62345()
return CommutativeMatcher62345._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 62344
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 62346
if len(subjects) >= 1:
tmp2 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.2.1.0', tmp2)
except ValueError:
pass
else:
pass
# State 62347
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst2
subjects.appendleft(tmp2)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.3.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 63479
if len(subjects) >= 1:
tmp5 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.3.1.0', tmp5)
except ValueError:
pass
else:
pass
# State 63480
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst2
subjects.appendleft(tmp5)
subst1 = Substitution(subst0)
try:
subst1.try_add_variable('i2.2.1.2.1.0_1', S(1))
except ValueError:
pass
else:
pass
# State 65481
if len(subjects) >= 1:
tmp8 = subjects.popleft()
subst2 = Substitution(subst1)
try:
subst2.try_add_variable('i2.2.1.2.1.0', tmp8)
except ValueError:
pass
else:
pass
# State 65482
if len(subjects) == 0:
pass
# 2: x*d
yield 2, subst2
subjects.appendleft(tmp8)
if len(subjects) >= 1 and isinstance(subjects[0], Mul):
tmp10 = subjects.popleft()
associative1 = tmp10
associative_type1 = type(tmp10)
subjects11 = deque(tmp10._args)
matcher = CommutativeMatcher62349.get()
tmp12 = subjects11
subjects11 = []
for s in tmp12:
matcher.add_subject(s)
for pattern_index, subst1 in matcher.match(tmp12, subst0):
pass
if pattern_index == 0:
pass
# State 62350
if len(subjects) == 0:
pass
# 0: x*f
yield 0, subst1
if pattern_index == 1:
pass
# State 63481
if len(subjects) == 0:
pass
# 1: x*f
yield 1, subst1
if pattern_index == 2:
pass
# State 65483
if len(subjects) == 0:
pass
# 2: x*d
yield 2, subst1
subjects.appendleft(tmp10)
return
yield
from matchpy.matching.many_to_one import CommutativeMatcher
from .generated_part003804 import *
from collections import deque
from matchpy.utils import VariableWithCount
from multiset import Multiset | [
"[email protected]"
]
| |
a508f1b21eb0f6780f7e25f0848a5d2a51ae29ab | 1edd52cf197e5ae67b5939a3beb3e70761334e62 | /Udemy/python/Session-1&2-Intro-print-indentation-comments/using_indent.py | dce3afaf2ce37572dcc99994e37f4ba78baff000 | []
| no_license | sandeepmchary/Devops_wordpress_Notes | bdcd85d526780d03c494ecb93e714e7ffe0a4d58 | ffd2092162073e1e7342c6066d023d04e6ca8c1c | refs/heads/master | 2022-06-18T21:33:02.471025 | 2022-06-12T11:14:47 | 2022-06-12T11:14:47 | 154,679,658 | 1 | 4 | null | 2022-05-19T16:59:57 | 2018-10-25T13:51:40 | HTML | UTF-8 | Python | false | false | 117 | py | print("this is for indentation")
if 3 > 1:
print("using if condition")
print("we are comparing 3 with 1")
| [
"[email protected]"
]
| |
2b2ecf4b17dd2b31fbfbf57f46f019b2b1eb04ec | c903382b1c2d170ca5a00a4482ee23be94da76d8 | /quokka/core/admin/views.py | b7dff6308251262454ea8aa0e2499378eaebf24c | [
"MIT"
]
| permissive | alyoung/quokka | 63c74ff913fe3d3b5ebdef38d9d267b149a6c9c1 | a38749379f01c01cc887838999efa364dea5de04 | refs/heads/master | 2021-01-17T22:56:34.760694 | 2013-11-22T21:42:50 | 2013-11-22T21:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,499 | py | # Create customized index view class
from flask import current_app
from quokka.core.models import Content
from quokka.utils.routing import expose
from quokka.core.widgets import TextEditor, PrepopulatedText
from .ajax import AjaxModelLoader
from .models import BaseIndexView, BaseView, ModelAdmin, BaseContentAdmin
class IndexView(BaseIndexView):
roles_accepted = ('admin', 'editor', 'moderator', 'writer', 'staff')
@expose('/')
def index(self):
return self.render('admin/index.html')
class InspectorView(BaseView):
roles_accepted = ('admin',)
@expose('/')
def index(self):
context = {
"app": current_app
}
return self.render('admin/inspector.html', **context)
###############################################################
# Admin model views
###############################################################
class LinkAdmin(BaseContentAdmin):
roles_accepted = ('admin', 'editor', 'writer', 'moderator')
column_list = ('title', 'channel', 'slug', 'published')
form_columns = ('title', 'slug', 'channel', 'link',
'content_format', 'summary', 'contents',
'values', 'available_at', 'available_until', 'published')
form_args = {
'summary': {'widget': TextEditor()}
}
class ConfigAdmin(ModelAdmin):
roles_accepted = ('admin', 'developer')
column_list = ("group", "description", "published",
"created_at", "updated_at")
column_filters = ("group", "description")
form_columns = ("group", "description", "published", "values")
class SubContentPurposeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ChannelTypeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ContentTemplateTypeAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
class ChannelAdmin(ModelAdmin):
roles_accepted = ('admin', 'editor')
column_list = ('title', 'long_slug', 'is_homepage',
'channel_type', 'created_at', 'available_at', 'published',
'view_on_site')
column_filters = ['published', 'is_homepage', 'include_in_rss',
'show_in_menu', 'indexable']
column_searchable_list = ('title', 'description')
form_columns = ['title', 'slug', 'content_format', 'description',
'parent', 'is_homepage',
'include_in_rss', 'indexable', 'show_in_menu', 'order',
'per_page', 'tags',
'published', 'canonical_url', 'values', 'channel_type',
'inherit_parent', 'content_filters', 'available_at',
'available_until', 'render_content', 'redirect_url']
column_formatters = {
'view_on_site': ModelAdmin.formatters.get('view_on_site'),
'created_at': ModelAdmin.formatters.get('datetime'),
'available_at': ModelAdmin.formatters.get('datetime')
}
form_subdocuments = {}
form_widget_args = {
'title': {'style': 'width: 400px'},
'slug': {'style': 'width: 400px'},
}
form_args = {
'description': {'widget': TextEditor()},
'slug': {'widget': PrepopulatedText(master='title')}
}
form_ajax_refs = {
'render_content': AjaxModelLoader('render_content',
Content,
fields=['title', 'slug']),
'parent': {'fields': ['title', 'slug', 'long_slug']},
}
| [
"[email protected]"
]
| |
6665c52b545bfa40ef7b723af971b5ab3734efa2 | d58f9e650c4d8f1fe0379bb5c0a8d2f58ae697ec | /thorvald_penetrometer/scripts/penetrometer_driver.py | 5116ed52841c9316380b945b56beff444f675d8b | []
| no_license | mfkiwl/lcas_hardware-gnss-imu | 41a02418c3895463b0e22e36eb9669560d516e8a | 2aaed0601e2fd165cfb1f2a58b7c19df3b55ed2d | refs/heads/master | 2021-09-28T20:33:53.096301 | 2018-11-20T09:21:55 | 2018-11-20T09:21:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,844 | py | #!/usr/bin/env python
import rospy
import actionlib
import serial
import threading
import yaml
import std_srvs.srv
#import actionlib_msgs.msg
import dynamic_reconfigure.server # import Server
from thorvald_penetrometer.cfg import PenetrometerConfig
import std_msgs.msg #import String
import thorvald_penetrometer.msg
class PenetrometerServer(object):
"""
Class for Penetrometer Control
"""
_feedback = thorvald_penetrometer.msg.ProbeSoilFeedback()
_result = thorvald_penetrometer.msg.ProbeSoilResult()
_config_commands={ "ground_level":'g', "speed":'s', "top_speed":'t', "home_speed":'k',
"acceleration":'a', "deceleration":'d', "probe_depth":'l', "dist_per_reading": 'q',
"lfd_tolerance":'r', "max_force":'m', "min_force":'n', "max_force_delta":'x',
"min_force_delta":'y',"force_delta_abs":'v',"safe_disconnect":'o'}
_decimilimiters_configs=['ground_level','probe_depth','dist_per_reading']
_centiseconds_configs=['lfd_tolerance']
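    # Wire protocol, as implemented in send_command/set_config below: a
    # setting is written as its lowercase command letter followed by an
    # integer and a newline (e.g. "s120\n" for speed -- an illustrative
    # value), and the firmware acknowledges by echoing the capitalised
    # form ("S120").  Fields in _decimilimiters_configs appear to be
    # configured in mm and sent in tenths of a mm; _centiseconds_configs
    # values are divided by 10 before sending, presumably ms down to
    # hundredths of a second.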
def __init__(self, name):
"""
Initialization for Class
"""
self.cancelled = False
self.running=True
self.reply_buf=[]
self.last_response = ''
self.config={}
self.serial_port = rospy.get_param('~serial_port', '/dev/ttyACM0')
rospy.loginfo("opening serial port")
self.ser = serial.Serial(self.serial_port, 57600, timeout=0, parity=serial.PARITY_NONE)
thread = threading.Thread(target=self.read_from_port)#, args=(serial_port,))
thread.start()
self.input_pub = rospy.Publisher('/penetrometer_raw_data', std_msgs.msg.String, queue_size=0)
#Creating Action Server
rospy.loginfo("Creating action server.")
self._as = actionlib.SimpleActionServer(name, thorvald_penetrometer.msg.ProbeSoilAction, execute_cb = self.executeCallback, auto_start = False)
self._as.register_preempt_callback(self.preemptCallback)
rospy.loginfo(" ...starting")
self._as.start()
rospy.loginfo(" ...done")
rospy.loginfo("Creating services")
sname=name+'/save_params'
s = rospy.Service(sname, std_srvs.srv.Trigger, self.save_params)
sname=name+'/clear_errors'
s1 = rospy.Service(sname, std_srvs.srv.Trigger, self.clear_errors_req)
rospy.loginfo("initialising device done ...")
self.initialise_penetrometer()
self.clear_errors()
self.set_e_stop(False)
self.set_power_enable(True)
#Creating Dyn reconf server
rospy.loginfo("Creating dynamic reconfigure server.")
self.dynsrv = dynamic_reconfigure.server.Server(PenetrometerConfig, self.dyn_reconf_callback)
self.send_home()
rospy.loginfo("ROS init done ...")
rospy.loginfo("ALL done ...")
rospy.spin()
#self.write_config_to_file()
self.running=False
self.ser.close()
def clear_errors_req(self, req):
self.clear_errors()
return True, "Errors cleared"
def save_params(self, req):
self.write_config_to_file()
return True, "saved to params.yaml"
def write_config_to_file(self):
config = dict(self.config)
del config['groups']
yml = yaml.safe_dump(config, default_flow_style=False)
fh = open("params.yaml", "w")
s_output = str(yml)
fh.write(s_output)
fh.close()
def read_from_port(self):
serial_buffer=[]
#response=[]
while self.running:
if (self.ser.inWaiting()>0): #if incoming bytes are waiting to be read from the serial input buffer
data_str = self.ser.read(self.ser.inWaiting())#.decode('ascii') #read the bytes and convert from binary array to ASCII
print "|"+data_str+"|"
for i in data_str:
serial_buffer.append(i)
while '\n' in serial_buffer:
#print "end found"
nind= serial_buffer.index('\n')
self.reply_buf.append(serial_buffer[0:nind])
pen_data = ''.join(serial_buffer[0:nind])
self.input_pub.publish(pen_data)
for i in reversed(range(nind+1)):
serial_buffer.pop(i)
print serial_buffer
# if len(self.reply_buf)>0:
# print(self.reply_buf)
rospy.sleep(0.001) # Optional: sleep 10 ms (0.01 sec) once per loop to let other threads on your PC run
def dyn_reconf_callback(self, config, level):
#rospy.loginfo("""Reconfigure Request: {counts}""".format(**config))
#self.max_counts = config['counts']
#print "reconfigure ", config
if self.config:
changed_dict = {x: self.config[x] != config[x] for x in self.config if x in config}
lk = [key for (key, value) in changed_dict.items() if value]
#print "config changed ", lk, config[lk[0]]
self.set_config(lk[0], config[lk[0]])
self.config = config
else:
#print "First config: ", config.items()
self.config = config
for i in config.items():
self.set_config(i[0], i[1])
rospy.sleep(0.1)
return config
def set_config(self, field, value):
if field in self._config_commands.keys():
print field, value
if isinstance(value,bool):
value=int(value)
if field in self._decimilimiters_configs:
value=int(value*10)
if field in self._centiseconds_configs:
value=int(value/10)
command = self._config_commands[field]+str(value)
reply = self._config_commands[field].capitalize()+str(value)
print command, reply
self.send_command(command)
response = self.wait_for_reply(reply)
if response:
rospy.loginfo("%s set at %d" %(field, value))
else:
rospy.logerr("Something failed when setting %s set at %d, response code (%s)" %(field,value,self.last_response))
rospy.loginfo("Maybe, try again?")
def send_command(self, command):
for i in command:
self.ser.write(i)
rospy.sleep(0.001)
self.ser.write('\n')
def clear_reply_buf(self):
self.reply_buf=[]
def wait_for_reply(self, expected, timeout=10):
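        # Polls the reply buffer every 50 ms, so timeout * 20 iterations
        # is roughly `timeout` seconds of waiting before giving up.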
time_count=0
response=''
replied=False
        while not replied and time_count <= (timeout * 20):
if len(self.reply_buf)>0:
response = self.reply_buf.pop(0)
replied = True
else:
rospy.sleep(0.05)
time_count+=1
self.last_response = ''.join(response)
if self.last_response == expected:
return True
else:
if time_count > (timeout*20):
self.last_response = 'timeout'
return False
def initialise_penetrometer(self, retries=3):
self.clear_reply_buf()
self.send_command('@')
rospy.loginfo("waiting for initialisation confirmation")
response = self.wait_for_reply("@1")
if response:
rospy.loginfo("initialisation correct!")
else:
if retries > 0:
rospy.logwarn("wrong response try again")
self.initialise_penetrometer(retries=retries-1)
else:
rospy.logerr("too many fails!!")
def clear_errors(self):
self.clear_reply_buf()
self.send_command('f0')
rospy.loginfo("clearing errors")
response = self.wait_for_reply('F0 ')
if response:
rospy.loginfo("Errors cleared")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try clear error service again?")
def set_e_stop(self, enable):
self.clear_reply_buf()
if enable:
command = 'e1'
reply = 'E1'
else:
command = 'e0'
reply = 'E0'
self.send_command(command)
rospy.loginfo("clearing estop")
response = self.wait_for_reply(reply)
if response:
rospy.loginfo("estop reset")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try set estop service again?")
def set_power_enable(self, enable):
self.clear_reply_buf()
if enable:
command = 'p1'
reply = 'P1'
else:
command = 'p0'
reply = 'P0'
self.send_command(command)
rospy.loginfo("Enabling")
response = self.wait_for_reply(reply)
if response:
rospy.loginfo("Enabled")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try set power service again?")
def send_home(self):
self.clear_reply_buf()
self.send_command('z')
rospy.loginfo("Homing")
response = self.wait_for_reply('Z1', timeout=120)
if response:
rospy.loginfo("Homed")
else:
rospy.logerr("Something failed, response code (%s)" %self.last_response)
rospy.loginfo("Maybe, try send home service again?")
def get_data(self, timeout=10):
print "."
time_count=0
response=''
replied=False
        while (not replied) and time_count <= (timeout * 20):
#print (self.reply_buf)
if len(self.reply_buf)>0:
response = self.reply_buf.pop(0)
response = ''.join(response)
#print "data in ->", response
replied = True
else:
rospy.sleep(0.05)
time_count+=1
if time_count > (timeout*20):
#print "timed out"
return None
else:
return response
def executeCallback(self, goal):
self.cancelled=False
finished=False
self.depth_data=[]
self.force_data=[]
self.clear_reply_buf()
rospy.loginfo("Probing")
self.send_command('!1')
while not self.cancelled and not finished:
print "++"
data_str = self.get_data(timeout=15)
print data_str
if data_str:
if data_str == '!1':
finished=True
else:
print "appending"
cd=data_str.lstrip('*').split(',')
self.depth_data.append(int(cd[0]))
self.force_data.append(int(cd[1]))
else:
self.cancelled=True
rospy.loginfo("Probe finished")
self.send_home()
if not self.cancelled:
self._result.result = True
self._result.depth = self.depth_data
self._result.force = self.force_data
rospy.loginfo('Succeeded')
self._as.set_succeeded(self._result)
else:
self._as.set_preempted()
def preemptCallback(self):
self.cancelled=True
if __name__ == '__main__':
rospy.init_node('thorvald_penetrometer')
server = PenetrometerServer(rospy.get_name())
| [
"[email protected]"
]
| |
5ba3eb2c99cb4886c0d71494e016a22abad98aee | acbe6bd6cefaf8b12070d7258dab30e4f7fcebed | /ui/style.py | c5b1d5ee83c7a5fb029f0f3becf8dba8c57a3b3b | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | RogueScholar/debreate | 02c98c5a78d33041798410f0e3b99e80fda65d00 | dfe9bcac7333a53082b3a2ae169806cf604d59f6 | refs/heads/master | 2023-06-07T11:49:03.821969 | 2023-04-28T02:14:25 | 2023-04-28T02:14:25 | 253,707,766 | 0 | 0 | MIT | 2023-05-28T15:24:17 | 2020-04-07T06:34:47 | Python | UTF-8 | Python | false | false | 1,267 | py |
# ******************************************************
# * Copyright © 2016-2023 - Jordan Irwin (AntumDeluge) *
# ******************************************************
# * This software is licensed under the MIT license. *
# * See: LICENSE.txt for details. *
# ******************************************************
## @module ui.style
import wx
# FIXME: legacy wx version no longer supported
if wx.MAJOR_VERSION > 2:
PANEL_BORDER = wx.BORDER_THEME
else:
PANEL_BORDER = wx.BORDER_MASK
## Layout styles for sizers.
class layout:
ALGN_T = wx.ALIGN_TOP
ALGN_B = wx.ALIGN_BOTTOM
ALGN_L = wx.ALIGN_LEFT
ALGN_LT = ALGN_L|ALGN_T
ALGN_LB = ALGN_L|ALGN_B
ALGN_R = wx.ALIGN_RIGHT
ALGN_RT = ALGN_R|ALGN_T
ALGN_RB = ALGN_R|ALGN_B
ALGN_C = wx.ALIGN_CENTER
ALGN_CH = wx.ALIGN_CENTER_HORIZONTAL
ALGN_CV = wx.ALIGN_CENTER_VERTICAL
ALGN_CL = ALGN_CV|ALGN_L
ALGN_CR = ALGN_CV|ALGN_R
ALGN_CT = ALGN_CH|ALGN_T
ALGN_CB = ALGN_CH|ALGN_B
PAD_LT = wx.LEFT|wx.TOP
PAD_LB = wx.LEFT|wx.BOTTOM
PAD_LTB = PAD_LT|wx.BOTTOM
PAD_RT = wx.RIGHT|wx.TOP
PAD_RB = wx.RIGHT|wx.BOTTOM
PAD_RTB = PAD_RT|wx.BOTTOM
PAD_LR = wx.LEFT|wx.RIGHT
PAD_LRB = PAD_LR|wx.BOTTOM
PAD_LRT = PAD_LR|wx.TOP
PAD_TB = wx.TOP|wx.BOTTOM
| [
"[email protected]"
]
| |
8a9561159e82907417c9e0b374b3f8f11bf078ac | 72e11a80587342b3f278d4df18406cd4ce7531e8 | /pip-10.0.0.dev0-py3.6.egg/pip/_internal/index.py | 7c1c210b697a85f199e819826dd94ed5eab4da72 | []
| no_license | EnjoyLifeFund/Debian_py36_packages | 740666f290cef73a4f634558ccf3fd4926addeda | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | refs/heads/master | 2021-08-24T02:17:24.349195 | 2017-12-06T06:18:35 | 2017-12-06T06:18:35 | 113,167,612 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,528 | py | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import cgi
import itertools
import logging
import mimetypes
import os
import posixpath
import re
import sys
import warnings
from collections import namedtuple
from pip._vendor import html5lib, requests, six
from pip._vendor.distlib.compat import unescape
from pip._vendor.packaging import specifiers
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip._internal.compat import ipaddress
from pip._internal.download import HAS_TLS, is_url, path_to_url, url_to_path
from pip._internal.exceptions import (
BestVersionAlreadyInstalled, DistributionNotFound, InvalidWheelFilename,
UnsupportedWheel,
)
from pip._internal.models import PyPI
from pip._internal.pep425tags import get_supported
from pip._internal.utils.deprecation import RemovedInPip11Warning
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, cached_property, normalize_path,
splitext,
)
from pip._internal.utils.packaging import check_requires_python
from pip._internal.wheel import Wheel, wheel_ext
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
SECURE_ORIGINS = [
# protocol, hostname, port
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
# ssh is always secure.
("ssh", "*", "*"),
]
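# "*" acts as a wildcard for any field, and hostnames written in CIDR
# form (e.g. "127.0.0.0/8") are matched as IP networks rather than
# literal names; see _validate_secure_origin below.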
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None, platform=None,
versions=None, abi=None, implementation=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
:param platform: A string or None. If None, searches for packages
that are supported by the current system. Otherwise, will find
packages that can be built on the platform passed in. These
packages will only be downloaded for distribution: they will
not be built locally.
:param versions: A list of strings or None. This is passed directly
to pep425tags.py in the get_supported() method.
:param abi: A string or None. This is passed directly
to pep425tags.py in the get_supported() method.
:param implementation: A string or None. This is passed directly
to pep425tags.py in the get_supported() method.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# The valid tags to check potential found wheel candidates against
self.valid_tags = get_supported(
versions=versions,
platform=platform,
abi=abi,
impl=implementation,
)
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def get_formatted_locations(self):
lines = []
if self.index_urls and self.index_urls != [PyPI.simple_url]:
lines.append(
"Looking in indexes: {}".format(", ".join(self.index_urls))
)
if self.find_links:
lines.append(
"Looking in links: {}".format(", ".join(self.find_links))
)
return "\n".join(lines)
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip11Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
logger.warning(
"Url '%s' is ignored: it is neither a file "
"nor a directory.", url)
elif is_url(url):
# Only add url with clear scheme
urls.append(url)
else:
logger.warning(
"Url '%s' is ignored. It is either a non-existing "
"path or lacks a specific scheme.", url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min(self.valid_tags)
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(self.valid_tags)
build_tag = tuple()
if candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported(self.valid_tags):
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min(self.valid_tags))
if wheel.build_tag is not None:
match = re.match(r'^(\d+)(.*)$', wheel.build_tag)
build_tag_groups = match.groups()
build_tag = (int(build_tag_groups[0]), build_tag_groups[1])
else: # sdist
pri = -(support_num)
return (candidate.version, build_tag, pri)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# The protocol to use to see if the protocol matches.
# Don't count the repository type as part of the protocol: in
# cases such as "git+ssh", only use "ssh". (I.e., Only verify against
# the last scheme.)
protocol = origin[0].rsplit('+', 1)[-1]
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
if protocol != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if (origin[1] and
origin[1].lower() != secure_origin[1].lower() and
secure_origin[1] != "*"):
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
            # Check to see if the port matches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS we "
"recommend you use HTTPS instead, otherwise you may silence "
"this warning and allow it anyway with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(
url,
urllib_parse.quote(canonicalize_name(project_name)))
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
return [mkurl_pypi_url(url) for url in self.index_urls]
def find_all_candidates(self, project_name):
"""Find all available InstallationCandidate for project_name
This checks index_urls, find_links and dependency_links.
All versions found are returned as an InstallationCandidate list.
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url) for url in index_url_loc),
(Link(url) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = canonicalize_name(project_name)
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name, canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f') for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find a Link matching req
Expects req, an InstallRequirement and upgrade, a boolean
Returns a Link if found,
Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise
"""
all_candidates = self.find_all_candidates(req.name)
# Filter out anything which doesn't match our specifier
compatible_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(c.version) for c in all_candidates],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_candidates = [
# Again, converting to str to deal with debundling.
c for c in all_candidates if str(c.version) in compatible_versions
]
if applicable_candidates:
best_candidate = max(applicable_candidates,
key=self._candidate_sort_key)
else:
best_candidate = None
if req.satisfied_by is not None:
installed_version = parse_version(req.satisfied_by.version)
else:
installed_version = None
if installed_version is None and best_candidate is None:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(c.version) for c in all_candidates),
key=parse_version,
)
)
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
best_installed = False
if installed_version and (
best_candidate is None or
best_candidate.version <= installed_version):
best_installed = True
if not upgrade and installed_version is not None:
if best_installed:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
installed_version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
installed_version,
best_candidate.version,
)
return None
if best_installed:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
installed_version,
', '.join(sorted(compatible_versions, key=parse_version)) or
"none",
)
raise BestVersionAlreadyInstalled
logger.debug(
'Using version %s (newest of versions: %s)',
best_candidate.version,
', '.join(sorted(compatible_versions, key=parse_version))
)
return best_candidate.location
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors.
"""
seen = set()
for location in locations:
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if canonicalize_name(wheel.name) != search.canonical:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported(self.valid_tags):
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
try:
support_this_python = check_requires_python(link.requires_python)
except specifiers.InvalidSpecifier:
logger.debug("Package %s has an invalid Requires-Python entry: %s",
link.filename, link.requires_python)
support_this_python = True
if not support_this_python:
logger.debug("The package %s is incompatible with the python"
"version in use. Acceptable python versions are:%s",
link, link.requires_python)
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
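# e.g. egg_info_matches('foo-2.1', 'foo', link) == '2.1'; with
# search_name=None the slice keeps the dash, so the same input yields
# '-2.1'.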
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
transport_encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip._internal.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(resp.content, resp.url, resp.headers)
except requests.HTTPError as exc:
cls._handle_fail(link, exc, url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, meth=logger.info)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in {'http', 'https'}:
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
pyrequire = anchor.get('data-requires-python')
pyrequire = unescape(pyrequire) if pyrequire else None
yield Link(url, self, requires_python=pyrequire)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
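# Illustrative sketch (not part of pip itself): clean_link percent-encodes any
# character outside the allowed set while leaving existing '%' escapes alone;
# 'page' stands for any HTMLPage instance.
#   >>> page.clean_link('https://host/my wheel.whl')
#   'https://host/my%20wheel.whl'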
class Link(object):
def __init__(self, url, comes_from=None, requires_python=None):
"""
Object representing a parsed link from https://pypi.python.org/simple/*
url:
url of the resource pointed to (href of the link)
comes_from:
instance of HTMLPage where the link was found, or string.
requires_python:
String containing the `Requires-Python` metadata field, specified
in PEP 345. This may be specified by a data-requires-python
attribute in the HTML link tag, as described in PEP 503.
"""
# url can be a UNC windows share
if url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.requires_python = requires_python if requires_python else None
def __str__(self):
if self.requires_python:
rp = ' (requires-python:%s)' % self.requires_python
else:
rp = ''
if self.comes_from:
return '%s (from %s)%s' % (self.url, self.comes_from, rp)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
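    # Illustrative note (not in the original source): equality, ordering and
    # hashing all delegate to .url, so Link objects can be deduplicated in a
    # set and sorted directly, e.g.
    #   sorted({Link('https://h/p-1.0.tar.gz'), Link('https://h/p-1.1.tar.gz')})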
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
@property
def subdirectory_fragment(self):
match = self._subdirectory_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
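    # Illustrative sketch (not in the original source): for a made-up URL such
    # as 'https://host/pkg-1.0.tar.gz#sha256=0a1b2c', .hash_name is 'sha256'
    # and .hash is '0a1b2c'; a URL without a recognised checksum fragment
    # yields None for both properties.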
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip._internal.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = canonicalize_name(name)
other.discard(name)
target.add(name)
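# Illustrative sketch (not part of pip itself): how ':all:' and ':none:'
# interact in the helper above, using made-up values.
#   no_binary, only_binary = set(), set()
#   fmt_ctl_handle_mutual_exclude(':all:,:none:,numpy', no_binary, only_binary)
#   # no_binary == {'numpy'}: ':all:' selected everything, ':none:' cleared the
#   # selection again, and 'numpy' was then added (and removed from only_binary)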
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = {"binary", "source"}
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
# --- /all_data/exercism_data/python/rna-transcription/a033dd3f296e4281ad7fae26e02d0a4d.py (repo: itsolutionscorp/AutoStyle-Clustering, Python) ---
DNA_TO_RNA = {
    'G': 'C',
    'C': 'G',
    'T': 'A',
    'A': 'U',
}
def to_rna(dna):
rna = ''
for c in dna:
if c not in DNA_TO_RNA:
raise ValueError("illegal nucleotide '%s' in dna" % c)
rna = rna + DNA_TO_RNA[c]
return rna
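# A minimal usage sketch (added for illustration; not part of the original
# exercise solution):
if __name__ == "__main__":
    assert to_rna("GCTA") == "CGAU"  # each DNA nucleotide maps to its RNA complement
    try:
        to_rna("GCX")                # 'X' is not a valid nucleotide
    except ValueError as err:
        print(err)                   # -> illegal nucleotide 'X' in dna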
# --- /Code/asiportal/asiapp/wsgi.py (repo: FIU-SCIS-Senior-Projects/Academic-Success-Initiative---ASI-PantherCentric-1.0, Python) ---
"""
WSGI config for asiapp project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "asiapp.settings")
application = get_wsgi_application()
# --- /learning_rates.py (repo: boyko11/LogReg-DLAI, Python) ---
from logistic_regression import model
import data_service
import matplotlib.pyplot as plt
import numpy as np
train_set_x, train_set_y, test_set_x, test_set_y, _ = data_service.load_and_preprocess_data()
learning_rates = [0.01, 0.001, 0.0001]
models = {}
for i in learning_rates:
print ("learning rate is: " + str(i))
models[str(i)] = model(train_set_x, train_set_y, test_set_x, test_set_y, num_iterations = 1500, learning_rate = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in learning_rates:
plt.plot(np.squeeze(models[str(i)]["costs"]), label= str(models[str(i)]["learning_rate"]))
plt.ylabel('cost')
plt.xlabel('iterations (hundreds)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
# --- /脚本/llianli/cfapp_ei.py (repo: bingwin/tencent, Python) ---
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# ******************************************************************************
# Program name : cfapp_ei.py
# Description  : daily event counts for CF app visits
# Input        : yyyymmdd, e.g. 20151208
# Target table :
# Source table : teg_mta_intf::ieg_lol
# Author       : llianli
# Created on   : 2015-12-08
# Version      : v1.0
# Company      : tencent
# Modified by  :
# Modified on  :
# Reason       :
# ******************************************************************************
#import system module
# main entry
import datetime
import time
def TDW_PL(tdw, argv=[]):
tdw.WriteLog("== begin ==")
tdw.WriteLog("== argv[0] = " + argv[0] + " ==")
sDate = argv[0]
tdw.WriteLog("== sDate = " + sDate + " ==")
tdw.WriteLog("== connect tdw ==")
sql = """use ieg_qt_community_app"""
res = tdw.execute(sql)
sql = """set hive.inputfiles.splitbylinenum=true"""
res = tdw.execute(sql)
sql = """set hive.inputfiles.line_num_per_split=1000000"""
res = tdw.execute(sql)
    ## create the target table and write data
sql = '''
CREATE TABLE IF NOT EXISTS tb_cf_app_ei
(
fdate INT,
id INT,
ei1 STRING,
ei2 STRING,
uin_mac STRING,
uin STRING,
pv BIGINT
)
'''
tdw.WriteLog(sql)
res = tdw.execute(sql)
sql = ''' DELETE FROM tb_cf_app_ei WHERE fdate = %s '''%(sDate)
tdw.WriteLog(sql)
res = tdw.execute(sql)
    ## write each day's aggregated rows into the table
sql = '''
INSERT TABLE tb_cf_app_ei
SELECT
%s AS fdate,
id,
ei1,
ei2,
uin_info,
uin,
COUNT(*) AS pv
FROM
(
SELECT
id,
'all' AS ei1,
case
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') not in ('图片','手机','论坛','电脑','游戏')) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '资讯列表项')
then '情报站-资讯'
when (id = 1100679031 and ( ei in ('视频播放次数') or (ei = '资讯广告点击' and get_json_object(kv,'$.type') = '视频') ) ) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '视频列表项')
then '情报站-视频'
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') ='图片') or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '图片列表项')
then '情报站-图片'
when (id = 1100679031 and ei in ('情报站列表项点击') and get_json_object(kv,'$.type') in ('手机','电脑','论坛','游戏')) or
(id = 1200679031 and ei in ('情报站列表项') and get_json_object(kv,'$.info_list') = '活动列表项')
then '情报站-活动'
when (id = 1100679031 and ei = '我模块点击次数' ) or (id = 1200679031 and ei = '情报站社区基地我TAB点击次数' and get_json_object(kv,'$.type') = '我') then '我-战绩'
when (id = 1100679031 and ei = '我_战绩资产记录展示次数' and get_json_object(kv,'$.tab') = '装备') or (id = 1200679031 and ei = '战绩资产记录TAB点击次数' and get_json_object(kv,'$.type') = '资产') then '我-资产'
when (id = 1100679031 and ei = '我_战绩资产记录展示次数' and get_json_object(kv,'$.tab') = '记录') or (id = 1200679031 and ei = '战绩资产记录TAB点击次数' and get_json_object(kv,'$.type') = '记录') then '我-记录'
when (id = 1100679031 and ei = '客态资料' ) then '客态资料'
when (id = 1100679031 and ei = '道聚城点击次数') or (id = 1200679031 and ei = '道具城点击') then '基地-道聚城'
when (id = 1100679031 and ei = '火线_视频点击次数') or (id = 1200679031 and ei = '火线时刻视频点击次数') then '基地-火线时刻'
when (id = 1100679031 and ei = '我的仓库点击' ) or (id = 1200679031 and ei = '我的仓库点击') then '基地-我的仓库'
when (id = 1100679031 and ei = '军火基地点击次' ) or (id = 1200679031 and ei = '军火基地点击次') then '基地-军火基地'
when (id = 1100679031 and ei= '基地WEB页面点击次数' and get_json_object(kv,'$.title') = '周边商城') then '基地-周边商城'
when (id = 1100679031 and ei = '竞猜大厅入口' ) or (id = 1200679031 and ei = '竞猜大厅入口点击次数') then '基地-赛事竞猜'
when (id = 1100679031 and ei = '火线百科点击次数' ) or (id = 1200679031 and ei = '火线百科点击') then '基地-火线百科'
when (id = 1100679031 and ei = '火线助手点击次数' ) or (id = 1200679031 and ei = '火线助手') then '基地-火线助手'
when (id = 1100679031 and ei = '我的任务点击次数' ) or (id = 1200679031 and ei = '我的任务点击') then '基地-我的任务'
when (id = 1100679031 and ei = '地图点位模块点击次数' ) or (id = 1200679031 and ei = '地图点图') then '基地-地图点位'
when (id = 1100679031 and ei in ('每天用户发的消息' ,'每天用户发的消息')) then '社区-聊天'
when (id = 1100679031 and ei = '社区_CF论坛点击次数' ) or (id = 1200679031 and ei = 'CF论坛点击') then '社区-CF论坛'
when (id = 1100679031 and ei = '社区_CF手游论坛点击次数' ) or (id = 1200679031 and ei = '点击CF手游论坛') then '社区-CF手游论坛'
when (id = 1100679031 and ei = '社区_兴趣部落点击次数' ) or (id = 1200679031 and ei = 'CF兴趣部落') then '社区-兴趣部落'
ELSE 'other'
end as ei2,
concat(ui,mc) AS uin_info,
get_json_object(kv,'$.uin') AS uin
FROM teg_mta_intf::ieg_lol WHERE sdate = %s AND id in (1100679031,1200679031)
)t1 WHERE ei1 != 'other' AND ei2 != 'other'
GROUP BY id,ei1,ei2,uin_info,uin
'''%(sDate,sDate)
tdw.WriteLog(sql)
res = tdw.execute(sql)
tdw.WriteLog("== end OK ==")
# --- /src/operation/unittest.py (repo: jensl/critic, MIT/Apache-2.0, Python) ---
def independence():
# Simply check that operation can be imported.
import operation
print "independence: ok"
# --- /Codes/Version 1.6/force_raised_gaussian.py (repo: gharib85/Brownian-dynamics-in-a-time-varying-force-field, MIT, Python) ---
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on March 6, 2021
@author: asif
"""
import numpy as np
import pylab as py
import matplotlib as plt
ro = 2e-6
tfinal = 12
xrange_limit = 30e-6 # Max and min of x axis range for plotting animation
zlow_limit = -10e-6
zhigh_limit = 30e-6
r_active = 0
n_order = 1 # Order of the Gaussian potential = 2n
w_well = 10e-6 # 1/e *max width of the potential well
A_well = 4000*1.38e-23*300 # well depth
def draw_geo(tm, ax_xy, ax_yz, ax_xz):
# March 7, 2021
# The flag_source_state variable is used to draw/erase the source geometry only once
# This is necessary to speed up the animation.
global flag_source_state_1 # Make this variable global so that the assigned value remains saved globally as t changes
global flag_source_state_2
    if 'flag_source_state_1' not in globals():
        flag_source_state_1 = 0  # initialize with OFF state
        print('Defining global flag for source geometry \n')
    if 'flag_source_state_2' not in globals():
        flag_source_state_2 = 0  # initialize with OFF state
        print('Defining global flag for source geometry \n')
# Draw static geometry (only once)
if flag_source_state_2 < 1:
py.sca(ax_yz)
substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
py.gca().add_patch(substrate_yz)
py.sca(ax_xz)
substrate_xz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
py.gca().add_patch(substrate_xz)
py.sca(ax_xy)
substrate_xy = py.Rectangle((-xrange_limit*1e6, -xrange_limit*1e6),2*xrange_limit*1e6,2*xrange_limit*1e6,fc='#f9f9f9')
py.gca().add_patch(substrate_xy)
flag_source_state_2 = 1
# Draw source
if (tm > 1) & (tm < 8) & (flag_source_state_1 < 1):
patch_spot_xy = py.Circle((0, 0), 0.5*w_well*1e6, fc='#ff8c00',alpha = 0.8)
# patch_spot_yz = plt.patches.Arc((0, 0), 0.5*w_well*1e6, 0.5*w_well*1e6,0, 0, 180, fc='#ff8c00',alpha = 0.8)
py.sca(ax_xy)
py.gca().add_patch(patch_spot_xy)
# py.sca(ax_yz)
# py.gca().add_patch(patch_spot_yz)
flag_source_state_1 = 1
print('Drawing source\n')
# Erase source (draw a white circle)
if (tm > 8) & (flag_source_state_1 == 1):
patch_spot = py.Circle((0, 0), 0.51*w_well*1e6, fc='#f9f9f9',alpha = 1)
py.gca().add_patch(patch_spot)
print('Erasing source\n')
flag_source_state_1 = 0
# def draw_yz(tm):
# substrate_yz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_yz)
# def draw_xz(tm):
# substrate_xz = py.Rectangle((-xrange_limit*1e6, zlow_limit*1e6),2*xrange_limit*1e6, abs(zlow_limit)*1e6,fc='#d4d4d4', ec='k')
# py.gca().add_patch(substrate_xz)
# This is function that is called from the main program
# Simplified spring force model
def force_profile(r_in, t):
Np = r_in[0,:].size
fm = np.zeros((3,Np))
r_norm = np.linalg.norm(r_in, axis = 0) + 1e-30
g = A_well*np.exp(-(r_norm/w_well)**(2*n_order))
if (t > 1) & (t<8):
fm[0,:] = -2*n_order*r_in[0,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
fm[1,:] = -2*n_order*r_in[1,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
fm[2,:] = -2*n_order*r_in[2,:]/(r_norm**2) * (r_norm/w_well)**(2*n_order) * g
# fm[:,2] = 0
# fm[:,3] = 0
# fm[:,4] = 0
# fm[:,5] = 0
# fm[:,6] = 0
return fm
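# Note added for clarity (not in the original file): the force above is the
# negative gradient of an attractive super-Gaussian potential well
#   U(r) = -A_well * exp(-(r / w_well)**(2 * n_order))
# giving
#   F(r) = -(2 * n_order / r) * (r / w_well)**(2 * n_order)
#          * A_well * exp(-(r / w_well)**(2 * n_order)) * r_hat,
# so the force points toward the well centre and vanishes both at r = 0 and
# as r -> infinity.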
def force_plot():
Np = 1
rin = np.zeros((3,Np))
r_in = np.tile(np.linspace(-xrange_limit,xrange_limit,200),(3,1))
F = force_profile(r_in,2)
py.figure()
py.plot(r_in[0,:]*1e6,F[0,:]*1e12, label = '$F_x$')
# py.plot(r_in[1,:]*1e6,F[1,:]*1e12,'.', label = '$F_y$')
# py.plot(r_in[2,:]*1e6,F[2,:]*1e12,'x', label = '$F_z$')
py.xlabel('$x$ ($\mu$m)')
py.ylabel('Force (pN)')
py.legend()
# force_plot()
# draw_source(9)
# --- /supplier_management/supplier_management/doctype/supplier_product_info/supplier_product_info.py (repo: ashish-greycube/supplier_management, MIT, Python) ---
# -*- coding: utf-8 -*-
# Copyright (c) 2019, GreyCube Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class SupplierProductInfo(Document):
pass
# --- /app.py (repo: MarcoBurgos/MasterCryptoTeam, Python) ---
from mastercrypto import app
if __name__ == '__main__':
app.run(debug=True)
# --- /python/xlrd_and_xlwt/xlrd_test.py (repo: alinzel/NOTES, Python) ---
# xlrd -- a library for reading and formatting data from Excel files, both .xls and .xlsx
import xlrd
# Open the Excel file; returns a Book instance - <xlrd.book.Book object at 0x000001ED41180898>
excel = xlrd.open_workbook(r"./excel/2017年人员电子档案.xlsx")  # r --> raw string, no escape processing
# Get the sheet names; returns a list of names - ['2017-6-22', '测试']
sheet_names = excel.sheet_names()
# Get the sheet objects; returns a list - [<xlrd.sheet.Sheet object at 0x0000023A57014CC0>, <xlrd.sheet.Sheet object at 0x0000023A57014CF8>]
sheets = excel.sheets()
# Get the number of sheets; returns an int - 2
sheet_num = excel.nsheets
# Get a single sheet object
sheet_index = excel.sheet_by_index(0)  # by index
sheet_name = excel.sheet_by_name("测试")  # by name
# Basic information about a sheet object
name = sheet_index.name  # the sheet's name
rows = sheet_index.nrows  # number of rows
cols = sheet_index.ncols  # number of columns
# Read cells in bulk
row_value = sheet_index.row_values(2, 0, 4)  # values of row index 2, columns 0 to 4 (end exclusive); returns a list
col_value = sheet_index.col_values(0, 0, 4)
row = sheet_index.row(2)  # values and types of one row, no slicing - [text:'123', text:'456', text:'789', text:'147', text:'11111111', text:'258', text:'']
col = sheet_index.col(1)
slice_row = sheet_index.row_slice(2, 0, 4)  # values and types of one row, with slicing
slice_col = sheet_index.col_slice(0, 0, 4)
# Read a specific cell
cell_value = sheet_index.cell(1, 2).value  # value at row index 1, column index 2
cell_value_ = sheet_index.cell_value(1, 2)
# Cell-name helpers
print(xlrd.cellname(0, 1))
print(xlrd.cellnameabs(0, 1))
print(xlrd.colname(8))
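# For reference (added comment): these helpers map 0-based coordinates to
# spreadsheet notation - cellname(0, 1) -> 'B1', cellnameabs(0, 1) -> '$B$1',
# colname(8) -> 'I'.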
# Write to the database
import pymysql
# Connect to the database
coon = pymysql.connect(
    host="192.168.200.10",
    db="test_zwl",
    user="bdsdata",
    password="357135",
    port=3306
)
cur = coon.cursor()
# Query (example)
# sql = "select * from file"
# cur.execute(sql)
# result = cur.fetchone()
# print(result)
# Insert data
row_num = sheet_index.nrows
col_num = sheet_index.ncols
# Build one SQL statement for a bulk insert: values (...),(...),(...) rather than inserting row by row
sql = "insert into file values"
for i in range(1, row_num):  # iterate over the rows
    for j in range(0, col_num):  # iterate over the columns
        item = sheet_index.cell_value(i, j)  # value of the given cell
        # An empty value can be stored in two ways: an empty string (shown
        # blank in the database) or NULL (shown as null, and not quoted)
        if item == "":
            item = "Null"
            value = str(item)
        else:
            value = '"' + str(item) + '"'
        if i != row_num-1:
            if j == 0:
                sql += "(" + str(i) + "," + value + ","  # values must be quoted (else MySQL error 1064), but NULL must stay unquoted
elif j == col_num-1:
sql += value + "),"
else:
sql += value + ","
else:
if j == 0 :
sql += "(" + str(i) + ","+ value + ","
elif j == col_num-1:
sql += value + ")"
else:
sql += value + ","
# break
# print(sql)
# try:
# cur.execute(sql)
# coon.commit() # don't forget to commit
# except:
# coon.rollback()
value_list = []
for i in range(1, row_num):
    row_v = sheet_index.row_values(i)
    row_v = [None if row == "" else row for row in row_v]  # None is stored as NULL in the database
    value_list.append(row_v)
sql_many = "insert into file (name,area,department,job_state,phone,in_date,out_date) values (%s,%s,%s,%s,%s,%s,%s)"
try:
    cur.executemany(sql_many, value_list)
    coon.commit()  # don't forget to commit
except Exception as e:
    print(e)
    coon.rollback()
cur.close()
coon.close()
# --- /max.py (repo: Dhineshkumarraveendiran/Guvi, Python) ---
# read the element count, then the list of numbers, and print the maximum
n = int(input())
li = list(map(int, input().split()))
max1 = max(li)
print(max1)
# --- /QA-System-master/SpeechToText_test/google-cloud-sdk/lib/googlecloudsdk/third_party/apis/storage/v1/storage_v1_messages.py (repo: iofh/QA-System, Apache-2.0, Python) ---
"""Generated message classes for storage version v1.
Stores and retrieves potentially large, immutable data objects.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import message_types as _message_types
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
from apitools.base.py import extra_types
package = 'storage'
class Bucket(_messages.Message):
r"""A bucket.
Messages:
BillingValue: The bucket's billing configuration.
CorsValueListEntry: A CorsValueListEntry object.
EncryptionValue: Encryption configuration for a bucket.
IamConfigurationValue: The bucket's IAM configuration.
LabelsValue: User-provided labels, in key/value pairs.
LifecycleValue: The bucket's lifecycle configuration. See lifecycle
management for more information.
LoggingValue: The bucket's logging configuration, which defines the
destination bucket and optional name prefix for the current bucket's
logs.
OwnerValue: The owner of the bucket. This is always the project team's
owner group.
RetentionPolicyValue: The bucket's retention policy. The retention policy
enforces a minimum retention time for all objects contained in the
bucket, based on their creation time. Any attempt to overwrite or delete
objects younger than the retention period will result in a
PERMISSION_DENIED error. An unlocked retention policy can be modified or
removed from the bucket via a storage.buckets.update operation. A locked
retention policy cannot be removed or shortened in duration for the
lifetime of the bucket. Attempting to remove or decrease period of a
locked retention policy will result in a PERMISSION_DENIED error.
VersioningValue: The bucket's versioning configuration.
WebsiteValue: The bucket's website configuration, controlling how the
service behaves when accessing bucket contents as a web site. See the
Static Website Examples for more information.
Fields:
acl: Access controls on the bucket.
billing: The bucket's billing configuration.
cors: The bucket's Cross-Origin Resource Sharing (CORS) configuration.
defaultEventBasedHold: The default value for event-based hold on newly
created objects in this bucket. Event-based hold is a way to retain
objects indefinitely until an event occurs, signified by the hold's
release. After being released, such objects will be subject to bucket-
level retention (if any). One sample use case of this flag is for banks
to hold loan documents for at least 3 years after loan is paid in full.
Here, bucket-level retention is 3 years and the event is loan being paid
in full. In this example, these objects will be held intact for any
number of years until the event has occurred (event-based hold on the
object is released) and then 3 more years after that. That means
retention duration of the objects begins from the moment event-based
hold transitioned from true to false. Objects under event-based hold
cannot be deleted, overwritten or archived until the hold is removed.
defaultObjectAcl: Default access controls to apply to new objects when no
ACL is provided.
encryption: Encryption configuration for a bucket.
etag: HTTP 1.1 Entity tag for the bucket.
iamConfiguration: The bucket's IAM configuration.
id: The ID of the bucket. For buckets, the id and name properties are the
same.
kind: The kind of item this is. For buckets, this is always
storage#bucket.
labels: User-provided labels, in key/value pairs.
lifecycle: The bucket's lifecycle configuration. See lifecycle management
for more information.
location: The location of the bucket. Object data for objects in the
bucket resides in physical storage within this region. Defaults to US.
See the developer's guide for the authoritative list.
locationType: The type of the bucket location.
logging: The bucket's logging configuration, which defines the destination
bucket and optional name prefix for the current bucket's logs.
metageneration: The metadata generation of this bucket.
name: The name of the bucket.
owner: The owner of the bucket. This is always the project team's owner
group.
projectNumber: The project number of the project the bucket belongs to.
retentionPolicy: The bucket's retention policy. The retention policy
enforces a minimum retention time for all objects contained in the
bucket, based on their creation time. Any attempt to overwrite or delete
objects younger than the retention period will result in a
PERMISSION_DENIED error. An unlocked retention policy can be modified or
removed from the bucket via a storage.buckets.update operation. A locked
retention policy cannot be removed or shortened in duration for the
lifetime of the bucket. Attempting to remove or decrease period of a
locked retention policy will result in a PERMISSION_DENIED error.
selfLink: The URI of this bucket.
storageClass: The bucket's default storage class, used whenever no
storageClass is specified for a newly-created object. This defines how
objects in the bucket are stored and determines the SLA and the cost of
storage. Values include MULTI_REGIONAL, REGIONAL, STANDARD, NEARLINE,
COLDLINE, ARCHIVE, and DURABLE_REDUCED_AVAILABILITY. If this value is
not specified when the bucket is created, it will default to STANDARD.
For more information, see storage classes.
timeCreated: The creation time of the bucket in RFC 3339 format.
updated: The modification time of the bucket in RFC 3339 format.
versioning: The bucket's versioning configuration.
website: The bucket's website configuration, controlling how the service
behaves when accessing bucket contents as a web site. See the Static
Website Examples for more information.
zoneAffinity: The zone or zones from which the bucket is intended to use
zonal quota. Requests for data from outside the specified affinities are
still allowed but won't be able to use zonal quota. The zone or zones
need to be within the bucket location otherwise the requests will fail
with a 400 Bad Request response.
zoneSeparation: If set, objects placed in this bucket are required to be
separated by disaster domain.
"""
class BillingValue(_messages.Message):
r"""The bucket's billing configuration.
Fields:
requesterPays: When set to true, Requester Pays is enabled for this
bucket.
"""
requesterPays = _messages.BooleanField(1)
class CorsValueListEntry(_messages.Message):
r"""A CorsValueListEntry object.
Fields:
maxAgeSeconds: The value, in seconds, to return in the Access-Control-
Max-Age header used in preflight responses.
method: The list of HTTP methods on which to include CORS response
headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
of methods, and means "any method".
origin: The list of Origins eligible to receive CORS response headers.
Note: "*" is permitted in the list of origins, and means "any Origin".
responseHeader: The list of HTTP headers other than the simple response
headers to give permission for the user-agent to share across domains.
"""
maxAgeSeconds = _messages.IntegerField(1, variant=_messages.Variant.INT32)
method = _messages.StringField(2, repeated=True)
origin = _messages.StringField(3, repeated=True)
responseHeader = _messages.StringField(4, repeated=True)
class EncryptionValue(_messages.Message):
r"""Encryption configuration for a bucket.
Fields:
defaultKmsKeyName: A Cloud KMS key that will be used to encrypt objects
inserted into this bucket, if no encryption method is specified.
"""
defaultKmsKeyName = _messages.StringField(1)
class IamConfigurationValue(_messages.Message):
r"""The bucket's IAM configuration.
Messages:
BucketPolicyOnlyValue: The bucket's uniform bucket-level access
configuration. The feature was formerly known as Bucket Policy Only.
For backward compatibility, this field will be populated with
identical information as the uniformBucketLevelAccess field. We
recommend using the uniformBucketLevelAccess field to enable and
disable the feature.
UniformBucketLevelAccessValue: The bucket's uniform bucket-level access
configuration.
Fields:
bucketPolicyOnly: The bucket's uniform bucket-level access
configuration. The feature was formerly known as Bucket Policy Only.
For backward compatibility, this field will be populated with
identical information as the uniformBucketLevelAccess field. We
recommend using the uniformBucketLevelAccess field to enable and
disable the feature.
uniformBucketLevelAccess: The bucket's uniform bucket-level access
configuration.
"""
class BucketPolicyOnlyValue(_messages.Message):
r"""The bucket's uniform bucket-level access configuration. The feature
was formerly known as Bucket Policy Only. For backward compatibility,
this field will be populated with identical information as the
uniformBucketLevelAccess field. We recommend using the
uniformBucketLevelAccess field to enable and disable the feature.
Fields:
enabled: If set, access is controlled only by bucket-level or above
IAM policies.
lockedTime: The deadline for changing
iamConfiguration.bucketPolicyOnly.enabled from true to false in RFC
3339 format. iamConfiguration.bucketPolicyOnly.enabled may be
changed from true to false until the locked time, after which the
field is immutable.
"""
enabled = _messages.BooleanField(1)
lockedTime = _message_types.DateTimeField(2)
class UniformBucketLevelAccessValue(_messages.Message):
r"""The bucket's uniform bucket-level access configuration.
Fields:
enabled: If set, access is controlled only by bucket-level or above
IAM policies.
lockedTime: The deadline for changing
iamConfiguration.uniformBucketLevelAccess.enabled from true to false
in RFC 3339 format.
iamConfiguration.uniformBucketLevelAccess.enabled may be changed
from true to false until the locked time, after which the field is
immutable.
"""
enabled = _messages.BooleanField(1)
lockedTime = _message_types.DateTimeField(2)
bucketPolicyOnly = _messages.MessageField('BucketPolicyOnlyValue', 1)
uniformBucketLevelAccess = _messages.MessageField('UniformBucketLevelAccessValue', 2)
@encoding.MapUnrecognizedFields('additionalProperties')
class LabelsValue(_messages.Message):
r"""User-provided labels, in key/value pairs.
Messages:
AdditionalProperty: An additional property for a LabelsValue object.
Fields:
additionalProperties: An individual label entry.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a LabelsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class LifecycleValue(_messages.Message):
r"""The bucket's lifecycle configuration. See lifecycle management for
more information.
Messages:
RuleValueListEntry: A RuleValueListEntry object.
Fields:
rule: A lifecycle management rule, which is made of an action to take
and the condition(s) under which the action will be taken.
"""
class RuleValueListEntry(_messages.Message):
r"""A RuleValueListEntry object.
Messages:
ActionValue: The action to take.
ConditionValue: The condition(s) under which the action will be taken.
Fields:
action: The action to take.
condition: The condition(s) under which the action will be taken.
"""
class ActionValue(_messages.Message):
r"""The action to take.
Fields:
storageClass: Target storage class. Required iff the type of the
action is SetStorageClass.
type: Type of the action. Currently, only Delete and SetStorageClass
are supported.
"""
storageClass = _messages.StringField(1)
type = _messages.StringField(2)
class ConditionValue(_messages.Message):
r"""The condition(s) under which the action will be taken.
Fields:
age: Age of an object (in days). This condition is satisfied when an
object reaches the specified age.
createdBefore: A date in RFC 3339 format with only the date part
(for instance, "2013-01-15"). This condition is satisfied when an
object is created before midnight of the specified date in UTC.
customTimeBefore: A timestamp in RFC 3339 format. This condition is
satisfied when the custom time on an object is before this
timestamp.
daysSinceCustomTime: Number of days elapsed since the user-specified
timestamp set on an object. The condition is satisfied if the days
elapsed is at least this number. If no custom timestamp is
specified on an object, the condition does not apply.
daysSinceNoncurrentTime: Number of days elapsed since the noncurrent
timestamp of an object. The condition is satisfied if the days
elapsed is at least this number. This condition is relevant only
for versioned objects. The value of the field must be a
nonnegative integer. If it's zero, the object version will become
eligible for Lifecycle action as soon as it becomes noncurrent.
isLive: Relevant only for versioned objects. If the value is true,
this condition matches live objects; if the value is false, it
matches archived objects.
matchesPattern: A regular expression that satisfies the RE2 syntax.
This condition is satisfied when the name of the object matches
the RE2 pattern. Note: This feature is currently in the "Early
Access" launch stage and is only available to a whitelisted set of
users; that means that this feature may be changed in backward-
incompatible ways and that it is not guaranteed to be released.
matchesStorageClass: Objects having any of the storage classes
specified by this condition will be matched. Values include
MULTI_REGIONAL, REGIONAL, NEARLINE, COLDLINE, ARCHIVE, STANDARD,
and DURABLE_REDUCED_AVAILABILITY.
noncurrentTimeBefore: A timestamp in RFC 3339 format. This condition
is satisfied when the noncurrent time on an object is before this
timestamp. This condition is relevant only for versioned objects.
numNewerVersions: Relevant only for versioned objects. If the value
is N, this condition is satisfied when there are at least N
versions (including the live version) newer than this version of
the object.
"""
age = _messages.IntegerField(1, variant=_messages.Variant.INT32)
createdBefore = extra_types.DateField(2)
customTimeBefore = _message_types.DateTimeField(3)
daysSinceCustomTime = _messages.IntegerField(4, variant=_messages.Variant.INT32)
daysSinceNoncurrentTime = _messages.IntegerField(5, variant=_messages.Variant.INT32)
isLive = _messages.BooleanField(6)
matchesPattern = _messages.StringField(7)
matchesStorageClass = _messages.StringField(8, repeated=True)
noncurrentTimeBefore = _message_types.DateTimeField(9)
numNewerVersions = _messages.IntegerField(10, variant=_messages.Variant.INT32)
action = _messages.MessageField('ActionValue', 1)
condition = _messages.MessageField('ConditionValue', 2)
rule = _messages.MessageField('RuleValueListEntry', 1, repeated=True)
class LoggingValue(_messages.Message):
r"""The bucket's logging configuration, which defines the destination
bucket and optional name prefix for the current bucket's logs.
Fields:
logBucket: The destination bucket where the current bucket's logs should
be placed.
logObjectPrefix: A prefix for log object names.
"""
logBucket = _messages.StringField(1)
logObjectPrefix = _messages.StringField(2)
class OwnerValue(_messages.Message):
r"""The owner of the bucket. This is always the project team's owner
group.
Fields:
entity: The entity, in the form project-owner-projectId.
entityId: The ID for the entity.
"""
entity = _messages.StringField(1)
entityId = _messages.StringField(2)
class RetentionPolicyValue(_messages.Message):
r"""The bucket's retention policy. The retention policy enforces a minimum
retention time for all objects contained in the bucket, based on their
creation time. Any attempt to overwrite or delete objects younger than the
retention period will result in a PERMISSION_DENIED error. An unlocked
retention policy can be modified or removed from the bucket via a
storage.buckets.update operation. A locked retention policy cannot be
removed or shortened in duration for the lifetime of the bucket.
Attempting to remove or decrease period of a locked retention policy will
result in a PERMISSION_DENIED error.
Fields:
effectiveTime: Server-determined value that indicates the time from
which policy was enforced and effective. This value is in RFC 3339
format.
isLocked: Once locked, an object retention policy cannot be modified.
retentionPeriod: The duration in seconds that objects need to be
retained. Retention duration must be greater than zero and less than
100 years. Note that enforcement of retention periods less than a day
is not guaranteed. Such periods should only be used for testing
purposes.
"""
effectiveTime = _message_types.DateTimeField(1)
isLocked = _messages.BooleanField(2)
retentionPeriod = _messages.IntegerField(3)
class VersioningValue(_messages.Message):
r"""The bucket's versioning configuration.
Fields:
enabled: While set to true, versioning is fully enabled for this bucket.
"""
enabled = _messages.BooleanField(1)
class WebsiteValue(_messages.Message):
r"""The bucket's website configuration, controlling how the service
behaves when accessing bucket contents as a web site. See the Static
Website Examples for more information.
Fields:
mainPageSuffix: If the requested object path is missing, the service
will ensure the path has a trailing '/', append this suffix, and
attempt to retrieve the resulting object. This allows the creation of
index.html objects to represent directory pages.
notFoundPage: If the requested object path is missing, and any
mainPageSuffix object is missing, if applicable, the service will
return the named object from this bucket as the content for a 404 Not
Found result.
"""
mainPageSuffix = _messages.StringField(1)
notFoundPage = _messages.StringField(2)
acl = _messages.MessageField('BucketAccessControl', 1, repeated=True)
billing = _messages.MessageField('BillingValue', 2)
cors = _messages.MessageField('CorsValueListEntry', 3, repeated=True)
defaultEventBasedHold = _messages.BooleanField(4)
defaultObjectAcl = _messages.MessageField('ObjectAccessControl', 5, repeated=True)
encryption = _messages.MessageField('EncryptionValue', 6)
etag = _messages.StringField(7)
iamConfiguration = _messages.MessageField('IamConfigurationValue', 8)
id = _messages.StringField(9)
kind = _messages.StringField(10, default='storage#bucket')
labels = _messages.MessageField('LabelsValue', 11)
lifecycle = _messages.MessageField('LifecycleValue', 12)
location = _messages.StringField(13)
locationType = _messages.StringField(14)
logging = _messages.MessageField('LoggingValue', 15)
metageneration = _messages.IntegerField(16)
name = _messages.StringField(17)
owner = _messages.MessageField('OwnerValue', 18)
projectNumber = _messages.IntegerField(19, variant=_messages.Variant.UINT64)
retentionPolicy = _messages.MessageField('RetentionPolicyValue', 20)
selfLink = _messages.StringField(21)
storageClass = _messages.StringField(22)
timeCreated = _message_types.DateTimeField(23)
updated = _message_types.DateTimeField(24)
versioning = _messages.MessageField('VersioningValue', 25)
website = _messages.MessageField('WebsiteValue', 26)
zoneAffinity = _messages.StringField(27, repeated=True)
zoneSeparation = _messages.BooleanField(28)
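# Illustrative sketch (added for clarity; not part of the generated file):
# these classes are ordinary apitools messages, so a Bucket can be built like
# any other message. The field names are real; the sample values are made up.
#   bucket = Bucket(
#       name='example-bucket',
#       location='US',
#       versioning=Bucket.VersioningValue(enabled=True),
#   )
#   print(bucket.kind)  # -> 'storage#bucket' (schema default)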
class BucketAccessControl(_messages.Message):
r"""An access-control entry.
Messages:
ProjectTeamValue: The project team associated with the entity, if any.
Fields:
bucket: The name of the bucket.
domain: The domain associated with the entity, if any.
email: The email address associated with the entity, if any.
entity: The entity holding the permission, in one of the following forms:
- user-userId - user-email - group-groupId - group-email - domain-
domain - project-team-projectId - allUsers - allAuthenticatedUsers
Examples: - The user [email protected] would be [email protected]. -
The group [email protected] would be group-
[email protected]. - To refer to all members of the Google Apps
for Business domain example.com, the entity would be domain-example.com.
entityId: The ID for the entity, if any.
etag: HTTP 1.1 Entity tag for the access-control entry.
id: The ID of the access-control entry.
kind: The kind of item this is. For bucket access control entries, this is
always storage#bucketAccessControl.
projectTeam: The project team associated with the entity, if any.
role: The access permission for the entity.
selfLink: The link to this access-control entry.
"""
class ProjectTeamValue(_messages.Message):
r"""The project team associated with the entity, if any.
Fields:
projectNumber: The project number.
team: The team.
"""
projectNumber = _messages.StringField(1)
team = _messages.StringField(2)
bucket = _messages.StringField(1)
domain = _messages.StringField(2)
email = _messages.StringField(3)
entity = _messages.StringField(4)
entityId = _messages.StringField(5)
etag = _messages.StringField(6)
id = _messages.StringField(7)
kind = _messages.StringField(8, default='storage#bucketAccessControl')
projectTeam = _messages.MessageField('ProjectTeamValue', 9)
role = _messages.StringField(10)
selfLink = _messages.StringField(11)
class BucketAccessControls(_messages.Message):
r"""An access-control list.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of bucket access control
entries, this is always storage#bucketAccessControls.
"""
items = _messages.MessageField('BucketAccessControl', 1, repeated=True)
kind = _messages.StringField(2, default='storage#bucketAccessControls')
class Buckets(_messages.Message):
r"""A list of buckets.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of buckets, this is always
storage#buckets.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
"""
items = _messages.MessageField('Bucket', 1, repeated=True)
kind = _messages.StringField(2, default='storage#buckets')
nextPageToken = _messages.StringField(3)
class Channel(_messages.Message):
r"""An notification channel used to watch for resource changes.
Messages:
ParamsValue: Additional parameters controlling delivery channel behavior.
Optional.
Fields:
address: The address where notifications are delivered for this channel.
expiration: Date and time of notification channel expiration, expressed as
a Unix timestamp, in milliseconds. Optional.
id: A UUID or similar unique string that identifies this channel.
kind: Identifies this as a notification channel used to watch for changes
to a resource, which is "api#channel".
params: Additional parameters controlling delivery channel behavior.
Optional.
payload: A Boolean value to indicate whether payload is wanted. Optional.
resourceId: An opaque ID that identifies the resource being watched on
this channel. Stable across different API versions.
resourceUri: A version-specific identifier for the watched resource.
token: An arbitrary string delivered to the target address with each
notification delivered over this channel. Optional.
type: The type of delivery mechanism used for this channel.
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class ParamsValue(_messages.Message):
r"""Additional parameters controlling delivery channel behavior. Optional.
Messages:
AdditionalProperty: An additional property for a ParamsValue object.
Fields:
additionalProperties: Declares a new parameter by name.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a ParamsValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
address = _messages.StringField(1)
expiration = _messages.IntegerField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default='api#channel')
params = _messages.MessageField('ParamsValue', 5)
payload = _messages.BooleanField(6)
resourceId = _messages.StringField(7)
resourceUri = _messages.StringField(8)
token = _messages.StringField(9)
type = _messages.StringField(10)
class Channels(_messages.Message):
r"""A channels list response.
Messages:
ItemsValueListEntry: A ItemsValueListEntry object.
Fields:
items: The list of notification channels for a bucket.
kind: The kind of item this is.
"""
class ItemsValueListEntry(_messages.Message):
r"""A ItemsValueListEntry object.
Fields:
channel_id: User-specified name for a channel. Needed to unsubscribe.
creation_time_ms: 64-bit Unix timestamp in ms of when the channel was
created.
push_url: Url used to identify where notifications are sent to.
resource_id: Opaque value generated by GCS representing a bucket. Needed
to unsubscribe.
subscriber_email: Email address of the subscriber.
"""
channel_id = _messages.StringField(1)
creation_time_ms = _messages.IntegerField(2)
push_url = _messages.StringField(3)
resource_id = _messages.StringField(4)
subscriber_email = _messages.StringField(5)
items = _messages.MessageField('ItemsValueListEntry', 1, repeated=True)
kind = _messages.StringField(2, default='storage#channels')
class ComposeRequest(_messages.Message):
r"""A Compose request.
Messages:
SourceObjectsValueListEntry: A SourceObjectsValueListEntry object.
Fields:
destination: Properties of the resulting object.
kind: The kind of item this is.
sourceObjects: The list of source objects that will be concatenated into a
single object.
"""
class SourceObjectsValueListEntry(_messages.Message):
r"""A SourceObjectsValueListEntry object.
Messages:
ObjectPreconditionsValue: Conditions that must be met for this operation
to execute.
Fields:
generation: The generation of this object to use as the source.
name: The source object's name. All source objects must reside in the
same bucket.
objectPreconditions: Conditions that must be met for this operation to
execute.
"""
class ObjectPreconditionsValue(_messages.Message):
r"""Conditions that must be met for this operation to execute.
Fields:
ifGenerationMatch: Only perform the composition if the generation of
the source object that would be used matches this value. If this
value and a generation are both specified, they must be the same
value or the call will fail.
"""
ifGenerationMatch = _messages.IntegerField(1)
generation = _messages.IntegerField(1)
name = _messages.StringField(2)
objectPreconditions = _messages.MessageField('ObjectPreconditionsValue', 3)
destination = _messages.MessageField('Object', 1)
kind = _messages.StringField(2, default='storage#composeRequest')
sourceObjects = _messages.MessageField('SourceObjectsValueListEntry', 3, repeated=True)
class Expr(_messages.Message):
r"""Represents an expression text. Example: title: "User account presence"
description: "Determines whether the request has a user account" expression:
"size(request.user) > 0"
Fields:
description: An optional description of the expression. This is a longer
text which describes the expression, e.g. when hovered over it in a UI.
expression: Textual representation of an expression in Common Expression
Language syntax. The application context of the containing message
determines which well-known feature set of CEL is supported.
location: An optional string indicating the location of the expression for
error reporting, e.g. a file name and a position in the file.
title: An optional title for the expression, i.e. a short string
describing its purpose. This can be used e.g. in UIs which allow to
enter the expression.
"""
description = _messages.StringField(1)
expression = _messages.StringField(2)
location = _messages.StringField(3)
title = _messages.StringField(4)
class HmacKey(_messages.Message):
r"""JSON template to produce a JSON-style HMAC Key resource for Create
responses.
Fields:
kind: The kind of item this is. For HMAC keys, this is always
storage#hmacKey.
metadata: Key metadata.
secret: HMAC secret key material.
"""
kind = _messages.StringField(1, default='storage#hmacKey')
metadata = _messages.MessageField('HmacKeyMetadata', 2)
secret = _messages.StringField(3)
class HmacKeyMetadata(_messages.Message):
r"""JSON template to produce a JSON-style HMAC Key metadata resource.
Fields:
accessId: The ID of the HMAC Key.
etag: HTTP 1.1 Entity tag for the HMAC key.
id: The ID of the HMAC key, including the Project ID and the Access ID.
kind: The kind of item this is. For HMAC Key metadata, this is always
storage#hmacKeyMetadata.
projectId: Project ID owning the service account to which the key
authenticates.
selfLink: The link to this resource.
serviceAccountEmail: The email address of the key's associated service
account.
state: The state of the key. Can be one of ACTIVE, INACTIVE, or DELETED.
timeCreated: The creation time of the HMAC key in RFC 3339 format.
updated: The last modification time of the HMAC key metadata in RFC 3339
format.
"""
accessId = _messages.StringField(1)
etag = _messages.StringField(2)
id = _messages.StringField(3)
kind = _messages.StringField(4, default='storage#hmacKeyMetadata')
projectId = _messages.StringField(5)
selfLink = _messages.StringField(6)
serviceAccountEmail = _messages.StringField(7)
state = _messages.StringField(8)
timeCreated = _message_types.DateTimeField(9)
updated = _message_types.DateTimeField(10)
class HmacKeysMetadata(_messages.Message):
r"""A list of hmacKeys.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of hmacKeys, this is always
storage#hmacKeysMetadata.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
"""
items = _messages.MessageField('HmacKeyMetadata', 1, repeated=True)
kind = _messages.StringField(2, default='storage#hmacKeysMetadata')
nextPageToken = _messages.StringField(3)
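# Illustrative sketch (not generated code): paging through HMAC key
# metadata with nextPageToken. `list_hmac_keys_page` is a hypothetical
# callable that returns one HmacKeysMetadata message per page.
#
#   page_token = None
#   while True:
#       page = list_hmac_keys_page(page_token)
#       for key_md in page.items:
#           print(key_md.accessId, key_md.state)
#       page_token = page.nextPageToken
#       if not page_token:
#           break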
class Notification(_messages.Message):
r"""A subscription to receive Google PubSub notifications.
Messages:
CustomAttributesValue: An optional list of additional attributes to attach
to each Cloud PubSub message published for this notification
subscription.
Fields:
custom_attributes: An optional list of additional attributes to attach to
each Cloud PubSub message published for this notification subscription.
etag: HTTP 1.1 Entity tag for this subscription notification.
event_types: If present, only send notifications about listed event types.
If empty, send notifications for all event types.
id: The ID of the notification.
kind: The kind of item this is. For notifications, this is always
storage#notification.
object_name_prefix: If present, only apply this notification configuration
to object names that begin with this prefix.
payload_format: The desired content of the Payload.
selfLink: The canonical URL of this notification.
topic: The Cloud PubSub topic to which this subscription publishes.
Formatted as: '//pubsub.googleapis.com/projects/{project-
identifier}/topics/{my-topic}'
"""
@encoding.MapUnrecognizedFields('additionalProperties')
class CustomAttributesValue(_messages.Message):
r"""An optional list of additional attributes to attach to each Cloud
PubSub message published for this notification subscription.
Messages:
AdditionalProperty: An additional property for a CustomAttributesValue
object.
Fields:
additionalProperties: Additional properties of type
CustomAttributesValue
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a CustomAttributesValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
custom_attributes = _messages.MessageField('CustomAttributesValue', 1)
etag = _messages.StringField(2)
event_types = _messages.StringField(3, repeated=True)
id = _messages.StringField(4)
kind = _messages.StringField(5, default='storage#notification')
object_name_prefix = _messages.StringField(6)
payload_format = _messages.StringField(7, default='JSON_API_V1')
selfLink = _messages.StringField(8)
topic = _messages.StringField(9)
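# Illustrative sketch (not generated code): a Notification subscription
# with a custom attribute attached to every published message. Topic and
# attribute values are hypothetical.
#
#   attrs = Notification.CustomAttributesValue(additionalProperties=[
#       Notification.CustomAttributesValue.AdditionalProperty(
#           key='env', value='prod'),
#   ])
#   notification = Notification(
#       topic='//pubsub.googleapis.com/projects/my-project/topics/my-topic',
#       event_types=['OBJECT_FINALIZE'],
#       custom_attributes=attrs)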
class Notifications(_messages.Message):
r"""A list of notification subscriptions.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of notifications, this is always
storage#notifications.
"""
items = _messages.MessageField('Notification', 1, repeated=True)
kind = _messages.StringField(2, default='storage#notifications')
class Object(_messages.Message):
r"""An object.
Messages:
CustomerEncryptionValue: Metadata of customer-supplied encryption key, if
the object is encrypted by such a key.
MetadataValue: User-provided metadata, in key/value pairs.
OwnerValue: The owner of the object. This will always be the uploader of
the object.
Fields:
acl: Access controls on the object.
bucket: The name of the bucket containing this object.
cacheControl: Cache-Control directive for the object data. If omitted, and
the object is accessible to all anonymous users, the default will be
public, max-age=3600.
componentCount: Number of underlying components that make up this object.
Components are accumulated by compose operations.
contentDisposition: Content-Disposition of the object data.
contentEncoding: Content-Encoding of the object data.
contentLanguage: Content-Language of the object data.
contentType: Content-Type of the object data. If an object is stored
without a Content-Type, it is served as application/octet-stream.
crc32c: CRC32c checksum, as described in RFC 4960, Appendix B; encoded
using base64 in big-endian byte order. For more information about using
the CRC32c checksum, see Hashes and ETags: Best Practices.
customTime: A timestamp in RFC 3339 format specified by the user for an
object.
customerEncryption: Metadata of customer-supplied encryption key, if the
object is encrypted by such a key.
etag: HTTP 1.1 Entity tag for the object.
eventBasedHold: Whether an object is under event-based hold. Event-based
hold is a way to retain objects until an event occurs, which is
signified by the hold's release (i.e. this value is set to false). After
being released (set to false), such objects will be subject to bucket-
level retention (if any). One sample use case of this flag is for banks
to hold loan documents for at least 3 years after the loan is paid in
full. Here, bucket-level retention is 3 years and the event is the loan
being paid in full. In this example, these objects will be held intact
for any number of years until the event has occurred (event-based hold
on the object is released) and then 3 more years after that. That means
the retention duration of the objects begins from the moment the
event-based hold transitions from true to false.
generation: The content generation of this object. Used for object
versioning.
id: The ID of the object, including the bucket name, object name, and
generation number.
kind: The kind of item this is. For objects, this is always
storage#object.
kmsKeyName: Cloud KMS Key used to encrypt this object, if the object is
encrypted by such a key.
md5Hash: MD5 hash of the data; encoded using base64. For more information
about using the MD5 hash, see Hashes and ETags: Best Practices.
mediaLink: Media download link.
metadata: User-provided metadata, in key/value pairs.
metageneration: The version of the metadata for this object at this
generation. Used for preconditions and for detecting changes in
metadata. A metageneration number is only meaningful in the context of a
particular generation of a particular object.
name: The name of the object. Required if not specified by URL parameter.
owner: The owner of the object. This will always be the uploader of the
object.
retentionExpirationTime: A server-determined value that specifies the
earliest time that the object's retention period expires. This value is
in RFC 3339 format. Note 1: This field is not provided for objects with
an active event-based hold, since retention expiration is unknown until
the hold is removed. Note 2: This value can be provided even when
temporary hold is set (so that the user can reason about policy without
having to first unset the temporary hold).
selfLink: The link to this object.
size: Content-Length of the data in bytes.
storageClass: Storage class of the object.
temporaryHold: Whether an object is under temporary hold. While this flag
is set to true, the object is protected against deletion and overwrites.
A common use case of this flag is regulatory investigations where
objects need to be retained while the investigation is ongoing. Note
that unlike event-based hold, temporary hold does not impact retention
expiration time of an object.
timeCreated: The creation time of the object in RFC 3339 format.
timeDeleted: The deletion time of the object in RFC 3339 format. Will be
returned if and only if this version of the object has been deleted.
timeStorageClassUpdated: The time at which the object's storage class was
last changed. When the object is initially created, it will be set to
timeCreated.
updated: The modification time of the object metadata in RFC 3339 format.
"""
class CustomerEncryptionValue(_messages.Message):
r"""Metadata of customer-supplied encryption key, if the object is
encrypted by such a key.
Fields:
encryptionAlgorithm: The encryption algorithm.
keySha256: SHA256 hash value of the encryption key.
"""
encryptionAlgorithm = _messages.StringField(1)
keySha256 = _messages.StringField(2)
@encoding.MapUnrecognizedFields('additionalProperties')
class MetadataValue(_messages.Message):
r"""User-provided metadata, in key/value pairs.
Messages:
AdditionalProperty: An additional property for a MetadataValue object.
Fields:
additionalProperties: An individual metadata entry.
"""
class AdditionalProperty(_messages.Message):
r"""An additional property for a MetadataValue object.
Fields:
key: Name of the additional property.
value: A string attribute.
"""
key = _messages.StringField(1)
value = _messages.StringField(2)
additionalProperties = _messages.MessageField('AdditionalProperty', 1, repeated=True)
class OwnerValue(_messages.Message):
r"""The owner of the object. This will always be the uploader of the
object.
Fields:
entity: The entity, in the form user-userId.
entityId: The ID for the entity.
"""
entity = _messages.StringField(1)
entityId = _messages.StringField(2)
acl = _messages.MessageField('ObjectAccessControl', 1, repeated=True)
bucket = _messages.StringField(2)
cacheControl = _messages.StringField(3)
componentCount = _messages.IntegerField(4, variant=_messages.Variant.INT32)
contentDisposition = _messages.StringField(5)
contentEncoding = _messages.StringField(6)
contentLanguage = _messages.StringField(7)
contentType = _messages.StringField(8)
crc32c = _messages.StringField(9)
customTime = _message_types.DateTimeField(10)
customerEncryption = _messages.MessageField('CustomerEncryptionValue', 11)
etag = _messages.StringField(12)
eventBasedHold = _messages.BooleanField(13)
generation = _messages.IntegerField(14)
id = _messages.StringField(15)
kind = _messages.StringField(16, default='storage#object')
kmsKeyName = _messages.StringField(17)
md5Hash = _messages.StringField(18)
mediaLink = _messages.StringField(19)
metadata = _messages.MessageField('MetadataValue', 20)
metageneration = _messages.IntegerField(21)
name = _messages.StringField(22)
owner = _messages.MessageField('OwnerValue', 23)
retentionExpirationTime = _message_types.DateTimeField(24)
selfLink = _messages.StringField(25)
size = _messages.IntegerField(26, variant=_messages.Variant.UINT64)
storageClass = _messages.StringField(27)
temporaryHold = _messages.BooleanField(28)
timeCreated = _message_types.DateTimeField(29)
timeDeleted = _message_types.DateTimeField(30)
timeStorageClassUpdated = _message_types.DateTimeField(31)
updated = _message_types.DateTimeField(32)
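# Illustrative sketch (not generated code): object metadata with one
# user-provided key/value pair, e.g. for use as an insert request body.
# All names are hypothetical.
#
#   obj = Object(
#       name='reports/2021-01.csv',
#       contentType='text/csv',
#       metadata=Object.MetadataValue(additionalProperties=[
#           Object.MetadataValue.AdditionalProperty(
#               key='pipeline', value='nightly-etl'),
#       ]))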
class ObjectAccessControl(_messages.Message):
r"""An access-control entry.
Messages:
ProjectTeamValue: The project team associated with the entity, if any.
Fields:
bucket: The name of the bucket.
domain: The domain associated with the entity, if any.
email: The email address associated with the entity, if any.
entity: The entity holding the permission, in one of the following forms:
- user-userId - user-email - group-groupId - group-email - domain-
domain - project-team-projectId - allUsers - allAuthenticatedUsers
Examples: - The user [email protected] would be [email protected]. -
The group [email protected] would be group-
[email protected]. - To refer to all members of the Google Apps
for Business domain example.com, the entity would be domain-example.com.
entityId: The ID for the entity, if any.
etag: HTTP 1.1 Entity tag for the access-control entry.
generation: The content generation of the object, if applied to an object.
id: The ID of the access-control entry.
kind: The kind of item this is. For object access control entries, this is
always storage#objectAccessControl.
object: The name of the object, if applied to an object.
projectTeam: The project team associated with the entity, if any.
role: The access permission for the entity.
selfLink: The link to this access-control entry.
"""
class ProjectTeamValue(_messages.Message):
r"""The project team associated with the entity, if any.
Fields:
projectNumber: The project number.
team: The team.
"""
projectNumber = _messages.StringField(1)
team = _messages.StringField(2)
bucket = _messages.StringField(1)
domain = _messages.StringField(2)
email = _messages.StringField(3)
entity = _messages.StringField(4)
entityId = _messages.StringField(5)
etag = _messages.StringField(6)
generation = _messages.IntegerField(7)
id = _messages.StringField(8)
kind = _messages.StringField(9, default='storage#objectAccessControl')
object = _messages.StringField(10)
projectTeam = _messages.MessageField('ProjectTeamValue', 11)
role = _messages.StringField(12)
selfLink = _messages.StringField(13)
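# Illustrative sketch (not generated code): an ACL entry granting READER
# access to a Google group; the group address is hypothetical.
#
#   acl_entry = ObjectAccessControl(
#       entity='[email protected]',
#       role='READER')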
class ObjectAccessControls(_messages.Message):
r"""An access-control list.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of object access control
entries, this is always storage#objectAccessControls.
"""
items = _messages.MessageField('ObjectAccessControl', 1, repeated=True)
kind = _messages.StringField(2, default='storage#objectAccessControls')
class Objects(_messages.Message):
r"""A list of objects.
Fields:
items: The list of items.
kind: The kind of item this is. For lists of objects, this is always
storage#objects.
nextPageToken: The continuation token, used to page through large result
sets. Provide this value in a subsequent request to return the next page
of results.
prefixes: The list of prefixes of objects matching-but-not-listed up to
and including the requested delimiter.
"""
items = _messages.MessageField('Object', 1, repeated=True)
kind = _messages.StringField(2, default='storage#objects')
nextPageToken = _messages.StringField(3)
prefixes = _messages.StringField(4, repeated=True)
class Policy(_messages.Message):
r"""A bucket/object IAM policy.
Messages:
BindingsValueListEntry: A BindingsValueListEntry object.
Fields:
bindings: An association between a role, which comes with a set of
permissions, and members who may assume that role.
etag: HTTP 1.1 Entity tag for the policy.
kind: The kind of item this is. For policies, this is always
storage#policy. This field is ignored on input.
resourceId: The ID of the resource to which this policy belongs. Will be
of the form projects/_/buckets/bucket for buckets, and
projects/_/buckets/bucket/objects/object for objects. A specific
generation may be specified by appending #generationNumber to the end of
the object name, e.g. projects/_/buckets/my-bucket/objects/data.txt#17.
The current generation can be denoted with #0. This field is ignored on
input.
version: The IAM policy format version.
"""
class BindingsValueListEntry(_messages.Message):
r"""A BindingsValueListEntry object.
Fields:
condition: The condition that is associated with this binding. NOTE: an
unsatisfied condition will not allow user access via current binding.
Different bindings, including their conditions, are examined
independently.
members: A collection of identifiers for members who may assume the
provided role. Recognized identifiers are as follows: - allUsers - A
special identifier that represents anyone on the internet, with or
without a Google account. - allAuthenticatedUsers - A special
identifier that represents anyone who is authenticated with a Google
account or a service account. - user:emailid - An email address that
represents a specific account. For example, user:[email protected] or
user:[email protected]. - serviceAccount:emailid - An email address
that represents a service account. For example,
serviceAccount:[email protected]. - group:emailid - An email
address that represents a Google group. For example,
group:[email protected]. - domain:domain - A Google Apps domain
name that represents all the users of that domain. For example,
domain:google.com or domain:example.com. - projectOwner:projectid -
Owners of the given project. For example, projectOwner:my-example-
project - projectEditor:projectid - Editors of the given project.
For example, projectEditor:my-example-project -
projectViewer:projectid - Viewers of the given project. For example,
projectViewer:my-example-project
role: The role to which members belong. Two types of roles are
supported: new IAM roles, which grant permissions that do not map
directly to those provided by ACLs, and legacy IAM roles, which do map
directly to ACL permissions. All roles are of the format
roles/storage.specificRole. The new IAM roles are: -
roles/storage.admin - Full control of Google Cloud Storage resources.
- roles/storage.objectViewer - Read-only access to Google Cloud
Storage objects. - roles/storage.objectCreator - Access to create
objects in Google Cloud Storage. - roles/storage.objectAdmin - Full
control of Google Cloud Storage objects. The legacy IAM roles are:
- roles/storage.legacyObjectReader - Read-only access to objects
without listing. Equivalent to an ACL entry on an object with the
READER role. - roles/storage.legacyObjectOwner - Read/write access
to existing objects without listing. Equivalent to an ACL entry on an
object with the OWNER role. - roles/storage.legacyBucketReader -
Read access to buckets with object listing. Equivalent to an ACL entry
on a bucket with the READER role. - roles/storage.legacyBucketWriter
- Read access to buckets with object listing/creation/deletion.
Equivalent to an ACL entry on a bucket with the WRITER role. -
roles/storage.legacyBucketOwner - Read and write access to existing
buckets with object listing/creation/deletion. Equivalent to an ACL
entry on a bucket with the OWNER role.
"""
condition = _messages.MessageField('Expr', 1)
members = _messages.StringField(2, repeated=True)
role = _messages.StringField(3)
bindings = _messages.MessageField('BindingsValueListEntry', 1, repeated=True)
etag = _messages.BytesField(2)
kind = _messages.StringField(3, default='storage#policy')
resourceId = _messages.StringField(4)
version = _messages.IntegerField(5, variant=_messages.Variant.INT32)
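# Illustrative sketch (not generated code): a version-3 Policy with one
# conditional binding, reusing the Expr message defined above. Members and
# the condition are made up for the example.
#
#   policy = Policy(
#       version=3,
#       bindings=[Policy.BindingsValueListEntry(
#           role='roles/storage.objectViewer',
#           members=['user:[email protected]'],
#           condition=Expr(
#               title='expires-2022',
#               expression='request.time < timestamp("2022-01-01T00:00:00Z")'),
#       )])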
class RewriteResponse(_messages.Message):
r"""A rewrite response.
Fields:
done: true if the copy is finished; false if the copy is still in
progress. This property is always present in the response.
kind: The kind of item this is.
objectSize: The total size of the object being copied in bytes. This
property is always present in the response.
resource: A resource containing the metadata for the copied-to object.
This property is present in the response only when copying completes.
rewriteToken: A token to use in subsequent requests to continue copying
data. This token is present in the response only when there is more data
to copy.
totalBytesRewritten: The total bytes written so far, which can be used to
provide a waiting user with a progress indicator. This property is
always present in the response.
"""
done = _messages.BooleanField(1)
kind = _messages.StringField(2, default='storage#rewriteResponse')
objectSize = _messages.IntegerField(3)
resource = _messages.MessageField('Object', 4)
rewriteToken = _messages.StringField(5)
totalBytesRewritten = _messages.IntegerField(6)
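# Illustrative sketch (not generated code): driving a copy to completion
# with rewriteToken. `do_rewrite` is a hypothetical callable that issues
# one rewrite call and returns a RewriteResponse.
#
#   token = None
#   while True:
#       resp = do_rewrite(rewrite_token=token)
#       if resp.done:
#           copied = resp.resource  # metadata of the copied-to object
#           break
#       token = resp.rewriteToken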
class ServiceAccount(_messages.Message):
r"""A subscription to receive Google PubSub notifications.
Fields:
email_address: The ID of the notification.
kind: The kind of item this is. For notifications, this is always
storage#notification.
"""
email_address = _messages.StringField(1)
kind = _messages.StringField(2, default='storage#serviceAccount')
class StandardQueryParameters(_messages.Message):
r"""Query parameters accepted by all methods.
Enums:
AltValueValuesEnum: Data format for the response.
Fields:
alt: Data format for the response.
fields: Selector specifying which fields to include in a partial response.
key: API key. Your API key identifies your project and provides you with
API access, quota, and reports. Required unless you provide an OAuth 2.0
token.
oauth_token: OAuth 2.0 token for the current user.
prettyPrint: Returns response with indentations and line breaks.
quotaUser: An opaque string that represents a user for quota purposes.
Must not exceed 40 characters.
trace: A tracing token of the form "token:<tokenid>" to include in API
requests.
userIp: Deprecated. Please use quotaUser instead.
"""
class AltValueValuesEnum(_messages.Enum):
r"""Data format for the response.
Values:
json: Responses with Content-Type of application/json
"""
json = 0
alt = _messages.EnumField('AltValueValuesEnum', 1, default='json')
fields = _messages.StringField(2)
key = _messages.StringField(3)
oauth_token = _messages.StringField(4)
prettyPrint = _messages.BooleanField(5, default=True)
quotaUser = _messages.StringField(6)
trace = _messages.StringField(7)
userIp = _messages.StringField(8)
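# Illustrative sketch (not generated code): standard parameters that trim
# a list response to selected fields; the selector string is an example.
#
#   params = StandardQueryParameters(
#       fields='items(name,size),nextPageToken',
#       prettyPrint=False)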
class StorageBucketAccessControlsDeleteRequest(_messages.Message):
r"""A StorageBucketAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageBucketAccessControlsDelete response."""
class StorageBucketAccessControlsGetRequest(_messages.Message):
r"""A StorageBucketAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketAccessControlsInsertRequest(_messages.Message):
r"""A StorageBucketAccessControlsInsertRequest object.
Fields:
bucket: Name of a bucket.
bucketAccessControl: A BucketAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
bucketAccessControl = _messages.MessageField('BucketAccessControl', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketAccessControlsListRequest(_messages.Message):
r"""A StorageBucketAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class StorageBucketAccessControlsPatchRequest(_messages.Message):
r"""A StorageBucketAccessControlsPatchRequest object.
Fields:
bucket: Name of a bucket.
bucketAccessControl: A BucketAccessControl resource to be passed as the
request body.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
bucketAccessControl = _messages.MessageField('BucketAccessControl', 2)
entity = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageBucketAccessControlsUpdateRequest(_messages.Message):
r"""A StorageBucketAccessControlsUpdateRequest object.
Fields:
bucket: Name of a bucket.
bucketAccessControl: A BucketAccessControl resource to be passed as the
request body.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
bucketAccessControl = _messages.MessageField('BucketAccessControl', 2)
entity = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageBucketsDeleteRequest(_messages.Message):
r"""A StorageBucketsDeleteRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: If set, only deletes the bucket if its
metageneration matches this value.
ifMetagenerationNotMatch: If set, only deletes the bucket if its
metageneration does not match this value.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageBucketsDeleteResponse(_messages.Message):
r"""An empty StorageBucketsDelete response."""
class StorageBucketsGetIamPolicyRequest(_messages.Message):
r"""A StorageBucketsGetIamPolicyRequest object.
Fields:
bucket: Name of a bucket.
optionsRequestedPolicyVersion: The IAM policy format version to be
returned. If the optionsRequestedPolicyVersion is for an older version
that doesn't support part of the requested IAM policy, the request
fails.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
optionsRequestedPolicyVersion = _messages.IntegerField(2, variant=_messages.Variant.INT32)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketsGetRequest(_messages.Message):
r"""A StorageBucketsGetRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
projection = _messages.EnumField('ProjectionValueValuesEnum', 4)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageBucketsInsertRequest(_messages.Message):
r"""A StorageBucketsInsertRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the bucket resource specifies acl or defaultObjectAcl properties,
when it defaults to full.
Fields:
bucket: A Bucket resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
project: A valid API project identifier.
projection: Set of properties to return. Defaults to noAcl, unless the
bucket resource specifies acl or defaultObjectAcl properties, when it
defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the bucket
resource specifies acl or defaultObjectAcl properties, when it defaults to
full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.MessageField('Bucket', 1)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 2)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 3)
project = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
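# Illustrative sketch (not generated code): an insert request creating a
# private bucket. Project and bucket names are hypothetical; Bucket is the
# message defined elsewhere in this module.
#
#   req = StorageBucketsInsertRequest(
#       project='my-project',
#       bucket=Bucket(name='my-new-bucket'),
#       predefinedAcl=(StorageBucketsInsertRequest
#                      .PredefinedAclValueValuesEnum.private))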
class StorageBucketsListChannelsRequest(_messages.Message):
r"""A StorageBucketsListChannelsRequest object.
Fields:
bucket: Name of a bucket.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class StorageBucketsListRequest(_messages.Message):
r"""A StorageBucketsListRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
maxResults: Maximum number of buckets to return in a single response. The
service will use this parameter or 1,000 items, whichever is smaller.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to buckets whose names begin with this prefix.
project: A valid API project identifier.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32, default=1000)
pageToken = _messages.StringField(2)
prefix = _messages.StringField(3)
project = _messages.StringField(4, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
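# Illustrative sketch (not generated code): listing buckets by prefix,
# page by page. `list_buckets` is a hypothetical callable returning a
# Buckets message for each request.
#
#   req = StorageBucketsListRequest(
#       project='my-project', prefix='logs-', maxResults=100)
#   while True:
#       page = list_buckets(req)
#       for bucket in page.items:
#           print(bucket.name)
#       if not page.nextPageToken:
#           break
#       req.pageToken = page.nextPageToken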
class StorageBucketsLockRetentionPolicyRequest(_messages.Message):
r"""A StorageBucketsLockRetentionPolicyRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: Makes the operation conditional on whether the
bucket's current metageneration matches the given value.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketsPatchRequest(_messages.Message):
r"""A StorageBucketsPatchRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of a bucket.
bucketResource: A Bucket resource to be passed as the request body.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
bucketResource = _messages.MessageField('Bucket', 2)
ifMetagenerationMatch = _messages.IntegerField(3)
ifMetagenerationNotMatch = _messages.IntegerField(4)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
class StorageBucketsSetIamPolicyRequest(_messages.Message):
r"""A StorageBucketsSetIamPolicyRequest object.
Fields:
bucket: Name of a bucket.
policy: A Policy resource to be passed as the request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
policy = _messages.MessageField('Policy', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketsTestIamPermissionsRequest(_messages.Message):
r"""A StorageBucketsTestIamPermissionsRequest object.
Fields:
bucket: Name of a bucket.
permissions: Permissions to test.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
permissions = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageBucketsUpdateRequest(_messages.Message):
r"""A StorageBucketsUpdateRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this bucket.
PredefinedDefaultObjectAclValueValuesEnum: Apply a predefined set of
default object access controls to this bucket.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of a bucket.
bucketResource: A Bucket resource to be passed as the request body.
ifMetagenerationMatch: Makes the return of the bucket metadata conditional
on whether the bucket's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the return of the bucket metadata
conditional on whether the bucket's current metageneration does not
match the given value.
predefinedAcl: Apply a predefined set of access controls to this bucket.
predefinedDefaultObjectAcl: Apply a predefined set of default object
access controls to this bucket.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this bucket.
Values:
authenticatedRead: Project team owners get OWNER access, and
allAuthenticatedUsers get READER access.
private: Project team owners get OWNER access.
projectPrivate: Project team members get access according to their
roles.
publicRead: Project team owners get OWNER access, and allUsers get
READER access.
publicReadWrite: Project team owners get OWNER access, and allUsers get
WRITER access.
"""
authenticatedRead = 0
private = 1
projectPrivate = 2
publicRead = 3
publicReadWrite = 4
class PredefinedDefaultObjectAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of default object access controls to this
bucket.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit owner, acl and defaultObjectAcl properties.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
bucketResource = _messages.MessageField('Bucket', 2)
ifMetagenerationMatch = _messages.IntegerField(3)
ifMetagenerationNotMatch = _messages.IntegerField(4)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 5)
predefinedDefaultObjectAcl = _messages.EnumField('PredefinedDefaultObjectAclValueValuesEnum', 6)
projection = _messages.EnumField('ProjectionValueValuesEnum', 7)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
class StorageChannelsStopResponse(_messages.Message):
r"""An empty StorageChannelsStop response."""
class StorageDefaultObjectAccessControlsDeleteRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageDefaultObjectAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageDefaultObjectAccessControlsDelete response."""
class StorageDefaultObjectAccessControlsGetRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageDefaultObjectAccessControlsInsertRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsInsertRequest object.
Fields:
bucket: Name of a bucket.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageDefaultObjectAccessControlsListRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
ifMetagenerationMatch: If present, only return default ACL listing if the
bucket's current metageneration matches this value.
ifMetagenerationNotMatch: If present, only return default ACL listing if
the bucket's current metageneration does not match the given value.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
ifMetagenerationMatch = _messages.IntegerField(2)
ifMetagenerationNotMatch = _messages.IntegerField(3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageDefaultObjectAccessControlsPatchRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsPatchRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageDefaultObjectAccessControlsUpdateRequest(_messages.Message):
r"""A StorageDefaultObjectAccessControlsUpdateRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 3)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageNotificationsDeleteRequest(_messages.Message):
r"""A StorageNotificationsDeleteRequest object.
Fields:
bucket: The parent bucket of the notification.
notification: ID of the notification to delete.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
notification = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageNotificationsDeleteResponse(_messages.Message):
r"""An empty StorageNotificationsDelete response."""
class StorageNotificationsGetRequest(_messages.Message):
r"""A StorageNotificationsGetRequest object.
Fields:
bucket: The parent bucket of the notification.
notification: ID of the notification.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
notification = _messages.StringField(2, required=True)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageNotificationsInsertRequest(_messages.Message):
r"""A StorageNotificationsInsertRequest object.
Fields:
bucket: The parent bucket of the notification.
notification: A Notification resource to be passed as the request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
notification = _messages.MessageField('Notification', 2)
provisionalUserProject = _messages.StringField(3)
userProject = _messages.StringField(4)
class StorageNotificationsListRequest(_messages.Message):
r"""A StorageNotificationsListRequest object.
Fields:
bucket: Name of a Google Cloud Storage bucket.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class StorageObjectAccessControlsDeleteRequest(_messages.Message):
r"""A StorageObjectAccessControlsDeleteRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectAccessControlsDeleteResponse(_messages.Message):
r"""An empty StorageObjectAccessControlsDelete response."""
class StorageObjectAccessControlsGetRequest(_messages.Message):
r"""A StorageObjectAccessControlsGetRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectAccessControlsInsertRequest(_messages.Message):
r"""A StorageObjectAccessControlsInsertRequest object.
Fields:
bucket: Name of a bucket.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 4)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectAccessControlsListRequest(_messages.Message):
r"""A StorageObjectAccessControlsListRequest object.
Fields:
bucket: Name of a bucket.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageObjectAccessControlsPatchRequest(_messages.Message):
r"""A StorageObjectAccessControlsPatchRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
class StorageObjectAccessControlsUpdateRequest(_messages.Message):
r"""A StorageObjectAccessControlsUpdateRequest object.
Fields:
bucket: Name of a bucket.
entity: The entity holding the permission. Can be user-userId, user-
emailAddress, group-groupId, group-emailAddress, allUsers, or
allAuthenticatedUsers.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
objectAccessControl: An ObjectAccessControl resource to be passed as the
request body.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
entity = _messages.StringField(2, required=True)
generation = _messages.IntegerField(3)
object = _messages.StringField(4, required=True)
objectAccessControl = _messages.MessageField('ObjectAccessControl', 5)
provisionalUserProject = _messages.StringField(6)
userProject = _messages.StringField(7)
class StorageObjectsComposeRequest(_messages.Message):
r"""A StorageObjectsComposeRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
Fields:
composeRequest: A ComposeRequest resource to be passed as the request
body.
destinationBucket: Name of the bucket containing the source objects. The
destination object is stored in this bucket.
destinationObject: Name of the new object. For information about how to
URL encode object names to be path safe, see Encoding URI Path Parts.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
kmsKeyName: Resource name of the Cloud KMS key, of the form projects/my-
project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be
used to encrypt the object. Overrides the object metadata's kms_key_name
value, if any.
provisionalUserProject: The project to be billed for this request if the
target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
composeRequest = _messages.MessageField('ComposeRequest', 1)
destinationBucket = _messages.StringField(2, required=True)
destinationObject = _messages.StringField(3, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
ifGenerationMatch = _messages.IntegerField(5)
ifMetagenerationMatch = _messages.IntegerField(6)
kmsKeyName = _messages.StringField(7)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
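
# --- Editor's illustrative sketch ---
# Building a compose request with a creation-only precondition
# (ifGenerationMatch=0 means "succeed only if no live destination object
# exists"). Names are hypothetical; the ComposeRequest body is left empty
# here, though the source objects would normally be listed inside it.
def _example_compose_request():
  return StorageObjectsComposeRequest(
      composeRequest=ComposeRequest(),
      destinationBucket='example-bucket',
      destinationObject='merged/output.bin',
      destinationPredefinedAcl=(
          StorageObjectsComposeRequest
          .DestinationPredefinedAclValueValuesEnum.projectPrivate),
      ifGenerationMatch=0,
  )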
class StorageObjectsCopyRequest(_messages.Message):
r"""A StorageObjectsCopyRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
destinationBucket: Name of the bucket in which to store the new object.
Overrides the provided object metadata's bucket value, if any.For
information about how to URL encode object names to be path safe, see
Encoding URI Path Parts.
destinationKmsKeyName: Resource name of the Cloud KMS key, of the form
projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
that will be used to encrypt the object. Overrides the object metadata's
kms_key_name value, if any.
destinationObject: Name of the new object. Required when the object
metadata is not otherwise provided. Overrides the object metadata's name
value, if any.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the
destination object's current generation matches the given value. Setting
to 0 makes the operation succeed only if there are no live versions of
the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
destination object's current generation does not match the given value.
If no live object exists, the precondition fails. Setting to 0 makes the
operation succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
destination object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
destination object's current metageneration does not match the given
value.
ifSourceGenerationMatch: Makes the operation conditional on whether the
source object's current generation matches the given value.
ifSourceGenerationNotMatch: Makes the operation conditional on whether the
source object's current generation does not match the given value.
ifSourceMetagenerationMatch: Makes the operation conditional on whether
the source object's current metageneration matches the given value.
ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
the source object's current metageneration does not match the given
value.
    object: An Object resource to be passed as the request body.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
sourceBucket: Name of the bucket in which to find the source object.
sourceGeneration: If present, selects a specific revision of the source
object (as opposed to the latest version, the default).
sourceObject: Name of the source object. For information about how to URL
encode object names to be path safe, see Encoding URI Path Parts.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
destinationBucket = _messages.StringField(1, required=True)
destinationKmsKeyName = _messages.StringField(2)
destinationObject = _messages.StringField(3, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
ifGenerationMatch = _messages.IntegerField(5)
ifGenerationNotMatch = _messages.IntegerField(6)
ifMetagenerationMatch = _messages.IntegerField(7)
ifMetagenerationNotMatch = _messages.IntegerField(8)
ifSourceGenerationMatch = _messages.IntegerField(9)
ifSourceGenerationNotMatch = _messages.IntegerField(10)
ifSourceMetagenerationMatch = _messages.IntegerField(11)
ifSourceMetagenerationNotMatch = _messages.IntegerField(12)
object = _messages.MessageField('Object', 13)
projection = _messages.EnumField('ProjectionValueValuesEnum', 14)
provisionalUserProject = _messages.StringField(15)
sourceBucket = _messages.StringField(16, required=True)
sourceGeneration = _messages.IntegerField(17)
sourceObject = _messages.StringField(18, required=True)
userProject = _messages.StringField(19)
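
# --- Editor's illustrative sketch ---
# Copying a pinned revision of a source object while refusing to overwrite a
# live destination. All names and the generation number are hypothetical
# placeholders.
def _example_copy_request():
  return StorageObjectsCopyRequest(
      sourceBucket='example-src-bucket',
      sourceObject='data/input.csv',
      sourceGeneration=1234567890,  # pin one specific source revision
      destinationBucket='example-dst-bucket',
      destinationObject='backup/input.csv',
      ifGenerationMatch=0,  # fail if a live destination object already exists
  )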
class StorageObjectsDeleteRequest(_messages.Message):
r"""A StorageObjectsDeleteRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, permanently deletes a specific revision of this
object (as opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
provisionalUserProject = _messages.StringField(8)
userProject = _messages.StringField(9)
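
# --- Editor's illustrative sketch ---
# A conditional delete: the request only succeeds if the object's current
# generation still matches the (hypothetical) value we last read, which
# guards against deleting a newer overwrite.
def _example_conditional_delete_request():
  return StorageObjectsDeleteRequest(
      bucket='example-bucket',
      object='tmp/scratch.dat',
      ifGenerationMatch=1234567890,
  )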
class StorageObjectsDeleteResponse(_messages.Message):
r"""An empty StorageObjectsDelete response."""
class StorageObjectsGetIamPolicyRequest(_messages.Message):
r"""A StorageObjectsGetIamPolicyRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
provisionalUserProject = _messages.StringField(4)
userProject = _messages.StringField(5)
class StorageObjectsGetRequest(_messages.Message):
r"""A StorageObjectsGetRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
projection = _messages.EnumField('ProjectionValueValuesEnum', 8)
provisionalUserProject = _messages.StringField(9)
userProject = _messages.StringField(10)
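
# --- Editor's illustrative sketch ---
# Reading an archived revision with full (ACL-including) projection; the
# names and generation value are hypothetical.
def _example_get_request():
  return StorageObjectsGetRequest(
      bucket='example-bucket',
      object='reports/2020-01.pdf',
      generation=1234567890,  # an older revision instead of the latest
      projection=StorageObjectsGetRequest.ProjectionValueValuesEnum.full,
  )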
class StorageObjectsInsertRequest(_messages.Message):
r"""A StorageObjectsInsertRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
bucket: Name of the bucket in which to store the new object. Overrides the
provided object metadata's bucket value, if any.
contentEncoding: If set, sets the contentEncoding property of the final
object to this value. Setting this parameter is equivalent to setting
the contentEncoding metadata property. This can be useful when uploading
an object with uploadType=media to indicate the encoding of the content
being uploaded.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
kmsKeyName: Resource name of the Cloud KMS key, of the form projects/my-
project/locations/global/keyRings/my-kr/cryptoKeys/my-key, that will be
used to encrypt the object. Overrides the object metadata's kms_key_name
value, if any.
name: Name of the object. Required when the object metadata is not
otherwise provided. Overrides the object metadata's name value, if any.
For information about how to URL encode object names to be path safe,
see Encoding URI Path Parts.
    object: An Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
contentEncoding = _messages.StringField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
kmsKeyName = _messages.StringField(7)
name = _messages.StringField(8)
object = _messages.MessageField('Object', 9)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 10)
projection = _messages.EnumField('ProjectionValueValuesEnum', 11)
provisionalUserProject = _messages.StringField(12)
userProject = _messages.StringField(13)
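
# --- Editor's illustrative sketch ---
# A create-only upload request: ifGenerationMatch=0 refuses to overwrite an
# existing live object. This assumes the Object message carries a contentType
# field, as in the public GCS schema; all names are placeholders.
def _example_insert_request():
  metadata = Object(contentType='text/plain')
  return StorageObjectsInsertRequest(
      bucket='example-bucket',
      name='notes/hello.txt',
      object=metadata,
      predefinedAcl=(
          StorageObjectsInsertRequest.PredefinedAclValueValuesEnum
          .bucketOwnerRead),
      ifGenerationMatch=0,
  )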
class StorageObjectsListRequest(_messages.Message):
r"""A StorageObjectsListRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which to look for objects.
delimiter: Returns results in a directory-like mode. items will contain
only objects whose names, aside from the prefix, do not contain
delimiter. Objects whose names, aside from the prefix, contain delimiter
will have their name, truncated after the delimiter, returned in
prefixes. Duplicate prefixes are omitted.
endOffset: Filter results to objects whose names are lexicographically
before endOffset. If startOffset is also set, the objects listed will
have names between startOffset (inclusive) and endOffset (exclusive).
includeTrailingDelimiter: If true, objects that end in exactly one
instance of delimiter will have their metadata included in items in
addition to prefixes.
maxResults: Maximum number of items plus prefixes to return in a single
page of responses. As duplicate prefixes are omitted, fewer total
results may be returned than requested. The service will use this
parameter or 1,000 items, whichever is smaller.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to objects whose names begin with this prefix.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
startOffset: Filter results to objects whose names are lexicographically
equal to or after startOffset. If endOffset is also set, the objects
listed will have names between startOffset (inclusive) and endOffset
(exclusive).
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
versions: If true, lists all versions of an object as distinct results.
The default is false. For more information, see Object Versioning.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
delimiter = _messages.StringField(2)
endOffset = _messages.StringField(3)
includeTrailingDelimiter = _messages.BooleanField(4)
maxResults = _messages.IntegerField(5, variant=_messages.Variant.UINT32, default=1000)
pageToken = _messages.StringField(6)
prefix = _messages.StringField(7)
projection = _messages.EnumField('ProjectionValueValuesEnum', 8)
provisionalUserProject = _messages.StringField(9)
startOffset = _messages.StringField(10)
userProject = _messages.StringField(11)
versions = _messages.BooleanField(12)
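
# --- Editor's illustrative sketch ---
# Directory-style listing as described above: with delimiter='/', names under
# deeper "subdirectories" are collapsed into prefixes rather than returned as
# items. The bucket and prefix are hypothetical.
def _example_directory_style_list_request():
  return StorageObjectsListRequest(
      bucket='example-bucket',
      prefix='logs/2020/',
      delimiter='/',
      maxResults=100,
  )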
class StorageObjectsPatchRequest(_messages.Message):
r"""A StorageObjectsPatchRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
    objectResource: An Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request, for Requester Pays
buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
objectResource = _messages.MessageField('Object', 8)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
provisionalUserProject = _messages.StringField(11)
userProject = _messages.StringField(12)
class StorageObjectsRewriteRequest(_messages.Message):
r"""A StorageObjectsRewriteRequest object.
Enums:
DestinationPredefinedAclValueValuesEnum: Apply a predefined set of access
controls to the destination object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl,
unless the object resource specifies the acl property, when it defaults
to full.
Fields:
destinationBucket: Name of the bucket in which to store the new object.
Overrides the provided object metadata's bucket value, if any.
destinationKmsKeyName: Resource name of the Cloud KMS key, of the form
projects/my-project/locations/global/keyRings/my-kr/cryptoKeys/my-key,
that will be used to encrypt the object. Overrides the object metadata's
kms_key_name value, if any.
destinationObject: Name of the new object. Required when the object
metadata is not otherwise provided. Overrides the object metadata's name
value, if any. For information about how to URL encode object names to
be path safe, see Encoding URI Path Parts.
destinationPredefinedAcl: Apply a predefined set of access controls to the
destination object.
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
destination object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
destination object's current metageneration does not match the given
value.
ifSourceGenerationMatch: Makes the operation conditional on whether the
source object's current generation matches the given value.
ifSourceGenerationNotMatch: Makes the operation conditional on whether the
source object's current generation does not match the given value.
ifSourceMetagenerationMatch: Makes the operation conditional on whether
the source object's current metageneration matches the given value.
ifSourceMetagenerationNotMatch: Makes the operation conditional on whether
the source object's current metageneration does not match the given
value.
maxBytesRewrittenPerCall: The maximum number of bytes that will be
rewritten per rewrite request. Most callers shouldn't need to specify
      this parameter; it is primarily in place to support testing. If
      specified, the value must be an integral multiple of 1 MiB (1048576).
Also, this only applies to requests where the source and destination
span locations and/or storage classes. Finally, this value must not
      change across rewrite calls, or you'll get an error that the
rewriteToken is invalid.
    object: An Object resource to be passed as the request body.
projection: Set of properties to return. Defaults to noAcl, unless the
object resource specifies the acl property, when it defaults to full.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
rewriteToken: Include this field (from the previous rewrite response) on
each rewrite request after the first one, until the rewrite response
'done' flag is true. Calls that provide a rewriteToken can omit all
other request fields, but if included those fields must match the values
provided in the first rewrite request.
sourceBucket: Name of the bucket in which to find the source object.
sourceGeneration: If present, selects a specific revision of the source
object (as opposed to the latest version, the default).
sourceObject: Name of the source object. For information about how to URL
encode object names to be path safe, see Encoding URI Path Parts.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class DestinationPredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to the destination object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl, unless the object
resource specifies the acl property, when it defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
destinationBucket = _messages.StringField(1, required=True)
destinationKmsKeyName = _messages.StringField(2)
destinationObject = _messages.StringField(3, required=True)
destinationPredefinedAcl = _messages.EnumField('DestinationPredefinedAclValueValuesEnum', 4)
ifGenerationMatch = _messages.IntegerField(5)
ifGenerationNotMatch = _messages.IntegerField(6)
ifMetagenerationMatch = _messages.IntegerField(7)
ifMetagenerationNotMatch = _messages.IntegerField(8)
ifSourceGenerationMatch = _messages.IntegerField(9)
ifSourceGenerationNotMatch = _messages.IntegerField(10)
ifSourceMetagenerationMatch = _messages.IntegerField(11)
ifSourceMetagenerationNotMatch = _messages.IntegerField(12)
maxBytesRewrittenPerCall = _messages.IntegerField(13)
object = _messages.MessageField('Object', 14)
projection = _messages.EnumField('ProjectionValueValuesEnum', 15)
provisionalUserProject = _messages.StringField(16)
rewriteToken = _messages.StringField(17)
sourceBucket = _messages.StringField(18, required=True)
sourceGeneration = _messages.IntegerField(19)
sourceObject = _messages.StringField(20, required=True)
userProject = _messages.StringField(21)
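
# --- Editor's illustrative sketch ---
# The rewriteToken protocol described above: re-issue the request, echoing the
# token from each response, until the response reports done. `call` stands in
# for whatever client method actually executes the request, and the result is
# assumed to be RewriteResponse-like (with `done` and `rewriteToken` fields);
# both are hypothetical wiring, as are the bucket/object names.
def _example_rewrite_until_done(call):
  request = StorageObjectsRewriteRequest(
      sourceBucket='example-src-bucket',
      sourceObject='big/blob.bin',
      destinationBucket='example-dst-bucket',
      destinationObject='big/blob.bin',
  )
  response = call(request)
  while not response.done:
    request.rewriteToken = response.rewriteToken  # echo the token back
    response = call(request)
  return response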
class StorageObjectsSetIamPolicyRequest(_messages.Message):
r"""A StorageObjectsSetIamPolicyRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
policy: A Policy resource to be passed as the request body.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
policy = _messages.MessageField('Policy', 4)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectsTestIamPermissionsRequest(_messages.Message):
r"""A StorageObjectsTestIamPermissionsRequest object.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
permissions: Permissions to test.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
object = _messages.StringField(3, required=True)
permissions = _messages.StringField(4, required=True)
provisionalUserProject = _messages.StringField(5)
userProject = _messages.StringField(6)
class StorageObjectsUpdateRequest(_messages.Message):
r"""A StorageObjectsUpdateRequest object.
Enums:
PredefinedAclValueValuesEnum: Apply a predefined set of access controls to
this object.
ProjectionValueValuesEnum: Set of properties to return. Defaults to full.
Fields:
bucket: Name of the bucket in which the object resides.
generation: If present, selects a specific revision of this object (as
opposed to the latest version, the default).
ifGenerationMatch: Makes the operation conditional on whether the object's
current generation matches the given value. Setting to 0 makes the
operation succeed only if there are no live versions of the object.
ifGenerationNotMatch: Makes the operation conditional on whether the
object's current generation does not match the given value. If no live
object exists, the precondition fails. Setting to 0 makes the operation
succeed only if there is a live version of the object.
ifMetagenerationMatch: Makes the operation conditional on whether the
object's current metageneration matches the given value.
ifMetagenerationNotMatch: Makes the operation conditional on whether the
object's current metageneration does not match the given value.
object: Name of the object. For information about how to URL encode object
names to be path safe, see Encoding URI Path Parts.
    objectResource: An Object resource to be passed as the request body.
predefinedAcl: Apply a predefined set of access controls to this object.
projection: Set of properties to return. Defaults to full.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
"""
class PredefinedAclValueValuesEnum(_messages.Enum):
r"""Apply a predefined set of access controls to this object.
Values:
authenticatedRead: Object owner gets OWNER access, and
allAuthenticatedUsers get READER access.
bucketOwnerFullControl: Object owner gets OWNER access, and project team
owners get OWNER access.
bucketOwnerRead: Object owner gets OWNER access, and project team owners
get READER access.
private: Object owner gets OWNER access.
projectPrivate: Object owner gets OWNER access, and project team members
get access according to their roles.
publicRead: Object owner gets OWNER access, and allUsers get READER
access.
"""
authenticatedRead = 0
bucketOwnerFullControl = 1
bucketOwnerRead = 2
private = 3
projectPrivate = 4
publicRead = 5
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to full.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
generation = _messages.IntegerField(2)
ifGenerationMatch = _messages.IntegerField(3)
ifGenerationNotMatch = _messages.IntegerField(4)
ifMetagenerationMatch = _messages.IntegerField(5)
ifMetagenerationNotMatch = _messages.IntegerField(6)
object = _messages.StringField(7, required=True)
objectResource = _messages.MessageField('Object', 8)
predefinedAcl = _messages.EnumField('PredefinedAclValueValuesEnum', 9)
projection = _messages.EnumField('ProjectionValueValuesEnum', 10)
provisionalUserProject = _messages.StringField(11)
userProject = _messages.StringField(12)
class StorageObjectsWatchAllRequest(_messages.Message):
r"""A StorageObjectsWatchAllRequest object.
Enums:
ProjectionValueValuesEnum: Set of properties to return. Defaults to noAcl.
Fields:
bucket: Name of the bucket in which to look for objects.
channel: A Channel resource to be passed as the request body.
delimiter: Returns results in a directory-like mode. items will contain
only objects whose names, aside from the prefix, do not contain
delimiter. Objects whose names, aside from the prefix, contain delimiter
will have their name, truncated after the delimiter, returned in
prefixes. Duplicate prefixes are omitted.
endOffset: Filter results to objects whose names are lexicographically
before endOffset. If startOffset is also set, the objects listed will
have names between startOffset (inclusive) and endOffset (exclusive).
includeTrailingDelimiter: If true, objects that end in exactly one
instance of delimiter will have their metadata included in items in
addition to prefixes.
maxResults: Maximum number of items plus prefixes to return in a single
page of responses. As duplicate prefixes are omitted, fewer total
results may be returned than requested. The service will use this
parameter or 1,000 items, whichever is smaller.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
prefix: Filter results to objects whose names begin with this prefix.
projection: Set of properties to return. Defaults to noAcl.
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
startOffset: Filter results to objects whose names are lexicographically
equal to or after startOffset. If endOffset is also set, the objects
listed will have names between startOffset (inclusive) and endOffset
(exclusive).
userProject: The project to be billed for this request. Required for
Requester Pays buckets.
versions: If true, lists all versions of an object as distinct results.
The default is false. For more information, see Object Versioning.
"""
class ProjectionValueValuesEnum(_messages.Enum):
r"""Set of properties to return. Defaults to noAcl.
Values:
full: Include all properties.
noAcl: Omit the owner, acl property.
"""
full = 0
noAcl = 1
bucket = _messages.StringField(1, required=True)
channel = _messages.MessageField('Channel', 2)
delimiter = _messages.StringField(3)
endOffset = _messages.StringField(4)
includeTrailingDelimiter = _messages.BooleanField(5)
maxResults = _messages.IntegerField(6, variant=_messages.Variant.UINT32, default=1000)
pageToken = _messages.StringField(7)
prefix = _messages.StringField(8)
projection = _messages.EnumField('ProjectionValueValuesEnum', 9)
provisionalUserProject = _messages.StringField(10)
startOffset = _messages.StringField(11)
userProject = _messages.StringField(12)
versions = _messages.BooleanField(13)
class StorageProjectsHmacKeysCreateRequest(_messages.Message):
r"""A StorageProjectsHmacKeysCreateRequest object.
Fields:
projectId: Project ID owning the service account.
serviceAccountEmail: Email address of the service account.
userProject: The project to be billed for this request.
"""
projectId = _messages.StringField(1, required=True)
serviceAccountEmail = _messages.StringField(2, required=True)
userProject = _messages.StringField(3)
class StorageProjectsHmacKeysDeleteRequest(_messages.Message):
r"""A StorageProjectsHmacKeysDeleteRequest object.
Fields:
accessId: Name of the HMAC key to be deleted.
    projectId: Project ID owning the requested key.
userProject: The project to be billed for this request.
"""
accessId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
userProject = _messages.StringField(3)
class StorageProjectsHmacKeysDeleteResponse(_messages.Message):
r"""An empty StorageProjectsHmacKeysDelete response."""
class StorageProjectsHmacKeysGetRequest(_messages.Message):
r"""A StorageProjectsHmacKeysGetRequest object.
Fields:
accessId: Name of the HMAC key.
projectId: Project ID owning the service account of the requested key.
userProject: The project to be billed for this request.
"""
accessId = _messages.StringField(1, required=True)
projectId = _messages.StringField(2, required=True)
userProject = _messages.StringField(3)
class StorageProjectsHmacKeysListRequest(_messages.Message):
r"""A StorageProjectsHmacKeysListRequest object.
Fields:
maxResults: Maximum number of items to return in a single page of
responses. The service uses this parameter or 250 items, whichever is
smaller. The max number of items per page will also be limited by the
number of distinct service accounts in the response. If the number of
      service accounts in a single response is too high, the page will be
      truncated and a next page token will be returned.
pageToken: A previously-returned page token representing part of the
larger set of results to view.
projectId: Name of the project in which to look for HMAC keys.
serviceAccountEmail: If present, only keys for the given service account
are returned.
showDeletedKeys: Whether or not to show keys in the DELETED state.
userProject: The project to be billed for this request.
"""
maxResults = _messages.IntegerField(1, variant=_messages.Variant.UINT32, default=250)
pageToken = _messages.StringField(2)
projectId = _messages.StringField(3, required=True)
serviceAccountEmail = _messages.StringField(4)
showDeletedKeys = _messages.BooleanField(5)
userProject = _messages.StringField(6)
class StorageProjectsHmacKeysUpdateRequest(_messages.Message):
r"""A StorageProjectsHmacKeysUpdateRequest object.
Fields:
accessId: Name of the HMAC key being updated.
hmacKeyMetadata: A HmacKeyMetadata resource to be passed as the request
body.
projectId: Project ID owning the service account of the updated key.
userProject: The project to be billed for this request.
"""
accessId = _messages.StringField(1, required=True)
hmacKeyMetadata = _messages.MessageField('HmacKeyMetadata', 2)
projectId = _messages.StringField(3, required=True)
userProject = _messages.StringField(4)
class StorageProjectsServiceAccountGetRequest(_messages.Message):
r"""A StorageProjectsServiceAccountGetRequest object.
Fields:
projectId: Project ID
provisionalUserProject: The project to be billed for this request if the
      target bucket is a requester-pays bucket.
userProject: The project to be billed for this request.
"""
projectId = _messages.StringField(1, required=True)
provisionalUserProject = _messages.StringField(2)
userProject = _messages.StringField(3)
class TestIamPermissionsResponse(_messages.Message):
r"""A storage.(buckets|objects).testIamPermissions response.
Fields:
kind: The kind of item this is.
    permissions: The permissions held by the caller. Permissions are always
      of the format storage.resource.capability, where resource is one of
      buckets or objects. The supported permissions are as follows:
      - storage.buckets.delete - Delete bucket.
      - storage.buckets.get - Read bucket metadata.
      - storage.buckets.getIamPolicy - Read bucket IAM policy.
      - storage.buckets.create - Create bucket.
      - storage.buckets.list - List buckets.
      - storage.buckets.setIamPolicy - Update bucket IAM policy.
      - storage.buckets.update - Update bucket metadata.
      - storage.objects.delete - Delete object.
      - storage.objects.get - Read object data and metadata.
      - storage.objects.getIamPolicy - Read object IAM policy.
      - storage.objects.create - Create object.
      - storage.objects.list - List objects.
      - storage.objects.setIamPolicy - Update object IAM policy.
      - storage.objects.update - Update object metadata.
"""
kind = _messages.StringField(1, default='storage#testIamPermissionsResponse')
permissions = _messages.StringField(2, repeated=True)
| [
"[email protected]"
]
| |
65f5d5d7db31e03fff05009390b6ac2b06cc7f29 | 5d58fa1d54855f18bad5688de4459af8d461c0ac | /plugins/callback/yaml.py | 40bc0191f254fdf8b7a04ea6c86e06ff50051353 | []
| no_license | nasirhm/general | b3b52f6e31be3de8bae0414da620d8cdbb2c2366 | 5ccd89933297f5587dae5cd114e24ea5c54f7ce5 | refs/heads/master | 2021-01-04T07:03:21.121102 | 2020-02-13T20:59:56 | 2020-02-13T20:59:56 | 240,440,187 | 1 | 0 | null | 2020-02-14T06:08:14 | 2020-02-14T06:08:13 | null | UTF-8 | Python | false | false | 4,855 | py | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: yaml
type: stdout
short_description: yaml-ized Ansible screen output
description:
- Ansible output that can be quite a bit easier to read than the
default JSON formatting.
requirements:
- set as stdout in configuration
extends_documentation_fragment:
- default_callback
'''
import yaml
import json
import re
import string
import sys
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import string_types
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.callback import CallbackBase, strip_internal_keys, module_response_deepcopy
from ansible.plugins.callback.default import CallbackModule as Default
# from http://stackoverflow.com/a/15423007/115478
def should_use_block(value):
"""Returns true if string should be in block format"""
for c in u"\u000a\u000d\u001c\u001d\u001e\u0085\u2028\u2029":
if c in value:
return True
return False
def my_represent_scalar(self, tag, value, style=None):
"""Uses block style for multi-line strings"""
if style is None:
if should_use_block(value):
style = '|'
# we care more about readable than accuracy, so...
# ...no trailing space
value = value.rstrip()
# ...and non-printable characters
value = ''.join(x for x in value if x in string.printable)
# ...tabs prevent blocks from expanding
value = value.expandtabs()
# ...and odd bits of whitespace
value = re.sub(r'[\x0b\x0c\r]', '', value)
# ...as does trailing space
value = re.sub(r' +\n', '\n', value)
else:
style = self.default_style
node = yaml.representer.ScalarNode(tag, value, style=style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
return node
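
# A tiny demonstration of the representer above (editor's sketch): once the
# monkey-patch below is applied -- exactly as CallbackModule.__init__ does --
# multi-line strings are emitted in '|' block style instead of quoted '\n'
# escapes. Note that this mutates global PyYAML state.
def _demo_block_style():
    yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
    return yaml.dump({'stdout': 'line one\nline two'}, default_flow_style=False)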
class CallbackModule(Default):
"""
Variation of the Default output which uses nicely readable YAML instead
of JSON for printing results.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'community.general.yaml'
def __init__(self):
super(CallbackModule, self).__init__()
yaml.representer.BaseRepresenter.represent_scalar = my_represent_scalar
def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
if result.get('_ansible_no_log', False):
return json.dumps(dict(censored="The output has been hidden due to the fact that 'no_log: true' was specified for this result"))
# All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
abridged_result = strip_internal_keys(module_response_deepcopy(result))
# remove invocation unless specifically wanting it
if not keep_invocation and self._display.verbosity < 3 and 'invocation' in result:
del abridged_result['invocation']
# remove diff information from screen output
if self._display.verbosity < 3 and 'diff' in result:
del abridged_result['diff']
# remove exception from screen output
if 'exception' in abridged_result:
del abridged_result['exception']
dumped = ''
# put changed and skipped into a header line
if 'changed' in abridged_result:
dumped += 'changed=' + str(abridged_result['changed']).lower() + ' '
del abridged_result['changed']
if 'skipped' in abridged_result:
dumped += 'skipped=' + str(abridged_result['skipped']).lower() + ' '
del abridged_result['skipped']
# if we already have stdout, we don't need stdout_lines
if 'stdout' in abridged_result and 'stdout_lines' in abridged_result:
abridged_result['stdout_lines'] = '<omitted>'
# if we already have stderr, we don't need stderr_lines
if 'stderr' in abridged_result and 'stderr_lines' in abridged_result:
abridged_result['stderr_lines'] = '<omitted>'
if abridged_result:
dumped += '\n'
dumped += to_text(yaml.dump(abridged_result, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
# indent by a couple of spaces
dumped = '\n '.join(dumped.split('\n')).rstrip()
return dumped
def _serialize_diff(self, diff):
return to_text(yaml.dump(diff, allow_unicode=True, width=1000, Dumper=AnsibleDumper, default_flow_style=False))
| [
"[email protected]"
]
| |
233d0ec4819d640232d4c681a2454a1e0e5966e1 | 23d25497d30accc7125f6068ad7c55ebcbea0160 | /Python/10828.py | a809aac64a520ee155f389f3eeaf52107078f583 | []
| no_license | ParkJeongseop/Algorithm | 460689e064529d65e8612493a5d338305ec6311e | 388d092ee8b07b7ea76e720053c782790563515b | refs/heads/master | 2023-08-30T23:19:46.029510 | 2023-08-09T11:08:56 | 2023-08-09T11:08:56 | 149,557,160 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 491 | py | import sys; input = lambda:sys.stdin.readline().rstrip()
n = int(input())
a = []
for _ in range(n):
    cmd = input().split()
    if cmd[0] == 'push':        # push X: place X on top of the stack
        a.append(cmd[1])
    elif cmd[0] == 'pop':       # pop: print and remove the top, or -1 if empty
        if a:
            print(a.pop())
        else:
            print(-1)
    elif cmd[0] == 'size':      # size: number of stored elements
        print(len(a))
    elif cmd[0] == 'empty':     # empty: 1 if the stack is empty, else 0
        print(0 if len(a) else 1)
    elif cmd[0] == 'top':       # top: print the top element, or -1 if empty
        if a:
            print(a[-1])
        else:
            print(-1)
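# Example (the well-known Baekjoon 10828 sample, editor's note): for the input
#   14, push 1, push 2, top, size, empty, pop, pop, pop, size, empty, pop,
#   push 3, empty, top
# the program prints 2, 2, 0, 2, 1, -1, 0, 1, -1, 0, 3 -- one value per line.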
"[email protected]"
]
| |
11754e433ee8f5985f0ae11f9bae4e8dc50213e1 | 6e8f2e28479566dbaa338300b2d61f784ff83f97 | /.history/code/tensorboard_utils_20210411113117.py | 69315f5c5b16b26260ed37152698eb1eba53cc5e | []
| no_license | eeng5/CV-final-project | 55a7d736f75602858233ebc380c4e1d67ab2b866 | 580e28819560b86f6974959efb1d31ef138198fc | refs/heads/main | 2023-04-09T21:28:21.531293 | 2021-04-21T19:57:22 | 2021-04-21T19:57:22 | 352,703,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,424 | py | """
Project 4 - CNNs
CS1430 - Computer Vision
Brown University
"""
import io
import os
import re
import sklearn.metrics
import numpy as np
import tensorflow as tf
from matplotlib import pyplot as plt
import hyperparameters as hp
def plot_to_image(figure):
""" Converts a pyplot figure to an image tensor. """
buf = io.BytesIO()
plt.savefig(buf, format='png')
plt.close(figure)
buf.seek(0)
image = tf.image.decode_png(buf.getvalue(), channels=4)
image = tf.expand_dims(image, 0)
return image
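
# Editor's sketch of how plot_to_image is typically used: any pyplot figure
# can be turned into a batched image tensor and handed to tf.summary.image.
# The figure contents here are placeholders.
def _demo_plot_to_image():
    fig = plt.figure()
    plt.plot([0, 1], [0, 1])
    return plot_to_image(fig)  # uint8 tensor of shape (1, height, width, 4)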
class ImageLabelingLogger(tf.keras.callbacks.Callback):
""" Keras callback for logging a plot of test images and their
predicted labels for viewing in Tensorboard. """
def __init__(self, logs_path, datasets):
super(ImageLabelingLogger, self).__init__()
self.datasets = datasets
self.task = datasets.task
self.logs_path = logs_path
print("Done setting up image labeling logger.")
def on_epoch_end(self, epoch, logs=None):
self.log_image_labels(epoch, logs)
def log_image_labels(self, epoch_num, logs):
""" Writes a plot of test images and their predicted labels
to disk. """
fig = plt.figure(figsize=(9, 9))
count = 0
for batch in self.datasets.test_data: # changed from train to test
for i, image in enumerate(batch[0]):
plt.subplot(5, 5, count+1)
correct_class_idx = batch[1][i]
probabilities = self.model(np.array([image])).numpy()[0]
predict_class_idx = np.argmax(probabilities)
image = np.clip(image, 0., 1.)
plt.imshow(image, cmap='gray')
is_correct = correct_class_idx == predict_class_idx
title_color = 'g' if is_correct else 'r'
plt.title(
self.datasets.idx_to_class[predict_class_idx],
color=title_color)
plt.axis('off')
count += 1
if count == 25:
break
if count == 25:
break
figure_img = plot_to_image(fig)
file_writer_il = tf.summary.create_file_writer(
self.logs_path + os.sep + "image_labels")
with file_writer_il.as_default():
tf.summary.image("Image Label Predictions",
figure_img, step=epoch_num)
class ConfusionMatrixLogger(tf.keras.callbacks.Callback):
""" Keras callback for logging a confusion matrix for viewing
in Tensorboard. """
def __init__(self, logs_path, datasets):
super(ConfusionMatrixLogger, self).__init__()
self.datasets = datasets
self.logs_path = logs_path
def on_epoch_end(self, epoch, logs=None):
self.log_confusion_matrix(epoch, logs)
def log_confusion_matrix(self, epoch, logs):
""" Writes a confusion matrix plot to disk. """
test_pred = []
test_true = []
count = 0
for i in self.datasets.test_data:
test_pred.append(self.model.predict(i[0]))
test_true.append(i[1])
count += 1
if count >= 1500 / hp.batch_size:
break
test_pred = np.array(test_pred)
test_pred = np.argmax(test_pred, axis=-1).flatten()
test_true = np.array(test_true).flatten()
# Source: https://www.tensorflow.org/tensorboard/image_summaries
cm = sklearn.metrics.confusion_matrix(test_true, test_pred)
figure = self.plot_confusion_matrix(
cm, class_names=self.datasets.classes)
cm_image = plot_to_image(figure)
file_writer_cm = tf.summary.create_file_writer(
self.logs_path + os.sep + "confusion_matrix")
with file_writer_cm.as_default():
tf.summary.image(
"Confusion Matrix (on validation set)", cm_image, step=epoch)
def plot_confusion_matrix(self, cm, class_names):
""" Plots a confusion matrix returned by
sklearn.metrics.confusion_matrix(). """
# Source: https://www.tensorflow.org/tensorboard/image_summaries
figure = plt.figure(figsize=(8, 8))
plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Greens)
plt.title("Confusion matrix")
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
cm = np.around(cm.astype('float') / cm.sum(axis=1)
[:, np.newaxis], decimals=2)
threshold = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
color = "white" if cm[i, j] > threshold else "black"
plt.text(j, i, cm[i, j],
horizontalalignment="center", color=color)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
return figure
class CustomModelSaver(tf.keras.callbacks.Callback):
""" Custom Keras callback for saving weights of networks. """
def __init__(self, checkpoint_dir, task, max_num_weights=5):
super(CustomModelSaver, self).__init__()
self.checkpoint_dir = checkpoint_dir
self.task = task
self.max_num_weights = max_num_weights
def on_epoch_end(self, epoch, logs=None):
""" At epoch end, weights are saved to checkpoint directory. """
min_acc_file, max_acc_file, max_acc, num_weights = \
self.scan_weight_files()
cur_acc = logs["val_sparse_categorical_accuracy"]
# Only save weights if test accuracy exceeds the previous best
# weight file
if cur_acc > max_acc:
save_name = "weights.e{0:03d}-acc{1:.4f}.h5".format(
epoch, cur_acc)
if self.task == '1':
self.model.save_weights(
self.checkpoint_dir + os.sep + "your." + save_name)
else:
# Only save weights of classification head of VGGModel
self.model.head.save_weights(
self.checkpoint_dir + os.sep + "vgg." + save_name)
# Ensure max_num_weights is not exceeded by removing
# minimum weight
if self.max_num_weights > 0 and \
num_weights + 1 > self.max_num_weights:
os.remove(self.checkpoint_dir + os.sep + min_acc_file)
def scan_weight_files(self):
""" Scans checkpoint directory to find current minimum and maximum
accuracy weights files as well as the number of weights. """
min_acc = float('inf')
max_acc = 0
min_acc_file = ""
max_acc_file = ""
num_weights = 0
files = os.listdir(self.checkpoint_dir)
for weight_file in files:
if weight_file.endswith(".h5"):
num_weights += 1
file_acc = float(re.findall(
r"[+-]?\d+\.\d+", weight_file.split("acc")[-1])[0])
if file_acc > max_acc:
max_acc = file_acc
max_acc_file = weight_file
if file_acc < min_acc:
min_acc = file_acc
min_acc_file = weight_file
return min_acc_file, max_acc_file, max_acc, num_weights
| [
"[email protected]"
]
| |
4fbe70fbf88650d84fb87c57199e97908cac72f3 | 1bf7673846aedb5beed2d065f971f2985f70df1b | /lib/stashcache_tester/output/githubOutput.py | e82f08f5b95658cef25fec44fc13f3b8251cb8d3 | []
| no_license | StashCache/stashcache-tester | 31ee90945186821f9bb0979c7bee942037ae05e7 | 5031d294050e9c6419c360e804654850efcfa32c | refs/heads/master | 2020-12-25T14:12:41.392207 | 2017-02-23T17:55:51 | 2017-02-23T17:55:51 | 40,491,284 | 0 | 2 | null | 2017-02-23T17:55:52 | 2015-08-10T15:51:17 | Python | UTF-8 | Python | false | false | 6,685 | py |
import logging
import json
import time
import shutil
import os
import sys
from tempfile import NamedTemporaryFile
from stashcache_tester.output.generalOutput import GeneralOutput
from stashcache_tester.util.Configuration import get_option
from stashcache_tester.util.ExternalCommands import RunExternal
class GithubOutput(GeneralOutput):
"""
:param dict sitesData: Dictionary described in :ref:`sitesData <sitesData-label>`.
This class summarizes and uploads the download data to a github account. The data will be stored in a file named ``data.json`` in the git repo under the directory in the configuration. The format of ``data.json`` is::
{
"20150911": [
{
"average": 364.76526180827,
"name": "Tusker"
},
{
"average": 75.99734924610296,
"name": "UCSDT2"
},
...
],
"20150913": [
{
"average": 239.02169168535966,
"name": "Tusker"
},
...
],
...
}
Github output requires an SSH key to be added to the github repository which is pointed to by the `repo` configuration option.
Github output requires additional configuration options in the main configuration in the section `[github]`. An example configuration could be::
[github]
repo = StashCache/stashcache.github.io.git
branch = master
directory = data
ssh_key = /home/user/.ssh/id_rsa
The configuration is:
repo
The git repo to commit the data to.
branch
The branch to install repo.
directory
The directory to put the data summarized files into.
maxdays
The maximum number of days to keep data. Default=30
ssh_key
Path to SSH key to use when checking out and pushing to the repository.
"""
git_ssh_contents = """#!/bin/sh
exec ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i $SSH_KEY_FILE "$@"
"""
def __init__(self, sitesData):
GeneralOutput.__init__(self, sitesData)
def _get_option(self, option, default = None):
return get_option(option, section="github", default=default)
def _summarize_data(self, sitesData):
summarized = []
        # Average download throughput (Mbit/s) per site and per cache.
for site in sitesData:
cur = {}
cur['name'] = site
siteTimes = sitesData[site]
total_runtime = 0
failures = 0
caches = {}
for run in siteTimes:
# Initialize the cache structure
cache = run['cache']
if cache not in caches:
caches[cache] = {}
caches[cache]['runs'] = 0
caches[cache]['totalRuntime'] = 0
caches[cache]['failures'] = 0
if run['success'] is True:
total_runtime += float(run['duration'])
caches[cache]['totalRuntime'] += float(run['duration'])
caches[cache]['runs'] += 1
else:
caches[cache]['failures'] += 1
failures += 1
testsize = get_option("raw_testsize")
if total_runtime == 0:
cur['average'] = 0
for cache in caches.keys():
caches[cache]['average'] = 0
            else:
                # Bandwidth in Mbit/s: testsize bytes -> megabits, divided by
                # the mean per-run duration in seconds.
                cur['average'] = (float(testsize*8) / (1024*1024)) / (total_runtime / len(siteTimes))
                for cache in caches.keys():
                    caches[cache]['average'] = (float(testsize*8) / (1024*1024)) / (caches[cache]['totalRuntime'] / caches[cache]['runs'])
cur['caches'] = caches
cur['failures'] = failures
summarized.append(cur)
# Should we do violin plot?
#summarized = sitesData
return summarized
def startProcessing(self):
"""
Begin summarizing the data.
"""
summarized_data = self._summarize_data(self.sitesData)
logging.debug("Creating temporary file for GIT_SSH")
tmpfile = NamedTemporaryFile(delete=False)
tmpfile.write(self.git_ssh_contents)
git_sh_loc = tmpfile.name
logging.debug("Wrote contents of git_ssh_contents to %s" % git_sh_loc)
tmpfile.close()
import stat
os.chmod(git_sh_loc, stat.S_IXUSR | stat.S_IRUSR)
os.environ["GIT_SSH"] = git_sh_loc
# Download the git repo
git_repo = self._get_option("repo")
git_branch = self._get_option("branch")
key_file = self._get_option("ssh_key")
output_dir = self._get_option("directory")
os.environ["SSH_KEY_FILE"] = key_file
RunExternal("git clone --quiet --branch %s [email protected]:%s output_git" % (git_branch, git_repo))
# Write summarized data to the data file
data_filename = os.path.join("output_git", output_dir, "data.json")
if not os.path.exists(data_filename):
logging.error("Data file does not exist, bailing")
sys.exit(1)
with open(data_filename) as data_file:
data = json.load(data_file)
# Truncate the data to the latest `maxdays` days.
maxdays = self._get_option("maxdays", 30)
# Get and sort the keys
        sorted_list = sorted(data.keys())
        # keep only the newest `maxdays` keys; anything older is deleted below
to_delete = sorted_list[:-int(maxdays)]
for key in to_delete:
logging.debug("Removing data from %s" % key)
data.pop(key, None)
# Write today's summarized data
todays_key = time.strftime("%Y%m%d")
data[todays_key] = summarized_data
with open(data_filename, 'w') as data_file:
json.dump(data, data_file)
# Commit to git repo
RunExternal("cd output_git; git add -f .")
RunExternal("cd output_git; git commit -m \"Adding data for %s\"" % todays_key)
RunExternal("cd output_git; git push -fq origin %s" % git_branch)
shutil.rmtree("output_git")
| [
"[email protected]"
]
| |
5c49e0ec04fe15cf08be854625cc496120e28c5f | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/helpers/typeshed/stubs/keyboard/keyboard/_keyboard_event.pyi | 9c511fdccf59bc88a0fc4b133c00ab9036b835c7 | [
"MIT",
"Apache-2.0"
]
| permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 827 | pyi | from typing_extensions import Literal
from ._canonical_names import canonical_names as canonical_names, normalize_name as normalize_name
KEY_DOWN: Literal["down"]
KEY_UP: Literal["up"]
class KeyboardEvent:
event_type: Literal["down", "up"] | None
scan_code: int
name: str | None
time: float | None
device: str | None
modifiers: tuple[str, ...] | None
is_keypad: bool | None
def __init__(
self,
event_type: Literal["down", "up"] | None,
scan_code: int,
name: str | None = ...,
time: float | None = ...,
device: str | None = ...,
modifiers: tuple[str, ...] | None = ...,
is_keypad: bool | None = ...,
) -> None: ...
def to_json(self, ensure_ascii: bool = ...) -> str: ...
def __eq__(self, other: object) -> bool: ...
| [
"[email protected]"
]
| |
cce9c2c02347ccae443d5f1e8dbebf712c264d0e | 73e277935ef28fd05935c93a3f155c9cc6dc6de7 | /ctf/crypto/quad_residue/Cipolla.py | e07aed34561ff03170436108e72f4b49b2beca9e | []
| no_license | ohmygodlin/snippet | 5ffe6b8fec99abd67dd5d7f819520e28112eae4b | 21d02015492fb441b2ad93b4a455dc4a145f9913 | refs/heads/master | 2023-01-08T14:59:38.618791 | 2022-12-28T11:23:23 | 2022-12-28T11:23:23 | 190,989,347 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,165 | py | #python3
#https://learnblockchain.cn/article/1520
def square_root_of_quadratic_residue(n, modulo):
"""Square root of quadratic residue
    Solve for the square root of a quadratic residue using Cipolla's algorithm and the Legendre symbol.
Returns:
int -- if n is a quadratic residue,
return x, such that x^{2} = n (mod modulo)
otherwise, return -1
"""
if modulo == 2:
return 1
if n % modulo == 0:
return 0
Legendre = lambda n: pow(n, modulo - 1 >> 1, modulo)
if Legendre(n) == modulo - 1:
return -1
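    # Cipolla's algorithm: find t such that t^2 - n is a quadratic non-residue,
    # then compute (t + sqrt(t^2 - n))^((p+1)/2) in the extension field F_p(sqrt(w))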
t = 0
while Legendre(t ** 2 - n) != modulo - 1:
t += 1
w = (t ** 2 - n) % modulo
return (generate_quadratic_field(w, modulo)(t, 1) ** (modulo + 1 >> 1)).x
def generate_quadratic_field(d, modulo=0):
"""Generate quadratic field number class
Returns:
class -- quadratic field number class
"""
assert(isinstance(modulo, int) and modulo >= 0)
class QuadraticFieldNumber:
def __init__(self, x, y):
self.x = x % modulo
self.y = y % modulo
def __mul__(self, another):
x = self.x * another.x + d * self.y * another.y
y = self.x * another.y + self.y * another.x
return self.__class__(x, y)
def __pow__(self, exponent):
result = self.__class__(1, 0)
if exponent:
temporary = self.__class__(self.x, self.y)
while exponent:
if exponent & 1:
result *= temporary
temporary *= temporary
exponent >>= 1
return result
def __str__(self):
return '({}, {} \\sqrt({}))'.format(self.x, self.y, d)
return QuadraticFieldNumber
a = 8479994658316772151941616510097127087554541274812435112009425778595495359700244470400642403747058566807127814165396640215844192327900454116257979487432016769329970767046735091249898678088061634796559556704959846424131820416048436501387617211770124292793308079214153179977624440438616958575058361193975686620046439877308339989295604537867493683872778843921771307305602776398786978353866231661453376056771972069776398999013769588936194859344941268223184197231368887060609212875507518936172060702209557124430477137421847130682601666968691651447236917018634902407704797328509461854842432015009878011354022108661461024768
p = 30531851861994333252675935111487950694414332763909083514133769861350960895076504687261369815735742549428789138300843082086550059082835141454526618160634109969195486322015775943030060449557090064811940139431735209185996454739163555910726493597222646855506445602953689527405362207926990442391705014604777038685880527537489845359101552442292804398472642356609304810680731556542002301547846635101455995732584071355903010856718680732337369128498655255277003643669031694516851390505923416710601212618443109844041514942401969629158975457079026906304328749039997262960301209158175920051890620947063936347307238412281568760161
x = square_root_of_quadratic_residue(a, p)
print(x)
print(pow(x,2,p) - a)
#x^2 = (p-x)^2 = n mod p | [
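# Hedged sanity check with small illustrative numbers (not part of the original):
# 2 is a quadratic residue mod 7, since 3^2 = 4^2 = 2 (mod 7)
assert pow(square_root_of_quadratic_residue(2, 7), 2, 7) == 2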
"[email protected]"
]
| |
4144585f59160e7268a01a9b954689f44dcc5460 | 44a6e88da453a2e368b014e403843b0c955f21f4 | /utils/make_mock_solid_dir.py | 49e00504ae9b25d4b9a7a94ae096e077cf8d7ffc | [
"Artistic-2.0"
]
| permissive | golharam/genomics | a26b1f9366203ec059cc2e49281909bfc16e6ab4 | ca0c7c239b0f04353e2f2fa897db9c24a1211596 | refs/heads/master | 2020-08-06T10:28:21.604129 | 2019-09-27T07:51:41 | 2019-09-27T07:51:41 | 212,943,378 | 0 | 0 | Artistic-2.0 | 2019-10-05T04:25:24 | 2019-10-05T04:25:23 | null | UTF-8 | Python | false | false | 1,728 | py | #!/usr/bin/env python
#
# make_mock_solid_dir.py: make mock SOLiD directory for test purposes
# Copyright (C) University of Manchester 2011 Peter Briggs
#
########################################################################
#
# make_mock_solid_dir.py
#
#########################################################################
"""make_mock_solid_dir.py
Makes a mock SOLiD run directory with run_definition and barcode statistic
files plus mock csfasta and qual files, which can be used to test other
programs and scrips with.
It uses the TestUtils class from the SolidData module to build and populate
the mock directory structure.
Usage: make_mock_solid_dir.py
"""
#######################################################################
# Import modules that this module depends on
#######################################################################
#
import os
import sys
# Put ../share onto Python search path for modules
SHARE_DIR = os.path.abspath(
os.path.normpath(
os.path.join(os.path.dirname(sys.argv[0]),'..','share')))
sys.path.append(SHARE_DIR)
try:
from bcftbx.test.test_SolidData import TestUtils
except ImportError as ex:
print("Error importing modules: %s" % ex)
if __name__ == "__main__":
paired_end = False
if '--paired-end' in sys.argv:
paired_end = True
elif len(sys.argv) > 1:
print("Usage: %s [--paired-end]" % os.path.basename(sys.argv[0]))
sys.exit(1)
# Make mock solid directory
if paired_end:
solid_dir = TestUtils().make_solid_dir_paired_end('solid0123_20111014_PE_BC')
else:
solid_dir = TestUtils().make_solid_dir('solid0123_20111014_FRAG_BC')
print("Constructed mock dir: %s" % solid_dir)
| [
"[email protected]"
]
| |
6f1f8161ba95d3088ba7f50b93a121664fb1a322 | 57abd17391c6ef691509dae512c102f6635dab9b | /tensorflow_datasets/scripts/create_new_dataset.py | 6e57f703111ebe42c66b6fd4f7d3415e908e0bac | [
"Apache-2.0"
]
| permissive | SinghKislay/datasets | 434e50eb3b8584849192f3cabe7305429cc62363 | bc09dd59826975f57c861da4bea23fa5d63d61cf | refs/heads/master | 2020-05-02T22:27:34.771036 | 2019-04-10T18:14:41 | 2019-04-10T18:14:41 | 176,097,632 | 0 | 0 | Apache-2.0 | 2019-03-17T12:25:56 | 2019-03-17T12:25:55 | null | UTF-8 | Python | false | false | 6,919 | py | # coding=utf-8
# Copyright 2019 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate the minimal source code for a new dataset.
python -m tensorflow_datasets.scripts.create_new_dataset \
--dataset dataset_name \
--type dataset_type
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from tensorflow.io import gfile
from tensorflow_datasets.core import naming
from tensorflow_datasets.core.utils import py_utils
FLAGS = flags.FLAGS
_DATASET_TYPE = ['image', 'video', 'audio', 'text', 'structured', 'translate']
flags.DEFINE_string('tfds_dir', None, 'Root directory of tfds (auto-computed)')
flags.DEFINE_string('dataset', None, 'Dataset name')
flags.DEFINE_enum('type', None, _DATASET_TYPE, 'Dataset type')
_HEADER = """\
\"""{TODO}: Add a description here.\"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""
_DATASET_DEFAULT_IMPORTS = """\
import tensorflow_datasets as tfds\n
"""
_DATASET_TEST_DEFAULTS_IMPORTS = """\
from tensorflow_datasets import testing
from tensorflow_datasets.{dataset_type} import {dataset_name}
"""
_CITATION = """\
# {TODO}: BibTeX citation
_CITATION = \"""
\"""\n
"""
_DESCRIPTION = """\
# {TODO}:
_DESCRIPTION = \"""
\"""\n
"""
_DATASET_DEFAULTS = """\
class {dataset_cls}(tfds.core.GeneratorBasedBuilder):
\"""{TODO}: Short description of my dataset.\"""
# {TODO}: Set up version.
VERSION = tfds.core.Version('0.1.0')
def _info(self):
# {TODO}: Specifies the tfds.core.DatasetInfo object
return tfds.core.DatasetInfo(
builder=self,
# This is the description that will appear on the datasets page.
description=_DESCRIPTION,
# tfds.features.FeatureConnectors
features=tfds.features.FeaturesDict({{
# These are the features of your dataset like images, labels ...
}}),
# If there's a common (input, target) tuple from the features,
# specify them here. They'll be used if as_supervised=True in
# builder.as_dataset.
supervised_keys=(),
# Homepage of the dataset for documentation
urls=[],
citation=_CITATION,
)
def _split_generators(self, dl_manager):
# {TODO}: Downloads the data and defines the splits
# dl_manager is a tfds.download.DownloadManager that can be used to
# download and extract URLs
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
# {TODO}: Tune the number of shards such that each shard
# is < 4 GB.
num_shards=10,
# These kwargs will be passed to _generate_examples
gen_kwargs={{}},
),
]
def _generate_examples(self):
# {TODO}: Yields examples from the dataset
yield {{}}\n
"""
_DATASET_TEST_DEFAULTS = """\
class {dataset_cls}Test(testing.DatasetBuilderTestCase):
# {TODO}:
DATASET_CLASS = {dataset_name}.{dataset_cls}
SPLITS = {{
"train": 3, # Number of fake train example
"test": 1, # Number of fake test example
}}
# If you are calling `download/download_and_extract` with a dict, like:
# dl_manager.download({{'some_key': 'http://a.org/out.txt', ...}})
# then the tests needs to provide the fake output paths relative to the
# fake data directory
# DL_EXTRACT_RESULT = {{'some_key': 'output_file1.txt', ...}}
if __name__ == "__main__":
testing.test_main()
"""
_CHECKSUM_FILE = """\
# {TODO}: If your dataset downloads files, then the checksums will be
# automatically added here when running the download_and_prepare script
# with --register_checksums.
"""
def create_dataset_file(root_dir, data):
"""Create a new dataset from a template."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}.py')
context = (
_HEADER + _DATASET_DEFAULT_IMPORTS + _CITATION
+ _DESCRIPTION + _DATASET_DEFAULTS
)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
def add_the_init(root_dir, data):
"""Append the new dataset file to the __init__.py."""
init_file = os.path.join(root_dir, '{dataset_type}', '__init__.py')
context = (
'from tensorflow_datasets.{dataset_type}.{dataset_name} import '
'{dataset_cls} # {TODO} Sort alphabetically\n'
)
with gfile.GFile(init_file.format(**data), 'a') as f:
f.write(context.format(**data))
def create_dataset_test_file(root_dir, data):
"""Create the test file associated with the dataset."""
file_path = os.path.join(root_dir, '{dataset_type}', '{dataset_name}_test.py')
context = (
_HEADER + _DATASET_TEST_DEFAULTS_IMPORTS +
_DATASET_TEST_DEFAULTS)
with gfile.GFile(file_path.format(**data), 'w') as f:
f.write(context.format(**data))
def create_fake_data(root_dir, data):
fake_examples_dir = os.path.join(
root_dir, 'testing', 'test_data', 'fake_examples', '{dataset_name}')
fake_examples_dir = fake_examples_dir.format(**data)
gfile.makedirs(fake_examples_dir)
fake_path = os.path.join(
fake_examples_dir, 'TODO-add_fake_data_in_this_directory.txt')
with gfile.GFile(fake_path, 'w') as f:
f.write('{TODO}: Add fake data in this directory'.format(**data))
def create_checksum_file(root_dir, data):
checksum_path = os.path.join(root_dir, 'url_checksums', '{dataset_name}.txt')
with gfile.GFile(checksum_path.format(**data), 'w') as f:
f.write(_CHECKSUM_FILE.format(**data))
def main(_):
dataset_name = FLAGS.dataset
dataset_type = FLAGS.type
root_dir = FLAGS.tfds_dir
if not root_dir:
root_dir = py_utils.tfds_dir()
data = dict(
dataset_name=dataset_name,
dataset_type=dataset_type,
dataset_cls=naming.snake_to_camelcase(dataset_name),
TODO='TODO({})'.format(dataset_name),
)
create_dataset_file(root_dir, data)
add_the_init(root_dir, data)
create_dataset_test_file(root_dir, data)
create_fake_data(root_dir, data)
create_checksum_file(root_dir, data)
print(
'Dataset generated in {}\n'
'You can start with searching TODO({}).\n'
'Please check this '
'`https://github.com/tensorflow/datasets/blob/master/docs/add_dataset.md`'
'for details.'.format(root_dir, dataset_name)
)
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
]
| |
9442061d1c5d28bd09a835998a2e53cfa07e48e2 | bb33e6be8316f35decbb2b81badf2b6dcf7df515 | /source/res/scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py | 219372a39b6b37e617c2e86dffba37acfa9ed26a | []
| no_license | StranikS-Scan/WorldOfTanks-Decompiled | 999c9567de38c32c760ab72c21c00ea7bc20990c | d2fe9c195825ececc728e87a02983908b7ea9199 | refs/heads/1.18 | 2023-08-25T17:39:27.718097 | 2022-09-22T06:49:44 | 2022-09-22T06:49:44 | 148,696,315 | 103 | 39 | null | 2022-09-14T17:50:03 | 2018-09-13T20:49:11 | Python | UTF-8 | Python | false | false | 736 | py | # Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/meta/EventBoardsAwardsOverlayMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class EventBoardsAwardsOverlayMeta(BaseDAAPIComponent):
def changeFilter(self, id):
self._printOverrideError('changeFilter')
def as_setHeaderS(self, data):
return self.flashObject.as_setHeader(data) if self._isDAAPIInited() else None
def as_setVehicleS(self, data):
return self.flashObject.as_setVehicle(data) if self._isDAAPIInited() else None
def as_setDataS(self, data):
return self.flashObject.as_setData(data) if self._isDAAPIInited() else None
| [
"[email protected]"
]
| |
fe0bd2ceaf4493e021a319b6698c83f78f07f01e | dce2e3b11804fdb141feaa48299fa8cd751f0e5d | /2.两数相加.py | ad357aa9fedb490291ad6f56660209fd8858a61c | []
| permissive | Cosmos-Break/leetcode | bf056efb6f3eb6448df7fb3fc4869992a3e7eb48 | 9f5f3d24e35b0a482ed40594ea665e9068324dcc | refs/heads/main | 2023-06-26T04:29:25.135826 | 2021-07-19T12:29:29 | 2021-07-19T12:29:29 | 293,397,157 | 0 | 0 | MIT | 2020-09-07T01:55:39 | 2020-09-07T01:55:38 | null | UTF-8 | Python | false | false | 785 | py | #
# @lc app=leetcode.cn id=2 lang=python3
#
# [2] 两数相加
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
head = ListNode(l1.val + l2.val)
cur = head
while l1.next or l2.next:
            l1 = l1.next if l1.next else ListNode(0)
            l2 = l2.next if l2.next else ListNode(0)
cur.next = ListNode(l1.val + l2.val + cur.val // 10)
cur.val = cur.val % 10
cur = cur.next
if cur.val >= 10:
cur.next = ListNode(cur.val // 10)
cur.val = cur.val % 10
return head
# @lc code=end
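# Hedged usage sketch (LeetCode supplies the real ListNode; a variant with a
# default value, matching the ListNode(0) padding above, is assumed here):
#   342 + 465 = 807, i.e. l1 = [2,4,3] and l2 = [5,6,4] yield [7,0,8]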
| [
"[email protected]"
]
| |
cffddf3d75c1e1ce6fff97c1711d232a66a1205a | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901100012/d07/mymodule/stats_word.py | 02ebfde584c7e1c929c260f80257d92f7a50d67b | []
| no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 2,934 | py | en_text='''
The Zen of Python,by Tim Peters
Beautiful is better than ugly.
Explicit is better than implicit.
Simple is better than complex.
Complex is better than complicated.
Flat is better than nested.
Sparse is better than dense.
Readability counts.
Special cases aren't special enough to break the rules.
Although practicality beats purity.
Errors should never pass silently.
Unless explicitly silenced.
In the face of ambiguity, refuse the temptation to guess.
There should be one-- and preferably only one --obvious way to do it.
Although that way may not be obvious at first unless you're Dutch.
Now is better than never.
Although never is often better than *right* now.
If the implementation is hard to explain, it's a bad idea.
If the implementation is easy to explain, it's a good idea.
Namespaces are one honking great idea -- let's do more of those!
'''
# English word-frequency count, descending order
def stats_text_en(text):
    eles=text.split()  # split the text on whitespace
    words=[]
    punctuation=".,-*!"
    for elet in eles:
        for s1 in punctuation:
            elet=elet.replace(s1,' ')
        elet=elet.strip()
        if len(elet) and elet.isascii():
            words.append(elet)
print(words)
print()
counter={}
word_set=set(words)
for word in word_set:
counter[word]=words.count(word)
print(counter)
print()
return sorted(counter.items(),key=lambda x:x[1],reverse=True)
# Chinese character-frequency count, descending order
def stats_text_cn(text):
    cn_characters=[]
    for character in text:
        if '\u4e00'<=character<='\u9fa5':  # CJK unified ideographs range
            cn_characters.append(character)
cn_characters.append(character)
counter={}
cn_set=set(cn_characters)
for word in cn_set:
counter[word]=cn_characters.count(word)
return sorted(counter.items(),key=lambda x:x[1],reverse=True)
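# cn_text below is a Chinese rendering of the Zen of Python; it is kept in
# Chinese on purpose, since stats_text_cn only counts CJK characters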
cn_text='''
Python之禅 by Tim Petters
美丽胜于丑陋
露骨比露骨好
简单总比复杂好
复杂比复杂好
平的比嵌套的好
稀疏比密密好
可读性很重要
特殊情况并不足以打破规则
尽管实用性胜过纯洁性
错误永远不应该悄悄过去
除非明确地沉默
面对橱柜,拒绝诱惑去猜测
应该有一种----最好只有一种----显而易见的方法来做到这一点
如果你不是荷兰人,那么这种方式在一开始可能并不明显
现在总比没有好
虽然从来没有比现在更好
如果实现很难解释,这是一个坏主意
如果实现容易解释,这是一个好主意
命名空间是一个很好的主意--让我们做更多的那些
'''
# merged English + Chinese word-frequency results
def stats_text(text):
    return stats_text_en(text) + stats_text_cn(text)
#def stats_text(en_text,cn_text):
    #print("Merged word-frequency results\n",stats_text_en(en_text) + stats_text_cn(cn_text))
if __name__=='__main__':
en_result=stats_text_en(en_text)
cn_result=stats_text_cn(cn_text)
print("统计英文次数-->\n",en_result)
print("统计中文次数-->\n",cn_result)
| [
"[email protected]"
]
| |
78ce0f7dcfae56d27b83005282d451663d29798d | a11d83fced34854664fac72e18d48fde6aa967e4 | /0x02-python-import_modules/102-magic_calculation.py | 0162921f0da03b752aab68a8227e86622fb4338e | []
| no_license | afarizap/holbertonschool-higher_level_programming | ffe0bf1440726c952f4dd28b908eabc4ccb5225b | ad39e58f9cb20cba4b9e2c14075f216097588f47 | refs/heads/master | 2023-03-30T15:39:35.184484 | 2021-03-22T22:55:24 | 2021-03-22T22:55:24 | 259,437,040 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #!/usr/bin/python3
def magic_calculation(a, b):
from magic_calculation_102 import add, sub
if a < b:
c = add(a, b)
for i in range(4, 6):
c = add(c, i)
return c
return sub(a, b)
if __name__ == '__main__':
import dis
dis.dis(magic_calculation)
| [
"[email protected]"
]
| |
038a56b6976ac7b4e464d15987f277155fce3956 | 09cead98874a64d55b9e5c84b369d3523c890442 | /py200703_python1/day13_py200814/output_1.py | 5599e7b1ca607505095d6c9b8c9ce7737df9672c | []
| no_license | edu-athensoft/stem1401python_student | f12b404d749286036a090e941c0268381ce558f8 | baad017d4cef2994855b008a756758d7b5e119ec | refs/heads/master | 2021-08-29T15:01:45.875136 | 2021-08-24T23:03:51 | 2021-08-24T23:03:51 | 210,029,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | """
output format()
output formatting with placeholders
string.format()
string template
placeholder
"""
x = 1 + 3*4
y = 2 + 5*6
# not recommended
print('x=', x, ',', 'y=', y)
# recommended
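# the next line prints the template literally; no values are substituted yet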
print("x={} , y={}")
print("x={} , y={}".format(x, y))
print("x={},y={}".format(x, y))
print("x={}, y={}".format(x, y))
| [
"[email protected]"
]
| |
82aed50d228c4f45ff91dae2b61a13a01bd7bd66 | 87eed57b13eba5fc10756e705821a2fc861a198e | /bfg9000/platforms/host.py | 65e15de4fb2bd1e765b040415f4de4a8f23600cb | [
"BSD-3-Clause"
]
| permissive | jimporter/bfg9000 | 379ac2d9debb822defacc6c5e31d7b65468f0973 | 876966cc82b5520a7bddf88c2a57716c5579b5ba | refs/heads/master | 2023-08-04T06:29:44.669098 | 2023-08-01T03:13:46 | 2023-08-01T03:13:46 | 31,297,691 | 87 | 21 | BSD-3-Clause | 2020-08-06T06:38:10 | 2015-02-25T04:47:12 | Python | UTF-8 | Python | false | false | 334 | py | from .core import _get_platform_info, _platform_info, Platform
class HostPlatform(Platform):
pass
def platform_info(*args, **kwargs):
return _platform_info('host', *args, **kwargs)
def from_json(value):
return _get_platform_info('host', value['genus'], value['species'],
value['arch'])
| [
"[email protected]"
]
| |
2f23cbd42dee001993bc154511cf225da4760ce6 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/caaa5d634f104a58a218ff663dfc926195e3acaf-<test_notify_sentry_app_and_plugin_with_same_slug>-bug.py | 2bb03ab90a78aded197d894082a935887a555e3a | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | def test_notify_sentry_app_and_plugin_with_same_slug(self):
event = self.get_event()
self.create_sentry_app(organization=event.organization, name='Notify', is_alertable=True)
plugin = MagicMock()
plugin.is_enabled.return_value = True
plugin.should_notify.return_value = True
rule = self.get_rule(data={
'service': 'notify',
})
with patch('sentry.plugins.plugins.get') as get_plugin:
get_plugin.return_value = plugin
results = list(rule.after(event=event, state=self.get_state()))
assert (len(results) is 2)
assert (plugin.should_notify.call_count is 1)
assert (results[0].callback is notify_sentry_app)
assert (results[1].callback is plugin.rule_notify) | [
"[email protected]"
]
| |
2fedba0cbdc8a5b29280723b6940c2f71a7bda36 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03546/s442051818.py | 0f991e8c96376c793a2bab1a38286d3a650be9e2 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | h, _ = map(int, input().split())
r = range(10)
c = [[int(i) for i in input().split()] for _ in r]
for k in r:
for i in r:
for j in r:
c[i][j] = min(c[i][j], c[i][k] + c[k][j])
else:
a = [[int(i) for i in input().split()] for _ in range(h)]
print(sum(c[i][1] for i in sum(a, []) if i != -1)) | [
"[email protected]"
]
| |
27c6f04530538b5ac8c71908ab91361f20ecc16b | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/225/users/4013/codes/1671_1079.py | c3bb6e4c00135c7fac261439e8a41a85fc6fb9ce | []
| no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # Ao testar sua solução, não se limite ao caso de exemplo.
from math import *
# Leitura dos lados do triangulo a, b, and c
a = float(input ("Lado 1: "))
b = float(input ("Lado 2: "))
c = float(input ("Lado 3: "))
print("Entradas:", a, ",", b, ",", c)
# Testa se pelo menos uma das entradas eh negativa
if ((a > 0) or (b > 0) or (c > 0 )):
# Testa se medidas correspondem aas de um triangulo
if ((a < b + c) and (b < a + c) and (c < a + b)):
s = (a + b + c) / 2.0
area = sqrt(s * (s-a) * (s-b) * (s-c))
area = round(area, 3)
print("Area:", area)
else:
print("Area: invalida")
else:
print("Area: invalida")
| [
"[email protected]"
]
| |
a3833b102545d1c9387ae8d1e32f5eb76d98b791 | dd097c7ae744227b0312d762ee0482a3380ff8c6 | /interptools.py | be2c119cfc1ff251f2c6d11d1db94c2279481ab4 | []
| no_license | moflaher/workspace_python | 0d6e98274d923a721db2b345f65c20b02ca59d08 | 6551e3602ead3373eafce10d11ce7b96bdcb106f | refs/heads/master | 2023-03-06T02:15:01.945481 | 2023-03-01T19:15:51 | 2023-03-01T19:15:51 | 20,814,932 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 21,812 | py | from __future__ import division,print_function
import numpy as np
import scipy as sp
import matplotlib as mpl
import matplotlib.tri as mplt
import matplotlib.pyplot as plt
import os, sys
import scipy.io as sio
import gridtools as gt
import datatools as dt
import plottools as pt
import projtools as pjt
import misctools as mt
from matplotlib.collections import LineCollection as LC
import seawater as sw
np.set_printoptions(precision=16,suppress=True,threshold=sys.maxsize)
import bisect
import scipy.interpolate as spitp
import matplotlib.path as path
"""
Front Matter
=============
Created in 2014
Author: Mitchell O'Flaherty-Sproul
A bunch of functions dealing with fvcom interpolation.
"""
def interpE_at_loc(data,varname,loc,layer=None,ll=True):
"""
Interpolate element data at a location. If variable is 3d then specify a layer, defaults to surface layer otherwise.
Note: 1d element data will break this, should be possible to handle. I will work out the logic another day.
:Parameters:
data - data dictionary from loadnc
varname - element data variable name. (2d or 3d)
loc - location
:Optional:
layer - default None. Specify which layer of 3d data to use
ll - default True. Is point lon/lat or xy.
"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
    if (trifinder not in data and trigrid in data):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
    elif trigrid not in data:
print('No trifinder or trigrid to initialize it.')
return
if ((len(data[varname].shape)>2) and (layer==None)):
print('3d variable specified without layer. Returning surface layer.')
layer=0
elif ((len(data[varname].shape)==2) and (layer!=None)):
print('2d variable specified with layer. That would break things, unspecifing layer.')
layer=None
loc=np.array(loc)
host=data[trifinder].__call__(loc[0],loc[1])
if host==-1:
print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.')
out=np.empty(shape=(data[varname][:,layer,host]).squeeze().shape)
out[:]=np.nan
return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][host,:],loc)
else:
x0c=loc[0]-data['uvnode'][host,0]
y0c=loc[1]-data['uvnode'][host,1]
e0=data['nbe'][host,0]
e1=data['nbe'][host,1]
e2=data['nbe'][host,2]
var_e=(data[varname][:,layer,host]).squeeze()
if e0==-1:
var_0=np.zeros(shape=var_e.shape,dtype=var_e.dtype)
else:
var_0=(data[varname][:,layer,e0]).squeeze()
if e1==-1:
var_1=np.zeros(shape=var_e.shape,dtype=var_e.dtype)
else:
var_1=(data[varname][:,layer,e1]).squeeze()
if e2==-1:
var_2=np.zeros(shape=var_e.shape,dtype=var_e.dtype)
else:
var_2=(data[varname][:,layer,e2]).squeeze()
dvardx= data['a1u'][0,host]*var_e+data['a1u'][1,host]*var_0+data['a1u'][2,host]*var_1+data['a1u'][3,host]*var_2
dvardy= data['a2u'][0,host]*var_e+data['a2u'][1,host]*var_0+data['a2u'][2,host]*var_1+data['a2u'][3,host]*var_2
var= var_e + dvardx*x0c + dvardy*y0c
return var
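# Hedged usage sketch (grid name, paths and the query point are illustrative):
# data = dt.loadnc('runs/mygrid/output/', singlename='mygrid_0001.nc')
# data = dt.ncdatasort(data, trifinder=True)
# ua_series = interpE_at_loc(data, 'ua', [-66.34, 44.27], ll=True)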
def interpN_at_loc(data,varname,loc,layer=None,ll=True):
"""
Interpolate nodal data at a location. If variable is 3d then specify a layer, defaults to surface layer otherwise.
Note: 1d element data will break this, should be possible to handle. I will work out the logic another day.
data - data dictionary from loadnc
varname - nodal data variable name. (1d or 2d or 3d)
loc - location
Optional:
layer - default None. Specify which layer of 3d data to use
ll - default True. Is point lon/lat or xy.
"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
    if (trifinder not in data and trigrid in data):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
    elif trigrid not in data:
print('No trifinder or trigrid to initialize it.')
return
if ((len(data[varname].shape)>2) and (layer==None)):
print('3d variable specified without layer. Returning surface layer.')
layer=0
elif ((len(data[varname].shape)==2) and (layer!=None)):
print('2d variable specified with layer. That would break things, unspecifing layer.')
layer=None
loc=np.array(loc)
host=data[trifinder].__call__(loc[0],loc[1])
if host==-1:
print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.')
if len(data[varname].shape)==1:
out=np.nan
else:
out=np.empty(shape=(data[varname][:,layer,host]).squeeze().shape)
out[:]=np.nan
return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][host,:],loc)
else:
x0c=loc[0]-data['uvnode'][host,0]
y0c=loc[1]-data['uvnode'][host,1]
n0=data['nv'][host,0]
n1=data['nv'][host,1]
n2=data['nv'][host,2]
#To deal with 1d data, should be a better way to handle this....
#This can all be vectorized, checkout robies code could make a factor of 2 difference.
if len(data[varname].shape)==1:
nvar0=data[varname][n0]
nvar1=data[varname][n1]
nvar2=data[varname][n2]
else:
nvar0=(data[varname][:,layer,n0]).squeeze()
nvar1=(data[varname][:,layer,n1]).squeeze()
nvar2=(data[varname][:,layer,n2]).squeeze()
var_0=data['aw0'][0,host]*nvar0+data['aw0'][1,host]*nvar1+data['aw0'][2,host]*nvar2
var_x=data['awx'][0,host]*nvar0+data['awx'][1,host]*nvar1+data['awx'][2,host]*nvar2
var_y=data['awy'][0,host]*nvar0+data['awy'][1,host]*nvar1+data['awy'][2,host]*nvar2
var= var_0 + var_x*x0c + var_y*y0c
return var
def interpEfield_locs(data,varname,locs,timein,layer=None,ll=False,fill_value=-9999,hosts=[]):
#"""
#Interpolate element data at a location. If variable is 3d then specify a layer, defaults to surface layer otherwise.
#Note: 1d element data will break this, should be possible to handle. I will work out the logic another day.
#:Parameters:
#data - data dictionary from loadnc
#varname - element data variable name. (2d or 3d)
#loc - location
#:Optional:
#layer - default None. Specify which layer of 3d data to use
#ll - default True. Is point lon/lat or xy.
#fill_value - default -9999 when points are outside the domain they return fill_value
#"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
    if (trifinder not in data and trigrid in data):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
    elif trigrid not in data:
print('No trifinder or trigrid to initialize it.')
return
if ((len(data[varname].shape)>2) and (layer==None)):
print('3d variable specified without layer. Returning surface layer.')
layer=0
elif ((len(data[varname].shape)==2) and (layer!=None)):
print('2d variable specified with layer. That would break things, unspecifing layer.')
layer=None
locs=np.atleast_2d(locs)
#Only find the hosts if not given
    if len(hosts)==0:
hosts=data[trifinder].__call__(locs[:,0],locs[:,1])
#if host==-1:
#print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.'
#out=np.empty(shape=(data[varname][timein,layer,host]).squeeze().shape)
#out[:]=np.nan
#return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][hosts,:].flatten(),locs.flatten())
else:
x0c=locs[:,0]-data['uvnode'][hosts,0]
y0c=locs[:,1]-data['uvnode'][hosts,1]
e0=data['nbe'][hosts,0]
e1=data['nbe'][hosts,1]
e2=data['nbe'][hosts,2]
var_e=(data[varname][timein,layer,hosts]).flatten()
var_0=(data[varname][timein,layer,e0]).flatten()
var_1=(data[varname][timein,layer,e1]).flatten()
var_2=(data[varname][timein,layer,e2]).flatten()
var_0[e0==-1]=0
var_1[e1==-1]=0
var_2[e2==-1]=0
dvardx= data['a1u'][0,hosts]*var_e+data['a1u'][1,hosts]*var_0+data['a1u'][2,hosts]*var_1+data['a1u'][3,hosts]*var_2
dvardy= data['a2u'][0,hosts]*var_e+data['a2u'][1,hosts]*var_0+data['a2u'][2,hosts]*var_1+data['a2u'][3,hosts]*var_2
var= var_e + dvardx*x0c + dvardy*y0c
# Handle any points outside the domain
var[hosts==-1]=fill_value
return var
def interpNfield_locs(data,varname,locs,timein,ll=False,fill_value=-9999,hosts=[]):
#"""
#Interpolate node data at a location.
#
#:Parameters:
#data - data dictionary from loadnc
#varname - element data variable name.
#loc - location
#
#:Optional:
#ll - default True. Is point lon/lat or xy.
#fill_value - default -9999 when points are outside the domain they return fill_value
#"""
###############################################################################
# Error and corner case checking
if ll==True:
trifinder='trigrid_finder'
trigrid='trigrid'
else:
trifinder='trigridxy_finder'
trigrid='trigridxy'
    if (trifinder not in data and trigrid in data):
print('No trifinder initialized. Initializing now.')
data[trifinder]=data[trigrid].get_trifinder()
    elif trigrid not in data:
print('No trifinder or trigrid to initialize it.')
return
locs=np.atleast_2d(locs)
#Only find the hosts if not given
    if len(hosts)==0:
hosts=data[trifinder].__call__(locs[:,0],locs[:,1])
#if host==-1:
#print('Point at: (' + ('%f'%loc[0]) + ', ' +('%f'%loc[1]) + ') is external to the grid.'
#out=np.empty(shape=(data[varname][timein,layer,host]).squeeze().shape)
#out[:]=np.nan
#return out
###############################################################################
#code for ll adapted from mod_utils.F
if ll==True:
x0c,y0c=pjt.ll2m(data['uvnodell'][hosts,:].flatten(),locs.flatten())
else:
x0c=locs[:,0]-data['uvnode'][hosts,0]
y0c=locs[:,1]-data['uvnode'][hosts,1]
n0=data['nv'][hosts,0]
n1=data['nv'][hosts,1]
n2=data['nv'][hosts,2]
#To deal with 1d data, should be a better way to handle this....
#This can all be vectorized, checkout robies code could make a factor of 2 difference.
if len(data[varname].shape)==1:
nvar0=data[varname][n0]
nvar1=data[varname][n1]
nvar2=data[varname][n2]
else:
nvar0=(data[varname][timein,n0]).squeeze()
nvar1=(data[varname][timein,n1]).squeeze()
nvar2=(data[varname][timein,n2]).squeeze()
var_0=data['aw0'][0,hosts]*nvar0+data['aw0'][1,hosts]*nvar1+data['aw0'][2,hosts]*nvar2
var_x=data['awx'][0,hosts]*nvar0+data['awx'][1,hosts]*nvar1+data['awx'][2,hosts]*nvar2
var_y=data['awy'][0,hosts]*nvar0+data['awy'][1,hosts]*nvar1+data['awy'][2,hosts]*nvar2
var= var_0 + var_x*x0c + var_y*y0c
# Handle any points outside the domain
var[hosts==-1]=fill_value
return var
def cross_shore_transect_2d(grid,name,region,vec,npt):
data = dt.loadnc('runs/'+grid+'/'+name+'/output/',singlename=grid + '_0001.nc')
print('done load')
data = dt.ncdatasort(data,trifinder=True)
print('done sort')
cages=gt.loadcage('runs/'+grid+'/' +name+ '/input/' +grid+ '_cage.dat')
if np.shape(cages)!=():
tmparray=[list(zip(data['nodell'][data['nv'][i,[0,1,2,0]],0],data['nodell'][data['nv'][i,[0,1,2,0]],1])) for i in cages ]
color='g'
lw=.2
ls='solid'
vectorstart=np.array(vec[0])
vectorend=np.array(vec[1])
vectorx=np.array([vectorstart[0],vectorend[0]])
vectory=np.array([vectorstart[1],vectorend[1]])
snv=(vectorend-vectorstart)/np.linalg.norm(vectorend-vectorstart)
xi=np.linspace(vectorstart[0],vectorend[0],npt)
yi=np.linspace(vectorstart[1],vectorend[1],npt)
us=data['ua'].shape
savepath='data/cross_shore_transect/'
if not os.path.exists(savepath): os.makedirs(savepath)
plotpath='figures/png/'+grid+'_2d/cross_shore_transect/'
if not os.path.exists(plotpath): os.makedirs(plotpath)
nidx=dt.get_nodes(data,region)
f=plt.figure()
ax=f.add_axes([.125,.1,.775,.8])
triax=ax.tripcolor(data['trigrid'],data['h'],vmin=data['h'][nidx].min(),vmax=data['h'][nidx].max())
ax.plot(xi,yi,'k',lw=3)
if np.shape(cages)!=():
lseg_t=LC(tmparray,linewidths = lw,linestyles=ls,color=color)
coast=ax.add_collection(lseg_t)
coast.set_zorder(30)
pt.prettyplot_ll(ax,setregion=region,cb=triax,cblabel=r'Depth (m)')
f.savefig(plotpath + name+'_'+('%f'%vectorx[0])+'_'+('%f'%vectorx[1])+'_'+('%f'%vectory[0])+'_'+('%f'%vectory[1])+'_'+('%d'%len(xi))+'_line_location.png',dpi=600)
plt.close(f)
fillarray_u=np.empty((us[0],npt))
fillarray_v=np.empty((us[0],npt))
fillalong=np.empty((us[0],npt))
fillcross=np.empty((us[0],npt))
dist=np.empty((npt,))
h=np.empty((npt,))
print('interp uvw on path')
for i in range(0,len(xi)):
print(i)
fillarray_u[:,i]=interpE_at_loc(data,'ua',[xi[i],yi[i]])
fillarray_v[:,i]=interpE_at_loc(data,'va',[xi[i],yi[i]])
h[i]=interpN_at_loc(data,'h',[xi[i],yi[i]])
print('Calc along path current')
for i in range(0,len(xi)):
print(i)
inner=np.inner(np.vstack([fillarray_u[:,i],fillarray_v[:,i]]).T,snv)
along=np.vstack([inner*snv[0],inner*snv[1]]).T
tmpa=np.multiply(np.sign(np.arctan2(along[:,1],along[:,0])),np.linalg.norm(along,axis=1))
fillalong[:,i]=tmpa
cross=np.vstack([fillarray_u[:,i],fillarray_v[:,i]]).T-along
tmpc=np.multiply(np.sign(np.arctan2(cross[:,1],cross[:,0])),np.linalg.norm(cross,axis=1))
fillcross[:,i]=tmpc
dist[i]=(sw.dist([vectorstart[1], yi[i]],[vectorstart[0], xi[i]],'km'))[0]*1000;
if np.shape(cages)!=():
incage=np.zeros((len(xi),))
host=data['trigrid'].get_trifinder().__call__(xi,yi)
incage[np.in1d(host,cages)]=1
savedic={}
savedic['u']=fillarray_u
savedic['v']=fillarray_v
savedic['along']=fillalong
savedic['cross']=fillcross
savedic['distance']=dist
savedic['h']=h
savedic['lon']=xi
savedic['lat']=yi
if np.shape(cages)!=():
savedic['incage']=incage
np.save(savepath+grid+'_'+name+'_'+('%f'%vectorx[0])+'_'+('%f'%vectorx[1])+'_'+('%f'%vectory[0])+'_'+('%f'%vectory[1])+'_'+('%d'%len(xi))+'_2d.npy',savedic)
sio.savemat(savepath+'matfiles/'+grid+'_'+name+'_'+('%f'%vectorx[0])+'_'+('%f'%vectorx[1])+'_'+('%f'%vectory[0])+'_'+('%f'%vectory[1])+'_'+('%d'%len(xi))+'_2d.mat',mdict=savedic)
def interpol(data_1, data_2, time_step=5.0/(24*60)):
dt_1 = data_1['time']
dt_2 = data_2['time']
    # generate interpolation functions using scipy's linear interpolator
    # (the module-level interp1d defined below has a different signature)
    f1 = spitp.interp1d(dt_1, data_1['pts'])
    f2 = spitp.interp1d(dt_2, data_2['pts'])
# choose interval on which to interpolate
start = max(dt_1[0], dt_2[0])
end = min(dt_1[-1], dt_2[-1])
# create timestamp array for new data and perform interpolation
output_times = np.arange(start,end,time_step)
series_1 = f1(output_times)
series_2 = f2(output_times)
dt_start = max(dt_1[0], dt_2[0])
return (series_1, series_2, output_times, time_step)
def interp1d(in_time, in_data, out_time, kind='linear'):
"""
Takes data (1d) and its timestamp. Returns the linear interpolates the vector to a second timestamp.
:Parameters:
in_data - data to interpolate
in_time - timestamp of in_data
out_time - timestamps to output
:Optional:
kind - sets the linear interpolator kind used in scipy.interpolate.interp1d
"""
# generate interpolation functions using linear interpolation
f = spitp.interp1d(in_time, in_data, kind=kind, bounds_error=False)
# Create output data
out_data = f(out_time)
return out_data
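# Hedged usage sketch (times and values are illustrative only):
# interp1d(np.array([0., 1., 2.]), np.array([0., 10., 20.]), np.array([0.5, 1.5]))
# -> array([ 5., 15.])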
def get_riops_weights(ri,locations):
"""
Function to calculate interpolation weights for riops to points.
"""
print('Processing weights')
lon=ri['nav_lon'][:]-360
lat=ri['nav_lat'][:]
lo,la,proj=pjt.lcc(lon,lat)
ll=np.array(proj(locations[:,0],locations[:,1])).T
bll=mt.boxminmax(ll)
idx=np.empty((len(locations),2),dtype=int)
weights=np.empty((len(locations[:,0]),4))
for i in range(ri['nav_lon'].shape[0]-1):
for j in range(ri['nav_lon'].shape[1]-1):
a=np.array([lo[i,j],lo[i,j+1],lo[i+1,j+1],lo[i+1,j]])
b=np.array([la[i,j],la[i,j+1],la[i+1,j+1],la[i+1,j]])
if b.max()<np.min(bll[2:]) or b.min()>np.max(bll[2:]):
continue
if a.min()>np.max(bll[:2]) or a.max()<np.min(bll[:2]):
continue
p=path.Path(np.vstack([a,b]).T)
tidx=p.contains_points(ll)
if np.sum(tidx)>0:
for k in range(len(tidx)):
if tidx[k]:
idx[k,]=np.array([i,j])
for k,tt in enumerate(idx):
i=tt[0]
j=tt[1]
a=np.array([lo[i,j],lo[i,j+1],lo[i+1,j+1],lo[i+1,j]])
b=np.array([la[i,j],la[i,j+1],la[i+1,j+1],la[i+1,j]])
dist=np.sqrt((a-ll[k,0])**2+(b-ll[k,1])**2)
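        # weights are stored as denominators: interp_riops later evaluates
        # sum(vals / weights[k]), i.e. inverse-distance weighting
        # sum(v_i / d_i^2) / sum(1 / d_j^2)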
weights[k,:]=(dist**2)*np.sum(1/dist**2)
print('Done processing weights')
return weights, idx
def interp_riops(field, weights, idx):
"""
Interpolate riops using weights.
"""
try:
import pyximport; pyximport.install()
import interp_riops as ir
out=ir.interp_riops_c(field,weights,idx)
return out
except:
print('There was an issue with during using cython falling back to python.')
out=np.empty((len(idx),))
for k,tt in enumerate(idx):
i=tt[0]
j=tt[1]
vals=np.array([field[i,j],field[i,j+1],field[i+1,j+1],field[i+1,j]])
out[k]=np.nansum(vals/weights[k,:])
return out
def spread_field(fieldin):
"""
Spread a gridded field down and then out.
"""
fs=np.array(fieldin.shape)
if len(fs)==3:
field=fieldin[0,].reshape(-1)
else:
field=fieldin.reshape(-1)
try:
import pyximport; pyximport.install()
import interp_riops as ir
field=ir.spread_field_c(field, fs[1], fs[2])
except:
print('There was an issue with during using cython falling back to python.')
while np.sum(field.mask)>0:
for i in range(1,fs[1]-1):
for j in range(1,fs[2]-1):
if field.mask[i*fs[2]+j]:
idx=np.array([(i-1)*fs[2]+(j-1),(i-1)*fs[2]+(j),(i-1)*fs[2]+(j+1),
(i)*fs[2]+(j-1),(i)*fs[2]+(j+1),
(i+1)*fs[2]+(j-1),(i+1)*fs[2]+(j),(i+1)*fs[2]+(j+1)])
if np.sum(~field.mask[idx])>0:
ridx=idx[~field.mask[idx]]
pmean=field[ridx]
field[i*fs[2]+j]=np.mean(pmean)
i=0
for j in range(0,fs[2]):
if field.mask[i*fs[2]+j] and not field.mask[(i+1)*fs[2]+j]:
field[i*fs[2]+j]=field[(i+1)*fs[2]+j]
i=fs[1]-1
for j in range(0,fs[2]):
if field.mask[i*fs[2]+j] and not field.mask[(i-1)*fs[2]+j]:
field[i*fs[2]+j]=field[(i-1)*fs[2]+j]
j=0
for i in range(0,fs[1]):
if field.mask[i*fs[2]+j] and not field.mask[i*fs[2]+(j+1)]:
field[i*fs[2]+j]=field[i*fs[2]+(j+1)]
j=fs[2]-1
for i in range(0,fs[1]):
if field.mask[i*fs[2]+j] and not field.mask[i*fs[2]+(j-1)]:
field[i*fs[2]+j]=field[i*fs[2]+(j-1)]
if len(fs)==3:
fieldin[0,:]=field.reshape(fs[1],fs[2])
for i in range(1,fieldin.shape[0]):
fieldin[i,fieldin.mask[i,]]=fieldin[i-1,fieldin.mask[i,]]
else:
fieldin=field.reshape(fs)
return fieldin
| [
"[email protected]"
]
| |
aa478f8db29f79c33c87d31990ab69000a083ef4 | ec153cf6c65b02d8d714e042bbdcf476001c6332 | /openstack_dashboard/enabled/_802_metadata_defs.py | 715c3f57adb1b772f3513ae78202a66c8feed358 | []
| no_license | bopopescu/dashboard | c4322f7602a9ba589400212aaef865ed4ffa8bdb | a74b4a549cd7d516dd9a0f5f2e17d06679c13bf6 | refs/heads/master | 2022-11-21T15:56:42.755310 | 2017-07-05T12:04:14 | 2017-07-05T12:04:17 | 281,596,428 | 0 | 0 | null | 2020-07-22T06:38:37 | 2020-07-22T06:38:36 | null | UTF-8 | Python | false | false | 406 | py | # The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'metadata_defs'
# The slug of the dashboard the PANEL associated with. Required.
PANEL_DASHBOARD = 'system'
# The slug of the panel group the PANEL is associated with.
PANEL_GROUP = 'default'
# Python panel class of the PANEL to be added.
ADD_PANEL = ('openstack_dashboard.dashboards.admin.metadata_defs.panel.MetadataDefinitions')
| [
"[email protected]"
]
| |
a629ff545360e6bd157e394d377cbc1f1330141e | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_6/mtttaf002/question1.py | 9e72945c0d743ddcf7d64cd2596254bb5b69226b | []
| no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | """produce right aligned list of names
tafara mtutu
20 apr 2014"""
names = []
count = 0
aligned = []
sort = ""
#ask user for names
print("Enter strings (end with DONE):")
name = input()
while name.lower() != "done":
if count < len(name):
count = len(name)
names.append(name)
name = input()
#make length of equal to the length of longest string
for i in names:
sort = " "*(count-len(i)) + i
aligned.append(sort)
print()
print("Right-aligned list:")
for j in aligned:
print(j)
| [
"[email protected]"
]
| |
e5500f8613dd97c63af38a515d3fcaed24f1edfc | ef3fe422fc5644ce37cef2e8eb47a615e0865f27 | /0x00-python_variable_annotations/100-safe_first_element.py | a68a172a7b3aeffd93fd5ece78bd0461e3d8fca2 | []
| no_license | Manuelpv17/holbertonschool-web_back_end | b1b6d993b378f60e3d2312079b49fb059a2e14a7 | c4c60bf08648a8e9c846147808b6a7fbd9a818a7 | refs/heads/main | 2023-08-27T11:10:50.496692 | 2021-10-17T16:54:21 | 2021-10-17T16:54:21 | 366,537,768 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 306 | py | #!/usr/bin/env python3
""" 10. Duck typing - first element of a sequence """
from typing import Sequence, Union, Any
def safe_first_element(lst: Sequence[Any]) -> Union[Any, None]:
""" 10. Duck typing - first element of a sequence """
if lst:
return lst[0]
else:
return None
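# Examples: safe_first_element([1, 2, 3]) -> 1 ; safe_first_element([]) -> None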
| [
"[email protected]"
]
| |
fdc09392606dbaa4da061b3a530db0f87a8dc68c | 8771c94dce3c7e30c9e5b5f45cf8683ba9cac6fd | /leetcode/algorithms/p0338_counting_bits_1.py | 369900a44f586dcd107afb5c442e1ac2172ed57f | []
| no_license | J14032016/LeetCode-Python | f2a80ecb7822cf12a8ae1600e07e4e6667204230 | 9a8f5329d7c48dd34de3105c88afb5e03c2aace4 | refs/heads/master | 2023-03-12T02:55:45.094180 | 2021-03-07T07:55:03 | 2021-03-07T07:55:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from typing import List
class Solution:
def countBits(self, num: int) -> List[int]:
return [self._hammingWeight(x) for x in range(num + 1)]
def _hammingWeight(self, n: int) -> int:
count = 0
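        # Kernighan's trick: n & (n - 1) clears the lowest set bit, so the
        # loop below iterates once per set bit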
while n > 0:
n = n & (n - 1)
count += 1
return count
| [
"[email protected]"
]
| |
10d914f403ac5bfd4aacc7330c3db318947f429e | e20ed90b9be7a0bcdc1603929d65b2375a224bf6 | /generated-libraries/python/netapp/net/net_ifgrp_info.py | 51fb53a5a5d184165370e0966a17a0a5662d4247 | [
"MIT"
]
| permissive | radekg/netapp-ontap-lib-gen | 530ec3248cff5ead37dc2aa47ced300b7585361b | 6445ebb071ec147ea82a486fbe9f094c56c5c40d | refs/heads/master | 2016-09-06T17:41:23.263133 | 2015-01-14T17:40:46 | 2015-01-14T17:40:46 | 29,256,898 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,702 | py | from netapp.netapp_object import NetAppObject
class NetIfgrpInfo(NetAppObject):
"""
Network interface group information
When returned as part of the output, all elements of this typedef
are reported, unless limited by a set of desired attributes
specified by the caller.
<p>
When used as input to specify desired attributes to return,
omitting a given element indicates that it shall not be returned
in the output. In contrast, by providing an element (even with
no value) the caller ensures that a value for that element will
be returned, given that the value can be retrieved.
<p>
When used as input to specify queries, any element can be omitted
in which case the resulting set of objects is not constrained by
any specific value of that attribute.
"""
_node = None
@property
def node(self):
"""
Specifies the name of node.
Attributes: key, required-for-create, non-modifiable
"""
return self._node
@node.setter
def node(self, val):
if val != None:
self.validate('node', val)
self._node = val
_up_ports = None
@property
def up_ports(self):
"""
Specifies all active ports of an ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._up_ports
@up_ports.setter
def up_ports(self, val):
if val != None:
self.validate('up_ports', val)
self._up_ports = val
_down_ports = None
@property
def down_ports(self):
"""
Specifies all inactive ports of an ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._down_ports
@down_ports.setter
def down_ports(self, val):
if val != None:
self.validate('down_ports', val)
self._down_ports = val
_mac_address = None
@property
def mac_address(self):
"""
Specifies the MAC address of the ifgrp.
For example: '02:0c:29:78:e1:b7'
Attributes: non-creatable, non-modifiable
"""
return self._mac_address
@mac_address.setter
def mac_address(self, val):
if val != None:
self.validate('mac_address', val)
self._mac_address = val
_ifgrp_name = None
@property
def ifgrp_name(self):
"""
Specifies the interface group name.
Attributes: key, required-for-create, non-modifiable
"""
return self._ifgrp_name
@ifgrp_name.setter
def ifgrp_name(self, val):
if val != None:
self.validate('ifgrp_name', val)
self._ifgrp_name = val
_mode = None
@property
def mode(self):
"""
Specifies the link policy for the ifgrp.
Possible values:
<ul>
<li> 'multimode - All links are simultaneously
active',
<li> 'multimode_lacp - Link state is managed by the
switch using link aggregation control protocol (LACP)
(IEEE 802.3ad)',
<li> 'singlemode - Only one link is active at a
time'
</ul>
Attributes: required-for-create, non-modifiable
"""
return self._mode
@mode.setter
def mode(self, val):
if val != None:
self.validate('mode', val)
self._mode = val
_port_participation = None
@property
def port_participation(self):
"""
Port participation state of the ifgrp.
Attributes: non-creatable, non-modifiable
Possible values:
<ul>
<li> "full" - Indicates all the ifgrp ports are
active,
<li> "partial" - Indicates not all the ifgrp ports
are active,
<li> "none" - Indicates none of the ifgrp ports is
active
</ul>
"""
return self._port_participation
@port_participation.setter
def port_participation(self, val):
if val != None:
self.validate('port_participation', val)
self._port_participation = val
_ports = None
@property
def ports(self):
"""
List of ports associated with this ifgrp.
Attributes: non-creatable, non-modifiable
"""
return self._ports
@ports.setter
def ports(self, val):
if val != None:
self.validate('ports', val)
self._ports = val
_distribution_function = None
@property
def distribution_function(self):
"""
Specifies the traffic distribution function for the
ifgrp.
Attributes: required-for-create, non-modifiable
Possible values:
<ul>
<li> "mac" - Network traffic is distributed
on the basis of MAC addresses,
<li> "ip" - Network traffic is distributed
on the basis of IP addresses,
<li> "sequential" - Network traffic is distributed
round-robin to each interface,
<li> "port" - Network traffic is distributed
by transport layer address 4-tuple
</ul>
"""
return self._distribution_function
@distribution_function.setter
def distribution_function(self, val):
if val != None:
self.validate('distribution_function', val)
self._distribution_function = val
@staticmethod
def get_api_name():
return "net-ifgrp-info"
@staticmethod
def get_desired_attrs():
return [
'node',
'up-ports',
'down-ports',
'mac-address',
'ifgrp-name',
'mode',
'port-participation',
'ports',
'distribution-function',
]
def describe_properties(self):
return {
'node': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'up_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'down_ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'mac_address': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'ifgrp_name': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'mode': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'port_participation': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'ports': { 'class': basestring, 'is_list': True, 'required': 'optional' },
'distribution_function': { 'class': basestring, 'is_list': False, 'required': 'optional' },
}
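# Hedged usage sketch (illustrative values only; assumes the NetAppObject base
# class imported above can be instantiated without arguments):
# info = NetIfgrpInfo()
# info.node = 'cluster-node-01'
# info.ifgrp_name = 'a0a'
# info.mode = 'multimode_lacp'
# info.distribution_function = 'ip'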
| [
"[email protected]"
]
| |
b86128aee5418c0b7ac108bd068d443064cc3ec0 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_sermon.py | 40a9742cbaf0299a9d7ec6767d646bfc24b37d57 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 537 | py |
#calss header
class _SERMON():
def __init__(self,):
self.name = "SERMON"
self.definitions = [u'a part of a Christian church ceremony in which a priest gives a talk on a religious or moral subject, often based on something written in the Bible: ', u'a long talk in which someone advises other people how they should behave in order to be better people: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
]
| |
22f3faef32fd2f8ae1bcc30a8ea70d09546bc638 | 4a0f8c5c0e8324fa614da776f2a704b5c369ccbb | /Contact_maps/DeepLearning/DeepLearningTool/DL_contact_matrix_load2-new10fold_10_30_2014_server_8.py | 2dffb397ed45e0cfeb71b74a0cd9c63856f5453e | []
| no_license | magic2du/contact_matrix | 9f8ae868d71e7e5c8088bf22a9407ea3eb073be6 | 957e2ead76fabc0299e36c1435162edd574f4fd5 | refs/heads/master | 2021-01-18T21:15:07.341341 | 2015-09-16T02:14:53 | 2015-09-16T02:14:53 | 24,237,641 | 0 | 0 | null | 2015-09-10T19:58:24 | 2014-09-19T16:48:37 | null | UTF-8 | Python | false | false | 40,758 | py |
# coding: utf-8
# In[3]:
import sys, os
sys.path.append('../../../libs/')
import os.path
import IO_class
from IO_class import FileOperator
from sklearn import cross_validation
import sklearn
import numpy as np
import csv
from dateutil import parser
from datetime import timedelta
from sklearn import svm
import pandas as pd
import pdb
import pickle
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import KFold
from sklearn import preprocessing
import scipy.stats as ss
from sklearn.svm import LinearSVC
import random
from DL_libs import *
from itertools import izip #new
import math
from sklearn.svm import SVC
# In[4]:
#filename = 'SUCCESS_log_CrossValidation_load_DL_remoteFisherM1_DL_RE_US_DL_RE_US_1_1_19MAY2014.txt'
#filename = 'listOfDDIsHaveOver2InterfacesHave40-75_Examples_2010_real_selected.txt' #for testing
# set settings for this script
settings = {}
settings['filename'] = 'ddi_examples_40_60_over2top_diff_name_2014.txt'
settings['fisher_mode'] = 'FisherM1'
settings['predicted_score'] = False
settings['reduce_ratio'] = 8
settings['SVM'] = 1
settings['DL'] = 1
settings['SAE_SVM'] = 0
settings['SVM_RBF'] = 0
settings['DL_S'] = 0
settings['DL_U'] = 1
settings['finetune_lr'] = 1
settings['batch_size'] = 100
settings['pretraining_interations'] = 5008
settings['pretrain_lr'] = 0.001
settings['training_epochs'] = 1508
settings['hidden_layers_sizes'] = [100, 100]
settings['corruption_levels'] = [0,0]
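# hidden_layers_sizes and corruption_levels appear to configure a two-layer
# stacked (denoising) autoencoder in DL_libs; a corruption level of 0 is
# assumed to disable input corruption during pretraining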
filename = settings['filename']
file_obj = FileOperator(filename)
ddis = file_obj.readStripLines()
import logging
import time
current_date = time.strftime("%m_%d_%Y")
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logname = 'log_DL_contact_matrix_load' + current_date + '.log'
handler = logging.FileHandler(logname)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
logger.info('Input DDI file: ' + filename)
#logger.debug('This message should go to the log file')
for key, value in settings.items():
logger.info(key +': '+ str(value))
# In[5]:
ddis
# In[28]:
class DDI_family_base(object):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/du/Documents/Vectors_Fishers_aaIndex_raw_2014/'):
#def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/home/sun/Downloads/contactmatrix/contactmatrixanddeeplearningcode/data_test/'):
def __init__(self, ddi, Vectors_Fishers_aaIndex_raw_folder = '/big/du/Protein_Protein_Interaction_Project/Contact_Matrix_Project/Vectors_Fishers_aaIndex_raw_2014_paper/'):
""" get total number of sequences in a ddi familgy
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
LOO_data['FisherM1'][1]
"""
self.ddi = ddi
self.Vectors_Fishers_aaIndex_raw_folder = Vectors_Fishers_aaIndex_raw_folder
self.ddi_folder = self.Vectors_Fishers_aaIndex_raw_folder + ddi + '/'
self.total_number_of_sequences = self.get_total_number_of_sequences()
self.raw_data = {}
self.positve_negative_number = {}
self.equal_size_data = {}
for seq_no in range(1, self.total_number_of_sequences+1):
self.raw_data[seq_no] = self.get_raw_data_for_selected_seq(seq_no)
try:
#positive_file = self.ddi_folder + 'numPos_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(positive_file)
#lines = file_obj.readStripLines()
#import pdb; pdb.set_trace()
count_pos = int(np.sum(self.raw_data[seq_no][:, -1]))
count_neg = self.raw_data[seq_no].shape[0] - count_pos
#self.positve_negative_number[seq_no] = {'numPos': int(float(lines[0]))}
#assert int(float(lines[0])) == count_pos
self.positve_negative_number[seq_no] = {'numPos': count_pos}
#negative_file = self.ddi_folder + 'numNeg_'+ str(seq_no) + '.txt'
#file_obj = FileOperator(negative_file)
#lines = file_obj.readStripLines()
#self.positve_negative_number[seq_no]['numNeg'] = int(float(lines[0]))
self.positve_negative_number[seq_no]['numNeg'] = count_neg
except Exception,e:
print ddi, seq_no
print str(e)
logger.info(ddi + str(seq_no))
logger.info(str(e))
# get data for equal positive and negative
n_pos = self.positve_negative_number[seq_no]['numPos']
n_neg = self.positve_negative_number[seq_no]['numNeg']
index_neg = range(n_pos, n_pos + n_neg)
random.shuffle(index_neg)
index_neg = index_neg[: n_pos]
positive_examples = self.raw_data[seq_no][ : n_pos, :]
negative_examples = self.raw_data[seq_no][index_neg, :]
self.equal_size_data[seq_no] = np.vstack((positive_examples, negative_examples))
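            # equal_size_data[seq_no]: all positive pairs followed by an
            # equally sized random sample of negatives for this sequence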
def get_LOO_training_and_reduced_traing(self, seq_no, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get the leave one out traing data, reduced traing
Parameters:
seq_no:
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_LOO = np.array([])
train_y_LOO = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
total_number_of_sequences = self.total_number_of_sequences
equal_size_data_selected_sequence = self.equal_size_data[seq_no]
#get test data for selected sequence
test_X, test_y = self.select_X_y(equal_size_data_selected_sequence, fisher_mode = fisher_mode)
total_sequences = range(1, total_number_of_sequences+1)
loo_sequences = [i for i in total_sequences if i != seq_no]
number_of_reduced = len(loo_sequences)/reduce_ratio if len(loo_sequences)/reduce_ratio !=0 else 1
random.shuffle(loo_sequences)
reduced_sequences = loo_sequences[:number_of_reduced]
#for loo data
for current_no in loo_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_LOO.ndim ==1:
train_X_LOO = current_X
else:
train_X_LOO = np.vstack((train_X_LOO, current_X))
train_y_LOO = np.concatenate((train_y_LOO, current_y))
#for reduced data
for current_no in reduced_sequences:
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y)
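    # Illustrative call (names as above): the LOO set holds every other
    # sequence; the reduced set is a 1/reduce_ratio subsample of it:
    #   (X_loo, y_loo), (X_red, y_red), (X_te, y_te) = \
    #       family.get_LOO_training_and_reduced_traing(1, 'FisherM1', 8)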
#def get_ten_fold_crossvalid_one_subset(self, start_subset, end_subset, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
def get_ten_fold_crossvalid_one_subset(self, train_index, test_index, fisher_mode = 'FisherM1ONLY' , reduce_ratio = 4):
""" get traing data, reduced traing data for 10-fold crossvalidation
Parameters:
start_subset: index of start of the testing data
end_subset: index of end of the testing data
fisher_mode: default 'FisherM1ONLY'
Returns:
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
"""
train_X_10fold = np.array([])
train_y_10fold = np.array([])
train_X_reduced = np.array([])
train_y_reduced = np.array([])
test_X = np.array([])
test_y = np.array([])
total_number_of_sequences = self.total_number_of_sequences
#get test data for selected sequence
#for current_no in range(start_subset, end_subset):
for num in test_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if test_X.ndim ==1:
test_X = current_X
else:
test_X = np.vstack((test_X, current_X))
test_y = np.concatenate((test_y, current_y))
#total_sequences = range(1, total_number_of_sequences+1)
#ten_fold_sequences = [i for i in total_sequences if not(i in range(start_subset, end_subset))]
#number_of_reduced = len(ten_fold_sequences)/reduce_ratio if len(ten_fold_sequences)/reduce_ratio !=0 else 1
#random.shuffle(ten_fold_sequences)
#reduced_sequences = ten_fold_sequences[:number_of_reduced]
number_of_reduced = len(train_index)/reduce_ratio if len(train_index)/reduce_ratio !=0 else 1
random.shuffle(train_index)
reduced_sequences = train_index[:number_of_reduced]
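        # the reduced training set keeps 1/reduce_ratio of the training
        # sequences (at least one), chosen via the in-place shuffle above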
#for 10-fold cross-validation data
#for current_no in ten_fold_sequences:
for num in train_index:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_10fold.ndim ==1:
train_X_10fold = current_X
else:
train_X_10fold = np.vstack((train_X_10fold, current_X))
train_y_10fold = np.concatenate((train_y_10fold, current_y))
#for reduced data
for num in reduced_sequences:
current_no = num + 1
raw_current_data = self.equal_size_data[current_no]
current_X, current_y = self.select_X_y(raw_current_data, fisher_mode = fisher_mode)
if train_X_reduced.ndim ==1:
train_X_reduced = current_X
else:
train_X_reduced = np.vstack((train_X_reduced, current_X))
train_y_reduced = np.concatenate((train_y_reduced, current_y))
return (train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y)
def get_total_number_of_sequences(self):
""" get total number of sequences in a ddi familgy
Parameters:
ddi: string
Vectors_Fishers_aaIndex_raw_folder: string
Returns:
n: int
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path +'allPairs.txt'
all_pairs = np.loadtxt(filename)
return len(all_pairs)
def get_raw_data_for_selected_seq(self, seq_no):
""" get raw data for selected seq no in a family
Parameters:
ddi:
seq_no:
Returns:
data: raw data in the sequence file
"""
folder_path = self.Vectors_Fishers_aaIndex_raw_folder + self.ddi + '/'
filename = folder_path + 'F0_20_F1_20_Sliding_17_11_F0_20_F1_20_Sliding_17_11_ouput_'+ str(seq_no) + '.txt'
data = np.loadtxt(filename)
return data
def select_X_y(self, data, fisher_mode = ''):
""" select subset from the raw input data set
Parameters:
data: data from matlab txt file
fisher_mode: subset base on this Fisher of AAONLY...
Returns:
selected X, y
"""
y = data[:,-1] # get lable
if fisher_mode == 'FisherM1': # fisher m1 plus AA index
a = data[:, 20:227]
b = data[:, 247:454]
X = np.hstack((a,b))
elif fisher_mode == 'FisherM1ONLY':
a = data[:, 20:40]
b = data[:, 247:267]
X = np.hstack((a,b))
elif fisher_mode == 'AAONLY':
a = data[:, 40:227]
b = data[:, 267:454]
X = np.hstack((a,b))
else:
            raise ValueError('unknown fisher_mode: %s' % fisher_mode)
return X, y
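    # Column layout implied by the slices above: 0-19/227-246 are skipped by
    # every mode, 20-39/247-266 hold the FisherM1-only features,
    # 40-226/267-453 the AA-index features, and the last column is the label.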
# In[28]:
# In[29]:
import sklearn.preprocessing
def performance_score(target_label, predicted_label, predicted_score = False, print_report = True):
""" get performance matrix for prediction
Attributes:
target_label: int 0, 1
predicted_label: 0, 1 or ranking
        predicted_score: bool if False, predicted_label is from 0, 1. If True, predicted_label is ranked, need to get AUC score.
        print_report: if True, print the performance on screen
"""
import sklearn
from sklearn.metrics import roc_auc_score
score = {}
if predicted_score == False:
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if predicted_score == True:
auc_score = roc_auc_score(target_label, predicted_label)
score['auc_score'] = auc_score
        # binarize the ranked predictions before computing the label-based metrics
        predicted_label = [x >= 0.5 for x in predicted_label]
score['accuracy'] = sklearn.metrics.accuracy_score(target_label, predicted_label)
score['precision'] = sklearn.metrics.precision_score(target_label, predicted_label, pos_label=1)
score['recall'] = sklearn.metrics.recall_score(target_label, predicted_label, pos_label=1)
if print_report == True:
for key, value in score.iteritems():
print key, '{percent:.1%}'.format(percent=value)
return score
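# Illustrative call: performance_score([1, 0, 1], [0.9, 0.2, 0.7],
# predicted_score=True) returns 'auc_score' plus thresholded
# accuracy/precision/recall in one dict.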
def saveAsCsv(predicted_score, fname, score_dict, *arguments): #new
newfile = False
if os.path.isfile('report_' + fname + '.csv'):
pass
else:
newfile = True
csvfile = open('report_' + fname + '.csv', 'a+')
writer = csv.writer(csvfile)
if newfile == True:
if predicted_score == False:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest']+ score_dict.keys()) #, 'AUC'])
else:
writer.writerow(['DDI', 'no.', 'FisherMode', 'method', 'isTest'] + score_dict.keys())
for arg in arguments:
writer.writerows(arg)
csvfile.close()
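# The report is opened in append mode and the header row is written only when
# report_<fname>.csv does not exist yet, so repeated runs accumulate rows.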
def LOO_out_performance_for_all(ddis):
for ddi in ddis:
try:
one_ddi_family = LOO_out_performance_for_one_ddi(ddi)
one_ddi_family.get_LOO_perfermance(settings = settings)
except Exception,e:
print str(e)
logger.info("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
class LOO_out_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_LOO_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
predicted_score = settings['predicted_score']
reduce_ratio = settings['reduce_ratio']
for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
print seq_no
logger.info('sequence number: ' + str(seq_no))
if settings['SVM']:
print "SVM"
(train_X_LOO, train_y_LOO),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_LOO_training_and_reduced_traing(seq_no,fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# Deep learning part
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_LOO)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
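            # cal_epochs converts the iteration budget into whole epochs for
            # this training-set size and batch size (helper from DL_libs)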
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['DL']:
print "direct deep learning"
# direct deep learning
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
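            # NOTE: the unlabeled-data pretraining variant below is
            # hard-disabled ('if 0') in this LOO path; the ten-fold path
            # gates the same code on settings['DL_U'] instead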
if 0:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs_for_reduced = cal_epochs(1500, pretraining_X_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs_for_reduced,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
            if settings['DL_S']:
# deep learning using split network
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
                new_x_test_minmax_whole, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, seq_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
report_name = filename + '_' + '_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' +str(training_epochs) + '_' + current_date
saveAsCsv(predicted_score, report_name, performance_score(y_test, test_predicted, predicted_score), analysis_scr)
# In[29]:
# In[30]:
#for 10-fold cross validation
def ten_fold_crossvalid_performance_for_all(ddis):
for ddi in ddis:
try:
process_one_ddi_tenfold(ddi)
except Exception,e:
print str(e)
logger.debug("There is a error in this ddi: %s" % ddi)
logger.info(str(e))
def process_one_ddi_tenfold(ddi):
"""A function to waste CPU cycles"""
logger.info('DDI: %s' % ddi)
one_ddi_family = {}
one_ddi_family[ddi] = Ten_fold_crossvalid_performance_for_one_ddi(ddi)
one_ddi_family[ddi].get_ten_fold_crossvalid_perfermance(settings=settings)
return None
class Ten_fold_crossvalid_performance_for_one_ddi(object):
""" get the performance of ddi families
Attributes:
ddi: string ddi name
Vectors_Fishers_aaIndex_raw_folder: string, folder
total_number_of_sequences: int
raw_data: dict raw_data[2]
"""
def __init__(self, ddi):
self.ddi_obj = DDI_family_base(ddi)
self.ddi = ddi
def get_ten_fold_crossvalid_perfermance(self, settings = None):
fisher_mode = settings['fisher_mode']
analysis_scr = []
predicted_score = settings['predicted_score']
reduce_ratio = settings['reduce_ratio']
#for seq_no in range(1, self.ddi_obj.total_number_of_sequences+1):
#subset_size = math.floor(self.ddi_obj.total_number_of_sequences / 10.0)
kf = KFold(self.ddi_obj.total_number_of_sequences, n_folds = 10, shuffle = True)
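        # KFold yields 0-based index arrays; the loops below add 1 to map them
        # onto the 1-based sequence numbering used by DDI_family_base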
#for subset_no in range(1, 11):
for ((train_index, test_index),subset_no) in izip(kf,range(1,11)):
#for train_index, test_index in kf;
print("Subset:", subset_no)
print("Train index: ", train_index)
print("Test index: ", test_index)
#logger.info('subset number: ' + str(subset_no))
if settings['SVM']:
print "SVM"
(train_X_10fold, train_y_10fold),(train_X_reduced, train_y_reduced), (test_X, test_y) = self.ddi_obj.get_ten_fold_crossvalid_one_subset(train_index, test_index, fisher_mode = fisher_mode, reduce_ratio = reduce_ratio)
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(scaled_train_X, train_y_reduced)
predicted_test_y = Linear_SVC.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['SVM_RBF']:
print "SVM_RBF"
standard_scaler = preprocessing.StandardScaler().fit(train_X_reduced)
scaled_train_X = standard_scaler.transform(train_X_reduced)
scaled_test_X = standard_scaler.transform(test_X)
L1_SVC_RBF_Selector = SVC(C=1, gamma=0.01, kernel='rbf').fit(scaled_train_X, train_y_reduced)
predicted_test_y = L1_SVC_RBF_Selector.predict(scaled_test_X)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = L1_SVC_RBF_Selector.predict(scaled_train_X)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SVM_RBF', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
# direct deep learning
min_max_scaler = Preprocessing_Scaler_with_mean_point5()
X_train_pre_validation_minmax = min_max_scaler.fit(train_X_reduced)
X_train_pre_validation_minmax = min_max_scaler.transform(train_X_reduced)
x_test_minmax = min_max_scaler.transform(test_X)
pretraining_X_minmax = min_max_scaler.transform(train_X_10fold)
x_train_minmax, x_validation_minmax, y_train_minmax, y_validation_minmax = train_test_split(X_train_pre_validation_minmax,
train_y_reduced
, test_size=0.4, random_state=42)
finetune_lr = settings['finetune_lr']
batch_size = settings['batch_size']
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
#pretrain_lr=0.001
pretrain_lr = settings['pretrain_lr']
training_epochs = settings['training_epochs']
hidden_layers_sizes= settings['hidden_layers_sizes']
corruption_levels = settings['corruption_levels']
if settings['SAE_SVM']:
# SAE_SVM
print 'SAE followed by SVM'
x = X_train_pre_validation_minmax
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(X_train_pre_validation_minmax)
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax)
Linear_SVC = LinearSVC(C=1, penalty="l2")
Linear_SVC.fit(new_x_train_minmax_A, train_y_reduced)
predicted_test_y = Linear_SVC.predict(new_x_test_minmax_A)
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(test_y, predicted_test_y).values())) #new
predicted_train_y = Linear_SVC.predict(new_x_train_minmax_A)
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'SAE_SVM', isTest) + tuple(performance_score(train_y_reduced, predicted_train_y).values()))
if settings['DL']:
print "direct deep learning"
sda = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_train, training_predicted).values()))
test_predicted = sda.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL', isTest) + tuple(performance_score(y_test, test_predicted).values()))
if settings['DL_U']:
# deep learning using unlabeled data for pretraining
print 'deep learning with unlabel data'
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
sda_unlabel = trainSda(x_train_minmax, y_train_minmax,
x_validation_minmax, y_validation_minmax ,
x_test_minmax, test_y,
pretraining_X_minmax = pretraining_X_minmax,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_unlabel.predict(x_train_minmax)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_unlabel.predict(x_test_minmax)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_U', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
if settings['DL_S']:
# deep learning using split network
print 'deep learning using split network'
# get the new representation for A set. first 784-D
pretraining_epochs = cal_epochs(settings['pretraining_interations'], x_train_minmax, batch_size = batch_size)
x = x_train_minmax[:, :x_train_minmax.shape[1]/2]
print "original shape for A", x.shape
a_MAE_A = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_A = a_MAE_A.transform(x_train_minmax[:, :x_train_minmax.shape[1]/2])
x = x_train_minmax[:, x_train_minmax.shape[1]/2:]
print "original shape for B", x.shape
a_MAE_B = train_a_MultipleAEs(x, pretraining_epochs=pretraining_epochs, pretrain_lr=pretrain_lr, batch_size=batch_size,
hidden_layers_sizes =hidden_layers_sizes, corruption_levels=corruption_levels)
new_x_train_minmax_B = a_MAE_B.transform(x_train_minmax[:, x_train_minmax.shape[1]/2:])
new_x_test_minmax_A = a_MAE_A.transform(x_test_minmax[:, :x_test_minmax.shape[1]/2])
new_x_test_minmax_B = a_MAE_B.transform(x_test_minmax[:, x_test_minmax.shape[1]/2:])
new_x_validation_minmax_A = a_MAE_A.transform(x_validation_minmax[:, :x_validation_minmax.shape[1]/2])
new_x_validation_minmax_B = a_MAE_B.transform(x_validation_minmax[:, x_validation_minmax.shape[1]/2:])
new_x_train_minmax_whole = np.hstack((new_x_train_minmax_A, new_x_train_minmax_B))
new_x_test_minmax_whole = np.hstack((new_x_test_minmax_A, new_x_test_minmax_B))
new_x_validationt_minmax_whole = np.hstack((new_x_validation_minmax_A, new_x_validation_minmax_B))
sda_transformed = trainSda(new_x_train_minmax_whole, y_train_minmax,
new_x_validationt_minmax_whole, y_validation_minmax ,
                new_x_test_minmax_whole, test_y,
hidden_layers_sizes = hidden_layers_sizes, corruption_levels = corruption_levels, batch_size = batch_size , \
training_epochs = training_epochs, pretraining_epochs = pretraining_epochs,
pretrain_lr = pretrain_lr, finetune_lr=finetune_lr
)
print 'hidden_layers_sizes:', hidden_layers_sizes
print 'corruption_levels:', corruption_levels
training_predicted = sda_transformed.predict(new_x_train_minmax_whole)
y_train = y_train_minmax
isTest = False; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_train, training_predicted, predicted_score).values()))
test_predicted = sda_transformed.predict(new_x_test_minmax_whole)
y_test = test_y
isTest = True; #new
analysis_scr.append((self.ddi, subset_no, fisher_mode, 'DL_S', isTest) + tuple(performance_score(y_test, test_predicted, predicted_score).values()))
report_name = filename + '_' + '_test10fold_'.join(map(str, hidden_layers_sizes)) + '_' + str(pretrain_lr) + '_' + str(finetune_lr) + '_' + str(reduce_ratio)+ '_' + str(training_epochs) + '_' + current_date
saveAsCsv(predicted_score, report_name, performance_score(y_test, test_predicted, predicted_score), analysis_scr)
# In[1]:
ten_fold_crossvalid_performance_for_all(ddis[:])
# In[ ]:
#LOO_out_performance_for_all(ddis)
# In[25]:
# detach and close this script's log handlers so the log file is flushed
for handler in logger.handlers[:]:
    logger.removeHandler(handler)
    handler.flush()
    handler.close()
# In[ ]:
| [
"[email protected]"
]
| |
6a650aa632dd74a7959663c87c0517cf884812e8 | 1a775a01341a17da07f520b2e28166b0aeda801e | /moneysocket1/encoding/namespace.py | a31167730706a9511fd132c00f71f73f60597b21 | [
"MIT"
]
| permissive | moneysocket/py-moneysocket | cbf75cf79600abcb576b8a3144e23520b5ce19f9 | 0392b4b34d5d1ea8762d0a14c6d260b971f72597 | refs/heads/main | 2023-05-12T22:33:08.574271 | 2021-06-11T21:33:04 | 2021-06-11T21:33:04 | 314,912,605 | 2 | 3 | MIT | 2021-06-11T21:33:05 | 2020-11-21T22:11:12 | Python | UTF-8 | Python | false | false | 10,693 | py | # Copyright (c) 2021 Moneysocket Developers
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php
from .convert import b2i, b2h, h2i, h2b, i2h, i2b
from .tlv import Tlv
class Namespace:
"""
Represents a specific namespace of TLVs as referred to in BOLT 1 and
provides generic pop helpers for the fundamental types defined here:
https://github.com/lightningnetwork/lightning-rfc/blob/master/\
01-messaging.md#fundamental-types
"""
@staticmethod
def pop_tlv(byte_string):
return Tlv.pop(byte_string)
@staticmethod
def tlvs_are_valid(byte_string):
while len(byte_string) > 0:
_, byte_string, err = Namespace.pop_tlv(byte_string)
if err:
return False
return True
@staticmethod
def iter_tlvs(byte_string):
assert Namespace.tlvs_are_valid(byte_string), "bad byte_string?"
while len(byte_string) > 0:
tlv, byte_string, _ = Tlv.pop(byte_string)
yield tlv
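    # Illustrative round trip using the helpers defined below:
    #   stream = Namespace.encode_tlv(1, Namespace.encode_tu64(42))
    #   types = [tlv.t for tlv in Namespace.iter_tlvs(stream)]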
###########################################################################
@staticmethod
def encode_bytes(hex_string):
return h2b(hex_string)
@staticmethod
def pop_bytes(n_bytes, byte_string):
if len(byte_string) < n_bytes:
return None, None, "underrun while popping bytes"
return b2h(byte_string[:n_bytes]), byte_string[n_bytes:], None
###########################################################################
@staticmethod
def pop_u8(byte_string):
if len(byte_string) < 1:
return None, None, "underrun while popping a u8"
return b2i(byte_string[:1]), byte_string[1:], None
@staticmethod
def pop_u16(byte_string):
if len(byte_string) < 2:
return None, None, "underrun while popping a u16"
return b2i(byte_string[:2]), byte_string[2:], None
@staticmethod
def pop_u32(byte_string):
if len(byte_string) < 4:
return None, None, "underrun while popping a u32"
return b2i(byte_string[:4]), byte_string[4:], None
@staticmethod
def pop_u64(byte_string):
if len(byte_string) < 8:
return None, None, "underrun while popping a u64"
return b2i(byte_string[:8]), byte_string[8:], None
###########################################################################
@staticmethod
def encode_u8(value):
return i2b(value, 1)
@staticmethod
def encode_u16(value):
return i2b(value, 2)
@staticmethod
def encode_u32(value):
return i2b(value, 4)
@staticmethod
def encode_u64(value):
return i2b(value, 8)
###########################################################################
    @staticmethod
    def minimal_tu_bytes(int_value):
assert int_value <= 0xffffffffffffffff, "value too big for encoding"
if int_value == 0:
return 0
if int_value <= 0xff:
return 1
if int_value <= 0xffff:
return 2
if int_value <= 0xffffff:
return 3
if int_value <= 0xffffffff:
return 4
if int_value <= 0xffffffffff:
return 5
if int_value <= 0xffffffffffff:
return 6
if int_value <= 0xffffffffffffff:
return 7
if int_value <= 0xffffffffffffffff:
return 8
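    # tu* (truncated) integers must omit leading zero bytes, so the byte count
    # above is the unique minimal encoding; the pop_tu* helpers below reject
    # non-minimal input accordingly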
@staticmethod
def pop_tu16(n_bytes, byte_string):
if n_bytes > 2:
return None, None, "cannot pop more than 2 bytes for a tu16"
if len(byte_string) < n_bytes:
return None, None, "underrun while popping tu16"
if n_bytes == 0:
return 0, byte_string, None
val = b2i(byte_string[:n_bytes])
if n_bytes != Namespace.minimal_tu_bytes(val):
return None, None, "not minimal encoding for value"
return val, byte_string[n_bytes:], None
@staticmethod
def pop_tu32(n_bytes, byte_string):
if n_bytes > 4:
return None, None, "cannot pop more than 4 bytes for a tu32"
if len(byte_string) < n_bytes:
return None, None, "underrun while popping tu32"
if n_bytes == 0:
return 0, byte_string, None
val = b2i(byte_string[:n_bytes])
if n_bytes != Namespace.minimal_tu_bytes(val):
return None, None, "not minimal encoding for value"
return val, byte_string[n_bytes:], None
@staticmethod
def pop_tu64(n_bytes, byte_string):
if n_bytes > 8:
return None, None, "cannot pop more than 8 bytes for a tu64"
if len(byte_string) < n_bytes:
return None, None, "underrun while popping tu62"
if n_bytes == 0:
return 0, byte_string, None
val = b2i(byte_string[:n_bytes])
if n_bytes != Namespace.minimal_tu_bytes(val):
return None, None, "not minimal encoding for value"
return val, byte_string[n_bytes:], None
###########################################################################
@staticmethod
def encode_tu(value):
n_bytes = Namespace.minimal_tu_bytes(value)
if n_bytes == 0:
return b''
return i2b(value, n_bytes)
@staticmethod
def encode_tu16(value):
assert value <= 0xffff, "value too big for tu16"
return Namespace.encode_tu(value)
@staticmethod
def encode_tu32(value):
assert value <= 0xffffffff, "value too big for tu32"
return Namespace.encode_tu(value)
@staticmethod
def encode_tu64(value):
assert value <= 0xffffffffffffffff, "value too big for tu64"
return Namespace.encode_tu(value)
###########################################################################
@staticmethod
def pop_chain_hash(byte_string):
if not len(byte_string) >= 32:
return None, None, "underrun while popping chain_hash"
        return b2h(byte_string[:32]), byte_string[32:], None
@staticmethod
def pop_channel_id(byte_string):
if not len(byte_string) >= 32:
return None, None, "underrun while popping channel_id"
return b2h(byte_string[:32]), byte_string[32:], None
@staticmethod
    def pop_sha256(byte_string):
        # a sha256 digest is 32 bytes
        if not len(byte_string) >= 32:
            return None, None, "underrun while popping sha256"
        return b2h(byte_string[:32]), byte_string[32:], None
@staticmethod
def pop_signature(byte_string):
if not len(byte_string) >= 64:
return None, None, "underrun while popping signature"
return b2h(byte_string[:64]), byte_string[64:], None
@staticmethod
def pop_point(byte_string):
if not len(byte_string) >= 33:
return None, None, "underrun wihle popping point"
point = b2h(byte_string[:33])
if not point.startswith("02") and not point.startswith("03"):
return None, None, "not valid compressed point"
return point, byte_string[33:], None
@staticmethod
def pop_short_channel_id(byte_string):
if not len(byte_string) >= 8:
return None, None, "underrun while popping short_channel_id"
block_height = b2i(byte_string[:3])
tx_index = b2i(byte_string[3:6])
output_index = b2i(byte_string[6:8])
formatted = "%dx%dx%d" % (block_height, tx_index, output_index)
return formatted, byte_string[8:], None
###########################################################################
@staticmethod
def encode_short_channel_id(short_channel_id):
values = short_channel_id.split("x")
assert len(values) == 3, "not a short_channel_id string"
try:
block_height = int(values[0])
tx_index = int(values[1])
output_index = int(values[2])
except:
assert False, "not a short_channel_id string"
return i2b(block_height, 3) + i2b(tx_index, 3) + i2b(output_index, 2)
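    # e.g. encode_short_channel_id("539268x845x1") packs block height, tx index
    # and output index into 3 + 3 + 2 bytes (values illustrative)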
###########################################################################
@staticmethod
def encode_tlv(t, v):
return Tlv(t, v).encode()
###########################################################################
@staticmethod
def parse_tlv(tlv, tlv_parsers):
if tlv.t not in tlv_parsers:
return {"tlv_type_name": "unknown",
"type": tlv.t,
"value": b2h(tlv.v)}, None
return False, "TLV type has no defined parser function"
parsed_tlv, err = tlv_parsers[tlv.t](tlv)
if err:
return False, err
assert 'tlv_type_name' in parsed_tlv, ("subclass parser must name the "
"parsed tlv type")
return parsed_tlv, None
@staticmethod
def parse_tlvs(tlvs, tlv_parsers):
parsed_tlvs = {}
for tlv in tlvs:
parsed_tlv, err = Namespace.parse_tlv(tlv, tlv_parsers)
if err:
return None, err
parsed_tlvs[tlv.t] = parsed_tlv
return parsed_tlvs, None
@staticmethod
def _has_unknown_even_types(tlvs, tlv_parsers):
present = set(tlv.t for tlv in tlvs)
known = set(tlv_parsers.keys())
unknown = present.difference(known)
for t in list(unknown):
if t % 2 == 0:
return True
return False
@staticmethod
def _ordered_ascending(tlvs):
if len(tlvs) == 0:
return True
if len(tlvs) == 1:
return True
max_type = tlvs[0].t
for tlv in tlvs[1:]:
if tlv.t <= max_type:
return False
max_type = tlv.t
return True
@staticmethod
def _has_duplicates(tlvs):
types = [tlv.t for tlv in tlvs]
dedupe = set(tlv.t for tlv in tlvs)
return len(types) != len(dedupe)
@staticmethod
def parse(byte_string, tlv_parsers):
if not Namespace.tlvs_are_valid(byte_string):
return None, "tlvs are not valid"
tlvs = list(Namespace.iter_tlvs(byte_string))
if Namespace._has_unknown_even_types(tlvs, tlv_parsers):
return None, "got unknown even type tlv for Namespace"
if Namespace._has_duplicates(tlvs):
return None, "duplicate TLVs in stream"
if not Namespace._ordered_ascending(tlvs):
return None, "tlvs values not ascending"
parsed_tlvs, err = Namespace.parse_tlvs(tlvs, tlv_parsers)
if err:
return None, err
return parsed_tlvs, None
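# Minimal parse sketch (hypothetical parser table):
#   def parse_example(tlv):
#       return {"tlv_type_name": "example", "value": b2h(tlv.v)}, None
#   parsed, err = Namespace.parse(stream, {1: parse_example})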
| [
"[email protected]"
]
| |
8adb355b8d8850f4f2de49b4f36daf51077ab7e9 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/surface/compute/target_pools/create.py | 8e3acd78a8ae36cdba093c5765105c6b9efc81bf | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 6,524 | py | # Copyright 2014 Google Inc. All Rights Reserved.
"""Command for creating target pools."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import utils
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.third_party.apis.compute.v1 import compute_v1_messages
SESSION_AFFINITIES = sorted(
compute_v1_messages.TargetPool.SessionAffinityValueValuesEnum
.to_dict().keys())
class Create(base_classes.BaseAsyncCreator):
"""Define a load-balanced pool of virtual machine instances.
*{command}* is used to create a target pool. A target pool resource
defines a group of instances that can receive incoming traffic
from forwarding rules. When a forwarding rule directs traffic to a
target pool, Google Compute Engine picks an instance from the
target pool based on a hash of the source and
destination IP addresses and ports. For more
information on load balancing, see
link:https://cloud.google.com/compute/docs/load-balancing-and-autoscaling/[].
To add instances to a target pool, use 'gcloud compute
target-pools add-instances'.
"""
@staticmethod
def Args(parser):
backup_pool = parser.add_argument(
'--backup-pool',
help='Defines the fallback pool for the target pool.')
backup_pool.detailed_help = """\
Together with ``--failover-ratio'', this flag defines the fallback
behavior of the target pool (primary pool) to be created by this
command. If the ratio of the healthy instances in the primary pool
is at or below the specified ``--failover-ratio value'', then traffic
arriving at the load-balanced IP address will be directed to the
backup pool. If this flag is provided, then ``--failover-ratio'' is
required.
"""
parser.add_argument(
'--description',
help='An optional description of this target pool.')
failover_ratio = parser.add_argument(
'--failover-ratio',
type=float,
help=('The ratio of healthy instances below which the backup pool '
'will be used.'))
failover_ratio.detailed_help = """\
Together with ``--backup-pool'', defines the fallback behavior of the
target pool (primary pool) to be created by this command. If the
ratio of the healthy instances in the primary pool is at or below this
number, traffic arriving at the load-balanced IP address will be
directed to the backup pool. For example, if 0.4 is chosen as the
failover ratio, then traffic will fail over to the backup pool if
more than 40% of the instances become unhealthy.
If not set, the traffic will be directed the
instances in this pool in the ``force'' mode, where traffic will be
spread to the healthy instances with the best effort, or to all
instances when no instance is healthy.
If this flag is provided, then ``--backup-pool'' is required.
"""
health_check = parser.add_argument(
'--health-check',
help=('Specifies HttpHealthCheck to determine the health of instances '
'in the pool.'),
metavar='HEALTH_CHECK')
health_check.detailed_help = """\
Specifies an HTTP health check resource to use to determine the health
of instances in this pool. If no health check is specified, traffic will
be sent to all instances in this target pool as if the instances
were healthy, but the health status of this pool will appear as
unhealthy as a warning that this target pool does not have a health
check.
"""
utils.AddRegionFlag(
parser,
resource_type='target pool',
operation_type='create')
session_affinity = parser.add_argument(
'--session-affinity',
choices=SESSION_AFFINITIES,
type=lambda x: x.upper(),
default='NONE',
help='The session affinity option for the target pool.')
session_affinity.detailed_help = """\
Specifies the session affinity option for the connection.
If ``NONE'' is selected, then connections from the same client
IP address may go to any instance in the target pool.
If ``CLIENT_IP'' is selected, then connections
from the same client IP address will go to the same instance
in the target pool.
If ``CLIENT_IP_PROTO'' is selected, then connections from the same
client IP with the same IP protocol will go to the same client pool.
If not specified, then ``NONE'' is used as a default.
"""
parser.add_argument(
'name',
help='The name of the target pool.')
@property
def service(self):
return self.compute.targetPools
@property
def method(self):
return 'Insert'
@property
def resource_type(self):
return 'targetPools'
def CreateRequests(self, args):
"""Returns a list of requests necessary for adding a target pool."""
if ((args.backup_pool and not args.failover_ratio) or
(args.failover_ratio and not args.backup_pool)):
raise calliope_exceptions.ToolException(
'Either both or neither of [--failover-ratio] and [--backup-pool] '
'must be provided.')
if args.failover_ratio is not None:
if args.failover_ratio < 0 or args.failover_ratio > 1:
raise calliope_exceptions.ToolException(
'[--failover-ratio] must be a number between 0 and 1, inclusive.')
if args.health_check:
health_check = [self.CreateGlobalReference(
args.health_check, resource_type='httpHealthChecks').SelfLink()]
else:
health_check = []
target_pool_ref = self.CreateRegionalReference(args.name, args.region)
if args.backup_pool:
backup_pool_uri = self.CreateRegionalReference(
args.backup_pool, target_pool_ref.region).SelfLink()
else:
backup_pool_uri = None
request = self.messages.ComputeTargetPoolsInsertRequest(
targetPool=self.messages.TargetPool(
backupPool=backup_pool_uri,
description=args.description,
failoverRatio=args.failover_ratio,
healthChecks=health_check,
name=target_pool_ref.Name(),
sessionAffinity=(
self.messages.TargetPool.SessionAffinityValueValuesEnum(
args.session_affinity))),
region=target_pool_ref.region,
project=self.project)
return [request]
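# Illustrative invocation of this subcommand (flags as defined above):
#   gcloud compute target-pools create my-pool --region us-central1 \
#       --health-check my-http-check --backup-pool my-backup --failover-ratio 0.4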
| [
"[email protected]"
]
| |
0b52a7d8625cdde3d880fe9de03a47671ea10878 | 6e8d58340f2be5f00d55e2629052c0bbc9dcf390 | /tools/data_source/microbial_import_code.py | 4efa96a13a738387c857a1897e50eaa3739530c4 | [
"CC-BY-2.5",
"MIT"
]
| permissive | JCVI-Cloud/galaxy-tools-prok | e57389750d33ac766e1658838cdb0aaf9a59c106 | 3c44ecaf4b2e1f2d7269eabef19cbd2e88b3a99c | refs/heads/master | 2021-05-02T06:23:05.414371 | 2014-03-21T18:12:43 | 2014-03-21T18:12:43 | 6,092,693 | 0 | 2 | NOASSERTION | 2020-07-25T20:38:17 | 2012-10-05T15:57:38 | Python | UTF-8 | Python | false | false | 7,420 | py |
def load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' ):
# FIXME: this function is duplicated in the DynamicOptions class. It is used here only to
# set data.name in exec_after_process().
microbe_info= {}
orgs = {}
filename = "%s/microbial_data.loc" % GALAXY_DATA_INDEX_DIR
for i, line in enumerate( open( filename ) ):
line = line.rstrip( '\r\n' )
if line and not line.startswith( '#' ):
fields = line.split( sep )
#read each line, if not enough fields, go to next line
try:
info_type = fields.pop(0)
if info_type.upper() == "ORG":
#ORG 12521 Clostridium perfringens SM101 bacteria Firmicutes CP000312,CP000313,CP000314,CP000315 http://www.ncbi.nlm.nih.gov/entrez/query.fcgi?db=genomeprj&cmd=Retrieve&dopt=Overview&list_uids=12521
org_num = fields.pop(0)
name = fields.pop(0)
kingdom = fields.pop(0)
group = fields.pop(0)
chromosomes = fields.pop(0)
info_url = fields.pop(0)
link_site = fields.pop(0)
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
orgs[ org_num ][ 'name' ] = name
orgs[ org_num ][ 'kingdom' ] = kingdom
orgs[ org_num ][ 'group' ] = group
orgs[ org_num ][ 'chromosomes' ] = chromosomes
orgs[ org_num ][ 'info_url' ] = info_url
orgs[ org_num ][ 'link_site' ] = link_site
elif info_type.upper() == "CHR":
#CHR 12521 CP000315 Clostridium perfringens phage phiSM101, complete genome 38092 110684521 CP000315.1
org_num = fields.pop(0)
chr_acc = fields.pop(0)
name = fields.pop(0)
length = fields.pop(0)
gi = fields.pop(0)
gb = fields.pop(0)
info_url = fields.pop(0)
chr = {}
chr[ 'name' ] = name
chr[ 'length' ] = length
chr[ 'gi' ] = gi
chr[ 'gb' ] = gb
chr[ 'info_url' ] = info_url
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
orgs[ org_num ][ 'chrs' ][ chr_acc ] = chr
elif info_type.upper() == "DATA":
#DATA 12521_12521_CDS 12521 CP000315 CDS bed /home/djb396/alignments/playground/bacteria/12521/CP000315.CDS.bed
uid = fields.pop(0)
org_num = fields.pop(0)
chr_acc = fields.pop(0)
feature = fields.pop(0)
filetype = fields.pop(0)
path = fields.pop(0)
data = {}
data[ 'filetype' ] = filetype
data[ 'path' ] = path
data[ 'feature' ] = feature
if org_num not in orgs:
orgs[ org_num ] = {}
orgs[ org_num ][ 'chrs' ] = {}
if 'data' not in orgs[ org_num ][ 'chrs' ][ chr_acc ]:
orgs[ org_num ][ 'chrs' ][ chr_acc ][ 'data' ] = {}
orgs[ org_num ][ 'chrs' ][ chr_acc ][ 'data' ][ uid ] = data
else: continue
except: continue
for org_num in orgs:
org = orgs[ org_num ]
if org[ 'kingdom' ] not in microbe_info:
microbe_info[ org[ 'kingdom' ] ] = {}
if org_num not in microbe_info[ org[ 'kingdom' ] ]:
microbe_info[ org[ 'kingdom' ] ][org_num] = org
return microbe_info
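# Resulting shape (illustrative):
#   microbe_info[kingdom][org_num]['chrs'][chr_acc]['data'][uid]
#       -> {'filetype': ..., 'path': ..., 'feature': ...}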
#post processing, set build for data and add additional data to history
from galaxy import datatypes, config, jobs, tools
from shutil import copyfile
def exec_after_process(app, inp_data, out_data, param_dict, tool, stdout, stderr):
base_dataset = out_data.items()[0][1]
history = base_dataset.history
if history == None:
print "unknown history!"
return
kingdom = param_dict.get( 'kingdom', None )
#group = param_dict.get( 'group', None )
org = param_dict.get( 'org', None )
#if not (kingdom or group or org):
if not (kingdom or org):
print "Parameters are not available."
#workflow passes galaxy.tools.parameters.basic.UnvalidatedValue instead of values
if isinstance( kingdom, tools.parameters.basic.UnvalidatedValue ):
kingdom = kingdom.value
if isinstance( org, tools.parameters.basic.UnvalidatedValue ):
org = org.value
GALAXY_DATA_INDEX_DIR = app.config.tool_data_path
microbe_info = load_microbial_data( GALAXY_DATA_INDEX_DIR, sep='\t' )
new_stdout = ""
split_stdout = stdout.split("\n")
basic_name = ""
for line in split_stdout:
fields = line.split("\t")
if fields[0] == "#File1":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
file_type = fields[4]
name, data = out_data.items()[0]
data.set_size()
basic_name = data.name
data.name = data.name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] +" for " + microbe_info[kingdom][org]['name'] + ":" + chr + ")"
data.dbkey = dbkey
data.info = data.name
data = app.datatypes_registry.change_datatype( data, file_type )
data.init_meta()
data.set_peek()
app.model.context.add( data )
app.model.context.flush()
elif fields[0] == "#NewFile":
description = fields[1]
chr = fields[2]
dbkey = fields[3]
filepath = fields[4]
file_type = fields[5]
newdata = app.model.HistoryDatasetAssociation( create_dataset = True, sa_session = app.model.context ) #This import should become a library
newdata.set_size()
newdata.extension = file_type
newdata.name = basic_name + " (" + microbe_info[kingdom][org]['chrs'][chr]['data'][description]['feature'] +" for "+microbe_info[kingdom][org]['name']+":"+chr + ")"
app.model.context.add( newdata )
app.model.context.flush()
app.security_agent.copy_dataset_permissions( base_dataset.dataset, newdata.dataset )
history.add_dataset( newdata )
app.model.context.add( history )
app.model.context.flush()
try:
copyfile(filepath,newdata.file_name)
newdata.info = newdata.name
newdata.state = jobs.JOB_OK
except:
newdata.info = "The requested file is missing from the system."
newdata.state = jobs.JOB_ERROR
newdata.dbkey = dbkey
newdata.init_meta()
newdata.set_peek()
app.model.context.flush()
| [
"[email protected]"
]
| |
0768b2e247703f696bc61b8a9841da4430449517 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/cylicRot_20200714235552.py | df3ed8e6be65ea59b21b9730c1d84306bfff2615 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 467 | py | # given an array rotate it k times to the right
def rotate(A,K):
# first I'd rotate the array once
# so how do we rotate the array
# we move the last element to the firs place and
# the rest follow suit
# moving elements to the right in an array
# [3,8,9,7,6]
# [6,3,8,9,7]
    if not A:
        return A
    # rotate right once = move the last element to the front; repeat K times
    for _ in range(K % len(A)):
        A.insert(0, A.pop())
    return A
print(rotate([3, 8, 9, 7, 6], 3))
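# expected output: [9, 7, 6, 3, 8]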
| [
"[email protected]"
]
| |
46dc15bb2d04819454a71d40ffa2011043e35239 | f8f70ed663ffccf61a739332697da5c97a41b9cf | /setup.py | 43d9b2c1a862d1468af29616515d7b25aedf5b33 | [
"MIT"
]
| permissive | kanurag94/rstcheck | 123116993d9d33e3efdbafe889de94b48dd2cbe8 | fdf2d324bf20357fd47f7579c58fec693f71a120 | refs/heads/master | 2020-06-22T17:04:39.016288 | 2019-07-21T18:21:20 | 2019-07-21T18:21:20 | 197,751,048 | 0 | 0 | MIT | 2019-07-19T10:10:13 | 2019-07-19T10:10:13 | null | UTF-8 | Python | false | false | 1,415 | py | #!/usr/bin/env python
"""Installer for rstcheck."""
import ast
import io
import setuptools
def version():
"""Return version string."""
with io.open('rstcheck.py', encoding='utf-8') as input_file:
for line in input_file:
if line.startswith('__version__'):
return ast.parse(line).body[0].value.s
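# version() parses the __version__ assignment out of rstcheck.py with ast,
# avoiding importing the module (and its dependencies) at build time.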
with io.open('README.rst', encoding='utf-8') as readme:
setuptools.setup(
name='rstcheck',
version=version(),
url='https://github.com/myint/rstcheck',
description='Checks syntax of reStructuredText and code blocks nested '
'within it',
long_description=readme.read(),
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Quality Assurance',
],
keywords='restructuredtext,lint,check,pypi,readme,rst,analyze',
py_modules=['rstcheck'],
entry_points={'console_scripts': ['rstcheck = rstcheck:main']},
install_requires=['docutils >= 0.7'])
| [
"[email protected]"
]
| |
76243e46b0928b11c197e4e1c939786c01d8cf63 | 72bc1c9c8d5dd0b185fa4444ac4d6d721d097480 | /cooperative/analysis/analyse_cooperative.py | 0af1fa202c00721ee6ffe8d97996ea2447abf993 | []
| no_license | PeppaYao/shepherding-problem | ad54e5051d193f71e6301d9d94d8f2b0a05d8b50 | 15e199f0fb771891bcbfb804d653b95e8c141c59 | refs/heads/main | 2023-05-06T01:20:05.874427 | 2021-05-26T01:34:17 | 2021-05-26T01:34:17 | 305,695,862 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | import numpy as np
import matplotlib.pyplot as plt
# Cooperation: farthest-distance selection + driving
# Cooperation: largest-angle selection + driving
fig, ax = plt.subplots()
X = np.arange(40, 51)
Y = np.array([1442.000, 1191.000, 1495.000, 1266.000, 1110.000, 563.000, 594.000, 545.000, 533.000, 641.000, 676.000, ])
Y2 = np.array([4001.000, 2179.000, 2610.000, 4001.000, 4001.000, 2017.000, 1599.000, 3604.000, 1222.000, 2871.000, 4001.000, ])
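# Assumption: Y/Y2 are per-flock-size measurements for the two strategies
# plotted below ("mam", "sppl"); the repeated 4001.000 values presumably mark
# runs that hit a step cap.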
plt.plot(X, Y, 'purple', label="mam")
plt.plot(X, Y2, 'darkcyan', label="sppl")
plt.xlabel("the number of sheep")
plt.ylabel("dispersion")
# plt.xticks(np.arange(0, 100, 10))
plt.legend()
plt.xlim(40, 51)
plt.ylim(0, 4200)
plt.grid()
plt.show() | [
"[email protected]"
]
| |
0693ba5058ee6afecaa80396bfe052d8f61a5d6e | 242d8d05e457107bed4c539b9cbd117d2733614d | /untitled1.py | 6163b8dfaa7cfea4d89ae31f5995c63a8706940c | []
| no_license | lucmilot/datastage | b4a7abd17cec360db2fc814eddf26174ab807b9b | 5f06b74e87d25cee1c98394e4593200579cb18d6 | refs/heads/master | 2021-05-18T16:47:16.100681 | 2020-03-30T13:54:06 | 2020-03-30T13:54:06 | 251,323,513 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 3 13:43:46 2018
@author: XT21586
"""
import win32com.client as win32
pathx = "C:\\Users\\XT21586\Documents\\document\\Data Stage\\python\\build\\exe.win-amd64-3.6\\"
excel = win32.gencache.EnsureDispatch('Excel.Application')
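# EnsureDispatch builds and caches early-bound COM wrappers for Excel's type
# library (pywin32 behavior), unlike plain Dispatch, which late-binds.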
outfilxls1 = pathx + "result1.xls"
wb = excel.Workbooks.Open(outfilxls1)
#try:
# wb = excel.Workbooks.Open(outfilxls1)
#except:
# print (outfilxls1 +" is already open!")
excel.Visible = True | [
"[email protected]"
]
| |
9356a925249fc9974103fcf1d00723517c16e27b | 5a2297cff798f4ac03255a803a25177d19235020 | /ipcrawler/spiders/xiciScrapy.py | da60480513a59924f4be0effc632c0fcfe596ecc | []
| no_license | yidun55/ipcrawler | 0d51184922470483f277ec4d1f40c2920f7b0bc5 | 7dd804bb687df57139addd63fe5e1284fea93e2d | refs/heads/master | 2021-01-19T07:31:08.804177 | 2015-07-28T09:15:51 | 2015-07-28T09:15:51 | 39,173,638 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | #!usr/bin/env python
#coding: utf-8
"""
Crawl each company's patent information from the Patent Office website.
(Leftover description: the spider below actually scrapes proxy IP:port lists from xici.)
"""
from scrapy.spider import Spider
from scrapy.http import Request
from scrapy import log
from scrapy import Selector
import sys
from ipcrawler.items import *
reload(sys)
sys.setdefaultencoding("utf-8")
class patenttest(Spider):
# download_delay=20
name = 'xici'
start_urls = ['http://www.xici.net.co/nn']
def parse(self, response):
"""
        Get the total number of pages
"""
urls = ["http://www.xici.net.co/nn/"+str(i) for\
i in xrange(1, 204)]
for url in urls:
yield Request(url, callback=self.detail,\
dont_filter=True)
def detail(self, response):
sel = Selector(text=response.body)
ips = sel.xpath("//table[@id='ip_list']/tr[position()>1]\
/td[3]/text()").extract()
ports = sel.xpath("//table[@id='ip_list']/tr[position()>1]\
/td[4]/text()").extract()
scheme = sel.xpath("//table[@id='ip_list']/tr[position()>1]\
/td[7]/text()").extract()
if len(ips) == len(ports):
te = zip(ips,ports)
last = zip(scheme,[":".join(item) for item in te])
last = [(item[0].lower(),item[1]) for item in last]
ip_port = ["://".join(item)+"\n" for item in last]
ips_ports = "".join(ip_port)
print ips_ports
item = IpcrawlerItem()
item['content'] = ips_ports
return item
else:
log.msg("error in xpath",level=log.ERROR)
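# Each emitted item's 'content' field is a newline-separated list of proxies
# in '<scheme>://<ip>:<port>' form, e.g. 'http://1.2.3.4:8080'.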
| [
"[email protected]"
]
| |
1867e8a3098592e90d6acaeabf4754755bba7650 | e79888cd68177e7ec5125270cdc52f888e211e78 | /kiyuna/chapter05/knock45.py | cf7d05c2a04832c8e87b16b0bf9d6c051cc71b0d | []
| no_license | cafenoctua/100knock2019 | ec259bee27936bdacfe0097d42f23cc7500f0a07 | 88717a78c4290101a021fbe8b4f054f76c9d3fa6 | refs/heads/master | 2022-06-22T04:42:03.939373 | 2019-09-03T11:05:19 | 2019-09-03T11:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,036 | py | '''
45. Extraction of verb case patterns
Treating the text used here as a corpus, we want to investigate the cases that Japanese predicates can take.
Consider a verb to be a predicate and the particles of the clauses depending on the verb to be its cases,
and output predicates and cases in tab-separated format, satisfying the following specification:
- In a clause containing verbs, take the base form of the leftmost verb as the predicate
- Take the particles depending on the predicate as its cases
- When several particles (clauses) depend on the predicate, list all particles in dictionary order, separated by spaces
Consider the example sentence 「吾輩はここで始めて人間というものを見た」 (the 8th sentence of neko.txt.cabocha).
This sentence contains the two verbs 「始める」 and 「見る」; if the clause depending on 「始める」 is parsed as
「ここで」, and the clauses depending on 「見る」 as 「吾輩は」 and 「ものを」, the output should be:
始める で
見る は を
Save the output of this program to a file and check the following with UNIX commands:
- Predicate / case-pattern combinations that occur frequently in the corpus
- The case patterns of the verbs 「する」「見る」「与える」 (sorted by frequency of occurrence in the corpus)
'''
import sys
from knock41 import cabocha_into_chunks, Chunk
def message(text):
sys.stderr.write(f"\33[92m{text}\33[0m\n")
class Chunk_normalized(Chunk):
def __init__(self, chunk):
self.morphs, self.dst, self.srcs = (*chunk,)
self.norm = self.norm()
def norm(self):
clause = ''.join(m.surface for m in self.morphs if m.pos != '記号')
return clause
def has_pos(self, pos):
for m in self.morphs:
if m.pos == pos:
return True
return False
def get_pos(self, pos):
res = []
for m in self.morphs:
if m.pos == pos:
res.append(m)
return res
if __name__ == '__main__':
res = []
for chunks in cabocha_into_chunks():
chunks = tuple(map(Chunk_normalized, chunks.values()))
for dc in chunks:
if not dc.has_pos('動詞'):
continue
srcs = []
for sc_idx in dc.srcs:
                # Treat particles depending on the predicate as cases
for m in chunks[sc_idx].get_pos('助詞'):
srcs.append(m.base)
            # In a clause containing verbs, take the base form of the leftmost verb as the predicate
base = dc.get_pos('動詞')[0].base
            # When several particles (clauses) depend on the predicate,
            # list all particles in dictionary order, separated by spaces
srcs.sort()
particles = " ".join(srcs)
if srcs:
res.append(f'{base}\t{particles}\n')
sys.stdout.writelines(res)
    message(f'{len(res)} lines written')
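# Verification sketch for the UNIX-command part of the task (assumes the
# output of this script was redirected to a file named case_patterns.txt):
#   sort case_patterns.txt | uniq -c | sort -nr | head
#   grep -E '^(する|見る|与える)\t' case_patterns.txt | sort | uniq -c | sort -nr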
| [
"[email protected]"
]
| |
ba5d6ca0a74e5b6778ad6e411e41aefd456ae06c | 5cdbdc84b04c511a59ba649b64466d0ebe29f266 | /config.py | a829585f69f893dea66b03d7e873500875699602 | []
| no_license | Shatnerz/glad | a3298503231e4b8529d5b47e57b8279d67960ae7 | c58d44358c81a529559fc94825f911ea4ccb0e26 | refs/heads/master | 2021-05-16T03:21:05.146678 | 2020-03-09T14:56:57 | 2020-03-09T14:56:57 | 42,134,456 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 514 | py |
import ConfigParser
class BasicConfig(object):
def __init__(self, filename):
self.parser = ConfigParser.RawConfigParser()
self.parser.read(filename)
    def get(self, section, option):
        return self.parser.get(section, option)
    def getBoolean(self, section, option):
        return self.parser.getboolean(section, option)
    def getResolution(self, section, option):
s = self.get(section, option)
l = s.split('x')
l = [int(x) for x in l]
return tuple(l)
| [
"devnull@localhost"
]
| devnull@localhost |
cc63c6d723f156472557a419377fda74f8a1e977 | cfb4e8721137a096a23d151f2ff27240b218c34c | /mypower/matpower_ported/lib/toggle_reserves.py | 58caa28e588ddccd4c28b9ff179664528955fe48 | [
"Apache-2.0"
]
| permissive | suryo12/mypower | eaebe1d13f94c0b947a3c022a98bab936a23f5d3 | ee79dfffc057118d25f30ef85a45370dfdbab7d5 | refs/heads/master | 2022-11-25T16:30:02.643830 | 2020-08-02T13:16:20 | 2020-08-02T13:16:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | def toggle_reserves(*args,nout=1,oc=None):
    if oc is None:
from ...oc_matpower import oc_matpower
oc = oc_matpower()
return oc.toggle_reserves(*args,nout=nout)
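# Dispatch note: 'oc' is expected to be an Octave-bridge object (oc_matpower
# appears to wrap the MATLAB/Octave matpower code base); 'nout' selects how
# many output arguments the underlying Octave function should return.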
| [
"[email protected]"
]
| |
169fc535e0b99ab762810b308d9274646618d9a1 | 9e2f24027e4044252639563461116a895acce039 | /biosteam/units/_vent_scrubber.py | 6dcb1e61a54f0310f69b12cd9ae417fef47bd40b | [
"LicenseRef-scancode-unknown-license-reference",
"MIT",
"NCSA"
]
| permissive | yalinli2/biosteam | 5010b5d430cc746f6fa00a23805a1c1f5cac7a81 | e7385ca1feac642881a357ffbc4461382549c3a4 | refs/heads/master | 2022-03-20T23:57:06.824292 | 2022-02-22T15:55:11 | 2022-02-22T15:55:11 | 190,422,353 | 0 | 0 | MIT | 2019-06-05T15:39:04 | 2019-06-05T15:39:03 | null | UTF-8 | Python | false | false | 1,150 | py | # -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, Yoel Cortes-Pena <[email protected]>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
from .decorators import cost
from .. import Unit
__all__ = ('VentScrubber',)
@cost('Flow rate', units='kg/hr',
S=22608, CE=522, cost=215e3, n=0.6, BM=2.4)
class VentScrubber(Unit):
_N_ins = _N_outs = 2
_units = {'Flow rate': 'kg/hr'}
def __init__(self, ID='', ins=None, outs=(), thermo=None, *, gas):
Unit.__init__(self, ID, ins, outs, thermo)
self.gas = gas
def _run(self):
water, vent_entry = self.ins
vent_exit, bottoms = self.outs
vent_exit.copy_like(vent_entry)
bottoms.empty()
bottoms.copy_flow(vent_exit, self.gas,
remove=True, exclude=True)
bottoms.mix_from([bottoms, water], energy_balance=False)
def _design(self):
self.design_results['Flow rate'] = self._ins[1].F_mass | [
"[email protected]"
]
| |
e576d9e8d6f40fda097536aead9d3ee9a9634d63 | f7c8df084dabf0d9c5dfa6dd15322a9cd8beb587 | /misc/projecteuler/p0001.py | 0fe3b39646525f0cada498e9d384d8a0cfa36161 | []
| no_license | sahands/problem-solving | 6591464366bac635f53e0960eb5cd796bddaea8f | 04d17ee2f55babcb106fdddd56a1caf7b65df2db | refs/heads/master | 2021-01-02T09:15:38.686212 | 2014-10-23T02:26:08 | 2014-10-23T02:26:08 | 24,439,994 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | def sum_of_multiples(k, n):
"""Returns the sum of all multiples of k less than n."""
m = (n-1) // k
return k * (m * (m + 1)) / 2
if __name__ == '__main__':
n = 1000
a = sum_of_multiples(3, n)
b = sum_of_multiples(5, n)
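    # Inclusion-exclusion: multiples of 15 are counted in both sums above,
    # so they are subtracted once via sum_of_multiples(15, n).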
c = sum_of_multiples(15, n)
print a + b - c
| [
"[email protected]"
]
| |
0baf82b9755194c97a4cad88ec01c3161a46cf5e | d1f2a0473cc773986482607a4b1ee9de85627949 | /model/darknet53.py | ab670311e2203e3875bae29868dcd5078aa16dd0 | []
| no_license | choodly/PaddlePaddle_yolact | fcf8273a66ce5b1a464bd30f97e77bad5362ad65 | 7344e6fa98b5451dfe47e725f3c6aabf85e71d10 | refs/heads/master | 2022-11-21T00:40:41.692806 | 2020-07-13T03:00:22 | 2020-07-13T03:00:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,460 | py | #! /usr/bin/env python
# coding=utf-8
# ================================================================
#
# Author : miemie2013
# Created date: 2020-01-23 15:16:15
# Description : paddlepaddle_yolact++
#
# ================================================================
import paddle.fluid as fluid
import paddle.fluid.layers as P
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.regularizer import L2Decay
def conv2d_unit(x, filters, kernels, stride, padding, name, is_test, trainable):
x = P.conv2d(
input=x,
num_filters=filters,
filter_size=kernels,
stride=stride,
padding=padding,
act=None,
param_attr=ParamAttr(initializer=fluid.initializer.Normal(0.0, 0.01), name=name + ".conv.weights", trainable=trainable),
bias_attr=False)
bn_name = name + ".bn"
x = P.batch_norm(
input=x,
act=None,
is_test=is_test,
param_attr=ParamAttr(
initializer=fluid.initializer.Constant(1.0),
regularizer=L2Decay(0.),
trainable=trainable,
name=bn_name + '.scale'),
bias_attr=ParamAttr(
initializer=fluid.initializer.Constant(0.0),
regularizer=L2Decay(0.),
trainable=trainable,
name=bn_name + '.offset'),
moving_mean_name=bn_name + '.mean',
moving_variance_name=bn_name + '.var')
x = P.leaky_relu(x, alpha=0.1)
return x
def residual_block(inputs, filters, conv_start_idx, is_test, trainable):
x = conv2d_unit(inputs, filters, (1, 1), stride=1, padding=0, name='conv%.2d'% conv_start_idx, is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 2 * filters, (3, 3), stride=1, padding=1, name='conv%.2d'% (conv_start_idx+1), is_test=is_test, trainable=trainable)
x = P.elementwise_add(x=inputs, y=x, act=None)
return x
def stack_residual_block(inputs, filters, n, conv_start_idx, is_test, trainable):
x = residual_block(inputs, filters, conv_start_idx, is_test, trainable)
for i in range(n - 1):
x = residual_block(x, filters, conv_start_idx+2*(1+i), is_test, trainable)
return x
def DarkNet53(inputs, is_test, trainable):
    ''' None of the convolution layers uses a bias term (bias_attr=False). '''
x = conv2d_unit(inputs, 32, (3, 3), stride=1, padding=1, name='conv01', is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 64, (3, 3), stride=2, padding=1, name='conv02', is_test=is_test, trainable=trainable)
x = stack_residual_block(x, 32, n=1, conv_start_idx=3, is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 128, (3, 3), stride=2, padding=1, name='conv05', is_test=is_test, trainable=trainable)
x = stack_residual_block(x, 64, n=2, conv_start_idx=6, is_test=is_test, trainable=trainable)
x = conv2d_unit(x, 256, (3, 3), stride=2, padding=1, name='conv10', is_test=is_test, trainable=trainable)
s8 = stack_residual_block(x, 128, n=8, conv_start_idx=11, is_test=is_test, trainable=trainable)
x = conv2d_unit(s8, 512, (3, 3), stride=2, padding=1, name='conv27', is_test=is_test, trainable=trainable)
s16 = stack_residual_block(x, 256, n=8, conv_start_idx=28, is_test=is_test, trainable=trainable)
x = conv2d_unit(s16, 1024, (3, 3), stride=2, padding=1, name='conv44', is_test=is_test, trainable=trainable)
s32 = stack_residual_block(x, 512, n=4, conv_start_idx=45, is_test=is_test, trainable=trainable)
return s8, s16, s32
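# Minimal usage sketch (PaddlePaddle 1.x static-graph API; the input shape is
# an assumption, not taken from this repository):
#   images = P.data(name='image', shape=[-1, 3, 416, 416], dtype='float32')
#   s8, s16, s32 = DarkNet53(images, is_test=True, trainable=False)
# For a 416x416 input, the returned feature maps have strides 8/16/32, i.e.
# spatial sizes 52x52, 26x26 and 13x13 with 256, 512 and 1024 channels.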
| [
"[email protected]"
]
| |
fe25948466810e069367b21e9f97ea3d090e7d98 | a15200778946f6f181e23373525b02b65c44ce6e | /Algoritmi/2019-07-30/all-CMS-submissions/2019-07-30.09:06:48.930075.VR437056.tree_transcode_disc.py | 281a15f510d37afd02faf19c70d70113b6a9bc3c | []
| no_license | alberto-uni/portafoglioVoti_public | db518f4d4e750d25dcb61e41aa3f9ea69aaaf275 | 40c00ab74f641f83b23e06806bfa29c833badef9 | refs/heads/master | 2023-08-29T03:33:06.477640 | 2021-10-08T17:12:31 | 2021-10-08T17:12:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,986 | py | """
* user: VR437056
* fname: MARTINI
* lname: MICHELE
* task: tree_transcode_disc
* score: 50.0
* date: 2019-07-30 09:06:48.930075
"""
#!/usr/bin/env python3
# -*- coding: latin-1 -*-
from __future__ import print_function
import sys
sys.setrecursionlimit(100000)
if sys.version_info < (3, 0):
input = raw_input # in python2, raw_input svolge la funzione della primitiva input in python3
class Node:
def __init__(self, value=1, parent=None):
self.value = value
self.parent = parent
self.children = []
self.counter = 1
def set_parent(self, parent):
self.parent = parent
def add_child(self, child):
self.children += [child]
def add_pre_child(self, child):
self.children = [child] + self.children
def print_node(self):
print('value:', self.value, 'parent', end=" ")
if self.parent is None:
print('None', end=" ")
else:
print(self.parent.value, end=" ")
for child in self.children:
print(child.value, end=" ")
print()
def pre_visit(self, result):
result += str(self.value) + ' '
for child in self.children:
result = child.pre_visit(result)
return result
def post_visit(self, result):
for child in self.children:
result = child.post_visit(result)
result += str(self.value) + ' '
return result
MAX_N = 100
seq = list(map(int,input().split()))
if len(seq) < 2:
    sys.exit()
tree = [None] * MAX_N
last_node = 0
if seq[0] == 1:
# Tree root
tree[last_node] = Node(seq[1])
current_parent = tree[0]
# Building of the tree
for i in range(2, len(seq)):
while current_parent.counter == current_parent.value:
current_parent = current_parent.parent
current_parent.counter += seq[i]
last_node += 1
tree[last_node] = Node(seq[i], current_parent)
current_parent.add_child(tree[last_node])
if seq[i] > 1:
current_parent = tree[last_node]
# Print the result
print('2', tree[0].post_visit(''))
if seq[0] == 2:
# First node must have value 1
tree[last_node] = Node(seq[1])
for i in range(2, len(seq)):
if seq[i] > 1:
current_parent = Node(seq[i])
counter = 1
while counter < seq[i]:
# Create link parent-child
counter += tree[last_node].value
tree[last_node].set_parent(current_parent)
current_parent.add_pre_child(tree[last_node])
# Remove child from array
tree[last_node] = None
last_node -= 1
last_node += 1
tree[last_node] = current_parent
else:
last_node += 1
tree[last_node] = Node(seq[i])
print('1', tree[0].pre_visit(''))
#for i in range(last_node + 1):
# tree[i].print_node()
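# Format note (inferred from the two branches above): the first number of the
# input flags the encoding (1 = pre-order, 2 = post-order listing of subtree
# sizes); the program prints the same tree re-encoded in the opposite order.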
| [
"[email protected]"
]
| |
2b500c17ffc14a5693b584a428ff89f9e8c3bd15 | f5a4f340da539520c60c4bce08356c6f5c171c54 | /xrpl/asyncio/clients/__init__.py | 4b2ff4b3ba3d535a0b6861fb8890cb748c22e3eb | [
"ISC",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | yyolk/xrpl-py | e3935c0a0f488793153ca29e9d71c197cf88f857 | e5bbdf458ad83e6670a4ebf3df63e17fed8b099f | refs/heads/master | 2023-07-17T03:19:29.239838 | 2021-07-03T01:24:57 | 2021-07-03T01:24:57 | 355,299,041 | 1 | 0 | ISC | 2021-04-08T05:29:43 | 2021-04-06T18:57:06 | null | UTF-8 | Python | false | false | 639 | py | """Asynchronous network clients for interacting with the XRPL."""
from xrpl.asyncio.clients.async_json_rpc_client import AsyncJsonRpcClient
from xrpl.asyncio.clients.async_websocket_client import AsyncWebsocketClient
from xrpl.asyncio.clients.client import Client
from xrpl.asyncio.clients.exceptions import XRPLRequestFailureException
from xrpl.asyncio.clients.utils import (
    json_to_response,
    request_to_json_rpc,
    request_to_websocket,
    websocket_to_response,
)
__all__ = [
"AsyncJsonRpcClient",
"AsyncWebsocketClient",
"Client",
"json_to_response",
"request_to_json_rpc",
"XRPLRequestFailureException",
"request_to_websocket",
"websocket_to_response",
]
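# Minimal usage sketch (the endpoint URL is an assumption, not part of this
# module):
#   client = AsyncJsonRpcClient("https://s.altnet.rippletest.net:51234")
#   response = await client.request(some_request)  # inside an async function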
| [
"[email protected]"
]
| |
83dfa312d9bd9029ca0080502186ef133d6477f5 | e6bc1f55371786dad70313eb468a3ccf6000edaf | /Datasets/words-score/Correct/097.py | e9557ac613cf6b943e8bfe765d27f85eeb69ee64 | []
| no_license | prateksha/Source-Code-Similarity-Measurement | 9da92e3b22c372ed6ea54d8b6ab2c5921e8c41c0 | fb371b837917794d260a219a1ca09c46a5b15962 | refs/heads/master | 2023-01-04T07:49:25.138827 | 2020-10-25T14:43:57 | 2020-10-25T14:43:57 | 285,744,963 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 378 | py | def is_vowel(letter):
return letter in ['a', 'e', 'i', 'o', 'u', 'y']
def score_words(words):
#print (words)
score = 0
for word in words:
num_vowels = 0
for letter in word:
if is_vowel(letter):
num_vowels += 1
if num_vowels % 2 == 0:
score += 2
else:
            score += 1
return score | [
"[email protected]"
]
| |
5861b98d046738f027e1fc06dca64339dafa8a2d | 2bdff209f959d7b577494f6ac908d3700ffb9eb6 | /fractals.py | d43e792a24b5f521a6fa166147c3d1007df758cf | []
| no_license | simrit1/Fractals | 347ebb3867eb0fc3f99027a657197378323bb373 | 2d4b5ed05628f616c72eed996bf579d810b3065c | refs/heads/main | 2023-03-25T16:16:18.324205 | 2021-03-25T12:16:11 | 2021-03-25T12:16:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,683 | py | import pygame
import sys
import math
import colorsys
pygame.init()
WIDTH = 1920
HEIGHT = 1080
l_system_text = sys.argv[1]
start = int(sys.argv[2]), int(sys.argv[3])
length = int(sys.argv[4])
ratio = float(sys.argv[5])
with open(l_system_text) as f:
axiom = f.readline()
num_rules = int(f.readline())
rules = {}
for i in range(num_rules):
rule = f.readline().split(' ')
rules[rule[0]] = rule[1]
angle = math.radians(int(f.readline()))
class LSystem():
def __init__(self, axiom, rules, angle, start, length, ratio):
self.sentence = axiom
self.rules = rules
self.angle = angle
self.start = start
self.x, self.y = start
self.length = length
self.ratio = ratio
self.theta = math.pi / 2
self.positions = []
def __str__(self):
return self.sentence
def generate(self):
self.x, self.y = self.start
self.theta = math.pi / 2
self.length *= self.ratio
new_sentence = ""
for char in self.sentence:
mapped = char
try:
mapped = self.rules[char]
except:
pass
new_sentence += mapped
self.sentence = new_sentence
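    # One generate() call is a parallel rewrite pass: every character with an
    # entry in self.rules is replaced at once, e.g. rule F -> F+F--F+F grows a
    # Koch-curve string from axiom 'F' (example rule, not from a bundled file).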
def draw(self, screen):
hue = 0
for char in self.sentence:
if char == 'F':
x2 = self.x - self.length * math.cos(self.theta)
y2 = self.y - self.length * math.sin(self.theta)
pygame.draw.line(screen, (hsv2rgb(hue, 1, 1)), (self.x, self.y), (x2, y2), 2)
self.x, self.y = x2, y2
elif char == '+':
self.theta += self.angle
elif char == '-':
self.theta -= self.angle
elif char == '[':
self.positions.append({'x': self.x, 'y': self.y, 'theta': self.theta})
elif char == ']':
position = self.positions.pop()
self.x, self.y, self.theta = position['x'], position['y'], position['theta']
hue += 0.00005
def hsv2rgb(h, s, v):
return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(h, s, v))
def main():
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.mouse.set_visible(False)
fractal = LSystem(axiom, rules, angle, start, length, ratio)
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
keystate = pygame.key.get_pressed()
if keystate[pygame.K_SPACE]:
screen.fill((0, 0, 0))
fractal.draw(screen)
fractal.generate()
if keystate[pygame.K_ESCAPE]:
pygame.quit()
pygame.display.update()
main()
# Adrian-Mariano-Doily python fractals.py fractals/Adrian_Mariano_Doily.txt 1350 350 110 0.5
# Anthony-Hanmer-ADH231a python fractals.py fractals/Anthony_Hanmer_ADH231a.txt 960 1000 50 0.52
# Anthony-Hanmer-ADH256a python fractals.py fractals/Anthony_Hanmer_ADH256a.txt 650 850 50 0.55
# Anthony-Hanmer-ADH258a python fractals.py fractals/Anthony_Hanmer_ADH258a.txt 700 950 80 0.4
# Board python fractals.py fractals/board.txt 500 1000 100 0.52
# Box-fractal python fractals.py fractals/box-fractal.txt 1400 1000 100 0.52
# Classic-Sierpinski-curve python fractals.py fractals/classic-sierpinski-curve.txt 1150 750 30 0.5
# Cross python fractals.py fractals/cross.txt 950 250 250 0.5
# Crystal: python fractals.py fractals/crystal.txt 580 920 100 0.5
# Dragon-curve: python fractals.py fractals/dragon-curve.txt 960 540 200 0.75
# Hilbert-curve python fractals.py fractals/hilbert-curve.txt 1920 1080 250 0.67
# Hilbert-curve-II python fractals.py fractals/hilbert-curve-II.txt 0 1080 50 0.7
# Koch-snowflake: python fractals.py fractals/koch-snowflake.txt 1200 900 100 0.5
# Krishna-anklets python fractals.py fractals/krishna-anklets.txt 1400 550 60 0.8
# Levy-curve python fractals.py fractals/levy-curve.txt 1100 750 70 0.8
# Moore-curve python fractals.py fractals/moore-curve.txt 1000 1080 50 0.8
# no_name python fractals.py fractals/no_name.txt 960 1020 120 0.51
# Peano-curve python fractals.py fractals/peano-curve.txt 0 1080 70 0.7
# Peano-Gosper-curve: python fractals.py fractals/peano-gosper-curve.txt 600 280 200 0.5
# Pentaplexity python fractals.py fractals/pentaplexity.txt 550 850 150 0.5
# Plant: python fractals.py fractals/plant.txt 960 1000 100 0.6
# Quadratic-Gosper python fractals.py fractals/quadratic-gosper.txt 1920 1080 70 0.61
# Quadratic-Koch-island python fractals.py fractals/quadratic-koch-island.txt 950 850 50 0.5
# Quadratic-snowflake python fractals.py fractals/quadratic-snowflake.txt 500 1000 50 0.52
# Rings: python fractals.py fractals/rings.txt 700 250 60 0.5
# Sierpinski-arrowhead python fractals.py fractals/sierpinski-arrowhead.txt 1300 1000 90 0.7
# Sierpinski-carpet python fractals.py fractals/sierpinski-carpet.txt 500 1020 50 0.6
# Sierpinski-curve: python fractals.py fractals/sierpinski-curve.txt 500 550 200 0.52
# Sierpinski-sieve: python fractals.py fractals/sierpinski-sieve.txt 1200 950 400 0.5
# Terdragon-curve python fractals.py fractals/terdragon-curve.txt 400 500 200 0.7
# Three-dragon-curve python fractals.py fractals/three-dragon-curve.txt 600 550 40 0.88
# Tiles python fractals.py fractals/tiles.txt 900 800 30 0.75
# Tree: python fractals.py fractals/tree.txt 960 950 250 0.5
# Triangle python fractals.py fractals/triangle.txt 1000 250 60 0.8
# Twin-dragon-curve python fractals.py fractals/twin-dragon-curve.txt 1000 250 90 0.8
# William-McWorter-Maze01 python fractals.py fractals/William_McWorter_Maze01.txt 1100 750 50 0.8
# William-McWorter-Moore python fractals.py fractals/William_McWorter_Moore.txt 900 350 100 0.5
# William-McWorter-Pentant python fractals.py fractals/William_McWorter_Pentant.txt 1000 120 90 0.39
# William-McWorter-Pentl python fractals.py fractals/William_McWorter_Pentl.txt 1400 400 90 0.5
| [
"[email protected]"
]
| |
fb4ea243547d2893b0fc90f79afa28cdd4b3a796 | 961580252a30afb63cfec05debdab039741a4573 | /src/truck.py | bfe44c6bdd034755d3c21a886b876f5e6a3354a0 | []
| no_license | gokceozden/capraz_sevkiyat | f7fdfa5e344c5db1f094de50878c3e3099de2fd0 | 78c374b55c4abf087f4a89e051a361f1182b8db0 | refs/heads/master | 2021-01-21T05:02:46.887208 | 2016-06-20T14:16:12 | 2016-06-20T14:16:12 | 34,739,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,253 | py | from PyQt5.QtCore import *
class Truck(QObject):
"""
General truck class with common types and functions
"""
def __init__(self):
QObject.__init__(self)
self.truck_name = None
self.current_time = 0
self.function_list = []
self.times = {'arrival_time': 0}
self.current_state = 0
self.state_signal = False
self.behaviour_list = []
self.relevant_data = None
self.changeover_time = 0
self.next_state_time = 0
self.current_door = None
self.finish_time = 0
def run(self, current_time):
self.current_time = current_time
self.function_list[self.current_state]()
if self.state_signal:
self.state_signal = False
return 1
return 0
def coming(self):
if self.times['arrival_time'] == self.current_time:
self.times['arrived'] = self.current_time
self.next_state()
def next_state(self, name=None):
self.state_signal = True
if name:
self.current_state = self.behaviour_list.index(name)
else:
self.current_state += 1 | [
"[email protected]"
]
| |
d2b852072eab9b442cdc09671d692a18ba683652 | a3d6556180e74af7b555f8d47d3fea55b94bcbda | /third_party/blink/web_tests/external/wpt/fetch/api/resources/preflight.py | f983ef952272a75a6706d3cdfabb08aced7efc7b | [
"BSD-3-Clause",
"GPL-1.0-or-later",
"MIT",
"LGPL-2.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft"
]
| permissive | chromium/chromium | aaa9eda10115b50b0616d2f1aed5ef35d1d779d6 | a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c | refs/heads/main | 2023-08-24T00:35:12.585945 | 2023-08-23T22:01:11 | 2023-08-23T22:01:11 | 120,360,765 | 17,408 | 7,102 | BSD-3-Clause | 2023-09-10T23:44:27 | 2018-02-05T20:55:32 | null | UTF-8 | Python | false | false | 3,394 | py | def main(request, response):
headers = [(b"Content-Type", b"text/plain")]
stashed_data = {b'control_request_headers': b"", b'preflight': b"0", b'preflight_referrer': b""}
token = None
if b"token" in request.GET:
token = request.GET.first(b"token")
if b"origin" in request.GET:
for origin in request.GET[b'origin'].split(b", "):
headers.append((b"Access-Control-Allow-Origin", origin))
else:
headers.append((b"Access-Control-Allow-Origin", b"*"))
if b"clear-stash" in request.GET:
if request.server.stash.take(token) is not None:
return headers, b"1"
else:
return headers, b"0"
if b"credentials" in request.GET:
headers.append((b"Access-Control-Allow-Credentials", b"true"))
if request.method == u"OPTIONS":
if not b"Access-Control-Request-Method" in request.headers:
response.set_error(400, u"No Access-Control-Request-Method header")
return b"ERROR: No access-control-request-method in preflight!"
if request.headers.get(b"Accept", b"") != b"*/*":
response.set_error(400, u"Request does not have 'Accept: */*' header")
return b"ERROR: Invalid access in preflight!"
if b"control_request_headers" in request.GET:
stashed_data[b'control_request_headers'] = request.headers.get(b"Access-Control-Request-Headers", None)
if b"max_age" in request.GET:
headers.append((b"Access-Control-Max-Age", request.GET[b'max_age']))
if b"allow_headers" in request.GET:
headers.append((b"Access-Control-Allow-Headers", request.GET[b'allow_headers']))
if b"allow_methods" in request.GET:
headers.append((b"Access-Control-Allow-Methods", request.GET[b'allow_methods']))
preflight_status = 200
if b"preflight_status" in request.GET:
preflight_status = int(request.GET.first(b"preflight_status"))
stashed_data[b'preflight'] = b"1"
stashed_data[b'preflight_referrer'] = request.headers.get(b"Referer", b"")
stashed_data[b'preflight_user_agent'] = request.headers.get(b"User-Agent", b"")
if token:
request.server.stash.put(token, stashed_data)
return preflight_status, headers, b""
if token:
data = request.server.stash.take(token)
if data:
stashed_data = data
if b"checkUserAgentHeaderInPreflight" in request.GET and request.headers.get(b"User-Agent") != stashed_data[b'preflight_user_agent']:
return 400, headers, b"ERROR: No user-agent header in preflight"
#use x-* headers for returning value to bodyless responses
headers.append((b"Access-Control-Expose-Headers", b"x-did-preflight, x-control-request-headers, x-referrer, x-preflight-referrer, x-origin"))
headers.append((b"x-did-preflight", stashed_data[b'preflight']))
if stashed_data[b'control_request_headers'] != None:
headers.append((b"x-control-request-headers", stashed_data[b'control_request_headers']))
headers.append((b"x-preflight-referrer", stashed_data[b'preflight_referrer']))
headers.append((b"x-referrer", request.headers.get(b"Referer", b"")))
headers.append((b"x-origin", request.headers.get(b"Origin", b"")))
if token:
request.server.stash.put(token, stashed_data)
return headers, b""
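# wptserve handler contract used above: returning (headers, body) implies a
# 200 status, while the preflight branch returns (status, headers, body) to
# set an explicit preflight_status.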
| [
"[email protected]"
]
| |
bd1c69067fdc1bd4cbb833f1cd197b1c735de133 | 62858ffe3288aeff9765a38bee2e0d2f2b710f37 | /rsenv/dataanalysis/nanodrop/nanodrop_cli.py | c4db0e6371c709a8334bececbcdc52c815ecefa2 | []
| no_license | scholer/rsenv | f275a9c8fa0398bad32de89e6abac1efeb3869c1 | 04f50a1717baaab74e199d2e961d37d07aac69e7 | refs/heads/master | 2021-07-17T08:22:33.135827 | 2021-04-07T10:16:12 | 2021-04-07T10:16:12 | 69,067,062 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,217 | py |
import os
import itertools
from collections import Counter, OrderedDict, defaultdict
import yaml
import click
import pandas as pd
from rsenv.dataanalysis.nanodrop import denovix
from rsenv.utils.query_parsing import translate_all_requests_to_idxs
"""
Alternative name:
plot
plot_nanodrop_data
load_nanodrop_data_and_plot_requested_samples
# Edit: Don't provide Args help in the docstring for click commands, provide that to click.option.
Args:
yamlfile:
header_fmt: Format the DataFrame header this way using metadata fields from the file.
Default: "{Sample Name}-{Sample Number}"
query_match_method: How to query select the nanodrop data set to plot.
See `rsenv.utils.query_parsing.translate_all_requests_to_idxs` for info.
savetofile: Save plot to this file. Default = `filepath+.png`.
nm_range: The range of wavelengths (nm) to use (x-axis).
Note: This is data-selection, not actual plot `xlim`. Use `plot_kwargs` in `yamlfile` to provide xlim/ylim.
tight_layout: Whether to invoke `tight_layout()` before saving the figure.
verbose: The verbosity with which information is printed during execution.
Returns:
None
"""
unit_prefixes = {
'm': 1_000,
'u': 1_000_000,
'µ': 1_000_000,
'n': 1_000_000_000,
'p': 1_000_000_000_000,
'f': 1_000_000_000_000_000,
}
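# Conversion sketch: a molar concentration c (mol/L) is reported in unit 'uM'
# as c * unit_prefixes['u'], e.g. 2.5e-06 M -> 2.5 uM (see the `report` command).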
# From http://click.pocoo.org/6/arguments/:
# Click will not attempt to document arguments for you and wants you to document them manually (...).
# @click.option(
# '--query', '-q',
CONTEXT_SETTINGS = dict(
max_content_width=200, # Set to very high to prevent rewrapping completely.
)
@click.command(context_settings=CONTEXT_SETTINGS) #
# Other options:
@click.option('--yamlfile', help="If given, read options from this yaml-formatted file.")
@click.option('--verbose', '-v', count=True,
help="The verbosity with which information is printed during execution. "
"Specify multiple times to increase verbosity.")
@click.option('--user-vars', '-u', nargs=2, multiple=True, metavar="NAME VALUE",
help="Add custom variables that can be used in filenames and query requests. "
"You can specify as many user-vars as you want, "
"and each user-var name can be specified multiple times to loop over the different variations.")
# Dataset naming and selection:
@click.option('--header-fmt',
help="A python format string used to generate DataFrame column headers using metadata fields from the "
'input file. To only include Sample Name, use `--header-fmt "{Sample Name}"')
@click.option('--query-match-method', default='glob',
help="How to query select the data sets to plot (matching against the generated header). "
"Options: 'glob', 'regex', 'full-word', 'exact', 'contains', etc. Default: glob.")
@click.option('--query-include-method', default='extend-unique',
help="How to build/merge the list of selected samples for each query. "
"Default: 'extend-unique'. Alternatively: 'set-sorted' (for sorted idxs list).")
@click.option('--min-query-selection', default=0, type=int,
help="Raise an error if query-selections return less than this number of candidates. (Query debugging)")
@click.option('--normalize/--no-normalize', default=False, help="Normalize the spectrograms before plotting.")
@click.option('--normalize-to', default=None, type=int, metavar="NM-VALUE",
help="Normalize the spectrograms at a specific wavelength.")
@click.option('--normalize-range', nargs=2, type=int, metavar="LOWER UPPER",
help="Normalize, using the average value within a certain range.")
# Plotting options and styles:
@click.option('--nm-range', nargs=2, type=int, metavar="MIN MAX",
help="The range of wavelengths (nm) to use (data slicing).")
# @click.option('--AU-range', nargs=2, type=int, help="The range of absorbance values (AU/cm) to use.")
@click.option('--xlim', '-x', nargs=2, type=int, metavar="XMIN XMAX", help="Limit the plot to this x-axis range.")
@click.option('--ylim', '-y', nargs=2, type=int, metavar="YMIN YMAX", help="Limit the plot to this y-axis range.")
@click.option('--linestyles', '-l', multiple=True,
help="The line style(s) to use when plotting. Will be combined combinatorically with colors."
" Click options doesn't support an undefined number of values per option,"
" so linestyles, colors, and markers must be given multiple times, once for each color."
" Example: ` -l '-' -l ':', -l '--' `."
" See https://matplotlib.org/gallery/lines_bars_and_markers/line_styles_reference.html for reference.")
@click.option('--colors', '-c', multiple=True,
help="The color(s) to use when plotting. Will be combined combinatorially with line styles."
" Example: `-c r -c #DD8833` to use red and orange lines.")
@click.option('--markers', multiple=True,
help="The marker(s) to use when plotting. Will be combined combinatorially with other styles."
' Example: ` --markers "" --markers . --markers 1 --markers + `')
@click.option('--style-combination-order', nargs=3, default=('linestyle', 'color', 'marker'),
help="The order in which linestyles, colors, and markers are combinatorically combined.")
@click.option('--mpl-style', help="Use this matplotlib style.")
@click.option('--figsize', nargs=2, type=float, metavar="WIDTH HEIGHT", help="Figure size (width, height).")
@click.option('--use-seaborn/--no-use-seaborn', default=False, help="Import seaborn color/style schemes.")
@click.option('--tight-layout/--no-tight-layout', '-t', default=None,
help="If given, invoke `tight_layout()` before saving the figure.")
@click.option('--savetofile', '-o', multiple=True, help="Save plot to this file. Default = `filepath+.png`.")
@click.option('--saveformat', '-f', multiple=True, help="The file format(s) to save plot in. Default: 'png'.")
@click.option('--showplot/--no-showplot', '-s', default=None,
help="If given, invoke `show()` after creating the plot.")
# Arguments:
@click.argument(
'filepath', type=click.Path(exists=True),
# help="Path of the nanodrop/denovix data file to plot.", # Do not provide help for arguments, only options.
)
@click.argument(
'query', nargs=-1,
# help= # 'help' arg only for click options, not args.
)
def plot(
filepath,
yamlfile=None,
user_vars=(),
header_fmt=None,
query=None,
query_match_method="glob",
query_include_method='extend-unique',
query_exclude_method='list-remove',
min_query_selection=0,
normalize=False,
normalize_to=None,
normalize_range=None,
nm_range=None,
xlim=None,
ylim=None,
linestyles=None,
colors=None,
markers=None,
style_combination_order=('linestyle', 'color', 'marker'),
mpl_style=None,
use_seaborn=False,
figsize=None,
tight_layout=None,
savetofile=None,
saveformat='png',
showplot=None,
verbose=0
):
""" Plot spectrograms from a given nanodrop data file.
Command line usage:
$ nanodrop_cli plot [--OPTIONS] DATAFILE [SELECTION QUERIES]
Example:
$ nanodrop_cli plot datafile.csv "RS511*" "-RS511 b*" "RS511 b-11"
This will load datafile.csv and plot all samples starting with RS511, except samples starting with "RS511 b",
although do include "RS511 b-11"
\b
Args:
filepath: path to nanodrop/denovix data file (.csv).
yamlfile:
user_vars:
header_fmt:
query: Query list used to select which data to plot. If None, plot all samples in the data file.
See 'Examples' for how to use the query parameter.
query_match_method: How to match each query request. Default: 'glob'.
Options: 'glob', 'regex', 'full-word', 'exact', 'contains', 'in', 'startswith', 'endswith'.
query_include_method:
query_exclude_method:
min_query_selection:
normalize: If specified, normalize each spectrogram so its max value is 1.0 (bool, default: False).
normalize_to: If given, normalize each spectrogram to its value at this wavelength.
normalize_range: If given, normalize each spectrogram to its average value within this wavelength range.
nm_range: The range (wavelength min, max) to plot.
ylim: Limit the y-axis to this range.
linestyles: Use these line styles, e.g. '-' or ':'.
colors: The colors (cycle) to use to plot the spectrograms.
markers: Markers to use when plotting.
style_combination_order: The order in which (linestyles, colors, markers) are combined to produce styles.
mpl_style: Use this matplotlib style.
use_seaborn: Import seaborn and use the seaborn styles and plots.
tight_layout: Apply tight_layout to pyplot figure.
savetofile: Save plotted figure to this file.
saveformat:
showplot: Show plotted figure.
verbose: Increase this to print more information during the data parsing+plotting process.
Returns:
Examples:
To plot all samples in `datafile.csv` starting with "RS511":
$ nanodrop_cli plot datafile.csv "RS511*"
Note the use of the asterix (*) GLOB character used to select samples.
You can use multiple "queries" to select the samples:
$ nanodrop_cli plot datafile.csv "RS511*" ControlStandard "*B4*"
This will plot all samples in `datafile.csv` that either starts with "RS511", or is "ControlStandard",
or includes "B4" in the sample name.
You can even negative selections, e.g.
$ nanodrop_cli plot datafile.csv "RS511*" "-*B4*"
This will plot all samples in `datafile.csv` that starts with "RS511"
except samples containing "B4" in the name.
You can even use the special query keyword "all" to start by including all samples,
and then removing specific samples, e.g.:
$ nanodrop_cli plot datafile.csv all "-*B4*"
Will plot all samples from `datafile.csv` except samples containing "B4" in the name.
It is possible to change the "query matching method" from 'glob' to e.g. 'substring', 'exact' or 'regex'
using the `--query-match-method` option.
Note that the query selection is done using the Legend / Header generated for each sample.
By default, this includes both Sample Name and Sample Number (joined by a dash).
You can use the `--header-fmt` option to change how the Header (Legend) is stiched together
for each sample.
The `--user-vars <KEY> <VALUE>` option can be used to define user variables and avoid having to change both the
filename and queries when invoking the command repeatedly to display different data selections:
Example 1: Using `-u` to define `expid` and `plateno` uservars, used in filename and queries:
$ nanodrop-cli data.csv -o "{expid}_Plate{plateno}.{ext}" "{expid}*P{plateno}*" -u expid RS535 -u plateno 1
This will plot all samples from data.csv matching "RS535*P1*" and save to file "RS535_Plate1.png"
The `ext` extension is already defined with a default value 'png'.
We currently have the following predefined variables and default values:
ext 'png', depending on the value of 'saveformat'.
normstr 'normX', depending on which normalization is used, or 'absorbance' if no normalization is used.
Example 2: Specifying the same user variable multiple times:
$ nanodrop-cli data.csv -o "RS535b_Plate{p}.{ext}" "*P{p}*" -u p 1 -u p 2 -u p 3
\b
This will load spectrograms from data.csv
and plot all samples matching "*P1*" and save to file "RS535_Plate1.png",
then plot all samples matching "*P2*" and save to file "RS535_Plate2.png",
then plot all samples matching "*P3*" and save to file "RS535_Plate3.png".
Note: You can specify multiple values for multiple user-vars to loop over the cartesian product combinations.
Note: The input file is only loaded once and cannot contain user vars.
If you need to loop over multiple input data files, you should use a command script, or use find/xargs/etc.
"""
rootpath, ext = os.path.splitext(filepath)
if yamlfile is None and os.path.isfile(rootpath+'.yaml'):
# If no yamlfile was specified but we have a yaml file with same name as the input data file, use that one.
yamlfile = rootpath+'.yaml'
if yamlfile:
        yamlconfig = yaml.safe_load(open(yamlfile))
else:
yamlconfig = {}
if not savetofile:
savetofile = yamlconfig.get('savetofile', rootpath+'.{ext}')
if not saveformat:
saveformat = yamlconfig.get('saveformat', 'png')
if showplot is None:
showplot = yamlconfig.get('showplot', False)
if header_fmt is None:
header_fmt = yamlconfig.get('header_fmt', "{Sample Number:>2} {Sample Name}") # OBS: Sample Number is str.
if not query:
query = yamlconfig.get('query_list')
if not nm_range:
nm_range = yamlconfig.get('nm_range')
if tight_layout is None:
tight_layout = yamlconfig.get('tight_layout')
if not user_vars:
user_vars = yamlconfig.get('user_vars', [('we-need-at-least-one-loop-variable', 'this')])
if isinstance(user_vars, (list, tuple)):
# TODO: Detect user variables that have been specified multiple times, and define those as 'loop variables'.
# You then extract loop combinations as itertools.product(*loop_vars.values())
# user_vars_dict = dict(user_vars)
# user_var_count = Counter([k for k, v in user_vars]) - Counter(user_vars_dict.keys())
# # Zero counts should be removed. (The '+' unary operator can be used to remove negative counts).
# user_loop_vars = OrderedDict([(k, [v_ for k_, v_ in user_vars if k_ == k]) for k in user_var_count])
# for k in user_loop_vars:
# del user_vars_dict[k]
# user_vars = user_vars_dict
# In theory, we could just let all user vars be loop vars, with a repetition of 1?
# Actually, that is probably a more consistent implementation path.
user_loop_vars = defaultdict(list)
for k, v in user_vars:
user_loop_vars[k].append(v)
print("User vars:", user_vars)
print("Loop vars:", user_loop_vars)
user_loop_keys, user_loop_vals = zip(*user_loop_vars.items())
print("Loop keys:", user_loop_keys)
print("Loop vals:", user_loop_vals)
else:
user_loop_keys, user_loop_vals = zip(*user_vars.items())
loop_combinations = list(itertools.product(*user_loop_vals))
print("Loop combinations:", loop_combinations)
#
# Load data:
df, metadata = denovix.csv_to_dataframe(filepath, header_fmt=header_fmt, verbose=verbose)
# Prepare for plot loops:
fig_kwargs = yamlconfig.get('fig_kwargs', {})
axes_kwargs = yamlconfig.get('axes_kwargs', {})
plot_kwargs = yamlconfig.get('plot_kwargs', {})
# * fig_kwargs - given to pyplot.figure() - https://matplotlib.org/api/_as_gen/matplotlib.pyplot.figure.html
# * axes_kwargs - given to Figure.add_axes()
# * plot_kwargs - given to pyplot.plot() or possibly df.plot()
if figsize:
fig_kwargs['figsize'] = figsize # w, h?
    _query = query  # keep the original request tuple so formatting starts fresh each loop iteration
ax = None
for cno, comb in enumerate(loop_combinations, 1):
user_vars = dict(zip(user_loop_keys, comb))
print(f"{cno:01} User vars:", user_vars)
print("savetofile:", savetofile)
if query:
if verbose:
print(f"\nUsing the header/legend (as generated by `header_fmt` = {header_fmt!r}) to select columns...")
if user_vars:
# Update each request in the query list:
print("Substituting user vars in query requests:", user_vars, end='\n')
query = [request.format(**user_vars) for request in _query]
selected_idxs = translate_all_requests_to_idxs(
query, candidates=df.columns, match_method=query_match_method,
include_method=query_include_method, exclude_method=query_exclude_method,
min_query_selection=min_query_selection
)
selected_cols = [df.columns[idx] for idx in selected_idxs]
if verbose >= 2:
print(" query_list:", query)
print("Selected:")
print("\n".join(f" {i:2} {n}" for i, n in zip(selected_idxs, selected_cols)))
else:
selected_cols = None
print("Plotting, using fig_kwargs:")
print(fig_kwargs)
ax = denovix.plot_nanodrop_df(
df=df, selected_columnnames=selected_cols,
nm_range=nm_range, xlim=xlim, ylim=ylim,
normalize=normalize, normalize_to=normalize_to, normalize_range=normalize_range,
linestyles=linestyles, colors=colors, markers=markers, style_combination_order=style_combination_order,
mpl_style=mpl_style, use_seaborn=use_seaborn,
fig_kwargs=fig_kwargs, axes_kwargs=axes_kwargs, plot_kwargs=plot_kwargs,
tight_layout=tight_layout,
showplot=showplot, savetofile=savetofile, saveformat=saveformat,
verbose=verbose, user_vars=user_vars,
)
print("\n - done plotting!\n\n\n")
return ax
@click.command(help="List samples in a Nanodrop/Denovix data file.")
@click.argument('filepath', type=click.Path(exists=True)) # Do not provide help to arguments only options.
# @click.option('--print-fmt', default=" {meta['Sample Number']:>3} \t{meta['Sample Name']:20} \t{header:30}")
@click.option('--print-fmt', default='default1')
@click.option('--report-header-fmt', default=None)
@click.option(
'--header-fmt', default="{Sample Name}-{Sample Number}",
help="Format the DataFrame column names/headers with this format string using metadata fields from the file."
)
@click.option('--verbose', '-v', count=True, help="The verbosity with which information is printed during execution.")
@click.option('--query-match-method', default='glob',
help="How to query select the nanodrop data set to plot. Default: glob.")
@click.option('--query-include-method', default='extend-unique',
help="How to build/merge the list of selected samples for each query. "
"Default: 'extend-unique'. Alternatively: 'set-sorted' (for sorted idxs list).")
@click.option('--min-query-selection', default=0, type=int)
@click.argument('query', nargs=-1)
def ls(
filepath,
header_fmt="{Sample Name}-{Sample Number}", # how to generate measurement/column headers
print_fmt=" {meta['Sample Number']:>3} \t{meta['Sample Name']:20} \t{header:30}",
report_header_fmt=None,
query=None, query_match_method="glob",
query_include_method='extend-unique',
query_exclude_method='list-remove',
min_query_selection=0,
unescape_backslashes=True,
verbose=0,
):
""" Print/list information about sample data in a Nanodrop file.
OBS: This command was changed to make it more simple. Use `report` for advanced usage (concentration calc, etc.)
Differences between `ls` and `report`:
`ls` supports custom `print-fmt` and `header-fmt`; `report` uses a standard table layout format.
`report` supports `extinction` (wavelength, ext.coeff) arguments to extract absorbance
values and calculate concentrations; `ls` does not.
Args:
filepath: The nanodrop file to list information from.
header_fmt: How to generate dataframe column names (column headers).
print_fmt: How to print information for each sample (python format string).
report_header_fmt: Add this header on top of the reported list.
query: Only list samples matching these selection queries.
query_match_method: The query matching method used. Default is glob, which allows wildcards, e.g "RS511*".
query_include_method: How to merge idxs for each request with the existing idxs list from preceding requests.
query_exclude_method: How to remove idxs for negative selection requests (starting with minus-hyphen).
Queries are processed sequentially, and supports '-' prefix to negate the selection,
and special 'all' keyword, e.g.
["all", "-John*", "John Hancock"]
to select all people except if their name starts with John, although do include John Hancock.
min_query_selection:
unescape_backslashes: Can be activated to make it easier to provide special characters on the command line,
e.g. tab. Useful when making reports.
verbose: The verbosity with which to print informational messages during execution.
Returns:
Examples:
        $ nanodrop-cli ls UVvis_merged.csv -v -v \
            --print-fmt "{meta[Sample Number]:2}\t {meta[Sample Name]:16}\t {A[290]: 6.3f}" \
            --report-header-fmt "#:\t Sample Name \t A290\n---------------------------------" \
            "RS531*" "*dUTP" "KCl*"
Notes:
As always, you can use '--' to separate options and arguments, if you need to input arguments
that looks like options (e.g. starts with '-').
"""
df, metadata = denovix.csv_to_dataframe(filepath, header_fmt=header_fmt)
if query:
selected_idxs = translate_all_requests_to_idxs(
query, candidates=df.columns, match_method=query_match_method,
include_method=query_include_method, exclude_method=query_exclude_method,
min_query_selection=min_query_selection,
)
selected_cols = [df.columns[idx] for idx in selected_idxs]
if verbose >= 2:
print("\nSelected idxs:", selected_idxs)
print("Selected cols:", selected_cols)
# print(df.iloc[:, 0:2]) # 0-based indexing; equivalent to df.loc[:, selected_cols]
assert len(selected_idxs) == len(set(selected_idxs))
df = df.iloc[:, selected_idxs] # 0-based indexing; equivalent to df.loc[:, selected_cols]
# Also need to update metadata list:
metadata = [metadata[idx] for idx in selected_idxs]
if unescape_backslashes:
if report_header_fmt:
report_header_fmt = report_header_fmt.replace("\\t", "\t").replace("\\n", "\n")
if print_fmt:
print_fmt = print_fmt.replace("\\t", "\t").replace("\\n", "\n")
if verbose >= 2:
print("\n\n")
print(f"report_header_fmt:\n{report_header_fmt!r}")
print(f"print_fmt:\n{print_fmt!r}")
print("\n\n")
if verbose > -1:
print(f"\n\nThe following samples / datasets were found in {filepath!r}:\n")
# print("Sample Number:\tSample Name: \tHeader/Legend: (generated with `header-fmt`)")
# print("--------------\t------------ \t--------------------------------------------")
if report_header_fmt is None and print_fmt == 'default1':
report_header_fmt = 'default1'
if report_header_fmt:
if report_header_fmt in ('default1', 1):
report_header_fmt = """
Sample Number:\tSample Name: \tHeader/Legend: (generated with `header-fmt`)
--------------\t------------ \t--------------------------------------------
"""
print(report_header_fmt.format(df, metadata))
if print_fmt == 'default1':
print_fmt = " {meta[Sample Number]:>3} \t{meta[Sample Name]:20} \t{header:30}"
for header, meta in zip(df.columns, metadata):
# Note: f-strings and string.format have slightly different signature for e.g. accessing items:
# "{dic[key]}".format(dic=dic) vs f"{dic['key']}"
A = values = df[header] # All absorbance values
# print(f"df[{header!r}]:")
# print(df[header])
# fmtdict = dict(header=header, meta=meta, values=values, A=values, c=conc, M=conc, mM=mM, uM=uM, AU=AU)
# click.echo(print_fmt.format(**fmtdict))
click.echo(print_fmt.format(**locals()))
@click.command(help="Report samples in a Nanodrop/Denovix data file.")
@click.argument('filepath', type=click.Path(exists=True)) # Do not provide help to arguments only options.
@click.option('--output-format', '-f', default='text')
# @click.option('--output-destination', '-o', default='-')
@click.option(
'--header-fmt', default="{Sample Name}-{Sample Number}",
help="Format the DataFrame column names/headers with this format string using metadata fields from the file."
)
@click.option('--wavelength', '-w', default=None, type=int, multiple=True,
help="Specify a wavelength for which to include absorbance on in the report. See also `--extinction`."
"Can be given multiple times to report at multiple wavelengths, e.g. `-w 230 -w 260 -w 280 -w 310`.")
@click.option('--extinction', '-e', default=None, type=(int, float), multiple=True,
help="Provide extinction tuples (wavelength, ext. coeff), and the report will calculate concentrations."
"Can be given multiple times to report at multiple wavelengths, e.g. `-e 230 8000 -e 260 10000`")
@click.option('--concentration-units', '-u', default=['mM', 'uM'], multiple=True,
help="The units in which to report concentrations."
"Can be given multiple times to report at multiple units, e.g. `-u mM -u uM`")
@click.option('--verbose', '-v', count=True, help="The verbosity with which information is printed during execution.")
@click.option('--query-match-method', default='glob',
help="How to query-select the nanodrop data sets to include. Default: glob.")
@click.option('--query-include-method', default='extend-unique',
help="How to build/merge the list of selected samples for each query. "
"Default: 'extend-unique'. Alternatively: 'set-sorted' (for sorted idxs list).")
@click.option('--min-query-selection', default=0, type=int,
help="Raise an error if the query selection returns less than this number. "
"Can be used to debug and prevent accidental querying errors, especially in batch operations.")
@click.argument('query', nargs=-1)
def report(
filepath,
header_fmt="{Sample Name}-{Sample Number}", # how to generate column names
output_format='text', # Text or HTML.
wavelength=None,
extinction=None, # list of two-tuples with (wavelength, ext-coeff) floats in (nm, AU/cm/M)
concentration_units=('mM', 'uM'),
pathlength=1, # Light path, in cm.
query=None, query_match_method="glob",
query_include_method='extend-unique',
query_exclude_method='list-remove',
min_query_selection=0,
verbose=0,
):
""" Print a report with information about sample/measurements in a Nanodrop/Denovix file.
Args:
filepath: The nanodrop file to list information from.
header_fmt: How to generate dataframe column names/headers based on metadata (Sample Name, Sample Number, etc.)
output_format: The format to output to, e.g. text or HTML.
        wavelength: A list of wavelengths for which to include absorbance in the report.
extinction: Provide sample extinction (wavelength, ext. coeff) tuple,
and the program will calculate concentration (M, mM, uM).
concentration_units: The units to report the concentration in (e.g. 'M', 'mM', 'uM', etc).
pathlength: The pathlength (light path) used when acquiring the data, in cm. Typically 0.1 cm for Nanodrop.
query: Only list samples matching these selection queries.
query_match_method: The query matching method used. Default is glob, which allows wildcards, e.g "RS511*".
query_include_method: How to merge idxs for each request with the existing idxs list from preceding requests.
query_exclude_method: How to remove idxs for negative selection requests (starting with minus-hyphen).
Queries are processed sequentially, and supports '-' prefix to negate the selection,
and special 'all' keyword, e.g.
["all", "-John*", "John Hancock"]
to select all people except if their name starts with John, although do include John Hancock.
min_query_selection:
verbose: The verbosity with which to print informational messages during execution.
Returns:
None (all output is done to stdout).
Examples:
        $ nanodrop-cli report UVvis_merged.csv -e 290 12500 -u uM -v -v \
            "RS531*" "*dUTP" "KCl*"
Notes:
As always, you can use '--' to separate options and arguments, if you need to input arguments
that looks like options (e.g. starts with '-').
See also:
* `ls` to list samples from a Nanodrop file (a simpler version of `report`).
"""
if concentration_units is None or concentration_units == ():
concentration_units = ['mM', 'uM']
df, metadata = denovix.csv_to_dataframe(filepath, header_fmt=header_fmt)
if query:
selected_idxs = translate_all_requests_to_idxs(
query, candidates=df.columns, match_method=query_match_method,
include_method=query_include_method, exclude_method=query_exclude_method,
min_query_selection=min_query_selection,
)
selected_cols = [df.columns[idx] for idx in selected_idxs]
if verbose >= 2:
print("\nSelected idxs:", selected_idxs)
print("Selected cols:", selected_cols)
# print(df.iloc[:, 0:2]) # 0-based indexing; equivalent to df.loc[:, selected_cols]
assert len(selected_idxs) == len(set(selected_idxs))
df = df.iloc[:, selected_idxs] # 0-based indexing; equivalent to df.loc[:, selected_cols]
# Also need to update metadata list:
metadata = [metadata[idx] for idx in selected_idxs]
report_df = pd.DataFrame(data={'Measurement-Header:': df.columns})
if extinction:
print(f"\nUsing path length L = {pathlength} cm for concentration calculations (`--pathlength`)...\n\n")
    for wl in wavelength:
        AU = df.loc[wl, :].values  # This produces a series; use pd.Series.values to get values.
        abs_header, apl_header = f"A{wl}", f"A{wl}/cm"
        AU_per_cm = AU / pathlength  # A/cm, pathlength in cm.
        report_df[abs_header], report_df[apl_header] = AU, AU_per_cm
print("concentration_units:")
print(concentration_units)
for wavelength, ext_coeff in extinction:
AU = df.loc[wavelength, :].values # This produces a series; use pd.Series.values to get values.
abs_header, apl_header, = f"A{wavelength}", f"A{wavelength}/cm"
AU_per_cm = AU / pathlength # A/cm, pathlength in cm.
conc = AU_per_cm / ext_coeff # In moles/L
report_df[abs_header], report_df[apl_header] = AU, AU_per_cm
for unit in concentration_units:
if len(unit) > 2:
raise ValueError(f"Unit {unit!r} not recognized.")
if unit == 'M':
factor = 1
else:
factor = unit_prefixes[unit[0]]
report_df[f"c_A{wavelength}/{unit}"] = conc * factor
# Table styling:
# https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.to_string.html
# pd.options.display.float_format = '{:,.2f}'.format # Using the format() function of the string.
# Use `float_format` parameter to control how floats are printed,
# Use `formatters` to have per-column formats.
output_format = output_format.lower()
if output_format == "to_string":
click.echo(report_df.to_string(index=False, header=True))
elif output_format == "text":
import tabulate
# Other packages for pretty-printing tables include:
# beautifultable terminaltables tabulate prettypandas fixedwidth
click.echo(tabulate.tabulate(report_df, headers=report_df.columns, showindex=False))
elif output_format == "html":
click.echo(report_df.to_html(index=False, header=True))
else:
raise ValueError(f"`output_format` {output_format!r} not recognized.")
@click.group()
def cli(): # context_settings=CONTEXT_SETTINGS
pass
cli.add_command(plot)
cli.add_command(ls)
cli.add_command(report)
if __name__ == '__main__':
cli()
| [
"[email protected]"
]
| |
03fa881168a8380f10195c4be84ed3e7522fd640 | 13d222bc3332378d433835914da26ed16b583c8b | /src/pemjh/challenge36/__init__.py | c80a9f9687728a17735aeb896b6a57ecf07edff7 | []
| no_license | mattjhussey/pemjh | c27a09bab09cd2ade31dc23fffac07374bea9366 | 2ebb0a525d2d1c0ee28e83fdc2638c2bec97ac99 | refs/heads/master | 2023-04-16T03:08:59.390698 | 2023-04-08T10:54:00 | 2023-04-08T10:54:00 | 204,912,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | """ challenge36 """
from .main import main
__all__ = ['main']
| [
"[email protected]"
]
| |
29b46029d177011615a88307afbb80e6f4f05d35 | bd3a58fac4d2547f87ca00d1faf73f6bcf781cef | /tests/test_matrix.py | 89c31d7c8a777517b674b6fe1b9cbc9b86d14af2 | [
"MIT"
]
| permissive | Nachtfeuer/pipeline | 0a41f5fef2672678dbbbe33c7ee9cbd5e21bc9d5 | ee15d98f4d8f343d57dd5b84339ea41b4e2dc673 | refs/heads/master | 2023-01-23T10:18:47.171697 | 2021-07-11T09:08:38 | 2021-07-11T09:08:38 | 106,919,631 | 30 | 6 | MIT | 2022-12-26T20:28:07 | 2017-10-14T10:41:32 | Python | UTF-8 | Python | false | false | 7,090 | py | """Testing of class Stage."""
# pylint: disable=no-self-use, invalid-name
import unittest
from hamcrest import assert_that, equal_to
from spline.components.config import ApplicationOptions
from spline.matrix import Matrix, MatrixProcessData, matrix_worker

class TestMatrix(unittest.TestCase):
    """Testing of class Matrix."""

    def test_simple_with_one_entry(self):
        """Testing simple matrix with one entry."""
        matrix_definition = [{'name': 'one', 'env': {'message': 'hello'}}]
        pipeline_definition = [{'stage(test)': [{
            'tasks': [{'shell': {'script': '''echo tasks1:hello1''', 'when': ''}},
                      {'shell': {'script': '''echo tasks1:hello2''', 'when': ''}}]}]}]

        process_data = MatrixProcessData()
        process_data.pipeline = pipeline_definition
        process_data.options = ApplicationOptions(definition='fake.yaml')

        matrix = Matrix(matrix_definition, parallel=False)
        result = matrix.process(process_data)
        output = [line for line in result['output'] if line.find("hello") >= 0]

        assert_that(result['success'], equal_to(True))
        assert_that(len(output), equal_to(2))
        assert_that(output[0], equal_to('tasks1:hello1'))
        assert_that(output[1], equal_to('tasks1:hello2'))

    def test_with_tags_and_filter_ordered(self):
        """Testing simple matrix with tags and filtering."""
        matrix_definition = [
            {'name': 'one', 'env': {'message': 'hello1'}, 'tags': ['group-a']},
            {'name': 'two', 'env': {'message': 'hello2'}, 'tags': ['group-b']},
            {'name': 'three', 'env': {'message': 'hello3'}, 'tags': ['group-a']}
        ]
        pipeline_definition = [{'stage(test)': [{
            'tasks': [{'shell': {'script': '''echo $message''', 'when': ''}}]}]}]

        process_data = MatrixProcessData()
        process_data.pipeline = pipeline_definition
        process_data.options = ApplicationOptions(definition='fake.yaml', matrix_tags='group-a')

        matrix = Matrix(matrix_definition, parallel=False)
        result = matrix.process(process_data)
        output = [line for line in result['output'] if line.find("hello") >= 0]

        assert_that(result['success'], equal_to(True))
        assert_that(len(output), equal_to(2))
        assert_that(output[0], equal_to('hello1'))
        assert_that(output[1], equal_to('hello3'))

    def test_with_tags_and_filter_parallel(self):
        """Testing simple matrix with tags and filtering."""
        matrix_definition = [
            {'name': 'one', 'env': {'message': 'hello1'}, 'tags': ['group-a']},
            {'name': 'two', 'env': {'message': 'hello2'}, 'tags': ['group-b']},
            {'name': 'three', 'env': {'message': 'hello3'}, 'tags': ['group-a']}
        ]
        pipeline_definition = [{'stage(test)': [{
            'tasks': [{'shell': {'script': '''echo $message''', 'when': ''}}]}]}]

        process_data = MatrixProcessData()
        process_data.pipeline = pipeline_definition
        process_data.options = ApplicationOptions(definition='fake.yaml', matrix_tags='group-a')

        matrix = Matrix(matrix_definition, parallel=True)
        result = matrix.process(process_data)
        output = sorted([line for line in result['output'] if line.find("hello") >= 0])

        assert_that(result['success'], equal_to(True))
        assert_that(len(output), equal_to(2))
        assert_that(output[0], equal_to('hello1'))
        assert_that(output[1], equal_to('hello3'))

    def test_failed_ordered(self):
        """Testing failed ordered."""
        matrix_definition = [
            {'name': 'one', 'env': {'message': 'hello1'}},
            {'name': 'two', 'env': {'message': 'hello2'}}
        ]
        pipeline_definition = [{'stage(test)': [{
            'tasks': [{'shell': {'script': '''exit 123''', 'when': ''}}]}]}]

        process_data = MatrixProcessData()
        process_data.pipeline = pipeline_definition
        process_data.options = ApplicationOptions(definition='fake.yaml')

        matrix = Matrix(matrix_definition, parallel=False)
        result = matrix.process(process_data)
        output = [line for line in result['output'] if line.find("hello") >= 0]

        assert_that(result['success'], equal_to(False))
        assert_that(len(output), equal_to(0))

    def test_failed_parallel(self):
        """Testing failed parallel."""
        matrix_definition = [
            {'name': 'one', 'env': {'message': 'hello1'}},
            {'name': 'two', 'env': {'message': 'hello2'}}
        ]
        pipeline_definition = [{'stage(test)': [{
            'tasks': [{'shell': {'script': '''exit 123''', 'when': ''}}]}]}]

        process_data = MatrixProcessData()
        process_data.pipeline = pipeline_definition
        process_data.options = ApplicationOptions(definition='fake.yaml')

        matrix = Matrix(matrix_definition, parallel=True)
        result = matrix.process(process_data)
        output = [line for line in result['output'] if line.find("hello") >= 0]

        assert_that(result['success'], equal_to(False))
        assert_that(len(output), equal_to(0))

    def test_matrix_worker(self):
        """Testing worker for matrix used in multiprocessing."""
        pipeline_definition = [{'stage(test)': [{
            'tasks': [{'shell': {'script': '''echo $message''', 'when': ''}}]}]}]

        result = matrix_worker({
            'matrix': {'name': 'one', 'env': {'message': 'hello1'}},
            'pipeline': pipeline_definition,
            'model': {},
            'options': ApplicationOptions(definition='fake.yaml'),
            'hooks': None
        })

        output = [line for line in result['output'] if line.find("hello") >= 0]
        assert_that(result['success'], equal_to(True))
        assert_that(len(output), equal_to(1))

    def test_dry_run(self):
        """Testing matrix in dry run mode."""
        matrix_definition = [
            {'name': 'one', 'env': {'message': 'hello1'}},
            {'name': 'two', 'env': {'message': 'hello2'}},
            {'name': 'three', 'env': {'message': 'hello3'}}
        ]
        pipeline_definition = [{'stage(test)': [{
            'tasks': [{'shell': {'script': '''echo {{ env.message }}''', 'when': ''}}]}]}]

        process_data = MatrixProcessData()
        process_data.pipeline = pipeline_definition
        process_data.options = ApplicationOptions(definition='fake.yaml', dry_run=True)

        matrix = Matrix(matrix_definition, parallel=True)
        result = matrix.process(process_data)
        output = [line for line in result['output'] if len(line) > 0]

        assert_that(result['success'], equal_to(True))
        assert_that(len(output), equal_to(6))
        assert_that(output[0], equal_to('#!/bin/bash'))
        assert_that(output[1], equal_to('echo hello1'))
        assert_that(output[2], equal_to('#!/bin/bash'))
        assert_that(output[3], equal_to('echo hello2'))
        assert_that(output[4], equal_to('#!/bin/bash'))
        assert_that(output[5], equal_to('echo hello3'))
| [
"[email protected]"
]
| |
c03fe4e820efd8100e1a25426d3e4f808af557d2 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /cEzT2e8tLpwYnrstP_16.py | 968877026dfd242524f4110a45bf84a10221c0d0 | []
| no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 836 | py | """
Create a function that takes:

1. A list of keys.
2. A list of values (same size).
3. `True`, if key and value should be swapped, else `False`.

The function returns the constructed dict. Empty lists return an empty dict.

### Examples

    swap_d([1, 2, 3], ["one", "two", "three"], False)
    ➞ { 1: "one", 2: "two", 3: "three" }

    swap_d([1, 2, 3], ["one", "two", "three"], True)
    ➞ { "one": 1, "two": 2, "three": 3 }

    swap_d(["Paris", 3, 4.5], ["France", "is odd", "is half of 9"], True)
    ➞ { "France": "Paris", "is odd": 3, "is half of 9": 4.5 }

### Notes

* To make it simple, use only hashable (= immutable) keys.
* To make it simple, use only unique keys.
"""
def swap_d(k, v, swapped):
    if swapped:
        k, v = v, k
    output = {k[i]: v[i] for i in range(len(k))}
    return output
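
# A sketch of an equivalent one-liner using zip; this is an alternative for
# comparison, not part of the original solution:
def swap_d_zip(k, v, swapped):
    """Same behavior as swap_d, built from dict(zip(...))."""
    return dict(zip(v, k)) if swapped else dict(zip(k, v))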
| [
"[email protected]"
]
| |
42e1f9b04e2ca7c82ba069ee31d79bfc45840003 | d15bdaddab59d1cfea76790004cbad3e5f0c2c55 | /batkin/build_isolated/joy/catkin_generated/pkg.develspace.context.pc.py | 049bfc6580751b52bcf56b4d0bc9f765466f9dcc | []
| no_license | gychen-n/robot | 4265a1ff469d22550b6b537d1c81aa846ee7641a | 0663a33aea2c2de9e3ac5863307619091e5b5959 | refs/heads/main | 2023-04-10T13:32:06.623682 | 2021-04-16T00:41:04 | 2021-04-16T00:41:04 | 358,431,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;diagnostic_updater;sensor_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "joy"
PROJECT_SPACE_DIR = "/home/robot/batkin/devel_isolated/joy"
PROJECT_VERSION = "1.11.0"
| [
"[email protected]"
]
|