Dataset schema (column types and value statistics):

| Column | Type | Stats |
|---|---|---|
| blob_id | string | length 40–40 |
| directory_id | string | length 40–40 |
| path | string | length 3–616 |
| content_id | string | length 40–40 |
| detected_licenses | list | length 0–112 |
| license_type | string | 2 classes |
| repo_name | string | length 5–115 |
| snapshot_id | string | length 40–40 |
| revision_id | string | length 40–40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M, nullable |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | string | 188 classes |
| content | string | length 3 – 10.2M |
| authors | list | length 1–1 |
| author_id | string | length 1–132 |
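The rows below follow this schema as raw pipe-delimited records, one source file per row. As a minimal sketch of how a dataset with this schema might be consumed, assuming it is published on the Hugging Face Hub (the repo id below is a placeholder, not taken from this page), streaming avoids materializing the multi-megabyte `content` fields:

```python
# Minimal sketch, assuming a Hub-hosted dataset with the schema above.
# "org/code-dump" is a placeholder repo id, not a real dataset name.
from datasets import load_dataset

ds = load_dataset("org/code-dump", split="train", streaming=True)

for row in ds:
    # Column names match the schema table: keep hand-written Python files only.
    if row["language"] == "Python" and not (row["is_vendor"] or row["is_generated"]):
        print(row["repo_name"], row["path"], row["length_bytes"])
        break  # stop after the first match; remove to scan the full stream
```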
b5903534224224cc9623bb9bb469302d4eb61e4f | 7fb805dc0789bfa3bbac7a94b667548c01a8eb4a | /site-packages/cinderclient/tests/unit/v3/test_shell.py | eaba63f9c7e158d3b99d41dd78b9859e479570de | [
"Python-2.0"
]
| permissive | hclareth7/freezer_libraries | 8a6173140971679e5b5dc8428e1e56734f02d906 | e0bd890eba5e7438976fb3b4d66c41c128bab790 | refs/heads/master | 2022-11-30T02:19:46.718660 | 2019-05-29T20:29:20 | 2019-05-29T20:29:20 | 189,293,415 | 0 | 1 | NOASSERTION | 2022-11-17T05:38:07 | 2019-05-29T20:28:12 | Python | UTF-8 | Python | false | false | 61,936 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE(geguileo): For v3 we cannot mock any of the following methods
# - utils.find_volume
# - shell_utils.find_backup
# - shell_utils.find_volume_snapshot
# - shell_utils.find_group
# - shell_utils.find_group_snapshot
# because we are caching them in cinderclient.v3.shell:RESET_STATE_RESOURCES
# which means that our tests could fail depending on the mocking and loading
# order.
#
# Alternatives are:
# - Mock utils.find_resource when we have only 1 call to that method
# - Use an auxiliary method that will call original method for irrelevant
# calls. Example from test_revert_to_snapshot:
# original = client_utils.find_resource
#
# def find_resource(manager, name_or_id, **kwargs):
# if isinstance(manager, volume_snapshots.SnapshotManager):
# return volume_snapshots.Snapshot(self,
# {'id': '5678',
# 'volume_id': '1234'})
# return original(manager, name_or_id, **kwargs)
import ddt
import fixtures
import mock
from requests_mock.contrib import fixture as requests_mock_fixture
import six
from six.moves.urllib import parse
import cinderclient
from cinderclient import base
from cinderclient import client
from cinderclient import exceptions
from cinderclient import shell
from cinderclient import utils as cinderclient_utils
from cinderclient.v3 import volume_snapshots
from cinderclient.v3 import volumes
from cinderclient.tests.unit.fixture_data import keystone_client
from cinderclient.tests.unit import utils
from cinderclient.tests.unit.v3 import fakes
@ddt.ddt
@mock.patch.object(client, 'Client', fakes.FakeClient)
class ShellTest(utils.TestCase):
FAKE_ENV = {
'CINDER_USERNAME': 'username',
'CINDER_PASSWORD': 'password',
'CINDER_PROJECT_ID': 'project_id',
'OS_VOLUME_API_VERSION': '3',
'CINDER_URL': keystone_client.BASE_URL,
}
# Patch os.environ to avoid required auth info.
def setUp(self):
"""Run before each test."""
super(ShellTest, self).setUp()
for var in self.FAKE_ENV:
self.useFixture(fixtures.EnvironmentVariable(var,
self.FAKE_ENV[var]))
self.shell = shell.OpenStackCinderShell()
self.requests = self.useFixture(requests_mock_fixture.Fixture())
self.requests.register_uri(
'GET', keystone_client.BASE_URL,
text=keystone_client.keystone_request_callback)
self.cs = mock.Mock()
def run_command(self, cmd):
self.shell.main(cmd.split())
def assert_called(self, method, url, body=None,
partial_body=None, **kwargs):
return self.shell.cs.assert_called(method, url, body,
partial_body, **kwargs)
def assert_call_contained(self, url_part):
self.shell.cs.assert_in_call(url_part)
@ddt.data({'resource': None, 'query_url': None},
{'resource': 'volume', 'query_url': '?resource=volume'},
{'resource': 'group', 'query_url': '?resource=group'})
@ddt.unpack
def test_list_filters(self, resource, query_url):
url = '/resource_filters'
if resource is not None:
url += query_url
self.run_command('--os-volume-api-version 3.33 '
'list-filters --resource=%s' % resource)
else:
self.run_command('--os-volume-api-version 3.33 list-filters')
self.assert_called('GET', url)
@ddt.data(
# testcases for list volume
{'command':
'list --name=123 --filters name=456',
'expected':
'/volumes/detail?name=456'},
{'command':
'list --filters name=123',
'expected':
'/volumes/detail?name=123'},
{'command':
'list --filters metadata={key1:value1}',
'expected':
'/volumes/detail?metadata=%7B%27key1%27%3A+%27value1%27%7D'},
{'command':
'list --filters name~=456',
'expected':
'/volumes/detail?name~=456'},
{'command':
u'list --filters name~=Σ',
'expected':
'/volumes/detail?name~=%CE%A3'},
# testcases for list group
{'command':
'group-list --filters name=456',
'expected':
'/groups/detail?name=456'},
{'command':
'group-list --filters status=available',
'expected':
'/groups/detail?status=available'},
{'command':
'group-list --filters name~=456',
'expected':
'/groups/detail?name~=456'},
# testcases for list group-snapshot
{'command':
'group-snapshot-list --status=error --filters status=available',
'expected':
'/group_snapshots/detail?status=available'},
{'command':
'group-snapshot-list --filters availability_zone=123',
'expected':
'/group_snapshots/detail?availability_zone=123'},
{'command':
'group-snapshot-list --filters status~=available',
'expected':
'/group_snapshots/detail?status~=available'},
# testcases for list message
{'command':
'message-list --event_id=123 --filters event_id=456',
'expected':
'/messages?event_id=456'},
{'command':
'message-list --filters request_id=123',
'expected':
'/messages?request_id=123'},
{'command':
'message-list --filters request_id~=123',
'expected':
'/messages?request_id~=123'},
# testcases for list attachment
{'command':
'attachment-list --volume-id=123 --filters volume_id=456',
'expected':
'/attachments?volume_id=456'},
{'command':
'attachment-list --filters mountpoint=123',
'expected':
'/attachments?mountpoint=123'},
{'command':
'attachment-list --filters volume_id~=456',
'expected':
'/attachments?volume_id~=456'},
# testcases for list backup
{'command':
'backup-list --volume-id=123 --filters volume_id=456',
'expected':
'/backups/detail?volume_id=456'},
{'command':
'backup-list --filters name=123',
'expected':
'/backups/detail?name=123'},
{'command':
'backup-list --filters volume_id~=456',
'expected':
'/backups/detail?volume_id~=456'},
# testcases for list snapshot
{'command':
'snapshot-list --volume-id=123 --filters volume_id=456',
'expected':
'/snapshots/detail?volume_id=456'},
{'command':
'snapshot-list --filters name=123',
'expected':
'/snapshots/detail?name=123'},
{'command':
'snapshot-list --filters volume_id~=456',
'expected':
'/snapshots/detail?volume_id~=456'},
# testcases for get pools
{'command':
'get-pools --filters name=456 --detail',
'expected':
'/scheduler-stats/get_pools?detail=True&name=456'},
{'command':
'get-pools --filters name=456',
'expected':
'/scheduler-stats/get_pools?name=456'}
)
@ddt.unpack
def test_list_with_filters_mixed(self, command, expected):
self.run_command('--os-volume-api-version 3.33 %s' % command)
self.assert_called('GET', expected)
def test_list(self):
self.run_command('list')
# NOTE(jdg): we default to detail currently
self.assert_called('GET', '/volumes/detail')
def test_list_with_with_count(self):
self.run_command('--os-volume-api-version 3.45 list --with-count')
self.assert_called('GET', '/volumes/detail?with_count=True')
def test_summary(self):
self.run_command('--os-volume-api-version 3.12 summary')
self.assert_called('GET', '/volumes/summary')
def test_list_with_group_id_before_3_10(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'list --group_id fake_id')
def test_type_list_with_filters_invalid(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'--os-volume-api-version 3.51 type-list '
'--filters key=value')
def test_type_list_with_filters(self):
self.run_command('--os-volume-api-version 3.52 type-list '
'--filters extra_specs={key:value}')
self.assert_called('GET', mock.ANY)
self.assert_call_contained(
parse.urlencode(
{'extra_specs':
{six.text_type('key'): six.text_type('value')}}))
self.assert_call_contained(parse.urlencode({'is_public': None}))
def test_type_list_no_filters(self):
self.run_command('--os-volume-api-version 3.52 type-list')
self.assert_called('GET', '/types?is_public=None')
@ddt.data("3.10", "3.11")
def test_list_with_group_id_after_3_10(self, version):
command = ('--os-volume-api-version %s list --group_id fake_id' %
version)
self.run_command(command)
self.assert_called('GET', '/volumes/detail?group_id=fake_id')
@mock.patch("cinderclient.utils.print_list")
def test_list_duplicate_fields(self, mock_print):
self.run_command('list --field Status,id,Size,status')
self.assert_called('GET', '/volumes/detail')
key_list = ['ID', 'Status', 'Size']
mock_print.assert_called_once_with(mock.ANY, key_list,
exclude_unavailable=True, sortby_index=0)
def test_list_availability_zone(self):
self.run_command('availability-zone-list')
self.assert_called('GET', '/os-availability-zone')
@ddt.data({'cmd': '1234 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234'}},
{'cmd': '1234 1233 '
'--connect True '
'--ip 10.23.12.23 --host server01 '
'--platform x86_xx '
'--ostype 123 '
'--multipath true '
'--mountpoint /123 '
'--initiator aabbccdd',
'body': {'instance_uuid': '1233',
'connector': {'ip': '10.23.12.23',
'host': 'server01',
'os_type': '123',
'multipath': 'true',
'mountpoint': '/123',
'initiator': 'aabbccdd',
'platform': 'x86_xx'},
'volume_uuid': '1234'}},
{'cmd': 'abc 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234'}})
@mock.patch('cinderclient.utils.find_resource')
@ddt.unpack
def test_attachment_create(self, mock_find_volume, cmd, body):
mock_find_volume.return_value = volumes.Volume(self,
{'id': '1234'},
loaded=True)
command = '--os-volume-api-version 3.27 attachment-create '
command += cmd
self.run_command(command)
expected = {'attachment': body}
self.assertTrue(mock_find_volume.called)
self.assert_called('POST', '/attachments', body=expected)
@ddt.data({'cmd': '1234 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234',
'mode': 'ro'}},
{'cmd': '1234 1233 '
'--connect True '
'--ip 10.23.12.23 --host server01 '
'--platform x86_xx '
'--ostype 123 '
'--multipath true '
'--mountpoint /123 '
'--initiator aabbccdd',
'body': {'instance_uuid': '1233',
'connector': {'ip': '10.23.12.23',
'host': 'server01',
'os_type': '123',
'multipath': 'true',
'mountpoint': '/123',
'initiator': 'aabbccdd',
'platform': 'x86_xx'},
'volume_uuid': '1234',
'mode': 'ro'}},
{'cmd': 'abc 1233',
'body': {'instance_uuid': '1233',
'connector': {},
'volume_uuid': '1234',
'mode': 'ro'}})
@mock.patch('cinderclient.utils.find_resource')
@ddt.unpack
def test_attachment_create_with_mode(self, mock_find_volume, cmd, body):
mock_find_volume.return_value = volumes.Volume(self,
{'id': '1234'},
loaded=True)
command = ('--os-volume-api-version 3.54 '
'attachment-create '
'--mode ro ')
command += cmd
self.run_command(command)
expected = {'attachment': body}
self.assertTrue(mock_find_volume.called)
self.assert_called('POST', '/attachments', body=expected)
@mock.patch.object(volumes.VolumeManager, 'findall')
def test_attachment_create_duplicate_name_vol(self, mock_findall):
found = [volumes.Volume(self, {'id': '7654', 'name': 'abc'},
loaded=True),
volumes.Volume(self, {'id': '9876', 'name': 'abc'},
loaded=True)]
mock_findall.return_value = found
self.assertRaises(exceptions.CommandError,
self.run_command,
'--os-volume-api-version 3.27 '
'attachment-create abc 789')
@ddt.data({'cmd': '',
'expected': ''},
{'cmd': '--volume-id 1234',
'expected': '?volume_id=1234'},
{'cmd': '--status error',
'expected': '?status=error'},
{'cmd': '--all-tenants 1',
'expected': '?all_tenants=1'},
{'cmd': '--all-tenants 1 --volume-id 12345',
'expected': '?all_tenants=1&volume_id=12345'},
{'cmd': '--all-tenants 1 --tenant 12345',
'expected': '?all_tenants=1&project_id=12345'},
{'cmd': '--tenant 12345',
'expected': '?all_tenants=1&project_id=12345'}
)
@ddt.unpack
def test_attachment_list(self, cmd, expected):
command = '--os-volume-api-version 3.27 attachment-list '
command += cmd
self.run_command(command)
self.assert_called('GET', '/attachments%s' % expected)
def test_revert_to_snapshot(self):
original = cinderclient_utils.find_resource
def find_resource(manager, name_or_id, **kwargs):
if isinstance(manager, volume_snapshots.SnapshotManager):
return volume_snapshots.Snapshot(self,
{'id': '5678',
'volume_id': '1234'})
return original(manager, name_or_id, **kwargs)
with mock.patch('cinderclient.utils.find_resource',
side_effect=find_resource):
self.run_command(
'--os-volume-api-version 3.40 revert-to-snapshot 5678')
self.assert_called('POST', '/volumes/1234/action',
body={'revert': {'snapshot_id': '5678'}})
def test_attachment_show(self):
self.run_command('--os-volume-api-version 3.27 attachment-show 1234')
self.assert_called('GET', '/attachments/1234')
@ddt.data({'cmd': '1234 '
'--ip 10.23.12.23 --host server01 '
'--platform x86_xx '
'--ostype 123 '
'--multipath true '
'--mountpoint /123 '
'--initiator aabbccdd',
'body': {'connector': {'ip': '10.23.12.23',
'host': 'server01',
'os_type': '123',
'multipath': 'true',
'mountpoint': '/123',
'initiator': 'aabbccdd',
'platform': 'x86_xx'}}})
@ddt.unpack
def test_attachment_update(self, cmd, body):
command = '--os-volume-api-version 3.27 attachment-update '
command += cmd
self.run_command(command)
self.assert_called('PUT', '/attachments/1234', body={'attachment':
body})
def test_attachment_complete(self):
command = '--os-volume-api-version 3.44 attachment-complete 1234'
self.run_command(command)
self.assert_called('POST', '/attachments/1234/action', body=None)
def test_attachment_delete(self):
self.run_command('--os-volume-api-version 3.27 '
'attachment-delete 1234')
self.assert_called('DELETE', '/attachments/1234')
def test_upload_to_image(self):
expected = {'os-volume_upload_image': {'force': False,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'test-image'}}
self.run_command('upload-to-image 1234 test-image')
self.assert_called_anytime('GET', '/volumes/1234')
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
def test_upload_to_image_private_not_protected(self):
expected = {'os-volume_upload_image': {'force': False,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'test-image',
'protected': False,
'visibility': 'private'}}
self.run_command('--os-volume-api-version 3.1 '
'upload-to-image 1234 test-image')
self.assert_called_anytime('GET', '/volumes/1234')
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
def test_upload_to_image_public_protected(self):
expected = {'os-volume_upload_image': {'force': False,
'container_format': 'bare',
'disk_format': 'raw',
'image_name': 'test-image',
'protected': 'True',
'visibility': 'public'}}
self.run_command('--os-volume-api-version 3.1 '
'upload-to-image --visibility=public '
'--protected=True 1234 test-image')
self.assert_called_anytime('GET', '/volumes/1234')
self.assert_called_anytime('POST', '/volumes/1234/action',
body=expected)
def test_backup_update(self):
self.run_command('--os-volume-api-version 3.9 '
'backup-update --name new_name 1234')
expected = {'backup': {'name': 'new_name'}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_list_with_with_count(self):
self.run_command(
'--os-volume-api-version 3.45 backup-list --with-count')
self.assert_called('GET', '/backups/detail?with_count=True')
def test_backup_update_with_description(self):
self.run_command('--os-volume-api-version 3.9 '
'backup-update 1234 --description=new-description')
expected = {'backup': {'description': 'new-description'}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_update_with_metadata(self):
cmd = '--os-volume-api-version 3.43 '
cmd += 'backup-update '
cmd += '--metadata foo=bar '
cmd += '1234'
self.run_command(cmd)
expected = {'backup': {'metadata': {'foo': 'bar'}}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_update_all(self):
# rename and change description
self.run_command('--os-volume-api-version 3.43 '
'backup-update --name new-name '
'--description=new-description '
'--metadata foo=bar 1234')
expected = {'backup': {
'name': 'new-name',
'description': 'new-description',
'metadata': {'foo': 'bar'}
}}
self.assert_called('PUT', '/backups/1234', body=expected)
def test_backup_update_without_arguments(self):
# Call rename with no arguments
self.assertRaises(SystemExit, self.run_command,
'--os-volume-api-version 3.9 backup-update')
def test_backup_update_bad_request(self):
self.assertRaises(exceptions.ClientException,
self.run_command,
'--os-volume-api-version 3.9 backup-update 1234')
def test_backup_update_wrong_version(self):
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.8 '
'backup-update --name new-name 1234')
def test_group_type_list(self):
self.run_command('--os-volume-api-version 3.11 group-type-list')
self.assert_called_anytime('GET', '/group_types?is_public=None')
def test_group_type_show(self):
self.run_command('--os-volume-api-version 3.11 '
'group-type-show 1')
self.assert_called('GET', '/group_types/1')
def test_group_type_create(self):
self.run_command('--os-volume-api-version 3.11 '
'group-type-create test-type-1')
self.assert_called('POST', '/group_types')
def test_group_type_create_public(self):
expected = {'group_type': {'name': 'test-type-1',
'description': 'test_type-1-desc',
'is_public': True}}
self.run_command('--os-volume-api-version 3.11 '
'group-type-create test-type-1 '
'--description=test_type-1-desc '
'--is-public=True')
self.assert_called('POST', '/group_types', body=expected)
def test_group_type_create_private(self):
expected = {'group_type': {'name': 'test-type-3',
'description': 'test_type-3-desc',
'is_public': False}}
self.run_command('--os-volume-api-version 3.11 '
'group-type-create test-type-3 '
'--description=test_type-3-desc '
'--is-public=False')
self.assert_called('POST', '/group_types', body=expected)
def test_group_specs_list(self):
self.run_command('--os-volume-api-version 3.11 group-specs-list')
self.assert_called('GET', '/group_types?is_public=None')
def test_create_volume_with_group(self):
self.run_command('--os-volume-api-version 3.13 create --group-id 5678 '
'--volume-type 4321 1')
self.assert_called('GET', '/volumes/1234')
expected = {'volume': {'imageRef': None,
'size': 1,
'availability_zone': None,
'source_volid': None,
'consistencygroup_id': None,
'group_id': '5678',
'name': None,
'snapshot_id': None,
'metadata': {},
'volume_type': '4321',
'description': None,
'multiattach': False,
'backup_id': None}}
self.assert_called_anytime('POST', '/volumes', expected)
@ddt.data({'cmd': '--os-volume-api-version 3.47 create --backup-id 1234',
'update': {'backup_id': '1234'}},
{'cmd': '--os-volume-api-version 3.47 create 2',
'update': {'size': 2}}
)
@ddt.unpack
def test_create_volume_with_backup(self, cmd, update):
self.run_command(cmd)
self.assert_called('GET', '/volumes/1234')
expected = {'volume': {'imageRef': None,
'size': None,
'availability_zone': None,
'source_volid': None,
'consistencygroup_id': None,
'name': None,
'snapshot_id': None,
'metadata': {},
'volume_type': None,
'description': None,
'multiattach': False,
'backup_id': None}}
expected['volume'].update(update)
self.assert_called_anytime('POST', '/volumes', body=expected)
def test_group_list(self):
self.run_command('--os-volume-api-version 3.13 group-list')
self.assert_called_anytime('GET', '/groups/detail')
def test_group_list__with_all_tenant(self):
self.run_command(
'--os-volume-api-version 3.13 group-list --all-tenants')
self.assert_called_anytime('GET', '/groups/detail?all_tenants=1')
def test_group_show(self):
self.run_command('--os-volume-api-version 3.13 '
'group-show 1234')
self.assert_called('GET', '/groups/1234')
def test_group_show_with_list_volume(self):
self.run_command('--os-volume-api-version 3.25 '
'group-show 1234 --list-volume')
self.assert_called('GET', '/groups/1234?list_volume=True')
@ddt.data(True, False)
def test_group_delete(self, delete_vol):
cmd = '--os-volume-api-version 3.13 group-delete 1234'
if delete_vol:
cmd += ' --delete-volumes'
self.run_command(cmd)
expected = {'delete': {'delete-volumes': delete_vol}}
self.assert_called('POST', '/groups/1234/action', expected)
def test_group_create(self):
expected = {'group': {'name': 'test-1',
'description': 'test-1-desc',
'group_type': 'my_group_type',
'volume_types': ['type1', 'type2'],
'availability_zone': 'zone1'}}
self.run_command('--os-volume-api-version 3.13 '
'group-create --name test-1 '
'--description test-1-desc '
'--availability-zone zone1 '
'my_group_type type1,type2')
self.assert_called_anytime('POST', '/groups', body=expected)
def test_group_update(self):
self.run_command('--os-volume-api-version 3.13 group-update '
'--name group2 --description desc2 '
'--add-volumes uuid1,uuid2 '
'--remove-volumes uuid3,uuid4 '
'1234')
expected = {'group': {'name': 'group2',
'description': 'desc2',
'add_volumes': 'uuid1,uuid2',
'remove_volumes': 'uuid3,uuid4'}}
self.assert_called('PUT', '/groups/1234',
body=expected)
def test_group_update_invalid_args(self):
self.assertRaises(exceptions.ClientException,
self.run_command,
'--os-volume-api-version 3.13 group-update 1234')
def test_group_snapshot_list(self):
self.run_command('--os-volume-api-version 3.14 group-snapshot-list')
self.assert_called_anytime('GET',
'/group_snapshots/detail')
def test_group_snapshot_show(self):
self.run_command('--os-volume-api-version 3.14 '
'group-snapshot-show 1234')
self.assert_called('GET', '/group_snapshots/1234')
def test_group_snapshot_delete(self):
cmd = '--os-volume-api-version 3.14 group-snapshot-delete 1234'
self.run_command(cmd)
self.assert_called('DELETE', '/group_snapshots/1234')
def test_group_snapshot_create(self):
expected = {'group_snapshot': {'name': 'test-1',
'description': 'test-1-desc',
'group_id': '1234'}}
self.run_command('--os-volume-api-version 3.14 '
'group-snapshot-create --name test-1 '
'--description test-1-desc 1234')
self.assert_called_anytime('POST', '/group_snapshots', body=expected)
@ddt.data(
{'grp_snap_id': '1234', 'src_grp_id': None,
'src': '--group-snapshot 1234'},
{'grp_snap_id': None, 'src_grp_id': '1234',
'src': '--source-group 1234'},
)
@ddt.unpack
def test_group_create_from_src(self, grp_snap_id, src_grp_id, src):
expected = {'create-from-src': {'name': 'test-1',
'description': 'test-1-desc'}}
if grp_snap_id:
expected['create-from-src']['group_snapshot_id'] = grp_snap_id
elif src_grp_id:
expected['create-from-src']['source_group_id'] = src_grp_id
cmd = ('--os-volume-api-version 3.14 '
'group-create-from-src --name test-1 '
'--description test-1-desc ')
cmd += src
self.run_command(cmd)
self.assert_called_anytime('POST', '/groups/action', body=expected)
def test_volume_manageable_list(self):
self.run_command('--os-volume-api-version 3.8 '
'manageable-list fakehost')
self.assert_called('GET', '/manageable_volumes/detail?host=fakehost')
def test_volume_manageable_list_details(self):
self.run_command('--os-volume-api-version 3.8 '
'manageable-list fakehost --detailed True')
self.assert_called('GET', '/manageable_volumes/detail?host=fakehost')
def test_volume_manageable_list_no_details(self):
self.run_command('--os-volume-api-version 3.8 '
'manageable-list fakehost --detailed False')
self.assert_called('GET', '/manageable_volumes?host=fakehost')
def test_volume_manageable_list_cluster(self):
self.run_command('--os-volume-api-version 3.17 '
'manageable-list --cluster dest')
self.assert_called('GET', '/manageable_volumes/detail?cluster=dest')
def test_snapshot_manageable_list(self):
self.run_command('--os-volume-api-version 3.8 '
'snapshot-manageable-list fakehost')
self.assert_called('GET', '/manageable_snapshots/detail?host=fakehost')
def test_snapshot_manageable_list_details(self):
self.run_command('--os-volume-api-version 3.8 '
'snapshot-manageable-list fakehost --detailed True')
self.assert_called('GET', '/manageable_snapshots/detail?host=fakehost')
def test_snapshot_manageable_list_no_details(self):
self.run_command('--os-volume-api-version 3.8 '
'snapshot-manageable-list fakehost --detailed False')
self.assert_called('GET', '/manageable_snapshots?host=fakehost')
def test_snapshot_manageable_list_cluster(self):
self.run_command('--os-volume-api-version 3.17 '
'snapshot-manageable-list --cluster dest')
self.assert_called('GET', '/manageable_snapshots/detail?cluster=dest')
@ddt.data('', 'snapshot-')
def test_manageable_list_cluster_before_3_17(self, prefix):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'--os-volume-api-version 3.16 '
'%smanageable-list --cluster dest' % prefix)
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
@ddt.data('', 'snapshot-')
def test_manageable_list_mutual_exclusion(self, prefix, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.17 '
'%smanageable-list fakehost --cluster dest' % prefix)
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
@ddt.data('', 'snapshot-')
def test_manageable_list_missing_required(self, prefix, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.17 '
'%smanageable-list' % prefix)
def test_list_messages(self):
self.run_command('--os-volume-api-version 3.3 message-list')
self.assert_called('GET', '/messages')
@ddt.data('volume', 'backup', 'snapshot', None)
def test_reset_state_entity_not_found(self, entity_type):
cmd = 'reset-state 999999'
if entity_type is not None:
cmd += ' --type %s' % entity_type
self.assertRaises(exceptions.CommandError, self.run_command, cmd)
@ddt.data({'entity_types': [{'name': 'volume', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'backup', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'snapshot', 'version': '3.0',
'command': 'os-reset_status'},
{'name': None, 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'group', 'version': '3.20',
'command': 'reset_status'},
{'name': 'group-snapshot', 'version': '3.19',
'command': 'reset_status'}],
'r_id': ['1234'],
'states': ['available', 'error', None]},
{'entity_types': [{'name': 'volume', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'backup', 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'snapshot', 'version': '3.0',
'command': 'os-reset_status'},
{'name': None, 'version': '3.0',
'command': 'os-reset_status'},
{'name': 'group', 'version': '3.20',
'command': 'reset_status'},
{'name': 'group-snapshot', 'version': '3.19',
'command': 'reset_status'}],
'r_id': ['1234', '5678'],
'states': ['available', 'error', None]})
@ddt.unpack
def test_reset_state_normal(self, entity_types, r_id, states):
for state in states:
for t in entity_types:
if state is None:
expected = {t['command']: {}}
cmd = ('--os-volume-api-version '
'%s reset-state %s') % (t['version'],
' '.join(r_id))
else:
expected = {t['command']: {'status': state}}
cmd = ('--os-volume-api-version '
'%s reset-state '
'--state %s %s') % (t['version'],
state, ' '.join(r_id))
if t['name'] is not None:
cmd += ' --type %s' % t['name']
self.run_command(cmd)
name = t['name'] if t['name'] else 'volume'
for re in r_id:
self.assert_called_anytime('POST', '/%ss/%s/action'
% (name.replace('-', '_'), re),
body=expected)
@ddt.data({'command': '--attach-status detached',
'expected': {'attach_status': 'detached'}},
{'command': '--state in-use --attach-status attached',
'expected': {'status': 'in-use',
'attach_status': 'attached'}},
{'command': '--reset-migration-status',
'expected': {'migration_status': 'none'}})
@ddt.unpack
def test_reset_state_volume_additional_status(self, command, expected):
self.run_command('reset-state %s 1234' % command)
expected = {'os-reset_status': expected}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_snapshot_list_with_with_count(self):
self.run_command(
'--os-volume-api-version 3.45 snapshot-list --with-count')
self.assert_called('GET', '/snapshots/detail?with_count=True')
def test_snapshot_list_with_metadata(self):
self.run_command('--os-volume-api-version 3.22 '
'snapshot-list --metadata key1=val1')
expected = ("/snapshots/detail?metadata=%s"
% parse.quote_plus("{'key1': 'val1'}"))
self.assert_called('GET', expected)
@ddt.data(('resource_type',), ('event_id',), ('resource_uuid',),
('level', 'message_level'), ('request_id',))
def test_list_messages_with_filters(self, filter):
self.run_command('--os-volume-api-version 3.5 message-list --%s=TEST'
% filter[0])
self.assert_called('GET', '/messages?%s=TEST' % filter[-1])
def test_list_messages_with_sort(self):
self.run_command('--os-volume-api-version 3.5 '
'message-list --sort=id:asc')
self.assert_called('GET', '/messages?sort=id%3Aasc')
def test_list_messages_with_limit(self):
self.run_command('--os-volume-api-version 3.5 message-list --limit=1')
self.assert_called('GET', '/messages?limit=1')
def test_list_messages_with_marker(self):
self.run_command('--os-volume-api-version 3.5 message-list --marker=1')
self.assert_called('GET', '/messages?marker=1')
def test_list_with_image_metadata_before_3_4(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'list --image_metadata image_name=1234')
def test_list_filter_image_metadata(self):
self.run_command('--os-volume-api-version 3.4 '
'list --image_metadata image_name=1234')
url = ('/volumes/detail?%s' %
parse.urlencode([('glance_metadata', {"image_name": "1234"})]))
self.assert_called('GET', url)
def test_show_message(self):
self.run_command('--os-volume-api-version 3.5 message-show 1234')
self.assert_called('GET', '/messages/1234')
def test_delete_message(self):
self.run_command('--os-volume-api-version 3.5 message-delete 1234')
self.assert_called('DELETE', '/messages/1234')
def test_delete_messages(self):
self.run_command(
'--os-volume-api-version 3.3 message-delete 1234 12345')
self.assert_called_anytime('DELETE', '/messages/1234')
self.assert_called_anytime('DELETE', '/messages/12345')
@mock.patch('cinderclient.utils.find_resource')
def test_delete_metadata(self, mock_find_volume):
mock_find_volume.return_value = volumes.Volume(self,
{'id': '1234',
'metadata':
{'k1': 'v1',
'k2': 'v2',
'k3': 'v3'}},
loaded = True)
expected = {'metadata': {'k2': 'v2'}}
self.run_command('--os-volume-api-version 3.15 '
'metadata 1234 unset k1 k3')
self.assert_called('PUT', '/volumes/1234/metadata', body=expected)
@ddt.data(("3.0", None), ("3.6", None),
("3.7", True), ("3.7", False), ("3.7", ""))
@ddt.unpack
def test_service_list_withreplication(self, version, replication):
command = ('--os-volume-api-version %s service-list' %
version)
if replication is not None:
command += ' --withreplication %s' % replication
self.run_command(command)
self.assert_called('GET', '/os-services')
def test_group_enable_replication(self):
cmd = '--os-volume-api-version 3.38 group-enable-replication 1234'
self.run_command(cmd)
expected = {'enable_replication': {}}
self.assert_called('POST', '/groups/1234/action', body=expected)
def test_group_disable_replication(self):
cmd = '--os-volume-api-version 3.38 group-disable-replication 1234'
self.run_command(cmd)
expected = {'disable_replication': {}}
self.assert_called('POST', '/groups/1234/action', body=expected)
@ddt.data((False, None), (True, None),
(False, "backend1"), (True, "backend1"),
(False, "default"), (True, "default"))
@ddt.unpack
def test_group_failover_replication(self, attach_vol, backend):
attach = '--allow-attached-volume ' if attach_vol else ''
backend_id = ('--secondary-backend-id ' + backend) if backend else ''
cmd = ('--os-volume-api-version 3.38 group-failover-replication 1234 '
+ attach + backend_id)
self.run_command(cmd)
expected = {'failover_replication':
{'allow_attached_volume': attach_vol,
'secondary_backend_id': backend if backend else None}}
self.assert_called('POST', '/groups/1234/action', body=expected)
def test_group_list_replication_targets(self):
cmd = ('--os-volume-api-version 3.38 group-list-replication-targets'
' 1234')
self.run_command(cmd)
expected = {'list_replication_targets': {}}
self.assert_called('POST', '/groups/1234/action', body=expected)
@mock.patch('cinderclient.v3.services.ServiceManager.get_log_levels')
def test_service_get_log_before_3_32(self, get_levels_mock):
self.assertRaises(SystemExit,
self.run_command, '--os-volume-api-version 3.28 '
'service-get-log')
get_levels_mock.assert_not_called()
@mock.patch('cinderclient.v3.services.ServiceManager.get_log_levels')
@mock.patch('cinderclient.utils.print_list')
def test_service_get_log_no_params(self, print_mock, get_levels_mock):
self.run_command('--os-volume-api-version 3.32 service-get-log')
get_levels_mock.assert_called_once_with('', '', '')
print_mock.assert_called_once_with(get_levels_mock.return_value,
('Binary', 'Host', 'Prefix',
'Level'))
@ddt.data('*', 'cinder-api', 'cinder-volume', 'cinder-scheduler',
'cinder-backup')
@mock.patch('cinderclient.v3.services.ServiceManager.get_log_levels')
@mock.patch('cinderclient.utils.print_list')
def test_service_get_log(self, binary, print_mock, get_levels_mock):
server = 'host1'
prefix = 'sqlalchemy'
self.run_command('--os-volume-api-version 3.32 service-get-log '
'--binary %s --server %s --prefix %s' % (
binary, server, prefix))
get_levels_mock.assert_called_once_with(binary, server, prefix)
print_mock.assert_called_once_with(get_levels_mock.return_value,
('Binary', 'Host', 'Prefix',
'Level'))
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
def test_service_set_log_before_3_32(self, set_levels_mock):
self.assertRaises(SystemExit,
self.run_command, '--os-volume-api-version 3.28 '
'service-set-log debug')
set_levels_mock.assert_not_called()
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_service_set_log_missing_required(self, error_mock,
set_levels_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command, '--os-volume-api-version 3.32 '
'service-set-log')
set_levels_mock.assert_not_called()
# Different error message from argparse library in Python 2 and 3
if six.PY3:
msg = 'the following arguments are required: <log-level>'
else:
msg = 'too few arguments'
error_mock.assert_called_once_with(msg)
@ddt.data('debug', 'DEBUG', 'info', 'INFO', 'warning', 'WARNING', 'error',
'ERROR')
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
def test_service_set_log_min_params(self, level, set_levels_mock):
self.run_command('--os-volume-api-version 3.32 '
'service-set-log %s' % level)
set_levels_mock.assert_called_once_with(level, '', '', '')
@ddt.data('*', 'cinder-api', 'cinder-volume', 'cinder-scheduler',
'cinder-backup')
@mock.patch('cinderclient.v3.services.ServiceManager.set_log_levels')
def test_service_set_log_levels(self, binary, set_levels_mock):
level = 'debug'
server = 'host1'
prefix = 'sqlalchemy.'
self.run_command('--os-volume-api-version 3.32 '
'service-set-log %s --binary %s --server %s '
'--prefix %s' % (level, binary, server, prefix))
set_levels_mock.assert_called_once_with(level, binary, server, prefix)
@mock.patch('cinderclient.shell_utils._poll_for_status')
def test_create_with_poll(self, poll_method):
self.run_command('create --poll 1')
self.assert_called_anytime('GET', '/volumes/1234')
volume = self.shell.cs.volumes.get('1234')
info = dict()
info.update(volume._info)
info.pop('links', None)
self.assertEqual(1, poll_method.call_count)
timeout_period = 3600
poll_method.assert_has_calls([mock.call(self.shell.cs.volumes.get,
1234, info, 'creating', ['available'], timeout_period,
self.shell.cs.client.global_request_id,
self.shell.cs.messages)])
@mock.patch('cinderclient.shell_utils.time')
def test_poll_for_status(self, mock_time):
poll_period = 2
some_id = "some-id"
global_request_id = "req-someid"
action = "some"
updated_objects = (
base.Resource(None, info={"not_default_field": "creating"}),
base.Resource(None, info={"not_default_field": "available"}))
poll_fn = mock.MagicMock(side_effect=updated_objects)
cinderclient.shell_utils._poll_for_status(
poll_fn = poll_fn,
obj_id = some_id,
global_request_id = global_request_id,
messages = base.Resource(None, {}),
info = {},
action = action,
status_field = "not_default_field",
final_ok_states = ['available'],
timeout_period=3600)
self.assertEqual([mock.call(poll_period)] * 2,
mock_time.sleep.call_args_list)
self.assertEqual([mock.call(some_id)] * 2, poll_fn.call_args_list)
@mock.patch('cinderclient.v3.messages.MessageManager.list')
@mock.patch('cinderclient.shell_utils.time')
def test_poll_for_status_error(self, mock_time, mock_message_list):
poll_period = 2
some_id = "some_id"
global_request_id = "req-someid"
action = "some"
updated_objects = (
base.Resource(None, info={"not_default_field": "creating"}),
base.Resource(None, info={"not_default_field": "error"}))
poll_fn = mock.MagicMock(side_effect=updated_objects)
msg_object = base.Resource(cinderclient.v3.messages.MessageManager,
info = {"user_message": "ERROR!"})
mock_message_list.return_value = (msg_object,)
self.assertRaises(exceptions.ResourceInErrorState,
cinderclient.shell_utils._poll_for_status,
poll_fn=poll_fn,
obj_id=some_id,
global_request_id=global_request_id,
messages=cinderclient.v3.messages.MessageManager(api=3.34),
info=dict(),
action=action,
final_ok_states=['available'],
status_field="not_default_field",
timeout_period=3600)
self.assertEqual([mock.call(poll_period)] * 2,
mock_time.sleep.call_args_list)
self.assertEqual([mock.call(some_id)] * 2, poll_fn.call_args_list)
def test_backup(self):
self.run_command('--os-volume-api-version 3.42 backup-create '
'--name 1234 1234')
expected = {'backup': {'volume_id': 1234,
'container': None,
'name': '1234',
'description': None,
'incremental': False,
'force': False,
'snapshot_id': None,
}}
self.assert_called('POST', '/backups', body=expected)
def test_backup_with_metadata(self):
self.run_command('--os-volume-api-version 3.43 backup-create '
'--metadata foo=bar --name 1234 1234')
expected = {'backup': {'volume_id': 1234,
'container': None,
'name': '1234',
'description': None,
'incremental': False,
'force': False,
'snapshot_id': None,
'metadata': {'foo': 'bar'}, }}
self.assert_called('POST', '/backups', body=expected)
def test_backup_with_az(self):
self.run_command('--os-volume-api-version 3.51 backup-create '
'--availability-zone AZ2 --name 1234 1234')
expected = {'backup': {'volume_id': 1234,
'container': None,
'name': '1234',
'description': None,
'incremental': False,
'force': False,
'snapshot_id': None,
'availability_zone': 'AZ2'}}
self.assert_called('POST', '/backups', body=expected)
@mock.patch("cinderclient.utils.print_list")
def test_snapshot_list_with_userid(self, mock_print_list):
"""Ensure 3.41 provides User ID header."""
self.run_command('--os-volume-api-version 3.41 snapshot-list')
self.assert_called('GET', '/snapshots/detail')
columns = ['ID', 'Volume ID', 'Status', 'Name', 'Size', 'User ID']
mock_print_list.assert_called_once_with(mock.ANY, columns,
sortby_index=0)
@mock.patch('cinderclient.v3.volumes.Volume.migrate_volume')
def test_migrate_volume_before_3_16(self, v3_migrate_mock):
self.run_command('--os-volume-api-version 3.15 '
'migrate 1234 fakehost')
v3_migrate_mock.assert_called_once_with(
'fakehost', False, False, None)
@mock.patch('cinderclient.v3.volumes.Volume.migrate_volume')
def test_migrate_volume_3_16(self, v3_migrate_mock):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost')
self.assertEqual(4, len(v3_migrate_mock.call_args[0]))
def test_migrate_volume_with_cluster_before_3_16(self):
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'--os-volume-api-version 3.15 '
'migrate 1234 fakehost --cluster fakecluster')
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_migrate_volume_mutual_exclusion(self, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.16 '
'migrate 1234 fakehost --cluster fakecluster')
msg = 'argument --cluster: not allowed with argument <host>'
error_mock.assert_called_once_with(msg)
@mock.patch('cinderclient.shell.CinderClientArgumentParser.error')
def test_migrate_volume_missing_required(self, error_mock):
error_mock.side_effect = SystemExit
self.assertRaises(SystemExit,
self.run_command,
'--os-volume-api-version 3.16 '
'migrate 1234')
msg = 'one of the arguments <host> --cluster is required'
error_mock.assert_called_once_with(msg)
def test_migrate_volume_host(self):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost')
expected = {'os-migrate_volume': {'force_host_copy': False,
'lock_volume': False,
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_cluster(self):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 --cluster mycluster')
expected = {'os-migrate_volume': {'force_host_copy': False,
'lock_volume': False,
'cluster': 'mycluster'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_bool_force(self):
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost --force-host-copy '
'--lock-volume')
expected = {'os-migrate_volume': {'force_host_copy': True,
'lock_volume': True,
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
def test_migrate_volume_bool_force_false(self):
# Set both --force-host-copy and --lock-volume to False.
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost --force-host-copy=False '
'--lock-volume=False')
expected = {'os-migrate_volume': {'force_host_copy': 'False',
'lock_volume': 'False',
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action', body=expected)
# Do not set the values to --force-host-copy and --lock-volume.
self.run_command('--os-volume-api-version 3.16 '
'migrate 1234 fakehost')
expected = {'os-migrate_volume': {'force_host_copy': False,
'lock_volume': False,
'host': 'fakehost'}}
self.assert_called('POST', '/volumes/1234/action',
body=expected)
@ddt.data({'bootable': False, 'by_id': False, 'cluster': None},
{'bootable': True, 'by_id': False, 'cluster': None},
{'bootable': False, 'by_id': True, 'cluster': None},
{'bootable': True, 'by_id': True, 'cluster': None},
{'bootable': True, 'by_id': True, 'cluster': 'clustername'})
@ddt.unpack
def test_volume_manage(self, bootable, by_id, cluster):
cmd = ('--os-volume-api-version 3.16 '
'manage host1 some_fake_name --name foo --description bar '
'--volume-type baz --availability-zone az '
'--metadata k1=v1 k2=v2')
if by_id:
cmd += ' --id-type source-id'
if bootable:
cmd += ' --bootable'
if cluster:
cmd += ' --cluster ' + cluster
self.run_command(cmd)
ref = 'source-id' if by_id else 'source-name'
expected = {'volume': {'host': 'host1',
'ref': {ref: 'some_fake_name'},
'name': 'foo',
'description': 'bar',
'volume_type': 'baz',
'availability_zone': 'az',
'metadata': {'k1': 'v1', 'k2': 'v2'},
'bootable': bootable}}
if cluster:
expected['volume']['cluster'] = cluster
self.assert_called_anytime('POST', '/os-volume-manage', body=expected)
def test_volume_manage_before_3_16(self):
"""Cluster optional argument was not acceptable."""
self.assertRaises(exceptions.UnsupportedAttribute,
self.run_command,
'manage host1 some_fake_name '
'--cluster clustername'
'--name foo --description bar --bootable '
'--volume-type baz --availability-zone az '
'--metadata k1=v1 k2=v2')
def test_worker_cleanup_before_3_24(self):
self.assertRaises(SystemExit,
self.run_command,
'work-cleanup fakehost')
def test_worker_cleanup(self):
self.run_command('--os-volume-api-version 3.24 '
'work-cleanup --cluster clustername --host hostname '
'--binary binaryname --is-up false --disabled true '
'--resource-id uuid --resource-type Volume '
'--service-id 1')
expected = {'cluster_name': 'clustername',
'host': 'hostname',
'binary': 'binaryname',
'is_up': 'false',
'disabled': 'true',
'resource_id': 'uuid',
'resource_type': 'Volume',
'service_id': 1}
self.assert_called('POST', '/workers/cleanup', body=expected)
def test_create_transfer(self):
self.run_command('transfer-create 1234')
expected = {'transfer': {'volume_id': 1234,
'name': None,
}}
self.assert_called('POST', '/os-volume-transfer', body=expected)
def test_create_transfer_no_snaps(self):
self.run_command('--os-volume-api-version 3.55 transfer-create '
'--no-snapshots 1234')
expected = {'transfer': {'volume_id': 1234,
'name': None,
'no_snapshots': True
}}
self.assert_called('POST', '/volume-transfers', body=expected)
| [
"[email protected]"
]
| |
b6bc653644bc26cd60b6afcceabe021e4f545686 | 9c368c9fe78a2dd186daeed2d0714651c1c27d66 | /absorption/ml_project/analyse_spectra/more_plotting/plot_phase_space_ssfr_split.py | c2c1475a8e26d8ca760a0dee52ded9982b0a0c6f | []
| no_license | sarahappleby/cgm | 5ff2121919e36b10069692f71fb1dc03f3678462 | 656bf308771dd3ff2f8c2e77107cdc14507c7ce7 | refs/heads/master | 2023-01-24T03:10:01.610418 | 2023-01-20T11:04:31 | 2023-01-20T11:04:31 | 160,820,718 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,599 | py | import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import h5py
import pygad as pg
import sys
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=13)
def truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100, alpha=1.):
cmap_list = cmap(np.linspace(minval, maxval, n))
cmap_list[:, -1] = alpha
new_cmap = colors.LinearSegmentedColormap.from_list('trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval),
cmap_list)
return new_cmap
def quench_thresh(z): # in units of yr^-1
return -1.8 + 0.3*z -9.
def ssfr_type_check(ssfr_thresh, ssfr):
sf_mask = (ssfr >= ssfr_thresh)
gv_mask = (ssfr < ssfr_thresh) & (ssfr > ssfr_thresh -1)
q_mask = ssfr == -14.0
return sf_mask, gv_mask, q_mask
if __name__ == '__main__':
model = sys.argv[1]
wind = sys.argv[2]
snap = sys.argv[3]
cmap = plt.get_cmap('Greys')
cmap = truncate_colormap(cmap, 0.0, .6)
lines = ["H1215", "MgII2796", "CII1334", "SiIII1206", "CIV1548", "OVI1031"]
plot_lines = [r'${\rm HI}1215$', r'${\rm MgII}2796$', r'${\rm CII}1334$',
r'${\rm SiIII}1206$', r'${\rm CIV}1548$', r'${\rm OVI}1031$']
x = [0.75, 0.69, 0.73, 0.705, 0.71, 0.71]
cbar_ticks = [[12, 13, 14, 15, 16], [11, 12, 13, 14], [12, 13, 14], [11, 12, 13, 14], [12, 13, 14], [12, 13, 14],]
#chisq_lim = [4.5, 63.1, 20.0, 70.8, 15.8, 4.5] limits with old fitting procedure
chisq_lim = [4., 50., 15.8, 39.8, 8.9, 4.5]
width = 0.007
height = 0.1283
vertical_position = [0.76, 0.632, 0.504, 0.373, 0.247, 0.1175]
vertical_position = [0.7516, 0.623, 0.495, 0.366, 0.238, 0.11]
horizontal_position = 0.9
inner_outer = [[0.25, 0.5, 0.75], [1.0, 1.25]]
rho_labels = ['Inner CGM', 'Outer CGM']
ssfr_labels = ['Star forming', 'Green valley', 'Quenched']
N_min = [12., 11., 12., 11., 12., 12.]
snapfile = f'/disk04/sapple/data/samples/{model}_{wind}_{snap}.hdf5'
s = pg.Snapshot(snapfile)
redshift = s.redshift
rho_crit = float(s.cosmology.rho_crit(z=redshift).in_units_of('g/cm**3'))
cosmic_rho = rho_crit * float(s.cosmology.Omega_b)
quench = quench_thresh(redshift)
delta_fr200 = 0.25
min_fr200 = 0.25
nbins_fr200 = 5
fr200 = np.arange(min_fr200, (nbins_fr200+1)*delta_fr200, delta_fr200)
phase_space_file = f'/disk04/sapple/data/samples/{model}_{wind}_{snap}_phase_space.h5'
with h5py.File(phase_space_file, 'r') as hf:
rho_overdensity_temp_hist2d = hf['rho_delta_temp'][:]
rho_overdensity_bins = hf['rho_delta_bins'][:]
temp_bins = hf['temp_bins'][:]
plot_dir = '/disk04/sapple/cgm/absorption/ml_project/analyse_spectra/plots/'
sample_dir = f'/disk04/sapple/data/samples/'
with h5py.File(f'{sample_dir}{model}_{wind}_{snap}_galaxy_sample.h5', 'r') as sf:
gal_ids = sf['gal_ids'][:]
mass = sf['mass'][:]
ssfr = sf['ssfr'][:]
# ssfr split, all fr200
fig, ax = plt.subplots(len(lines), 3, figsize=(9.7, 13), sharey='row', sharex='col')
for l, line in enumerate(lines):
results_file = f'/disk04/sapple/data/normal/results/{model}_{wind}_{snap}_fit_lines_{line}.h5'
all_Z = []
all_T = []
all_rho = []
all_N = []
all_chisq = []
all_ids = []
for i in range(len(fr200)):
with h5py.File(results_file, 'r') as hf:
all_T.extend(hf[f'log_T_{fr200[i]}r200'][:])
all_rho.extend(hf[f'log_rho_{fr200[i]}r200'][:])
all_N.extend(hf[f'log_N_{fr200[i]}r200'][:])
all_chisq.extend(hf[f'chisq_{fr200[i]}r200'][:])
all_ids.extend(hf[f'ids_{fr200[i]}r200'][:])
all_T = np.array(all_T)
all_rho = np.array(all_rho)
all_N = np.array(all_N)
all_chisq = np.array(all_chisq)
all_ids = np.array(all_ids)
mask = (all_N > N_min[l]) * (all_chisq < chisq_lim[l])
all_T = all_T[mask]
all_delta_rho = all_rho[mask] - np.log10(cosmic_rho)
all_ids = all_ids[mask]
all_N = all_N[mask]
idx = np.array([np.where(gal_ids == j)[0] for j in all_ids]).flatten()
all_mass = mass[idx]
all_ssfr = ssfr[idx]
sf_mask, gv_mask, q_mask = ssfr_type_check(quench, all_ssfr)
for i in range(3):
ax[l][i].imshow(np.log10(rho_overdensity_temp_hist2d), extent=(rho_overdensity_bins[0], rho_overdensity_bins[-1], temp_bins[0], temp_bins[-1]),
cmap=cmap)
if line == 'H1215':
im = ax[l][0].scatter(all_delta_rho[sf_mask], all_T[sf_mask], c=all_N[sf_mask], cmap='magma', s=1, vmin=N_min[l], vmax=16)
im = ax[l][1].scatter(all_delta_rho[gv_mask], all_T[gv_mask], c=all_N[gv_mask], cmap='magma', s=1, vmin=N_min[l], vmax=16)
im = ax[l][2].scatter(all_delta_rho[q_mask], all_T[q_mask], c=all_N[q_mask], cmap='magma', s=1, vmin=N_min[l], vmax=16)
else:
im = ax[l][0].scatter(all_delta_rho[sf_mask], all_T[sf_mask], c=all_N[sf_mask], cmap='magma', s=1, vmin=N_min[l], vmax=15)
im = ax[l][1].scatter(all_delta_rho[gv_mask], all_T[gv_mask], c=all_N[gv_mask], cmap='magma', s=1, vmin=N_min[l], vmax=15)
im = ax[l][2].scatter(all_delta_rho[q_mask], all_T[q_mask], c=all_N[q_mask], cmap='magma', s=1, vmin=N_min[l], vmax=15)
for i in range(3):
ax[l][i].set_xlim(-1, 5)
ax[l][i].set_ylim(3, 7)
cax = plt.axes([horizontal_position, vertical_position[l], width, height])
cbar = fig.colorbar(im, cax=cax, label=r'${\rm log }(N / {\rm cm}^{-2})$')
cbar.set_ticks(cbar_ticks[l])
ax[l][0].annotate(plot_lines[l], xy=(x[l], 0.85), xycoords='axes fraction', fontsize=12, bbox=dict(boxstyle="round", fc="w", lw=0.75))
if l == 0:
for i in range(3):
ax[l][i].set_title(ssfr_labels[i])
if l == len(lines)-1:
for i in range(3):
ax[l][i].set_xlabel(r'${\rm log }\Delta$')
ax[l][0].set_yticks([3, 4, 5, 6, 7])
else:
ax[l][0].set_yticks([4, 5, 6, 7])
ax[l][0].set_ylabel(r'${\rm log } (T / {\rm K})$')
fig.subplots_adjust(wspace=0., hspace=0.)
plt.savefig(f'{plot_dir}{model}_{wind}_{snap}_deltaTN_ssfr_split_chisqion.png')
plt.close()
| [
"[email protected]"
]
| |
27d97928b393f307f4e44f2cc3e0f270f71ffb57 | f1dc351b5e493bb4480f21b3a7704b9a56bb7e47 | /lego/apps/restricted/tests/test_utils.py | 30c36d42e11672b18b4bc8b8a7b26ee54bce7396 | [
"MIT"
]
| permissive | andrinelo/lego | 8437d830f2b534687687d302e78ab5d34172a81b | 9b53c8fe538d9107b980a70e2a21fb487cc3b290 | refs/heads/master | 2020-03-10T01:56:41.997044 | 2018-04-11T16:09:41 | 2018-04-11T16:09:41 | 129,123,416 | 0 | 0 | MIT | 2018-04-11T16:34:22 | 2018-04-11T16:34:22 | null | UTF-8 | Python | false | false | 1,246 | py | from django.conf import settings
from lego.apps.restricted.parser import EmailParser, ParserMessageType
from lego.apps.restricted.utils import get_mail_token
from lego.utils.test_utils import BaseTestCase
from .utils import read_file
class EmailTokenTestCase(BaseTestCase):
def test_parse_valid_message(self):
"""Try to parse a valid message and make sure ve remove the token payload"""
raw_message = read_file(f'{settings.BASE_DIR}/apps/restricted/fixtures/emails/valid.txt')
parser = EmailParser(raw_message, '[email protected]', ParserMessageType.STRING)
message = parser.parse()
payloads = len(message.get_payload())
token = get_mail_token(message)
self.assertEquals('test_token', token)
self.assertEquals(len(message.get_payload()), payloads - 1)
def test_parse_message_no_token(self):
"""Parsing a message with no token has no effect, the function returns None"""
raw_message = read_file(f'{settings.BASE_DIR}/apps/restricted/fixtures/emails/no_token.txt')
parser = EmailParser(raw_message, '[email protected]', ParserMessageType.STRING)
message = parser.parse()
token = get_mail_token(message)
self.assertIsNone(token)
| [
"[email protected]"
]
| |
829f0239757ec9d128bbd27819f875217a449fd9 | f59b30d52433903ef8cd0a15db8ae0287c89d4e2 | /Python/libraries/recognizers-number/recognizers_number/number/__init__.py | fcdd5ac467fff37e2a9dafd4c8d5781da2912f02 | [
"MIT"
]
| permissive | tellarin/Recognizers-Text | 4965c928bcbe6bbd83ec15441ab880239b6370f9 | ff019a69e9cb64de862c94b08125baaaf832ed25 | refs/heads/master | 2022-11-22T10:56:13.482434 | 2019-12-16T17:30:41 | 2019-12-16T17:30:41 | 96,207,365 | 2 | 0 | MIT | 2019-12-17T05:50:58 | 2017-07-04T10:43:16 | C# | UTF-8 | Python | false | false | 249 | py | from .models import *
from .extractors import *
from .parsers import *
from .english import *
from .spanish import *
from .chinese import *
from .french import *
from .japanese import *
from .number_recognizer import *
from .parser_factory import *
| [
"[email protected]"
]
| |
8339057bbe395fec4de673d8d72f19072c39babc | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_103/ch5_2020_03_09_11_45_12_070647.py | 1cef7e181a6d5b7b06d5a934a5c54ce42f17567d | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | def libras_para_kg(a):
y=int(a)*0.453592
a= input('quantas libras?:')
print (a) | [
"[email protected]"
]
| |
c605acdc23389944fad9f0aeb4320499e4c54668 | 4c33dee03cf55b112b9a0b950690ccd45511e580 | /code/lvmeng_project-master/erp/utils.py | 56e185e2cf8979858aa412abf2d48d2e598c77bd | []
| no_license | yfjelley/lvmeng | 4a40cf5b8c89581162d4ab4bcf303b97d40c4ccd | bb109b39381177782838fb6301204dd7d9a2d206 | refs/heads/master | 2020-08-02T21:00:40.579192 | 2016-11-12T14:00:44 | 2016-11-12T14:00:44 | 73,555,732 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 9,962 | py | #coding:utf-8
from erp.models import Announcement,Real_purchase,Position,Online_chat
from oa.models import Internal_announcement
from django.db.models import Q
def check_product(request,id,business):
ids = []
if request.user.is_superuser:
return True
products = business.product_set.all()
for pro in products:
ids.append(int(pro.id))
if int(id) in ids:
return True
def check_agent(request,business,agent):
if request.user.is_superuser:
return True
ageId = []
agent_id = agent.id
agents = business.agent_set.filter(is_active=1)
for age in agents:
ageId.append(age.id)
if agent_id in ageId:
return True
def check_agent_modify(request,business,agent):
if request.user.is_superuser:
return False
ageId = []
agent_id = agent.id
agents = business.agent_set.filter(is_active=1)
for age in agents:
ageId.append(age.id)
if agent_id in ageId:
return True
def check_customer(request,customer,business,permissions):
if request.user.is_superuser:
return True
ids = []
agents = customer.agents.filter(is_active=1,business=business)
try:
user_agent = request.user.agent
if user_agent in agents :
return True
if 'auth.customer_show' in permissions and agents:
return True
except:
pass
try:
user_business = request.user.business
for age in agents:
ids.append(age.business)
if business in ids:
return True
except:
pass
def check_announcement_out(request,announce,business):# customer-facing (external) announcements
announcements = Announcement.objects.filter(announce_business=business,is_active=1)
if request.user.is_superuser or announce in announcements:
return True
def check_internal_announcement(request,announce,business):# employee-facing (internal) announcements
announcements = Internal_announcement.objects.filter(announcement_business=business,is_active=1)
if request.user.is_superuser or announce in announcements:
return True
def check_position(request,position,business):# employee-facing (internal)
positions = Position.objects.filter(business=business)
if position in positions:
return True
def check_daily_work(request,dailyWork,business):
if request.user.is_superuser:
return True
try:
employee = request.user.agent
user = request.user
daily_works = user.daily_work_set.filter(is_active=1)
if dailyWork in daily_works:
return True
except:
daily_works = business.daily_work_set.filter(is_active=1)
if dailyWork in daily_works:
return True
def check_cost_application(request,cost,business):
if request.user.is_superuser:
return True
try:
employee = request.user.agent
user = request.user
costs = user.cost_application_set.filter(is_active=1)
if cost in costs:
return True
except:
costs = business.cost_application_set.filter(is_active=1)
if cost in costs:
return True
def check_leave_application(request,leave,business):
if request.user.is_superuser:
return True
try:
employee = request.user.agent
user = request.user
leaves = user.leave_management_set.filter(is_active=1)
if leave in leaves:
return True
except:
leaves = business.leave_management_set.filter(is_active=1)
if leave in leaves:
return True
def check_travel_apply(request,travel,business):
if request.user.is_superuser:
return True
try:
employee = request.user.agent
user = request.user
travels = user.travel_apply_set.filter(is_active=1)
if travel in travels:
return True
except:
travels = business.travel_apply_set.filter(is_active=1)
if travel in travels:
return True
def delete_customer(request,obj):# restrictions on customer operations
try:
if obj.customer_type == '2' and request.user.agent in obj.agents.filter(is_active=1):
return True
except:
return False
return False
def real_purchase_precess(request,obj):# modify and delete real purchases
try:
if request.user.agent in obj.agents.filter(is_active=1):
return True
except:
return False
return False
def real_purchase_list(request,business):# purchase list
if request.user.is_superuser:
return Real_purchase.objects.filter(customer__is_active=1)
try:
agent = request.user.agent
return Real_purchase.objects.filter(business=business,real_agent=request.user.agent,customer__is_active=1)
except:
return Real_purchase.objects.filter(business=business,customer__is_active=1)
def check_real_purchase(request,obj,business):# filter which real purchases can be viewed
if request.user.is_superuser:
return True
try:
agent = request.user.agent
if obj in agent.real_purchase_set.filter(is_active=1):
return True
except:
if obj in business.real_purchase_set.filter(is_active=1):
return True
def check_employee(request,obj,business):# check the employee belongs to this business before deleting
if business and obj in business.agent_set.filter(is_active=1):
return True
def delete_announcement_inner(request,obj,business):# filter deletion of internal announcements
if business and obj in business.internal_announcement_set.filter(is_active=1):
return True
# product deletion filter
def product_delete(request,obj,business):
if not request.user.is_superuser and obj in business.product_set.filter(is_active=1):
return True
# daily to-do deletion filter
def delete_daily(request,obj):
if obj in request.user.daily_to_do_set.filter(is_active=1):
return True
class Apply_Process(object):# all application operations
    def daily_work_delete(self,request,obj):# delete a daily work report
try:
agent = request.user.agent
if obj in request.user.daily_work_set.all():
return True
except:
return False
    def check_daily_work_examine(self,request,obj,business):# view review status of a daily work report
if request.user.is_superuser:
return True
try:
agent = request.user.agent
if obj in request.user.daily_work_set.all():
return True
except:
if obj in business.daily_work_set.all():
return True
    def cost_application_delete(self,request,obj):# delete a cost application
try:
agent = request.user.agent
if obj in request.user.cost_application_set.all():
return True
except:
return False
    def check_cost_application_examine(self,request,obj,business):# view review status of a cost application
if request.user.is_superuser:
return True
try:
agent = request.user.agent
if obj in request.user.cost_application_set.all():
return True
except:
if obj in business.cost_application_set.all():
return True
    def leave_management_delete(self,request,obj):# delete a leave application
try:
agent = request.user.agent
if obj in request.user.leave_management_set.all():
return True
except:
return False
    def check_leave_management_examine(self,request,obj,business):# view review status of a leave application
if request.user.is_superuser:
return True
try:
agent = request.user.agent
if obj in request.user.leave_management_set.all():
return True
except:
if obj in business.leave_management_set.all():
return True
    def travel_apply_delete(self,request,obj):# delete a travel application
try:
agent = request.user.agent
if obj in request.user.travel_apply_set.all():
return True
except:
return False
    def travel_apply_management_examine(self,request,obj,business):# view review status of a travel application
if request.user.is_superuser:
return True
try:
agent = request.user.agent
if obj in request.user.travel_apply_set.all():
return True
except:
if obj in business.travel_apply_set.all():
return True
# fetch chat messages: per-employee unread counts plus the overall total
def get_user_agents(request,business):
if request.user.is_superuser:
return None,None
total_unread = 0
user_agents = business.agent_set.filter(~Q(user=request.user),is_active=1)
for user in user_agents:
        unread = Online_chat.objects.filter(business=business,recipient=request.user,sender=user.user,read=False).count()
if unread:
user.unread = unread
total_unread += unread
return user_agents,total_unread
import logging
from erp.diff_finder import NoChangeDiffFinder
logger = logging.getLogger('data_change')
# log every operation record
def load_process_message(request,old,new,action,id,business,diffs_str=True):
if diffs_str:
diff_find = NoChangeDiffFinder()
diff_find.diff(old, new)
log_msg = 'user_id=%s user_name=%s action=%s id=%s business=%s %s' % (
request.user.id,
request.user,
action,
id,
business,
diff_find.format_diffs_str
)
else:
log_msg = 'user_id=%s user_name=%s action=%s id=%s business=%s' % (
request.user.id,
request.user,
action,
id,
business,
)
logger.info(log_msg+"\n")
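# Minimal usage sketch (hypothetical view code, not part of this module; the
# dict-shaped old/new snapshots are an assumption about NoChangeDiffFinder's input):
#   old = model_to_dict(customer)
#   customer.name = 'new name'; customer.save()
#   load_process_message(request, old, model_to_dict(customer),
#                        action='customer_update', id=customer.id, business=business)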
| [
"[email protected]"
]
| |
fb0b9424ecde134faf0417e0c5e185342354c25b | 53e0b0616ece7867b1d37d755fd034e5b3d5ebe5 | /Easy/937. Reorder Data in Log Files/solution (1).py | 6f88cdf8f90632966920efaff690919c9bae4d7f | [
"MIT"
]
| permissive | czs108/LeetCode-Solutions | a7a29b90ad330d8d4bd73b5d0d243dc5b4121bc9 | fc4ef8aed90614e2e4ad39fa1c9eec5881b7b5f5 | refs/heads/master | 2023-03-03T09:55:51.045837 | 2023-02-20T23:39:15 | 2023-02-20T23:39:15 | 237,709,633 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 356 | py | # 937. Reorder Data in Log Files
class Solution:
# Sorting by Keys
def reorderLogFiles(self, logs: list[str]) -> list[str]:
        def get_key(log: str) -> tuple:
id, content = log.split(" ", maxsplit=1)
return (0, content, id) if content[0].isalpha() else (1, None, None)
return sorted(logs, key=get_key) | [
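# Quick usage sketch (inputs taken from the LeetCode 937 example):
#   Solution().reorderLogFiles(
#       ["dig1 8 1 5 1", "let1 art can", "dig2 3 6", "let2 own kit dig", "let3 art zero"])
#   -> ["let1 art can", "let3 art zero", "let2 own kit dig", "dig1 8 1 5 1", "dig2 3 6"]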
"[email protected]"
]
| |
0aec8b543a666ebec07ee8d1f7de9f7df6ae0aa3 | 6710c52d04e17facbc9fb35a7df313f7a2a7bd53 | /0496. Next Greater Element I.py | 81a83b3a6afa832fca1c54f9dee831ff8ecb2cb3 | []
| no_license | pwang867/LeetCode-Solutions-Python | 535088fbe747a453360457728cc22cf336020bd2 | 188befbfb7080ba1053ee1f7187b177b64cf42d2 | refs/heads/master | 2022-11-13T16:20:28.211707 | 2020-06-28T06:01:14 | 2020-06-28T06:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,903 | py | # use descreasing stack, time/space O(n)
class Solution(object):
def nextGreaterElement(self, findNums, nums):
"""
:type findNums: List[int]
:type nums: List[int]
:rtype: List[int]
"""
# findNums is a subset of nums
# using stack
stack = [] # numbers stored inside stack will be descending
next_greater = {} # {number in findNums: its nextGreaterElement}
for num in nums:
while stack and stack[-1] < num:
next_greater[stack.pop()] = num
stack.append(num)
ans = []
for num in findNums: # subset
ans.append(next_greater.get(num, -1))
return ans
"""
You are given two arrays (without duplicates) nums1 and nums2 where nums1’s elements are subset of nums2. Find all the next greater numbers for nums1's elements in the corresponding places of nums2.
The Next Greater Number of a number x in nums1 is the first greater number to its right in nums2. If it does not exist, output -1 for this number.
Example 1:
Input: nums1 = [4,1,2], nums2 = [1,3,4,2].
Output: [-1,3,-1]
Explanation:
For number 4 in the first array, you cannot find the next greater number for it in the second array, so output -1.
For number 1 in the first array, the next greater number for it in the second array is 3.
For number 2 in the first array, there is no next greater number for it in the second array, so output -1.
Example 2:
Input: nums1 = [2,4], nums2 = [1,2,3,4].
Output: [3,-1]
Explanation:
For number 2 in the first array, the next greater number for it in the second array is 3.
For number 4 in the first array, there is no next greater number for it in the second array, so output -1.
Note:
All elements in nums1 and nums2 are unique.
The length of both nums1 and nums2 would not exceed 1000.
"""
| [
"[email protected]"
]
| |
0b1491be7cb19a099088b52747a39f61a0ac1a3f | 951a84f6fafa763ba74dc0ad6847aaf90f76023c | /PythonLearning/c15.py | 6564d10808d0f33742e95d0a46ea1ab568e6afe4 | []
| no_license | SakuraGo/leetcodepython3 | 37258531f1994336151f8b5c8aec5139f1ba79f8 | 8cedddb997f4fb6048b53384ac014d933b6967ac | refs/heads/master | 2020-09-27T15:55:28.353433 | 2020-02-15T12:00:02 | 2020-02-15T12:00:02 | 226,550,406 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | import t.t1.c9
print("~~~~~~~~~C15~~~~~~~~~")
print("name:",__name__) ##名字
print("package:"+ (__package__ or "package不属于任何包")) ## 所属包
print("doc:",__doc__) ## 模块注释
print("file:",__file__) ##物理路径
vvv = 23 if 3>5 else 35
print(vvv) | [
"[email protected]"
]
| |
fda206922f8d0287b1b159050f65d979846ea8ad | c9f54e1a2e11a033b53b4f12564c7b87c5ce1a4a | /one_time/hidden_units.py | 0788a78f5a75634b5a6eb773e44b1b85d12ced5d | []
| no_license | mikewycklendt/dcadventures | b2e5e38ed53a698bb3c18c5b332df424540a18e3 | 542f90c3cce859416de14e40bdebf6a8cddcf67a | refs/heads/master | 2023-06-10T17:00:33.380125 | 2021-06-21T20:38:25 | 2021-06-21T20:38:25 | 290,849,079 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 572 | py |
@app.route('/table/db')
def table_db_columns_create():
name = 'Time Rank'
entry = Unit(time=True, name=name, hide=True )
db.session.add(entry)
db.session.commit()
name = 'Distance Rank'
entry = Unit(distance=True, name=name, hide=True )
db.session.add(entry)
db.session.commit()
name = 'Speed Rank'
entry = Unit(speed=True, name=name, hide=True )
db.session.add(entry)
db.session.commit()
results = db.session.query(Unit).filter_by(hide=True).all()
for result in results:
print (result.id)
print (result.name)
return ('Unit Ranks db added')
| [
"[email protected]"
]
| |
d18a5ff31839e9598493723bd5c0347c58ecfd44 | ccf94dcb6b1500fcbbd56964ae8c4832a496b8b3 | /python/baiduads-sdk-auto/test/test_get_media_packages_response_wrapper.py | 7431508715bab9f2bef9fbf745fb5efc555b45d5 | [
"Apache-2.0"
]
| permissive | baidu/baiduads-sdk | 24c36b5cf3da9362ec5c8ecd417ff280421198ff | 176363de5e8a4e98aaca039e4300703c3964c1c7 | refs/heads/main | 2023-06-08T15:40:24.787863 | 2023-05-20T03:40:51 | 2023-05-20T03:40:51 | 446,718,177 | 16 | 11 | Apache-2.0 | 2023-06-02T05:19:40 | 2022-01-11T07:23:17 | Python | UTF-8 | Python | false | false | 1,104 | py | """
dev2 api schema
'dev2.baidu.com' api schema # noqa: E501
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import baiduads
from baiduads.common.model.api_response_header import ApiResponseHeader
from baiduads.searchfeed.model.get_media_packages_response_wrapper_body import GetMediaPackagesResponseWrapperBody
globals()['ApiResponseHeader'] = ApiResponseHeader
globals()['GetMediaPackagesResponseWrapperBody'] = GetMediaPackagesResponseWrapperBody
from baiduads.searchfeed.model.get_media_packages_response_wrapper import GetMediaPackagesResponseWrapper
class TestGetMediaPackagesResponseWrapper(unittest.TestCase):
"""GetMediaPackagesResponseWrapper unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testGetMediaPackagesResponseWrapper(self):
"""Test GetMediaPackagesResponseWrapper"""
# FIXME: construct object with mandatory attributes with example values
# model = GetMediaPackagesResponseWrapper() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
14145e52b961975903bfd44a7d99be0c2f3207d0 | cf149421eb604826dc4757bc4fa1ac524b44476b | /pyscene/gradient/by_value.py | 1581fa003c44ce1b7fb7437f2d07eb90cef50cef | []
| no_license | Windspar/PyScene | 60e903106905b6eaff640dfde08d8bb447353ab5 | 004a274326f1aac06da04e3f5a663374da618a64 | refs/heads/master | 2021-09-07T11:05:41.362873 | 2018-02-22T01:31:12 | 2018-02-22T01:31:12 | 113,268,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,915 | py | import numpy as np
import pygame
from .base import RGBA_FORMULA, pure_color
class HSL:
H = 0
HUE = 0
S = 1
SAT = 1
SATURATION = 1
L = 2
LUM = 2
LIGHT = 2
LIGHTNESS = 2
class RGB:
R = 0
RED = 0
G = 1
GREEN = 1
B = 2
BLUE = 2
class HSV:
H = 0
HUE = 0
S = 1
SAT = 1
SATURATION = 1
V = 2
VALUE = 2
def by_value(horizontal, value, color, value_begin, value_end, decimal, flip):
length = value_end - value_begin
if horizontal:
surface = pygame.Surface((1, length))
else:
surface = pygame.Surface((length, 1))
surface = surface.convert_alpha()
surface_array = pygame.surfarray.pixels2d(surface)
np_color = color
for val in range(value_begin, value_end):
pos = val - value_begin
np_color[value] = val
if horizontal:
surface_array[0][pos] = decimal(np_color)
else:
surface_array[pos][0] = decimal(np_color)
    # Drop the pixel-array view so the surface is unlocked before transforming.
    del surface_array
    if flip:
if horizontal:
surface = pygame.transform.flip(surface, False, True)
else:
surface = pygame.transform.flip(surface, True, False)
return surface
def hsl_by_value(horizontal, value, color, offset_begin, offset_end, flip=False):
color = np.array(pure_color(color).hsla)
base = int(color[value] + 0.5)
    # Clamp the sampled range to the channel's valid bounds
    # (hue: 0-360; saturation/lightness: 0-100).
    if base - offset_begin < 0:
        offset_begin = 0
    else:
        offset_begin = base - offset_begin
    if value == 0:
        if base + offset_end > 360:
            offset_end = 360
        else:
            offset_end = base + offset_end
    else:
        if base + offset_end > 100:
            offset_end = 100
        else:
            offset_end = base + offset_end
def decimal(color):
pcolor = pygame.Color(0,0,0)
pcolor.hsla = color
return np.sum(np.array(pcolor, int) << RGBA_FORMULA)
return by_value(horizontal, value, color, offset_begin, offset_end, decimal, flip)
def hsv_by_value(horizontal, value, color, offset_begin, offset_end, flip=False):
color = np.array(pure_color(color).hsva)
    base = int(color[value] + 0.5)
    # Convert the relative offsets into absolute, clamped channel values,
    # mirroring hsl_by_value above (hue: 0-360; saturation/value: 0-100).
    if base - offset_begin < 0:
        offset_begin = 0
    else:
        offset_begin = base - offset_begin
    if value == 0:
        if base + offset_end > 360:
            offset_end = 360
        else:
            offset_end = base + offset_end
    else:
        if base + offset_end > 100:
            offset_end = 100
        else:
            offset_end = base + offset_end
def decimal(color):
pcolor = pygame.Color(0,0,0)
pcolor.hsva = color
return np.sum(np.array(pcolor, int) << RGBA_FORMULA)
return by_value(horizontal, value, color, offset_begin, offset_end, decimal, flip)
def rgb_by_value(horizontal, value, color, value_begin, value_end, flip=False):
color = np.array(pure_color(color))
def decimal(color):
return np.sum(color.astype(int) << RGBA_FORMULA)
return by_value(horizontal, value, color, value_begin, value_end, decimal, flip)
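# Minimal usage sketch (assumes a pygame display exists, since by_value()
# calls convert_alpha(), and that an (r, g, b) tuple is an accepted color form):
#   pygame.init()
#   screen = pygame.display.set_mode((256, 50))
#   strip = rgb_by_value(False, RGB.R, (0, 0, 0), 0, 256)  # 256x1 black-to-red ramp
#   screen.blit(pygame.transform.scale(strip, (256, 50)), (0, 0))
#   pygame.display.flip()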
| [
"[email protected]"
]
| |
6bb9e9709cd46995967059850e6158cce9302c2d | 24381845fe5e8b8a774d74911e2f07bcd1a00190 | /azure-iot-provisioning-servicesdk/azure/iot/provisioning/servicesdk/protocol/models/__init__.py | 87d86af26eeff8f238fba3eec4bb42462abddc84 | [
"MIT",
"LicenseRef-scancode-generic-cla"
]
| permissive | noopkat/azure-iot-sdk-python-preview | 94b3b29497b80e1dac6471ae906d8491f018b12d | f51733e9d3424c33ed86d51e214b20c843716763 | refs/heads/master | 2020-04-30T03:07:37.445782 | 2019-03-18T18:42:15 | 2019-03-18T18:42:15 | 176,579,084 | 6 | 0 | MIT | 2019-03-19T18:53:43 | 2019-03-19T18:53:43 | null | UTF-8 | Python | false | false | 4,016 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
try:
from .provisioning_service_error_details_py3 import (
ProvisioningServiceErrorDetails,
ProvisioningServiceErrorDetailsException,
)
from .device_capabilities_py3 import DeviceCapabilities
from .device_registration_state_py3 import DeviceRegistrationState
from .tpm_attestation_py3 import TpmAttestation
from .x509_certificate_info_py3 import X509CertificateInfo
from .x509_certificate_with_info_py3 import X509CertificateWithInfo
from .x509_certificates_py3 import X509Certificates
from .x509_ca_references_py3 import X509CAReferences
from .x509_attestation_py3 import X509Attestation
from .symmetric_key_attestation_py3 import SymmetricKeyAttestation
from .attestation_mechanism_py3 import AttestationMechanism
from .metadata_py3 import Metadata
from .twin_collection_py3 import TwinCollection
from .initial_twin_properties_py3 import InitialTwinProperties
from .initial_twin_py3 import InitialTwin
from .reprovision_policy_py3 import ReprovisionPolicy
from .custom_allocation_definition_py3 import CustomAllocationDefinition
from .individual_enrollment_py3 import IndividualEnrollment
from .enrollment_group_py3 import EnrollmentGroup
from .bulk_enrollment_operation_py3 import BulkEnrollmentOperation
from .bulk_enrollment_operation_error_py3 import BulkEnrollmentOperationError
from .bulk_enrollment_operation_result_py3 import BulkEnrollmentOperationResult
from .query_specification_py3 import QuerySpecification
except (SyntaxError, ImportError):
from .provisioning_service_error_details import (
ProvisioningServiceErrorDetails,
ProvisioningServiceErrorDetailsException,
)
from .device_capabilities import DeviceCapabilities
from .device_registration_state import DeviceRegistrationState
from .tpm_attestation import TpmAttestation
from .x509_certificate_info import X509CertificateInfo
from .x509_certificate_with_info import X509CertificateWithInfo
from .x509_certificates import X509Certificates
from .x509_ca_references import X509CAReferences
from .x509_attestation import X509Attestation
from .symmetric_key_attestation import SymmetricKeyAttestation
from .attestation_mechanism import AttestationMechanism
from .metadata import Metadata
from .twin_collection import TwinCollection
from .initial_twin_properties import InitialTwinProperties
from .initial_twin import InitialTwin
from .reprovision_policy import ReprovisionPolicy
from .custom_allocation_definition import CustomAllocationDefinition
from .individual_enrollment import IndividualEnrollment
from .enrollment_group import EnrollmentGroup
from .bulk_enrollment_operation import BulkEnrollmentOperation
from .bulk_enrollment_operation_error import BulkEnrollmentOperationError
from .bulk_enrollment_operation_result import BulkEnrollmentOperationResult
from .query_specification import QuerySpecification
__all__ = [
"ProvisioningServiceErrorDetails",
"ProvisioningServiceErrorDetailsException",
"DeviceCapabilities",
"DeviceRegistrationState",
"TpmAttestation",
"X509CertificateInfo",
"X509CertificateWithInfo",
"X509Certificates",
"X509CAReferences",
"X509Attestation",
"SymmetricKeyAttestation",
"AttestationMechanism",
"Metadata",
"TwinCollection",
"InitialTwinProperties",
"InitialTwin",
"ReprovisionPolicy",
"CustomAllocationDefinition",
"IndividualEnrollment",
"EnrollmentGroup",
"BulkEnrollmentOperation",
"BulkEnrollmentOperationError",
"BulkEnrollmentOperationResult",
"QuerySpecification",
]
| [
"[email protected]"
]
| |
d956d330feab1a324f7948bdcbc9d48882bd40a8 | bbcce934ac20249a006580915aa61b72f8521544 | /G5_template.py | 7c966f89352d288441e458325d0c88a33e15da29 | []
| no_license | SanjuktaBhatt/G11_Projects | 6225a51cb653001af46a859ab578efba30ce7d79 | 435163bc26b759b4a185a2f4cce3d228c47cbf43 | refs/heads/main | 2023-06-19T18:58:54.684729 | 2021-07-22T08:37:56 | 2021-07-22T08:37:56 | 382,996,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | import pygame
import random
pygame.init()
WHITE = (255,255,255)
DARKBLUE = (36,90,190)
LIGHTBLUE = (0,176,240)
RED = (255,0,0)
ORANGE = (255,100,0)
YELLOW = (255,255,0)
COLOR=[WHITE,DARKBLUE,LIGHTBLUE,RED,ORANGE,YELLOW]
size = (400, 400)
screen = pygame.display.set_mode(size)
pygame.display.set_caption("Project C5")
#Create "carryOn" variable and set to true
#Begin the while loop
#Iterate through each event
#Identify is user has quit
#change "carryOn" to False
discolight_color=random.choice(COLOR)
screen.fill(discolight_color)
pygame.display.flip()
pygame.quit()
| [
"[email protected]"
]
| |
f25b22df40286f6b6c2b5f938e282191d7e00fa2 | 313110e3a0d01adb562e40f73d9c6fc32c74e0ca | /inventory/migrations/0004_auto_20210327_0821.py | be019a0d3e9d95fa0f6820c8f05625773f08f086 | []
| no_license | prabaldeshar/erp-project | e911ac447aab9ede39567fb82275bbbf0357932e | 31762cf765d1aee21623033c963147d69219ba56 | refs/heads/main | 2023-03-29T19:29:18.689599 | 2021-04-07T15:19:14 | 2021-04-07T15:19:14 | 344,389,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | # Generated by Django 3.1.7 on 2021-03-27 08:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('inventory', '0003_auto_20210310_1630'),
]
operations = [
migrations.AlterField(
model_name='productcategory',
name='parent',
field=models.IntegerField(blank=True, null=True),
),
]
| [
"[email protected]"
]
| |
44168503a255ef0073c8a1d1da7327f6fbd4ce70 | 78f54f911d47019da0deeeb6579c7e9e65bb8d21 | /src/process/models/base/operation/DefinitionBase.py | c35aa1747e8905cbff9dd2e3bdbd0e7a6be6e07e | [
"MIT"
]
| permissive | jedicontributors/pythondataintegrator | 02f8ae1a50cf5ddd85341da738c24aa6a320c442 | 3e877b367ab9b20185476128ec053db41087879f | refs/heads/main | 2023-06-15T07:37:13.313988 | 2021-07-03T15:46:43 | 2021-07-03T15:46:43 | 354,021,102 | 0 | 0 | MIT | 2021-07-03T15:46:44 | 2021-04-02T13:03:12 | Python | UTF-8 | Python | false | false | 849 | py | from models.base.EntityBase import EntityBase
from infrastructor.json.BaseConverter import BaseConverter
@BaseConverter.register
class DefinitionBase(EntityBase):
def __init__(self,
Name: str = None,
Version: int = None,
Content: str = None,
IsActive: bool = None,
                 DataOperations=None,
                 DataIntegrations=None,
                 DataOperationJobExecutions=None,
*args, **kwargs):
super().__init__(*args, **kwargs)
        # Default to None and build fresh lists here to avoid sharing one
        # mutable default list across every instance.
        self.DataOperationJobExecutions = DataOperationJobExecutions if DataOperationJobExecutions is not None else []
        self.DataIntegrations = DataIntegrations if DataIntegrations is not None else []
        self.DataOperations = DataOperations if DataOperations is not None else []
self.Name: str = Name
self.Version: int = Version
self.Content: str = Content
self.IsActive: bool = IsActive
| [
"[email protected]"
]
| |
138c960e607b7bf65684073d4251a7dbde2c05b2 | 34ee39bcb01247758b96a9dc4801588d0696a098 | /backend/xcx_23352/settings.py | eb3460ccad4ef70af98ffda7a4dfc80887b99ecf | []
| no_license | crowdbotics-apps/xcx-23352 | 1eac3cfd05921af893d51a4219496153bf9beb78 | bf45a84e05c1ea6a959d319fa679de54554a11d5 | refs/heads/master | 2023-01-30T10:15:20.053556 | 2020-12-14T03:22:55 | 2020-12-14T03:22:55 | 321,220,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,722 | py | """
Django settings for xcx_23352 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'xcx_23352.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'xcx_23352.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'xcx_23352',
'USER': 'xcx_23352',
'PASSWORD': 'xcx_23352',
'HOST': 'localhost',
'PORT': '5432',
}
}
if env.str('DATABASE_URL', default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings
try:
from .local_settings import *
INSTALLED_APPS += DEBUG_APPS
except (ImportError, NameError):
    # local_settings (and its DEBUG_APPS list) are optional
    pass
| [
"[email protected]"
]
| |
70fef156091609d8d1b6faba71b19d8d7e5d3ee8 | b8d208679a3a3b16960eecd92dd3dd5ce78f8e7f | /setup.py | dac804bcf98486f6b1a18f1dc210f95a471dea12 | [
"BSD-3-Clause"
]
| permissive | jessicalumian/distillerycats | e509bdd5fbb8062931938d7f952058d0c5775851 | 67ac76b21397255af604d8bf4aad2eb7889dc88c | refs/heads/main | 2023-05-03T22:10:44.999734 | 2021-05-24T16:14:36 | 2021-05-24T16:14:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,905 | py | from setuptools import setup, find_packages
# read the contents of your README file
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
CLASSIFIERS = [
"Environment :: Console",
"Environment :: MacOS X",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Operating System :: POSIX :: Linux",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering :: Bio-Informatics",
]
setup(
name = 'distillerycats',
description="Find disease associations across metagenomes with k-mers using sourmash, and then recover pangenomic accessory elements using spacegraphcats.",
url="https://github.com/dib-lab/distillerycats",
author="Taylor Reiter, N. Tessa Pierce and C. Titus Brown",
author_email="[email protected],[email protected],[email protected]",
license="BSD 3-clause",
packages = find_packages(),
classifiers = CLASSIFIERS,
entry_points = {'console_scripts': ['distillerycats = distillerycats.__main__:main']},
include_package_data=True,
package_data = { "distillerycats": ["Snakefile", "*.yaml", "*.yml", "*.ipynb"] },
setup_requires = [ "setuptools>=38.6.0",
'setuptools_scm',
'setuptools_scm_git_archive',
'pytest-runner'],
use_scm_version = {"write_to": "distillerycats/version.py"},
install_requires = ['snakemake>=6.3.0', 'click>=8', 'pandas'],
tests_require=["pytest>=5.1.2", "pytest-dependency"],
long_description=long_description,
long_description_content_type="text/markdown",
)
| [
"[email protected]"
]
| |
781083edd3afb79a9b9fbac3d4156eaf5378e226 | f16886292a92b113f901bfd757a909854e713431 | /dd/__init__.py | 926c384fe50d4d3bbbeaa08fb2ee3e041c13208c | []
| no_license | WilliamMayor/distorteddecade | a224132aa4396d26b740415bac9ea4830c4c0c0c | 9e6cfc651a44eb1f32ba300f8954f6694de87bf1 | refs/heads/master | 2021-01-02T15:35:12.418862 | 2015-06-06T18:13:51 | 2015-06-06T18:13:51 | 25,695,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 798 | py | import os
from flask import Flask
from blueprints import admin, public
from assets import assets
from login import manager
from models import db, bcrypt
import logging
from logging import StreamHandler
def create_app():
app = Flask(__name__)
file_handler = StreamHandler()
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)
app.config.from_object('dd.config')
for k in app.config:
v = os.environ.get(k, None)
if v is not None:
app.config[k] = v
app.config['SQLALCHEMY_DATABASE_URI'] = app.config['DATABASE_URL']
assets.init_app(app)
manager.init_app(app)
db.init_app(app)
bcrypt.init_app(app)
app.register_blueprint(admin, url_prefix='/admin')
app.register_blueprint(public)
return app
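# Typical entry point (a sketch; host/port are placeholders, and the real
# project may launch the app from a WSGI server instead):
#   app = create_app()
#   app.run(host='0.0.0.0', port=5000)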
| [
"[email protected]"
]
| |
8725d49f69e07e80693171c657e35320b01b2f6d | a712ec2940aa6a2fa51da6392ae1fb7b1ba8ce40 | /setup.py | b77b6ede0f4085fce861f415b26ea1e28f1756c1 | []
| no_license | adam139/emc.policy | 511366acd3cd70a26e12bdecae8e890704362340 | bace3cec2cf7dbcf387d34ac3b22799e650e5dd0 | refs/heads/master | 2022-05-08T21:35:15.736156 | 2022-01-10T09:05:43 | 2022-01-10T09:05:43 | 47,091,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,376 | py | from setuptools import setup, find_packages
import os
version = '2.0'
setup(name='emc.policy',
version=version,
description="A plone5 website policy package for emc project",
long_description=open("README.txt").read() + "\n" +
open(os.path.join("docs", "HISTORY.txt")).read(),
# Get more strings from
# http://pypi.python.org/pypi?:action=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
],
keywords='python plone',
author='Adam tang',
author_email='[email protected]',
url='https://github.com/collective/',
license='GPL',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['emc'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'Products.CMFPlone',
'collective.autopermission',
'z3c.jbot',
'z3c.unconfigure',
'collective.wtf',
'collective.monkeypatcher',
'collective.filepreviewbehavior',
# -*- Extra requirements: -*-
],
extras_require={
'test': ['plone.app.testing',]
},
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
)
| [
"[email protected]"
]
| |
f4fcd2dd96b19b48f2372793f2cc7c7ade76a977 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/intersectingDiscs_20200804182850.py | 48817f0ba68d4f55ec35335a75c1659d556927a8 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11 | py | def discs():
    pass  # history snapshot captured mid-edit; body not yet written
| [
"[email protected]"
]
| |
384d4adc895d76bbf6dea63d7d3703e61a53f8c6 | 7262a3c12297e853f72d0c0db2c9ca0f65d2115b | /Automation/page_locators/imdb.py | 2bdd74bb839a1086727e0fa301c734f0b06e0fe6 | []
| no_license | pavankumarag/navi | 6ae77b1d26a8e2ee31ee18ea35d54823bd26a1cb | 7a05ea1e3cfe3d339cbb3047948d7d370739fc51 | refs/heads/master | 2021-01-02T06:57:23.181001 | 2020-02-14T06:17:53 | 2020-02-14T06:17:53 | 239,538,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 597 | py | from selenium.webdriver.common.by import By
class Imdb:
IMDB_SEARCH = By.XPATH, "//input[@placeholder='Search IMDb']"
SELECT_DIRECTOR = By.XPATH, "//li//div[text()='Steven Spielberg']"
GET_DIRECTION_DETAILS = By.XPATH, "//td[contains(@class, 'name-overview')]//a[@href='#director']"
GET_PRODUCER_DETAILS = By.XPATH, "//td[contains(@class, 'name-overview')]//a[@href='#producer']"
ALL_MOVIES = By.XPATH, "//div[@data-category='director']//following-sibling::div"
    EVERY_MOVIE_NAME = By.XPATH, "//div[@data-category='director']//following-sibling::div/div[contains(@class,'filmo-row')][%d]/b/a"
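    # Locators are (By, selector) tuples, so tests can unpack them directly
    # (sketch; assumes a Selenium WebDriver instance named `driver`):
    #   driver.find_element(*Imdb.IMDB_SEARCH).send_keys("Steven Spielberg")
    #   by, xpath = Imdb.EVERY_MOVIE_NAME
    #   first_movie = driver.find_element(by, xpath % 1)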
"[email protected]"
]
| |
5e600add0db69a5528470b8e1bb05629320211b6 | 7a704e838d89f942a1099fec141f1fbe9828e528 | /third/object_detection/utils/shape_utils.py | dfa96e799a2f7df379fe843872bfc8305587c61e | [
"Apache-2.0"
]
| permissive | cap-ntu/Video-to-Retail-Platform | 3ee00d22b7fd94925adac08c5ea733ee647f4574 | 757c68d9de0778e3da8bbfa678d89251a6955573 | refs/heads/hysia_v2 | 2023-02-14T05:22:16.792928 | 2021-01-10T02:31:43 | 2021-01-10T02:31:43 | 212,741,650 | 63 | 20 | Apache-2.0 | 2021-01-10T02:32:00 | 2019-10-04T05:22:08 | Python | UTF-8 | Python | false | false | 12,612 | py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils used to manipulate tensor shapes."""
import tensorflow as tf
from object_detection.utils import static_shape
def _is_tensor(t):
"""Returns a boolean indicating whether the input is a tensor.
Args:
t: the input to be tested.
Returns:
a boolean that indicates whether t is a tensor.
"""
return isinstance(t, (tf.Tensor, tf.SparseTensor, tf.Variable))
def _set_dim_0(t, d0):
"""Sets the 0-th dimension of the input tensor.
Args:
t: the input tensor, assuming the rank is at least 1.
d0: an integer indicating the 0-th dimension of the input tensor.
Returns:
the tensor t with the 0-th dimension set.
"""
t_shape = t.get_shape().as_list()
t_shape[0] = d0
t.set_shape(t_shape)
return t
def pad_tensor(t, length):
"""Pads the input tensor with 0s along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after padding, assuming length <= t.shape[0].
Returns:
padded_t: the padded tensor, whose first dimension is length. If the length
is an integer, the first dimension of padded_t is set to length
statically.
"""
t_rank = tf.rank(t)
t_shape = tf.shape(t)
t_d0 = t_shape[0]
pad_d0 = tf.expand_dims(length - t_d0, 0)
pad_shape = tf.cond(
tf.greater(t_rank, 1), lambda: tf.concat([pad_d0, t_shape[1:]], 0),
lambda: tf.expand_dims(length - t_d0, 0))
padded_t = tf.concat([t, tf.zeros(pad_shape, dtype=t.dtype)], 0)
if not _is_tensor(length):
padded_t = _set_dim_0(padded_t, length)
return padded_t
def clip_tensor(t, length):
"""Clips the input tensor along the first dimension up to the length.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after clipping, assuming length <= t.shape[0].
Returns:
clipped_t: the clipped tensor, whose first dimension is length. If the
length is an integer, the first dimension of clipped_t is set to length
statically.
"""
clipped_t = tf.gather(t, tf.range(length))
if not _is_tensor(length):
clipped_t = _set_dim_0(clipped_t, length)
return clipped_t
def pad_or_clip_tensor(t, length):
"""Pad or clip the input tensor along the first dimension.
Args:
t: the input tensor, assuming the rank is at least 1.
length: a tensor of shape [1] or an integer, indicating the first dimension
of the input tensor t after processing.
Returns:
processed_t: the processed tensor, whose first dimension is length. If the
length is an integer, the first dimension of the processed tensor is set
to length statically.
"""
return pad_or_clip_nd(t, [length] + t.shape.as_list()[1:])
def pad_or_clip_nd(tensor, output_shape):
"""Pad or Clip given tensor to the output shape.
Args:
tensor: Input tensor to pad or clip.
output_shape: A list of integers / scalar tensors (or None for dynamic dim)
representing the size to pad or clip each dimension of the input tensor.
Returns:
Input tensor padded and clipped to the output shape.
"""
tensor_shape = tf.shape(tensor)
clip_size = [
tf.where(tensor_shape[i] - shape > 0, shape, -1)
if shape is not None else -1 for i, shape in enumerate(output_shape)
]
clipped_tensor = tf.slice(
tensor,
begin=tf.zeros(len(clip_size), dtype=tf.int32),
size=clip_size)
# Pad tensor if the shape of clipped tensor is smaller than the expected
# shape.
clipped_tensor_shape = tf.shape(clipped_tensor)
trailing_paddings = [
shape - clipped_tensor_shape[i] if shape is not None else 0
for i, shape in enumerate(output_shape)
]
paddings = tf.stack(
[
tf.zeros(len(trailing_paddings), dtype=tf.int32),
trailing_paddings
],
axis=1)
padded_tensor = tf.pad(clipped_tensor, paddings=paddings)
output_static_shape = [
dim if not isinstance(dim, tf.Tensor) else None for dim in output_shape
]
padded_tensor.set_shape(output_static_shape)
return padded_tensor
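# Example (a sketch in the TF1 graph style this module targets):
#   t = tf.ones([3, 5])
#   pad_or_clip_nd(t, [4, 2])  # -> shape [4, 2]: rows zero-padded, columns clipped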
def combined_static_and_dynamic_shape(tensor):
"""Returns a list containing static and dynamic values for the dimensions.
Returns a list of static and dynamic values for shape dimensions. This is
useful to preserve static shapes when available in reshape operation.
Args:
tensor: A tensor of any type.
Returns:
A list of size tensor.shape.ndims containing integers or a scalar tensor.
"""
static_tensor_shape = tensor.shape.as_list()
dynamic_tensor_shape = tf.shape(tensor)
combined_shape = []
for index, dim in enumerate(static_tensor_shape):
if dim is not None:
combined_shape.append(dim)
else:
combined_shape.append(dynamic_tensor_shape[index])
return combined_shape
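# Example (sketch, TF1 graph mode):
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   combined_static_and_dynamic_shape(images)
#   # -> [<scalar int32 Tensor>, 224, 224, 3]; known dims stay Python ints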
def static_or_dynamic_map_fn(fn, elems, dtype=None,
parallel_iterations=32, back_prop=True):
"""Runs map_fn as a (static) for loop when possible.
This function rewrites the map_fn as an explicit unstack input -> for loop
over function calls -> stack result combination. This allows our graphs to
be acyclic when the batch size is static.
For comparison, see https://www.tensorflow.org/api_docs/python/tf/map_fn.
Note that `static_or_dynamic_map_fn` currently is not *fully* interchangeable
with the default tf.map_fn function as it does not accept nested inputs (only
Tensors or lists of Tensors). Likewise, the output of `fn` can only be a
Tensor or list of Tensors.
TODO(jonathanhuang): make this function fully interchangeable with tf.map_fn.
Args:
fn: The callable to be performed. It accepts one argument, which will have
the same structure as elems. Its output must have the
same structure as elems.
elems: A tensor or list of tensors, each of which will
be unpacked along their first dimension. The sequence of the
resulting slices will be applied to fn.
dtype: (optional) The output type(s) of fn. If fn returns a structure of
Tensors differing from the structure of elems, then dtype is not optional
and must have the same structure as the output of fn.
parallel_iterations: (optional) number of batch items to process in
parallel. This flag is only used if the native tf.map_fn is used
and defaults to 32 instead of 10 (unlike the standard tf.map_fn default).
back_prop: (optional) True enables support for back propagation.
This flag is only used if the native tf.map_fn is used.
Returns:
A tensor or sequence of tensors. Each tensor packs the
results of applying fn to tensors unpacked from elems along the first
dimension, from first to last.
Raises:
    ValueError: if `elems` is not a Tensor or a list of Tensors.
ValueError: if `fn` does not return a Tensor or list of Tensors
"""
if isinstance(elems, list):
for elem in elems:
if not isinstance(elem, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elem_shapes = [elem.shape.as_list() for elem in elems]
# Fall back on tf.map_fn if shapes of each entry of `elems` are None or fail
# to all be the same size along the batch dimension.
for elem_shape in elem_shapes:
if (not elem_shape or not elem_shape[0]
or elem_shape[0] != elem_shapes[0][0]):
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
arg_tuples = zip(*[tf.unstack(elem) for elem in elems])
outputs = [fn(arg_tuple) for arg_tuple in arg_tuples]
else:
if not isinstance(elems, tf.Tensor):
raise ValueError('`elems` must be a Tensor or list of Tensors.')
elems_shape = elems.shape.as_list()
if not elems_shape or not elems_shape[0]:
return tf.map_fn(fn, elems, dtype, parallel_iterations, back_prop)
outputs = [fn(arg) for arg in tf.unstack(elems)]
# Stack `outputs`, which is a list of Tensors or list of lists of Tensors
if all([isinstance(output, tf.Tensor) for output in outputs]):
return tf.stack(outputs)
else:
if all([isinstance(output, list) for output in outputs]):
if all([all(
[isinstance(entry, tf.Tensor) for entry in output_list])
for output_list in outputs]):
return [tf.stack(output_tuple) for output_tuple in zip(*outputs)]
raise ValueError('`fn` should return a Tensor or a list of Tensors.')
def check_min_image_dim(min_dim, image_tensor):
"""Checks that the image width/height are greater than some number.
This function is used to check that the width and height of an image are above
a certain value. If the image shape is static, this function will perform the
check at graph construction time. Otherwise, if the image shape varies, an
Assertion control dependency will be added to the graph.
Args:
min_dim: The minimum number of pixels along the width and height of the
image.
image_tensor: The image tensor to check size for.
Returns:
If `image_tensor` has dynamic size, return `image_tensor` with a Assert
control dependency. Otherwise returns image_tensor.
Raises:
ValueError: if `image_tensor`'s' width or height is smaller than `min_dim`.
"""
image_shape = image_tensor.get_shape()
image_height = static_shape.get_height(image_shape)
image_width = static_shape.get_width(image_shape)
if image_height is None or image_width is None:
shape_assert = tf.Assert(
tf.logical_and(tf.greater_equal(tf.shape(image_tensor)[1], min_dim),
tf.greater_equal(tf.shape(image_tensor)[2], min_dim)),
['image size must be >= {} in both height and width.'.format(min_dim)])
with tf.control_dependencies([shape_assert]):
return tf.identity(image_tensor)
if image_height < min_dim or image_width < min_dim:
raise ValueError(
'image size must be >= %d in both height and width; image dim = %d,%d' %
(min_dim, image_height, image_width))
return image_tensor
def assert_shape_equal(shape_a, shape_b):
"""Asserts that shape_a and shape_b are equal.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if (all(isinstance(dim, int) for dim in shape_a) and
all(isinstance(dim, int) for dim in shape_b)):
if shape_a != shape_b:
raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a, shape_b)
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
"""Asserts that shape_a and shape_b are the same along the 0th-dimension.
If the shapes are static, raises a ValueError when the shapes
mismatch.
If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
mismatch.
Args:
shape_a: a list containing shape of the first tensor.
shape_b: a list containing shape of the second tensor.
Returns:
Either a tf.no_op() when shapes are all static and a tf.assert_equal() op
when the shapes are dynamic.
Raises:
ValueError: When shapes are both static and unequal.
"""
if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
if shape_a[0] != shape_b[0]:
raise ValueError('Unequal first dimension {}, {}'.format(
shape_a[0], shape_b[0]))
else: return tf.no_op()
else:
return tf.assert_equal(shape_a[0], shape_b[0])
| [
"[email protected]"
]
| |
598a004b595a8e29f00f67bdb01fedc0d456de2d | 2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae | /python/python_7244.py | d529abaf169b2ce536079c9c20d24013963c5eab | []
| no_license | AK-1121/code_extraction | cc812b6832b112e3ffcc2bb7eb4237fd85c88c01 | 5297a4a3aab3bb37efa24a89636935da04a1f8b6 | refs/heads/master | 2020-05-23T08:04:11.789141 | 2015-10-22T19:19:40 | 2015-10-22T19:19:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49 | py | # Python error on building Dart on Windows
print
| [
"[email protected]"
]
| |
820877e8a570363ddc872e2c6b1b948264dc3cd2 | dce6a528cbebcf8a33fcff3468950b87084f76c2 | /transformers/modeling_albert.py | 0e72136a445c94d6dbeb6c5149e06f89eaf92ba8 | [
"Apache-2.0"
]
| permissive | zbn123/huggingface_albert | c523de1735af9ae646eef43dd7126b1acea6f508 | a527e926d6fc675290470a44db3808a3e639c922 | refs/heads/master | 2022-04-16T13:16:43.054101 | 2019-11-29T11:07:30 | 2019-11-29T11:07:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61,556 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model. """
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import logging
import math
import os
import sys
from io import open
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .configuration_albert import ALBertConfig
from .file_utils import add_start_docstrings
logger = logging.getLogger(__name__)
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-pytorch_model.bin",
'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-pytorch_model.bin",
'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-pytorch_model.bin",
'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-pytorch_model.bin",
'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-pytorch_model.bin",
'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-pytorch_model.bin",
'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-pytorch_model.bin",
'bert-base-german-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-cased-pytorch_model.bin",
'bert-large-uncased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-pytorch_model.bin",
'bert-large-cased-whole-word-masking': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-pytorch_model.bin",
'bert-large-uncased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-large-cased-whole-word-masking-finetuned-squad': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-whole-word-masking-finetuned-squad-pytorch_model.bin",
'bert-base-cased-finetuned-mrpc': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-finetuned-mrpc-pytorch_model.bin",
'bert-base-german-dbmdz-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-cased-pytorch_model.bin",
'bert-base-german-dbmdz-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-german-dbmdz-uncased-pytorch_model.bin",
}
def load_tf_weights_in_albert(model, config, tf_checkpoint_path):
""" Load tf checkpoints in a pytorch model.
"""
try:
import re
import numpy as np
import tensorflow as tf
except ImportError:
logger.error("Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions.")
raise
tf_path = os.path.abspath(tf_checkpoint_path)
logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path)
names = []
arrays = []
for name, shape in init_vars:
logger.info("Loading TF weight {} with shape {}".format(name, shape))
array = tf.train.load_variable(tf_path, name)
names.append(name)
arrays.append(array)
for name, array in zip(names, arrays):
name = name.split('/')
# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
if any(n in ["adam_v", "adam_m", "global_step"] for n in name):
logger.info("Skipping!!! {}".format("/".join(name)))
continue
pointer = model
for m_name in name:
if re.fullmatch(r'[A-Za-z]+_\d+', m_name):
print("+++++++++++++++++++++++++++++++++")
print("m_name: ",m_name)
print("pointer: ",pointer)
print("===============================")
if m_name == "LayerNorm_1" or "attention_1":
l = [m_name]
else:
l = re.split(r'_(\d+)', m_name)
l = [m_name]
else:
l = [m_name]
# print(l[0], pointer)
if l[0] == "ffn_1":
print("pass")
continue
if l[0] == 'inner_group_0':
print("pass!")
continue
if l[0] == 'group_0':
print("pass!!")
continue
if l[0] == 'kernel' or l[0] == 'gamma':
pointer = getattr(pointer, 'weight')
elif l[0] == 'output_bias' or l[0] == 'beta':
pointer = getattr(pointer, 'bias')
elif l[0] == 'output_weights':
pointer = getattr(pointer, 'weight')
elif l[0] == 'squad':
pointer = getattr(pointer, 'classifier')
else:
try:
pointer = getattr(pointer, l[0])
except AttributeError:
logger.info("Skipping {}".format("/".join(name)))
continue
if len(l) >= 2:
num = int(l[1])
pointer = pointer[num]
if m_name[-11:] == '_embeddings':
pointer = getattr(pointer, 'weight')
print("pointer.shape: ", pointer.shape)
print("array.shape: ",array.shape)
elif m_name == 'kernel':
array = np.transpose(array)
print("array.shape: ",array.shape)
try:
print(name)
print(pointer.shape, array.shape )
assert pointer.shape == array.shape
except AssertionError as e:
e.args += (pointer.shape, array.shape)
raise
logger.info("Initialize PyTorch weight {}".format(name))
pointer.data = torch.from_numpy(array)
return model
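# Conversion sketch (paths are placeholders; assumes a model class defined
# later in this file, e.g. an ALBert pretraining head, accepts this config):
#   config = ALBertConfig.from_json_file("albert_config.json")
#   model = SomeALBertModel(config)  # hypothetical name
#   load_tf_weights_in_albert(model, config, "albert_model.ckpt")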
def gelu(x):
""" Original Implementation of the gelu activation function in Google ALBert repo when initially created.
For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
Also see https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
""" Implementation of the gelu activation function currently in Google ALBert repo (identical to OpenAI GPT).
Also see https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
def swish(x):
return x * torch.sigmoid(x)
ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish, "gelu_new": gelu_new}
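# ACT2FN maps a config's activation name to its implementation, e.g.:
#   act_fn = ACT2FN["gelu_new"]   # config.hidden_act would supply the key
#   act_fn(torch.zeros(2, 3))     # -> zeros, since gelu(0) == 0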
ALBertLayerNorm = torch.nn.LayerNorm
class ALBertEmbeddings(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings.
"""
def __init__(self, config):
super(ALBertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=0)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = ALBertLayerNorm(config.embedding_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids=None, position_ids=None):
seq_length = input_ids.size(1)
if position_ids is None:
position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + token_type_embeddings
# embeddings = self.dense(embeddings)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class ALBertSelfAttention(nn.Module):
def __init__(self, config):
super(ALBertSelfAttention, self).__init__()
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads))
self.output_attentions = config.output_attentions
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
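    # Reshape (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)
    # so attention scores can be computed independently per head.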
def transpose_for_scores(self, x):
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, attention_mask=None, head_mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
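        # Scale by sqrt(head_size) so the softmax inputs stay in a well-conditioned range.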
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the ALBertModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.Softmax(dim=-1)(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,)
return outputs
class ALBertSelfOutput(nn.Module):
def __init__(self, config):
super(ALBertSelfOutput, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = ALBertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class ALBertAttention(nn.Module):
def __init__(self, config):
super(ALBertAttention, self).__init__()
self.output = ALBertSelfOutput(config)
self.self = ALBertSelfAttention(config)
self.pruned_heads = set()
def prune_heads(self, heads):
if len(heads) == 0:
return
mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        heads = set(heads) - self.pruned_heads  # Convert to a set and remove already pruned heads
for head in heads:
# Compute how many pruned heads are before the head and move the index accordingly
head = head - sum(1 if h < head else 0 for h in self.pruned_heads)
mask[head] = 0
mask = mask.view(-1).contiguous().eq(1)
index = torch.arange(len(mask))[mask].long()
# Prune linear layers
self.self.query = prune_linear_layer(self.self.query, index)
self.self.key = prune_linear_layer(self.self.key, index)
self.self.value = prune_linear_layer(self.self.value, index)
self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
# Update hyper params and store pruned heads
self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
self.pruned_heads = self.pruned_heads.union(heads)
def forward(self, input_tensor, attention_mask=None, head_mask=None):
self_outputs = self.self(input_tensor, attention_mask, head_mask)
attention_output = self.output(self_outputs[0], input_tensor)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class ALBertIntermediate(nn.Module):
def __init__(self, config):
super(ALBertIntermediate, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
self.output = ALBertOutput(config)
def forward(self, hidden_states_0):
hidden_states = self.dense(hidden_states_0)
hidden_states = self.intermediate_act_fn(hidden_states)
layer_output = self.output(hidden_states, hidden_states_0)
return layer_output
class ALBertOutput(nn.Module):
def __init__(self, config):
super(ALBertOutput, self).__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = hidden_states + input_tensor
return hidden_states
class ALBertLayer(nn.Module):
def __init__(self, config):
super(ALBertLayer, self).__init__()
self.LayerNorm = ALBertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.LayerNorm_1 = ALBertLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.attention_1 = ALBertAttention(config)
self.intermediate = ALBertIntermediate(config)
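    # The input is normalized here only on the first pass (idx == 0); later passes
    # receive hidden states already normalized by LayerNorm_1 at the end of this block.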
    def forward(self, idx, hidden_states, attention_mask=None, head_mask=None):
        if idx == 0:
            hidden_states = self.LayerNorm(hidden_states)
        attention_outputs = self.attention_1(hidden_states, attention_mask, head_mask)
        attention_output = attention_outputs[0]
        attention_norm = self.LayerNorm(attention_output)
        layer_output = self.intermediate(attention_norm)
        layer_output = self.LayerNorm_1(layer_output)
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them
        return outputs
class ALBertEncoder(nn.Module):
def __init__(self, config):
super(ALBertEncoder, self).__init__()
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
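        # ALBERT factorizes the embedding parameters: tokens are embedded in a smaller
        # space (embedding_size) and projected up to hidden_size before the first layer.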
self.embedding_hidden_mapping_in = nn.Linear(config.embedding_size, config.hidden_size)
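        # A single ALBertLayer instance is reused num_hidden_layers times in forward(),
        # implementing ALBERT's cross-layer parameter sharing.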
self.transformer = ALBertLayer(config)
self.layer_num = config.num_hidden_layers
    def forward(self, hidden_states, attention_mask=None, head_mask=None):
        all_hidden_states = ()
        all_attentions = ()
        for i in range(self.layer_num):
            if i == 0:
                hidden_states = self.embedding_hidden_mapping_in(hidden_states)
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = self.transformer(i,hidden_states, attention_mask, head_mask[i])
hidden_states = layer_outputs[0]
if self.output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if self.output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
outputs = (hidden_states,)
if self.output_hidden_states:
outputs = outputs + (all_hidden_states,)
if self.output_attentions:
outputs = outputs + (all_attentions,)
return outputs # last-layer hidden state, (all hidden states), (all attentions)
class ALBertPooler(nn.Module):
def __init__(self, config):
super(ALBertPooler, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class ALBertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super(ALBertPredictionHeadTransform, self).__init__()
self.dense = nn.Linear(config.hidden_size, config.embedding_size)
if isinstance(config.hidden_act, str) or (sys.version_info[0] == 2 and isinstance(config.hidden_act, unicode)):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = ALBertLayerNorm(config.embedding_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class ALBertLMPredictionHead(nn.Module):
def __init__(self, config):
super(ALBertLMPredictionHead, self).__init__()
self.transform = ALBertPredictionHeadTransform(config)
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
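        # The decoder maps from embedding_size (not hidden_size): the transform above
        # projects hidden states back into the factorized embedding space before decoding.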
self.bias = nn.Parameter(torch.zeros(config.vocab_size))
self.decoder = nn.Linear(config.embedding_size,
config.vocab_size,
bias=False)
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states) + self.bias
return hidden_states
class ALBertOnlyMLMHead(nn.Module):
def __init__(self, config):
super(ALBertOnlyMLMHead, self).__init__()
self.predictions = ALBertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class ALBertOnlyNSPHead(nn.Module):
def __init__(self, config):
super(ALBertOnlyNSPHead, self).__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class ALBertPreTrainingHeads(nn.Module):
def __init__(self, config):
super(ALBertPreTrainingHeads, self).__init__()
self.predictions = ALBertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
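    # seq_relationship is the 2-way sentence-level head (kept NSP-style here; the
    # ALBERT paper's sentence-order prediction objective uses the same head shape).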
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class ALBertPreTrainedModel(PreTrainedModel):
""" An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
"""
config_class = ALBertConfig
pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
load_tf_weights = load_tf_weights_in_albert
base_model_prefix = "bert"
def _init_weights(self, module):
""" Initialize the weights """
if isinstance(module, (nn.Linear, nn.Embedding)):
# Slightly different from the TF version which uses truncated_normal for initialization
# cf https://github.com/pytorch/pytorch/pull/5617
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
elif isinstance(module, ALBertLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
BERT_START_DOCSTRING = r""" The BERT model was proposed in
`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_
by Jacob Devlin, Ming-Wei Chang, Kenton Lee and Kristina Toutanova. It's a bidirectional transformer
pre-trained using a combination of masked language modeling objective and next sentence prediction
on a large corpus comprising the Toronto Book Corpus and Wikipedia.
This model is a PyTorch `torch.nn.Module`_ sub-class. Use it as a regular PyTorch Module and
    refer to the PyTorch documentation for all matters related to general usage and behavior.
.. _`BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`:
https://arxiv.org/abs/1810.04805
.. _`torch.nn.Module`:
https://pytorch.org/docs/stable/nn.html#module
Parameters:
config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the configuration.
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""
BERT_INPUTS_DOCSTRING = r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
ALBert is a model with absolute position embeddings so it's usually advised to pad the inputs on
the right rather than the left.
Indices can be obtained using :class:`transformers.ALBertTokenizer`.
See :func:`transformers.PreTrainedTokenizer.encode` and
:func:`transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``:
Mask to avoid performing attention on padding token indices.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**position_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Indices of positions of each input sequence tokens in the position embeddings.
Selected in the range ``[0, config.max_position_embeddings - 1]``.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""
@add_start_docstrings("The bare ALBert Model transformer outputting raw hidden-states without any specific head on top.",
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertModel(ALBertPreTrainedModel):
r"""
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**last_hidden_state**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, hidden_size)``
Sequence of hidden-states at the output of the last layer of the model.
**pooler_output**: ``torch.FloatTensor`` of shape ``(batch_size, hidden_size)``
Last layer hidden-state of the first token of the sequence (classification token)
further processed by a Linear layer and a Tanh activation function. The Linear
layer weights are trained from the next sentence prediction (classification)
objective during ALBert pretraining. This output is usually *not* a good summary
of the semantic content of the input, you're often better with averaging or pooling
the sequence of hidden-states for the whole input sequence.
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertModel.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple
"""
def __init__(self, config):
super(ALBertModel, self).__init__(config)
self.embeddings = ALBertEmbeddings(config)
self.encoder = ALBertEncoder(config)
self.pooler = ALBertPooler(config)
self.init_weights()
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self.embeddings.word_embeddings
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
self.embeddings.word_embeddings = new_embeddings
return self.embeddings.word_embeddings
def _prune_heads(self, heads_to_prune):
""" Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
See base class PreTrainedModel
"""
        for layer, heads in heads_to_prune.items():
            # The encoder shares a single transformer block across all layers, so
            # pruning heads for any layer prunes them for every layer.
            self.encoder.transformer.attention_1.prune_heads(heads)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1) # We can specify head_mask for each layer
            head_mask = head_mask.to(dtype=next(self.parameters()).dtype)  # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * self.config.num_hidden_layers
embedding_output = self.embeddings(input_ids, position_ids=position_ids, token_type_ids=token_type_ids)
encoder_outputs = self.encoder(embedding_output,
extended_attention_mask,
head_mask=head_mask)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output)
outputs = (sequence_output, pooled_output,) + encoder_outputs[1:] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions)
@add_start_docstrings("""ALBert Model with two heads on top as done during the pre-training:
a `masked language modeling` head and a `next sentence prediction (classification)` head. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertForPreTraining(ALBertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when both ``masked_lm_labels`` and ``next_sentence_label`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertForPreTraining.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
prediction_scores, seq_relationship_scores = outputs[:2]
"""
def __init__(self, config):
super(ALBertForPreTraining, self).__init__(config)
self.bert = ALBertModel(config)
self.cls = ALBertPreTrainingHeads(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
masked_lm_labels=None, next_sentence_label=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
outputs = (prediction_scores, seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if masked_lm_labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
total_loss = masked_lm_loss + next_sentence_loss
outputs = (total_loss,) + outputs
return outputs # (loss), prediction_scores, seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""ALBert Model with a `language modeling` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertForMaskedLM(ALBertPreTrainedModel):
r"""
**masked_lm_labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the masked language modeling loss.
Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
in ``[0, ..., config.vocab_size]``
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Masked language modeling loss.
**prediction_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertForMaskedLM.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids, masked_lm_labels=input_ids)
loss, prediction_scores = outputs[:2]
"""
def __init__(self, config):
super(ALBertForMaskedLM, self).__init__(config)
self.bert = ALBertModel(config)
self.cls = ALBertOnlyMLMHead(config)
self.init_weights()
self.tie_weights()
def tie_weights(self):
""" Make sure we are sharing the input and output embeddings.
Export to TorchScript can't handle parameter sharing so we are cloning them instead.
"""
self._tie_or_clone_weights(self.cls.predictions.decoder,
self.bert.embeddings.word_embeddings)
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
masked_lm_labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
prediction_scores = self.cls(sequence_output)
outputs = (prediction_scores,) + outputs[2:] # Add hidden states and attention if they are here
if masked_lm_labels is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1))
outputs = (masked_lm_loss,) + outputs
return outputs # (masked_lm_loss), prediction_scores, (hidden_states), (attentions)
@add_start_docstrings("""ALBert Model with a `next sentence prediction (classification)` head on top. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertForNextSentencePrediction(ALBertPreTrainedModel):
r"""
**next_sentence_label**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring)
Indices should be in ``[0, 1]``.
``0`` indicates sequence B is a continuation of sequence A,
``1`` indicates sequence B is a random sequence.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``next_sentence_label`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Next sequence prediction (classification) loss.
**seq_relationship_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, 2)``
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertForNextSentencePrediction.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
outputs = model(input_ids)
seq_relationship_scores = outputs[0]
"""
def __init__(self, config):
super(ALBertForNextSentencePrediction, self).__init__(config)
self.bert = ALBertModel(config)
self.cls = ALBertOnlyNSPHead(config)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
next_sentence_label=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
pooled_output = outputs[1]
seq_relationship_score = self.cls(pooled_output)
outputs = (seq_relationship_score,) + outputs[2:] # add hidden states and attention if they are here
if next_sentence_label is not None:
loss_fct = CrossEntropyLoss(ignore_index=-1)
next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
outputs = (next_sentence_loss,) + outputs
return outputs # (next_sentence_loss), seq_relationship_score, (hidden_states), (attentions)
@add_start_docstrings("""ALBert Model transformer with a sequence classification/regression head on top (a linear layer on top of
the pooled output) e.g. for GLUE tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertForSequenceClassification(ALBertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the sequence classification/regression loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss),
If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy).
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification (or regression if config.num_labels==1) loss.
**logits**: ``torch.FloatTensor`` of shape ``(batch_size, config.num_labels)``
Classification (or regression if config.num_labels==1) scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertForSequenceClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1]).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, logits = outputs[:2]
"""
def __init__(self, config):
super(ALBertForSequenceClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = ALBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
if self.num_labels == 1:
# We are doing regression
loss_fct = MSELoss()
loss = loss_fct(logits.view(-1), labels.view(-1))
else:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), logits, (hidden_states), (attentions)
@add_start_docstrings("""ALBert Model with a multiple choice classification head on top (a linear layer on top of
the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertForMultipleChoice(ALBertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(ALBertForMultipleChoice, self).__init__(config)
self.bert = ALBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None):
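        # Inputs arrive as (batch, num_choices, seq_len); flatten the choice dimension
        # so the encoder processes batch * num_choices ordinary sequences.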
num_choices = input_ids.shape[1]
input_ids = input_ids.view(-1, input_ids.size(-1))
attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs # (loss), reshaped_logits, (hidden_states), (attentions)
@add_start_docstrings("""ALBert Model with a token classification head on top (a linear layer on top of
the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertForTokenClassification(ALBertPreTrainedModel):
r"""
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, sequence_length)``:
Labels for computing the token classification loss.
Indices should be in ``[0, ..., config.num_labels - 1]``.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length, config.num_labels)``
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertForTokenClassification.from_pretrained('bert-base-uncased')
input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute")).unsqueeze(0) # Batch size 1
labels = torch.tensor([1] * input_ids.size(1)).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, scores = outputs[:2]
"""
def __init__(self, config):
super(ALBertForTokenClassification, self).__init__(config)
self.num_labels = config.num_labels
self.bert = ALBertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None,
position_ids=None, head_mask=None, labels=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output)
logits = self.classifier(sequence_output)
outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here
if labels is not None:
loss_fct = CrossEntropyLoss()
# Only keep active parts of the loss
if attention_mask is not None:
active_loss = attention_mask.view(-1) == 1
active_logits = logits.view(-1, self.num_labels)[active_loss]
active_labels = labels.view(-1)[active_loss]
loss = loss_fct(active_logits, active_labels)
else:
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
outputs = (loss,) + outputs
return outputs # (loss), scores, (hidden_states), (attentions)
@add_start_docstrings("""ALBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """,
BERT_START_DOCSTRING, BERT_INPUTS_DOCSTRING)
class ALBertForQuestionAnswering(ALBertPreTrainedModel):
r"""
**start_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
**end_positions**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`).
Position outside of the sequence are not taken into account for computing the loss.
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
**start_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-start scores (before SoftMax).
**end_scores**: ``torch.FloatTensor`` of shape ``(batch_size, sequence_length,)``
Span-end scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = ALBertTokenizer.from_pretrained('bert-base-uncased')
model = ALBertForQuestionAnswering.from_pretrained('bert-large-uncased-whole-word-masking-finetuned-squad')
question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
input_text = "[CLS] " + question + " [SEP] " + text + " [SEP]"
input_ids = tokenizer.encode(input_text)
token_type_ids = [0 if i <= input_ids.index(102) else 1 for i in range(len(input_ids))]
start_scores, end_scores = model(torch.tensor([input_ids]), token_type_ids=torch.tensor([token_type_ids]))
all_tokens = tokenizer.convert_ids_to_tokens(input_ids)
print(' '.join(all_tokens[torch.argmax(start_scores) : torch.argmax(end_scores)+1]))
# a nice puppet
"""
def __init__(self, config):
super(ALBertForQuestionAnswering, self).__init__(config)
self.num_labels = config.num_labels
self.bert = ALBertModel(config)
self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
self.init_weights()
def forward(self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None,
start_positions=None, end_positions=None):
outputs = self.bert(input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask)
sequence_output = outputs[0]
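        # qa_outputs emits two scores per token; split them into independent
        # start-position and end-position logits.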
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
outputs = (start_logits, end_logits,) + outputs[2:]
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions.clamp_(0, ignored_index)
end_positions.clamp_(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
outputs = (total_loss,) + outputs
return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
# ====== space_invaders/__main__.py (repo: tt-n-walters/python-thursday) ======
import arcade
from player import Player
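# Arcade drives the game loop: on_update runs every frame and on_draw repaints,
# so movement state set in the key handlers is applied continuously.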
class SpaceInvaders(arcade.Window):
def __init__(self):
super().__init__(800, 600, "Space Invaders")
self.player = Player(self.width / 2, self.height * 0.1)
def on_key_press(self, symbol, modifiers):
if symbol == arcade.key.ESCAPE:
arcade.close_window()
if symbol == arcade.key.A:
self.player.move_left()
if symbol == arcade.key.D:
self.player.move_right()
def on_key_release(self, symbol, modifiers):
if symbol == arcade.key.A:
self.player.stop()
if symbol == arcade.key.D:
self.player.stop()
def on_draw(self):
arcade.start_render()
self.player.draw()
    def on_update(self, delta_time):
self.player.update()
if __name__ == "__main__":
from sys import argv
from os import chdir
chdir(argv[0].rpartition("/")[0])
SpaceInvaders()
arcade.run()
# ====== code/zabbix/Bin/Configurations/Zabbix_Mysql_DB_HA_Config_Secondary.py (repo: satish15625/pythonwork) ======
# File Name:- Zabbix_Mysql_DB_HA_Config_Secondary.py
# Service Name:- N/A
# Purpose: To config zabbix db replica
# Author Name: sankar
# Create Date: 25/Sep/20
# Modified By:- 28/09/2020
# Last Modify Date:
# Current Version: 1.0
# Summary of Last Change: N/A
import os
import sys
import logging
import time
import subprocess
import json
import mysql.connector
from mysql.connector import Error
sys.path.insert(0, os.environ['Admin_Root'] + '/zabbix/Lib')
sys.path.insert(0, os.environ['Admin_Root'] + '/zabbix/Lib/uistatus')
from uistatus import sendstatus
class Zabbix_DB():
def __init__(self):
self.flag = 0
self.server_type = sys.argv[1]
self.pri_db = sys.argv[2]
self.root = os.environ['Admin_Root']
global_json = open(self.root + '/zabbix/Input/global.json', 'r')
self.load_variable = json.load(global_json)
dt = time.strftime("%Y:%m:%d-%H:%M:%S")
self.output = open(self.root + "/zabbix/Output/Zabbix_DB_HA_Config_" + dt + ".txt", "a+")
self.environment = self.load_variable["Environment"]
self.db_user = self.load_variable['db_creds']['username']
self.db_pwd = self.load_variable['db_creds']['password']
self.hostname = subprocess.getoutput("hostname -s")
self.component = "DB"
self.send_status = sendstatus()
self.api_payload = {"ToolName": "Zabbix", "Component": self.component, "ServerFQDN": self.hostname,
"ServerType": self.server_type, "Environment": self.environment, "Stage": "3",
"Status": "1", "StatusReason": "Zabbix HA DB Configuration Initiated"
}
# self.send_status.send_status([self.api_payload])
self.send_status.send_logs([self.api_payload])
self.log_file = self.root + "/zabbix/Logs/Zabbix_DB_HA_Config_" + dt + ".log"
logging.basicConfig(filename=self.log_file,
format='%(asctime)s %(message)s',
filemode='a', datefmt='%m/%d/%Y %I:%M:%S %p')
self.logger = logging.getLogger()
self.logger.setLevel(logging.INFO)
self.logger.info("Deployment Started on " + dt)
def update_file(self):
"""
Code for updating the file
"""
try:
# put server conf file in /etc/my.cnf
self.logger.info("Updating the my.cnf file")
conf_data = [
"server-id = 2\n",
"log-bin = mysql-bin\n",
"binlog_format = row\n",
"gtid-mode=ON\n",
"enforce-gtid-consistency\n",
"log-slave-updates\n",
"relay-log = relay-log-server\n"
]
# production config path
file_object = open('/etc/my.cnf', 'a')
file_object.writelines(conf_data)
file_object.close()
except Exception as e:
self.logger.info("Exception caught updating my.cnf file" + str(e))
self.flag = 1
def restart_service(self):
"""
Code for restarting services
"""
try:
self.logger("Mysql Restarting.......")
reset = "systemctl restart mysqld"
os.system(reset)
time.sleep(5)
except Exception as e:
self.logger.info("Exception caught while restart" + str(e))
def run_mysql_commands(self):
# with open('primary_data.json') as data_file:
# data = json.load(data_file)
try:
dbname = 'zabbix'
user = 'zabbix'
pwd = self.db_pwd
            rep_User = self.load_variable['Zabbix HA']['replication_User']
            rep_Pass = self.load_variable['Zabbix HA']['replication_Pass']
            master_usr = self.load_variable['Zabbix HA']['master_user']
            master_pass = self.load_variable['Zabbix HA']['master_password']
conn = mysql.connector.connect(host='localhost',
database=dbname,
user=user,
password=pwd)
if conn.is_connected():
self.logger.info('Connected to MySQL database')
mycursor = conn.cursor()
self.logger.info("Creating Replication dbuser")
tmpsecond = "create user '{replication_dbuser}'@'%' identified by '{replication_Pass}';".format(
replication_dbuser=rep_User, replication_Pass=rep_Pass)
mycursor.execute(tmpsecond)
self.logger.info("GRANT REPLICATION SLAVE ON")
gtmp = "GRANT REPLICATION SLAVE ON *.* TO '{replication_dbuser}'@'%';".format(replication_dbuser=rep_User)
mycursor.execute(gtmp)
                self.logger.info('------------------- Creating User for Secondary IP -----------------')
tmpsec = "CREATE USER '{replication_dbuser}'@'{replication_Ip}' IDENTIFIED WITH mysql_native_password BY '{replication_Pass}';".format(
replication_dbuser=rep_User, replication_Ip=self.pri_db, replication_Pass=rep_Pass)
mycursor.execute(tmpsec)
self.logger.info("Grant Permisson")
prsectmp = "GRANT ALL ON *.* TO '{replication_dbuser}'@'{replication_Ip}'".format(
replication_dbuser=rep_User, replication_Ip=self.pri_db)
mycursor.execute(prsectmp)
mycursor.execute("FLUSH PRIVILEGES;")
self.logger.info("------Master Status------------")
mycursor.execute("show master status;")
for st in mycursor:
self.logger.info(str(st))
# stop slave status
self.logger.info("Stoping slave..")
mycursor.execute("stop slave;")
time.sleep(5)
# CHANGE MASTER TO MASTER_HOST
master_host = "CHANGE MASTER TO MASTER_HOST = '{replication_Ip}', MASTER_PORT = 3306, MASTER_USER = '{replication_user}', MASTER_PASSWORD = '{replication_password}', MASTER_AUTO_POSITION = 1;".format(
replication_Ip=self.pri_db, replication_user=master_usr, replication_password=master_pass)
mycursor.execute(master_host)
self.logger.info("CHANGE MASTER TO MASTER_HOST Done")
# start slave
self.logger.info("Restarting the Slave....")
mycursor.execute("start slave;")
time.sleep(8)
self.logger.info("Show slave status ")
mycursor.execute("show slave status")
for sst in mycursor:
self.logger.info(str(sst))
self.logger.info("--------------done-----------------------")
else:
self.logger.info("Connection err")
self.flag = 1
except Exception as e:
self.logger.info("Exception caught during config" + str(e))
self.flag = 1
def final_check(self):
if self.flag == 0:
self.logger.info("Zabbix HA DB Configuration Success")
self.api_payload["StatusReason"] = "Zabbix HA DB Secondary Configuration Success"
self.api_payload["Status"] = 2
# self.send_status.send_status([self.api_payload])
self.send_status.send_logs([self.api_payload])
# self.output.writelines("Pass")
print("3rr0rC0d3[200]")
else:
self.logger.info("Zabbix HA DB Configuration Failed")
self.api_payload["StatusReason"] = "Zabbix HA DB Secondary Configuration Failed"
self.api_payload["Status"] = 3
# self.send_status.send_status([self.api_payload])
self.send_status.send_logs([self.api_payload])
# self.output.writelines("Fail")
print("3rr0rC0d3[500]")
if __name__ == "__main__":
call = Zabbix_DB()
call.update_file()
call.restart_service()
#call.run_mysql_commands()
call.final_check()
# ====== backup/user_014/ch164_2020_06_22_14_35_35_738838.py (repo: gabriellaec/desoft-analise-exercicios) ======
def traduz(lista_ing, ing_para_port):
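    # Scan the dict keys for each English word; ing_para_port.get(palavra) would
    # give the same result with a direct O(1) lookup per word.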
lista_port = []
i = 0
while i < len(lista_ing):
for palavra_ing in ing_para_port.keys():
if palavra_ing == lista_ing[i]:
lista_port.append(ing_para_port[palavra_ing])
i += 1
    return lista_port
# ====== Code/CodeRecords/2962/60632/270495.py (repo: AdamZhouSE/pythonHomework) ======
n, p = map(int, input().split(' '))
key = list(map(str, input().split(' ')))
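# Hash each key from its last three letters: treat 'A'..'Z' as digits of a
# base-32 number, then reduce modulo the table size p.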
for i in range(n):
tmp = key[i][-3:]
key[i] = [ord(tmp[j])-ord('A') for j in range(3)]
val = 0
for j in range(3):
val += key[i][2-j] * int(pow(32, j))
key[i] = val % p
print(key)
# ====== logindjango/urls.py (repo: Rajangupta09/School-beta) ======
from django.contrib import admin
from django.urls import path
from django.conf.urls import include
from django.conf import settings
from django.conf.urls.static import static
import accounts.views as account_view
urlpatterns = [
path('admin/', admin.site.urls),
path('', account_view.login, name="login"),
path('auth/', include('accounts.urls')),
path('studentForm/', include('form.urls')),
path('marks/', include('markssection.urls')),
path('dashboard/', include('dashboard.urls')),
path('empForm/', include('employeeform.urls')),
path('classForm/', include('classform.urls')),
path('attendence/', include('attendence.urls')),
path('homework/', include('homework.urls')),
path('notice/', include('notice.urls')),
path('thought/', include('dailythought.urls')),
path('newsletter/', include('newsletter.urls')),
path('schoolinfo/', include('schoolinfo.urls')),
path('holiday/', include('holiday.urls')),
path('fees/', include('fees.urls')),
path('feeReport/', include('feereport.urls')),
path('transport/', include('transport.urls')),
path('visitor/', include('visitors.urls')),
path('leave/', include('leave.urls')),
path('gallery/', include('gallery.urls')),
path('timetable/', include('timetable.urls')),
path('api/', include('rest_api.urls')),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
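# Note: static() only serves files this way when DEBUG is enabled; in production
# the web server is expected to serve STATIC_ROOT and MEDIA_ROOT directly.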
# ====== third_party/WebKit/Source/build/scripts/make_media_features.py (repo: metux/chromium-suckless) ======
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import media_feature_symbol
import in_generator
import template_expander
import name_utilities
import sys
class MakeMediaFeaturesWriter(in_generator.Writer):
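    # Per-entry attributes recognized in the .in file; entries that do not set an
    # attribute fall back to these values.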
defaults = {
'Conditional': None, # FIXME: Add support for Conditional.
'RuntimeEnabled': None,
'ImplementedAs': None,
}
filters = {
'symbol': media_feature_symbol.getMediaFeatureSymbolWithSuffix(''),
'to_macro_style': name_utilities.to_macro_style,
}
default_parameters = {
'namespace': '',
'export': '',
}
def __init__(self, in_file_path):
super(MakeMediaFeaturesWriter, self).__init__(in_file_path)
self._outputs = {
('MediaFeatures.h'): self.generate_header,
}
self._template_context = {
'namespace': '',
'export': '',
'entries': self.in_file.name_dictionaries,
}
@template_expander.use_jinja('MediaFeatures.h.tmpl', filters=filters)
def generate_header(self):
return self._template_context
if __name__ == '__main__':
in_generator.Maker(MakeMediaFeaturesWriter).main(sys.argv)
| [
"[email protected]"
]
| |
193d45a64aa8c10d15e3bff1fe15b1fdea4b440c | c167d1618c1df50de21238a00d4168505dce868f | /0x0A-python-inheritance/6-base_geometry.py | f666928f53bb510d1e7df03101098d47fd1de7a1 | []
| no_license | keen-s/alx-higher_level_programming | 8201845d7142bfdcbfa603fa3d135e3fabbe2bf2 | 7a70051b8cf89441f034c4886f51a99ae85e4f34 | refs/heads/main | 2023-08-12T18:23:30.985481 | 2021-10-18T20:48:29 | 2021-10-18T20:48:29 | 403,688,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 259 | py | #!/usr/bin/python3
"""A class based on 5-base_geometry.py"""
class BaseGeometry:
"""A class"""
def area(self):
""" A public instance method that raises
an exception
"""
raise Exception("area() is not implemented")
| [
"[email protected]"
]
| |
c7d25be25b084123be9f503b4dc69a84f322d9d1 | d76e8c5e7853b145b2c084975cadd0e4f29943f1 | /lib/bloggertool/commands/browse.py | fd3cde8a0673dbc98206c4d686146d1b6099ede0 | [
"MIT"
]
| permissive | asvetlov/bloggertool | 57ab7408d6a7624f7d44ccc60de3761e7524935c | 7c145f66aa22f4124e8b1d198bc93ff703fa72b4 | refs/heads/master | 2021-03-12T20:16:56.014541 | 2015-03-29T09:54:36 | 2015-03-29T09:54:36 | 19,406,280 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,879 | py | # commands/browse.py
# Copyright (C) 2011-2014 Andrew Svetlov
# [email protected]
#
# This module is part of BloggerTool and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from textwrap import dedent
import webbrowser
from bloggertool.notify import Notifier
from bloggertool.str_util import T, a
from bloggertool.exceptions import ConfigError
from .basecommand import BaseCommand
class OpenCommand(BaseCommand):
NAME = 'open'
HELP = "Open browser with local html output for post."
DESCR = dedent("""\
Open browser with local html output for post
""")
require_interactive = True
@classmethod
def fill_parser(cls, parser):
parser.add_argument('file', help="md file to link with")
parser.add_argument('--always', default=False, action='store_true',
help="Always regenerate html files")
parser.add_argument('--serve', default=False, action='store_true',
help=T("""
Loop forever waiting for source file change,
updating html as reaction
"""))
def __init__(self, args):
self.file = args.file
self.always = args.always
self.serve = args.serve
def run(self):
config = self.config
post = config.post_by_path(self.file)
if not post:
self.log.error("MD file '%s' is not registered", self.file)
return
if config.interactive is None and self.serve:
raise ConfigError(a("""
Cannot process --serve in interactive mode.
Specify either --force or --no-clobber
"""))
post.refresh_html(self.always)
abs_path = config.fs.abs_path(post.nice_html_path)
self.log.info("Opening '%s'", abs_path)
webbrowser.open('file:///' + abs_path)
if self.serve:
notifier = Notifier(config.fs.root)
notifier.add(config.fs.abs_path(post.file),
post.refresh_html, force=False)
self.log.info(T("Run serve loop"))
notifier.loop()
class ROpenCommand(BaseCommand):
NAME = 'ropen'
HELP = "Open browser with remote html output for post."
DESCR = dedent("""\
Open browser with remote html output for post
""")
@classmethod
def fill_parser(cls, parser):
parser.add_argument('file', help="md file to link with")
def __init__(self, args):
self.file = args.file
def run(self):
config = self.config
post = config.post_by_path(self.file)
if not post:
self.log.error("MD file '%s' is not registered", self.file)
return
self.log.info("Opening '%s'", post.link)
webbrowser.open(post.link)
| [
"[email protected]"
]
| |
a84dc99ea307684578abc7349b72c0a66f31f49b | 4a0348ccb890c73ebd88feafafc279af26e05f25 | /django/django_fullstack/TV shows - Copy/ibtsal/settings.py | 70bf9b51e79cf9074541c8cbf0ee818adfeda5ff | []
| no_license | wadeeeawwad/python_stack | 00936837103b9f78f66961d88ae3a6233adbbea3 | 6d2c3712c40b035e0d43cc7a27b2e2f48d4a8281 | refs/heads/master | 2023-07-11T14:59:02.617899 | 2021-08-23T11:37:15 | 2021-08-23T11:37:15 | 364,533,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,099 | py | """
Django settings for ibtsal project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 't*wcfq8!nn&)zn+ez!+5i-t_1)1(2^n_&rt93n@bc^9b2jo54h'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'app',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'ibtsal.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ibtsal.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
]
| |
da98e4bb53e067d4f151787d8ffd0e33a97109ae | f23fda62b1335182e0a59764c83680401c32c1b8 | /API_EX_PrtyPrinted/settings.py | 699115e70e992756d10182dcfdfe325d5ba38cd4 | []
| no_license | brrbaral/DjagoRESTLanguages | 2798aa087e0607ed5afe68141ec80965a7f47c36 | 07601986ce5976268bb86d8d7e8e4e4fecd912ea | refs/heads/master | 2023-01-10T16:53:06.515318 | 2020-11-11T03:52:46 | 2020-11-11T03:52:46 | 311,851,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,309 | py | """
Django settings for API_EX_PrtyPrinted project.
Generated by 'django-admin startproject' using Django 2.0.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!459cd@qoo2xvc+^z1teyz9@p*2ying(^cf=kdh@bp7+7)eh@o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'languages',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'API_EX_PrtyPrinted.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'API_EX_PrtyPrinted.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK={
    # DEFAULT_PERMISSION_CLASSES is a tuple, so the trailing comma after a single item is required
'DEFAULT_PERMISSION_CLASSES':('rest_framework.permissions.IsAuthenticatedOrReadOnly',)
}
| [
"[email protected]"
]
| |
a8dde743a40b22f756722ef65d395379ba3c280d | f1c20d0836f4815b81c895ffe22a29005db3746d | /backend/main/wsgi.py | 55aa3e2e2e988c5c5affb5d4806a1c97c8ae1eb1 | []
| no_license | pavelm2007/leadersofdigital_2020_04 | 6ceacf0858ea46bd73c5a0e0ab120cae802e85bd | 0132d1b3361518b109b0632daaf13ed8e849192d | refs/heads/main | 2023-04-04T21:12:54.890040 | 2021-04-17T20:37:02 | 2021-04-17T20:37:02 | 358,649,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | """
WSGI config for main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings.production")
application = get_wsgi_application()
| [
"[email protected]"
]
| |
cef6047ec940823cb0576c6bf724bb9d4e71612f | 549d11c89ce5a361de51f1e1c862a69880079e3c | /feiji/testss.py | 0a2fa1c42bfbdea0b87652b6085ffd6b503f8b1f | []
| no_license | BaldSuperman/workspace | f304845164b813b2088d565fe067d5cb1b7cc120 | 4835757937b700963fdbb37f75a5e6b09db97535 | refs/heads/master | 2020-08-01T15:32:02.593251 | 2019-09-26T08:04:50 | 2019-09-26T08:04:50 | 211,034,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | import pygame
from feiji.plane_Sprite import *
screen = pygame.display.set_mode(SCREEN_RECT.size)
while True:
    for event in pygame.event.get():
        print(event.type)
| [
"[email protected]"
]
| |
a89d752f30f6424ebd44fad015341ca5ff2fb94b | 178766cfa5b4a4785a900595278889ed8a828c90 | /blog/migrations/0011_rename_user_author_author.py | 7c1813b1f639eb0fbbb4c61c013dc6d2806ec67a | []
| no_license | ShahadatShuvo/django_blog_app | 0a48fd03b1ab585a08ae41bea40dd905771f7093 | 13c5d98c73fd65ad7353d83ca065344a2811c694 | refs/heads/master | 2023-06-05T08:11:40.479944 | 2021-06-26T18:55:12 | 2021-06-26T18:55:12 | 376,760,411 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | # Generated by Django 3.2.4 on 2021-06-26 14:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('blog', '0010_rename_author_author_user'),
]
operations = [
migrations.RenameField(
model_name='author',
old_name='user',
new_name='author',
),
]
| [
"[email protected]"
]
| |
a39df03e00d5dcd7b104a4626d01939719e4656b | fc5fa8501e8a62291a48c82611e1b74b961ca561 | /abps/new_pymetrics.py | b898a44f4de4d2e6446aba52eddb37a94d5606be | [
"Apache-2.0"
]
| permissive | hitesh-hk/google-research | fa3d3e31cce995fa6da89322dab4993bf1c1ead8 | ddc22300c4cb3223654c9a981f892dc0f6286e35 | refs/heads/master | 2021-02-17T18:57:31.267570 | 2020-01-17T14:49:25 | 2020-01-17T14:54:27 | 245,119,290 | 1 | 1 | Apache-2.0 | 2020-03-05T09:24:01 | 2020-03-05T09:24:00 | null | UTF-8 | Python | false | false | 13,482 | py | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of various python metrics."""
import gin
import numpy as np
import tensorflow.compat.v2 as tf
from tf_agents.metrics import py_metric
from tf_agents.metrics import py_metrics
from tf_agents.utils import numpy_storage
@gin.configurable
class PolicyUsageFrequency(py_metric.PyStepMetric):
"""Class for policy usage metrics.
Policy usage metrics keep track of the last (upto) K values of binary
indicator of the selection of behavior policy in a Deque buffer of size K.
Calling result() will return the frequency of the policy being used in recent
K selections.
"""
def __init__(self, name='PolicyUsageFrequency', buffer_size=10):
super(PolicyUsageFrequency, self).__init__(name)
self._buffer = py_metrics.NumpyDeque(maxlen=buffer_size, dtype=np.float64)
self.reset()
def reset(self):
self._buffer.clear()
def add_to_buffer(self, values):
"""Appends new values to the buffer."""
self._buffer.extend(values)
def result(self):
"""Returns the value of this metric."""
if self._buffer:
return self._buffer.mean(dtype=np.float32)
return np.array(0.0, dtype=np.float32)
def _batched_call(self, is_selected):
self.add_to_buffer(is_selected)
def call(self, is_selected=0):
self._batched_call(is_selected)
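# Example usage (illustrative sketch; the buffer size and inputs below are
# arbitrary, not values from the original experiments):
#   metric = PolicyUsageFrequency(buffer_size=4)
#   metric(1); metric(0); metric(1); metric(1)
#   metric.result()  # -> 0.75, the policy's share of the last 4 selections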
@gin.configurable
class StatsNumpyDeque(py_metrics.NumpyDeque):
"""Deque implementation using a numpy array as a circular buffer."""
def __init__(self, maxlen, dtype):
super(StatsNumpyDeque, self).__init__(maxlen=maxlen, dtype=dtype)
def resize_buffer(self, new_size=20, dtype=np.float64):
new_buffer = np.zeros(shape=(new_size,), dtype=dtype)
if self._len == self._buffer.shape[0]:
data = np.append(self._buffer[self._start_index:],
self._buffer[0:self._start_index])
else:
assert self._start_index == 0
data = self._buffer[:self._len]
new_buffer[0:self._len] = data
self._start_index = 0
self._maxlen = np.array(new_size)
self._buffer = new_buffer
def std(self, dtype=None):
if self._len == self._buffer.shape[0]:
return np.std(self._buffer, dtype=dtype)
assert self._start_index == 0
return np.std(self._buffer[:self._len], dtype=dtype)
def ucb(self, coeff=0.0, dtype=None):
if self._len == self._buffer.shape[0]:
array = self._buffer
else:
assert self._start_index == 0
array = self._buffer[:self._len]
return np.mean(array, dtype=dtype) + coeff * np.std(array, dtype=dtype)
def sum(self, dtype=None):
if self._len == self._buffer.shape[0]:
array = np.append(self._buffer[self._start_index:],
self._buffer[0:self._start_index])
else:
assert self._start_index == 0
array = self._buffer[:self._len]
result = np.sum(array[array != 0], dtype=dtype)
return np.nan if result == 0 else result
# def rolling_q(self, way='q', coeff=0, dtype=None):
# if self._len == self._buffer.shape[0]:
# array = np.append(self._buffer[self._start_index:],
  #                     self._buffer[0:self._start_index])
# else:
# assert self._start_index == 0
# array = self._buffer[:self._len]
# q = np.mean(array[array != 0],dtype=dtype)
# if way == 'ucb':
# ucb = q + np.sqrt(coeff*np.log(len(array))/sum(array != 0))
# return np.inf if np.isnan(ucb) else ucb
# elif way == 'lcb':
# lcb = q - np.sqrt(coeff*np.log(len(array))/sum(array != 0))
# return -np.inf if np.isnan(lcb) else lcb
# else:
# return np.inf if np.isnan(q) else q
def rolling_most_recent(self, dtype=None):
if self._len == self._buffer.shape[0]:
array = np.append(self._buffer[self._start_index:],
self._buffer[0:self._start_index])
else:
assert self._start_index == 0
array = self._buffer[:self._len]
valid = array[array != 0]
    return valid[-1] if valid.size else np.nan
def replace_last(self, value):
if self._len == self._buffer.shape[0]:
self._buffer[self._start_index - 1] = value
else:
assert self._start_index == 0
self._buffer[self._len - 1] = value
@gin.configurable
class DistributionReturnMetric(py_metrics.StreamingMetric):
"""Computes the mean and variance of batched undiscounted rewards."""
def __init__(self,
name='DistributionReturn',
buffer_size=10,
batch_size=None):
"""Creates an DistributionReturnMetric."""
self._np_state = numpy_storage.NumpyState()
# Set a dummy value on self._np_state.episode_return so it gets included in
# the first checkpoint (before metric is first called).
self._np_state.episode_return = np.float64(0)
self._np_state.episode_end_mask = np.float64(0)
# self.count_episode = 0
super(DistributionReturnMetric, self).__init__(
name, buffer_size=buffer_size, batch_size=batch_size)
# overwrite buffer to enable more statistics computation
self._buffer = StatsNumpyDeque(maxlen=buffer_size, dtype=np.float64)
def _reset(self, batch_size):
"""Resets stat gathering variables."""
self._np_state.episode_return = np.zeros(
shape=(batch_size,), dtype=np.float64)
self._np_state.episode_end_mask = np.zeros(
shape=(batch_size,), dtype=np.float64)
# self.count_episode = 0
def set_mask(self, mask_item):
self._np_state.episode_end_mask[mask_item] = 1
return sum(self._np_state.episode_end_mask)
def get_buffer_size(self):
return len(self._buffer)
# overwrite result to output statistics
def result(self, way='mean', coeff=0.0):
"""Returns the value of this metric."""
if self._buffer:
if way == 'mean':
return self._buffer.mean(dtype=np.float32)
elif way == 'std':
return self._buffer.std(dtype=np.float32)
elif way == 'ucb':
return self._buffer.ucb(coeff=coeff, dtype=np.float32)
return np.array(0.0, dtype=np.float32)
def _batched_call(self, traj):
"""Processes the trajectory to update the metric.
Args:
traj: a tf_agents.trajectory.Trajectory.
"""
episode_return = self._np_state.episode_return
is_first = np.where(traj.is_first())[0]
episode_return[is_first] = 0
episode_return += traj.reward
is_last = np.where(traj.is_last())[0]
is_masked = np.where(self._np_state.episode_end_mask > 0)[0]
new_last = np.setdiff1d(is_last, is_masked)
self.add_to_buffer(episode_return[new_last])
@gin.configurable
class DistributionEpisodeLengthMetric(py_metrics.StreamingMetric):
"""Computes the average episode length."""
def __init__(self,
name='DistributionEpisodeLength',
buffer_size=10,
batch_size=None):
"""Creates an AverageEpisodeLengthMetric."""
self._np_state = numpy_storage.NumpyState()
# Set a dummy value on self._np_state.episode_return so it gets included in
# the first checkpoint (before metric is first called).
self._np_state.episode_steps = np.float64(0)
self._np_state.episode_end_mask = np.float64(0)
super(DistributionEpisodeLengthMetric, self).__init__(
name, buffer_size=buffer_size, batch_size=batch_size)
self._buffer = StatsNumpyDeque(maxlen=buffer_size, dtype=np.float64)
def _reset(self, batch_size):
"""Resets stat gathering variables."""
self._np_state.episode_steps = np.zeros(
shape=(batch_size,), dtype=np.float64)
self._np_state.episode_end_mask = np.zeros(
shape=(batch_size,), dtype=np.float64)
def set_mask(self, mask_item):
self._np_state.episode_end_mask[mask_item] = 1
return sum(self._np_state.episode_end_mask)
def get_buffer_size(self):
return len(self._buffer)
def result(self, way='mean', coeff=0.0):
"""Returns the value of this metric."""
if self._buffer:
if way == 'mean':
return self._buffer.mean(dtype=np.float32)
elif way == 'std':
return self._buffer.std(dtype=np.float32)
elif way == 'ucb':
return self._buffer.ucb(coeff=coeff, dtype=np.float32)
elif way == '95ucb':
return self._buffer.mean(dtype=np.float32) + 1.96 * self._buffer.std(
dtype=np.float32) / self.get_buffer_size()
return np.array(0.0, dtype=np.float32)
def _batched_call(self, traj):
"""Processes the trajectory to update the metric.
Args:
traj: a tf_agents.trajectory.Trajectory.
"""
episode_steps = self._np_state.episode_steps
# Each non-boundary trajectory (first, mid or last) represents a step.
episode_steps[np.where(~traj.is_boundary())[0]] += 1
is_last = np.where(traj.is_last())[0]
is_masked = np.where(self._np_state.episode_end_mask > 0)[0]
new_last = np.setdiff1d(is_last, is_masked)
self.add_to_buffer(episode_steps[new_last])
episode_steps[new_last] = 0
@gin.configurable
class QMetric(py_metric.PyStepMetric):
"""Class for policy usage metrics.
Policy usage metrics keep track of the last (upto) K values of binary
indicator of the selection of behavior policy in a Deque buffer of size K.
Calling result() will return the frequency of the policy being used in recent
K selections.
"""
def __init__(self, name='QMetric', buffer_size=10):
super(QMetric, self).__init__(name)
self._buffer = StatsNumpyDeque(maxlen=buffer_size, dtype=np.float64)
self._count = StatsNumpyDeque(maxlen=buffer_size, dtype=np.float64)
self._sumcount = StatsNumpyDeque(maxlen=buffer_size, dtype=np.float64)
self._np_state = numpy_storage.NumpyState()
self._np_state._most_recent_q = np.float64(-100) # pylint: disable=protected-access
self._np_state._most_recent_time = np.int64(0) # pylint: disable=protected-access
self.reset()
def rename(self, name):
self._name = name
with tf.name_scope(name) as scope_name:
self._scope_name = scope_name
def get_buffer_size(self):
return len(self._buffer)
# overwrite result to output q
def result(self, way='q', coeff=0.0):
"""Returns the value of specified metric."""
if self._buffer:
if way == 'rolling_most_recent':
return self._buffer.rolling_most_recent(dtype=np.float32)
elif way == 'q':
q = self._buffer.sum(dtype=np.float32) / self._count.sum()
return np.inf if np.isnan(q) else q
elif way == 'ucb':
ucb = self._buffer.sum(dtype=np.float32) / self._count.sum() + np.sqrt(
coeff * np.log(self._sumcount.sum()) / self._count.sum())
return np.inf if np.isnan(ucb) else ucb
elif way == 'lcb':
lcb = self._buffer.sum(dtype=np.float32) / self._count.sum() - np.sqrt(
coeff * np.log(self._sumcount.sum()) / self._count.sum())
return -np.inf if np.isnan(lcb) else lcb
elif way == 'most_recent':
return self._np_state._most_recent_q # pylint: disable=protected-access
elif way == 'most_recent_time':
return self._np_state._most_recent_time # pylint: disable=protected-access
return np.nan
def reset(self):
self._buffer.clear()
self._np_state._most_recent_q = np.float64(-100) # pylint: disable=protected-access
self._np_state._most_recent_time = np.int64(0) # pylint: disable=protected-access
self._count.clear()
self._sumcount.clear()
def add_to_buffer(self, value, discount=1.0, update_time=True):
"""Appends new values to the buffer."""
self._buffer._buffer *= discount
self._count._buffer *= discount
self._sumcount._buffer *= discount
self._buffer.extend([value])
if value != 0:
self._np_state._most_recent_q = value # pylint: disable=protected-access
self._np_state._most_recent_time = np.int64(0) # pylint: disable=protected-access
self._count.extend([1.0])
else:
if update_time:
self._np_state._most_recent_time += 1
self._count.extend([0.0])
self._sumcount.extend([1.0])
def modify_last_buffer(self, value, update_time=True):
"""Modify the last element of the buffer."""
self._buffer.replace_last(value)
if value != 0:
self._np_state._most_recent_q = np.float64(value) # pylint: disable=protected-access
self._np_state._most_recent_time = np.int64(0) # pylint: disable=protected-access
self._count.replace_last(1.0)
elif update_time:
self._np_state._most_recent_time += 1
def is_recent(self, update_time=10):
return ~np.isinf(
self.result()) and self._np_state._most_recent_time <= update_time # pylint: disable=protected-access
def resize_buffer(self, new_size=20):
self._buffer.resize_buffer(new_size=new_size)
self._count.resize_buffer(new_size=new_size)
self._sumcount.resize_buffer(new_size=new_size)
def _batched_call(self, reward):
self.add_to_buffer(reward)
def call(self, reward=0):
self._batched_call(reward)
| [
"[email protected]"
]
| |
b216d42b41d02698e713dca93ca58a36287b9f1c | 03dfcd4bd41ff9ba76e67895e96a9794ad003a31 | /sandbox/problems/l2q6.py | 3f72d83401c659405f22bde75a2cff7ae09c26ae | []
| no_license | gittygitgit/python-sandbox | 71ca68fcc90745931737f7aeb61306ac3417ce60 | 3b3e0eaf4edad13aabe51eb3258ebe9e6b951c67 | refs/heads/master | 2021-01-19T02:41:17.047711 | 2018-11-22T18:07:15 | 2018-11-22T18:07:15 | 39,742,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 987 | py | #!/usr/bin/python
'''
Question:
Write a program that calculates and prints the value according to the given formula:
Q = Square root of [(2 * C * D)/H]
Following are the fixed values of C and H:
C is 50. H is 30.
D is the variable whose values should be input to your program in a comma-separated sequence.
Example
Let us assume the following comma separated input sequence is given to the program:
100,150,180
The output of the program should be:
18,22,24
Hints:
If the output received is in decimal form, it should be rounded off to its nearest value (for example, if the output received is 26.0, it should be printed as 26)
In case of input data being supplied to the question, it should be assumed to be a console input.
'''
import math
def calculate(x):
C = 50
H = 30
l=x.split(",")
r=[];
for i in l:
v=math.sqrt((2*C*int(i)) / H)
r.append(str(int(math.floor(v))))
print ",".join(r)
v=raw_input("Enter comma-separated input sequence.");
calculate(v);
| [
"[email protected]"
]
| |
9dd0cdef10c7a872c03294c15a2d2ac2174cea62 | 90df9cbc8d15b0bd08ceb7fc42088021efbdfbe1 | /projectile.py | 0ef350a3367ca4d77503f15f70ae673e3c2e05cb | [
"MIT"
]
| permissive | whaleygeek/variable_duty_cycle | bef0ef9dbba99fd508cbb47a2b8befc0f958f855 | 82c197f7c79c2074b7f0eacac1686aa8e28a9a21 | refs/heads/master | 2021-01-20T01:45:36.806435 | 2017-04-27T12:55:35 | 2017-04-27T12:55:35 | 89,324,831 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,300 | py | # projectile.py 25/04/2017 D.J.Whale
#
# A Python implementation of the Variable Duty Cycle algorithm
# implementing a simple projectile plotter
#
# Idea from:
# https://archive.org/stream/byte-magazine-1981-10/1981_10_BYTE_06-10_Local_Networks#page/n389/mode/2up
# Original Pseudo Code
# C horizontal duty counter
# D vertical duty counter
# H horizontal duty cycle (velocity)
# V vertical duty cycle (velocity)
# M duty master
# C := C - H
# if C < 0
# then do
# <move projectile one cell to the right>
# C := C + M
# end
# D := D - V
# if D < 0
# then do
# <move projectile one cell up>
# D := C + M #NOTE: should be D := D + M??
# end
# else if D >= M #NOTE: indentation fixed from original code
# then do
# <move projectile one cell down>
# D := D - M
# end
# <decrease V by a fixed amount>
from Timer import Timer
try:
ask = raw_input # python2
except AttributeError:
ask = input #python3
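# A minimal sketch of one VDC axis factored into a helper (the main loop
# below inlines this same logic; the horizontal axis only ever takes the
# underflow branch, since C never grows past M):
def vdc_step(counter, duty_cycle, master):
    # advance one duty counter; returns (new_counter, move), move in {-1, 0, +1}
    counter -= duty_cycle
    if counter < 0:
        return counter + master, 1   # duty underflow: take one step forward
    if counter >= master:
        return counter - master, -1  # duty overflow: take one step back
    return counter, 0                # no whole step accumulated yet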
duty_counter_h = 0 # C
duty_counter_v = 0 # D
duty_cycle_h = 45 # H horizontal velocity (rightwards direction)
duty_cycle_v = 125 # V vertical velocity (75 in upwards direction)
v_adjust = -1 # amount to adjust V by each time round loop
duty_master = 125 # M
x = 0 # x position of projectile
y = 0 # y position of projectile
LOOP_RATE = None # run the loop as fast as possible
timer = Timer(LOOP_RATE)
screen = None
def output(x,y):
global screen
if screen is None:
from screen import Screen
screen = Screen()
screen.start()
screen.plot(x, screen.height - y)
while y >= 0: # stop when projectile hits ground
timer.wait()
# VDC#1 for x movement
duty_counter_h -= duty_cycle_h
if duty_counter_h < 0:
x += 1 # move one cell to the right
duty_counter_h += duty_master
# VDC#2 for y movement
duty_counter_v -= duty_cycle_v
if duty_counter_v < 0:
y += 1 # move one cell up
duty_counter_v += duty_master
elif duty_counter_v >= duty_master:
y -= 1 # move one cell down
duty_counter_v -= duty_master
# vertical velocity adustment due to gravity
duty_cycle_v += v_adjust
#print(duty_cycle_v)
output(x*5, y*5)
ask("finished?")
# END
| [
"[email protected]"
]
| |
9361eead7ef5afd56bc221c6dd7b4606ee11a2cc | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/CISCO-SLB-DFP-MIB.py | 6cf6098abdb328d93c73eb64c492651f9966b413 | [
"Apache-2.0"
]
| permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 7,855 | py | #
# PySNMP MIB module CISCO-SLB-DFP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-SLB-DFP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:55:25 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection", "ConstraintsUnion", "ValueSizeConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
EntPhysicalIndexOrZero, = mibBuilder.importSymbols("CISCO-TC", "EntPhysicalIndexOrZero")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
NotificationGroup, ModuleCompliance, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance", "ObjectGroup")
Unsigned32, Integer32, Bits, MibIdentifier, ModuleIdentity, Counter64, ObjectIdentity, Counter32, IpAddress, NotificationType, TimeTicks, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "Integer32", "Bits", "MibIdentifier", "ModuleIdentity", "Counter64", "ObjectIdentity", "Counter32", "IpAddress", "NotificationType", "TimeTicks", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
ciscoSlbDfpMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 689))
ciscoSlbDfpMIB.setRevisions(('2009-01-29 00:00',))
if mibBuilder.loadTexts: ciscoSlbDfpMIB.setLastUpdated('200901290000Z')
if mibBuilder.loadTexts: ciscoSlbDfpMIB.setOrganization('Cisco Systems, Inc.')
ciscoSlbDfpMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 689, 0))
ciscoSlbDfpMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 689, 1))
ciscoSlbDfpMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 689, 2))
cslbcDfpCongestionThresholdType = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("reject", 1), ("abort", 2), ("redirect", 3), ("drop", 4)))).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: cslbcDfpCongestionThresholdType.setStatus('current')
cslbcProcessorDfpValTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 4), )
if mibBuilder.loadTexts: cslbcProcessorDfpValTable.setStatus('current')
cslbcProcessorDfpValEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 4, 1), ).setIndexNames((0, "CISCO-SLB-DFP-MIB", "cslbcProcessorDfpValPhysicalIndex"))
if mibBuilder.loadTexts: cslbcProcessorDfpValEntry.setStatus('current')
cslbcProcessorDfpValPhysicalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 4, 1, 1), EntPhysicalIndexOrZero())
if mibBuilder.loadTexts: cslbcProcessorDfpValPhysicalIndex.setStatus('current')
cslbcProcessorDfpValDescription = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 4, 1, 2), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cslbcProcessorDfpValDescription.setStatus('current')
class CslbcDfpValue(TextualConvention, Unsigned32):
status = 'current'
displayHint = 'd'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(0, 65535)
cslbcDfpCongestionOnsetThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 1), CslbcDfpValue()).setUnits('DFP weight').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cslbcDfpCongestionOnsetThreshold.setStatus('current')
cslbcDfpCongestionAbateThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 2), CslbcDfpValue()).setUnits('DFP weight').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cslbcDfpCongestionAbateThreshold.setStatus('current')
cslbcProcessorDfpValDfpValue = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 689, 1, 4, 1, 3), CslbcDfpValue()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cslbcProcessorDfpValDfpValue.setStatus('current')
cslbcSlbDfpCongestionOnset = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 689, 0, 1)).setObjects(("CISCO-SLB-DFP-MIB", "cslbcProcessorDfpValDescription"), ("CISCO-SLB-DFP-MIB", "cslbcProcessorDfpValDfpValue"), ("CISCO-SLB-DFP-MIB", "cslbcDfpCongestionThresholdType"), ("CISCO-SLB-DFP-MIB", "cslbcDfpCongestionOnsetThreshold"))
if mibBuilder.loadTexts: cslbcSlbDfpCongestionOnset.setStatus('current')
cslbcSlbDfpCongestionAbate = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 689, 0, 2)).setObjects(("CISCO-SLB-DFP-MIB", "cslbcProcessorDfpValDescription"), ("CISCO-SLB-DFP-MIB", "cslbcProcessorDfpValDfpValue"), ("CISCO-SLB-DFP-MIB", "cslbcDfpCongestionAbateThreshold"), ("CISCO-SLB-DFP-MIB", "cslbcDfpCongestionThresholdType"))
if mibBuilder.loadTexts: cslbcSlbDfpCongestionAbate.setStatus('current')
ciscoSlbDfpMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 689, 2, 1))
ciscoSlbDfpMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 689, 2, 2))
ciscoSlbDfpMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 689, 2, 1, 1)).setObjects(("CISCO-SLB-DFP-MIB", "ciscoSlbDfpInstanceGroup"), ("CISCO-SLB-DFP-MIB", "cslbcSlbDfpScalarsGroup"), ("CISCO-SLB-DFP-MIB", "cslbcSlbDfpCongestionGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSlbDfpMIBCompliance = ciscoSlbDfpMIBCompliance.setStatus('current')
ciscoSlbDfpInstanceGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 689, 2, 2, 1)).setObjects(("CISCO-SLB-DFP-MIB", "cslbcProcessorDfpValDescription"), ("CISCO-SLB-DFP-MIB", "cslbcProcessorDfpValDfpValue"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoSlbDfpInstanceGroup = ciscoSlbDfpInstanceGroup.setStatus('current')
cslbcSlbDfpScalarsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 689, 2, 2, 2)).setObjects(("CISCO-SLB-DFP-MIB", "cslbcDfpCongestionOnsetThreshold"), ("CISCO-SLB-DFP-MIB", "cslbcDfpCongestionAbateThreshold"), ("CISCO-SLB-DFP-MIB", "cslbcDfpCongestionThresholdType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cslbcSlbDfpScalarsGroup = cslbcSlbDfpScalarsGroup.setStatus('current')
cslbcSlbDfpCongestionGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 689, 2, 2, 3)).setObjects(("CISCO-SLB-DFP-MIB", "cslbcSlbDfpCongestionOnset"), ("CISCO-SLB-DFP-MIB", "cslbcSlbDfpCongestionAbate"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cslbcSlbDfpCongestionGroup = cslbcSlbDfpCongestionGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-SLB-DFP-MIB", ciscoSlbDfpMIBConform=ciscoSlbDfpMIBConform, cslbcProcessorDfpValPhysicalIndex=cslbcProcessorDfpValPhysicalIndex, CslbcDfpValue=CslbcDfpValue, cslbcSlbDfpCongestionAbate=cslbcSlbDfpCongestionAbate, ciscoSlbDfpMIBObjects=ciscoSlbDfpMIBObjects, cslbcDfpCongestionOnsetThreshold=cslbcDfpCongestionOnsetThreshold, cslbcSlbDfpCongestionGroup=cslbcSlbDfpCongestionGroup, cslbcProcessorDfpValTable=cslbcProcessorDfpValTable, ciscoSlbDfpMIBNotifs=ciscoSlbDfpMIBNotifs, cslbcDfpCongestionThresholdType=cslbcDfpCongestionThresholdType, PYSNMP_MODULE_ID=ciscoSlbDfpMIB, cslbcProcessorDfpValEntry=cslbcProcessorDfpValEntry, cslbcProcessorDfpValDescription=cslbcProcessorDfpValDescription, cslbcSlbDfpCongestionOnset=cslbcSlbDfpCongestionOnset, ciscoSlbDfpMIB=ciscoSlbDfpMIB, ciscoSlbDfpMIBCompliances=ciscoSlbDfpMIBCompliances, ciscoSlbDfpMIBCompliance=ciscoSlbDfpMIBCompliance, ciscoSlbDfpInstanceGroup=ciscoSlbDfpInstanceGroup, cslbcSlbDfpScalarsGroup=cslbcSlbDfpScalarsGroup, cslbcProcessorDfpValDfpValue=cslbcProcessorDfpValDfpValue, cslbcDfpCongestionAbateThreshold=cslbcDfpCongestionAbateThreshold, ciscoSlbDfpMIBGroups=ciscoSlbDfpMIBGroups)
| [
"[email protected]"
]
| |
16d10155454d78b274b64a76eebdc6f731152e65 | 109a830aad476305f029274d75e28bec8b54f597 | /venv/bin/django-admin | 1608e243708e9f7fa2a64226ea036f01a70824c7 | []
| no_license | Dapucla/EP | 53b156088046abfd6833eba95dc4393ebeb93f4e | 9368032b4b289b20ec1bdf0033d3fe199223d200 | refs/heads/master | 2023-06-19T08:02:55.984888 | 2021-07-11T22:52:24 | 2021-07-11T22:52:24 | 330,009,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | #!/Users/daniilalekseev/PycharmProjects/DevicesOnlineShop/ENG-PROJECT/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
| [
"[email protected]"
]
| ||
f5d774828824bb6b08702b8147a2b8fef9436add | 4c992ca9bb41383ec3a9c7dc7a1e4204ef6cb850 | /publicdata/censusreporter/series.py | e211e540e6e3d1318fa8fb6cb525d1ea0e75569b | [
"MIT"
]
| permissive | rkiyengar/publicdata | f72d19d5d2821707d7a84c232cb66fb6686801db | 8fddc1a460716a3d54a7504c82376c083a0af014 | refs/heads/master | 2021-08-28T15:14:33.113145 | 2017-12-12T15:11:46 | 2017-12-12T15:11:46 | 113,387,497 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | # Copyright (c) 2017 Civic Knowledge. This file is licensed under the terms of the
# MIT License, included in this distribution as LICENSE
"""
"""
from pandas import DataFrame, Series
import numpy as np
from six import string_types
import numpy as np
from six import string_types
class CensusSeries(Series):
_metadata = ['schema', 'parent_frame']
@property
def _constructor(self):
return CensusSeries
@property
def _constructor_expanddim(self):
from .dataframe import CensusDataFrame
return CensusDataFrame
@property
def title(self): # b/c the _metadata elements aren't created until assigned
try:
return self._title
except AttributeError:
return None
@title.setter
def title(self, v):
self._title = v
@property
def census_code(self):
return self.name
@property
def census_index(self):
raise NotImplementedError
@property
def census_title(self):
return self.title
@property
def col_position(self):
raise NotImplementedError
def __init__(self, data=None, index=None, dtype=None, name=None, copy=False, fastpath=False):
super(CensusSeries, self).__init__(data, index, dtype, name, copy, fastpath)
@property
def m90(self):
if self.census_code.endswith('_m90'):
return self
else:
return self.parent_frame[self.census_code+'_m90'].astype('float')
@property
def estimate(self):
"""Return the estimate value, for either an estimate column or a margin column. """
if self.census_code.endswith('_m90'):
return self.parent_frame[self.census_code.replace('_m90','')].astype('float')
else:
return self
@property
def value(self):
"""Synonym for estimate()"""
if self.census_code.endswith('_m90'):
return self.parent_frame[self.census_code.replace('_m90','')].astype('float')
else:
return self
@property
def se(self):
"""Return a standard error series, computed from the 90% margins"""
return self.m90 / 1.645
@property
def rse(self):
"""Return the relative standard error for a column"""
return ( (self.se / self.value) * 100).replace([np.inf, -np.inf], np.nan)
@property
def m95(self):
"""Return a standard error series, computed from the 90% margins"""
return self.se * 1.96
@property
def m99(self):
"""Return a standard error series, computed from the 90% margins"""
return self.se * 2.575
def sum_m90(self, *cols):
""""""
# See the ACS General Handbook, Appendix A, "Calculating Margins of Error for Derived Estimates".
# (https://www.census.gov/content/dam/Census/library/publications/2008/acs/ACSGeneralHandbook.pdf)
# for a guide to these calculations.
return np.sqrt(sum(self.m90 ** 2))
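# A minimal demonstration of the margin-of-error arithmetic above (the column
# name is a made-up ACS-style name and the parent_frame wiring is omitted):
if __name__ == '__main__':
    s = CensusSeries([10.0, 20.0], name='B01001_001_m90')
    print(s.se)   # 90% margin / 1.645 -> standard error
    print(s.m95)  # standard error * 1.96 -> 95% margin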
| [
"[email protected]"
]
| |
72dc2fa92097e059528253cd249e8a5adaa613ad | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/linkedlist_20200623140505.py | 3fbc3a13dbff9fdaff5799ef81ca03724a251395 | []
| no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,510 | py | # Linked List implementation
# implements the node type
# stores a single data field ---> val
class Node(object):
def __init__(self,val):
self.val = val
self.next = None
def get_data(self):
return self.val
def set_data(self,val):
self.val = val
def get_next(self):
return self.next
def set_next(self,next):
self.next = next
class LinkedList(object):
def __init__(self,head=None):
self.head = head
self.count = 0
def get_count(self):
return self.count
def insert(self,data):
# insert a new node
new_node = Node(data)
# point the new node to the current head
new_node.set_next(self.head)
# set head as the new node
self.head = new_node
self.count +=1
def find(self,val):
# Find the first element with a given value
item = self.head
# we check if item is not none and equal to the val we are looking for
while item !=None:
if item.get_data() == val:
return item
else:
                item = item.get_next()
return None
    def deleteAt(self,idx):
        # to delete an item at a given index
        if idx < 0 or idx > self.count-1 or self.head is None:
            return
        if idx == 0:
            self.head = self.head.get_next()
        else:
            prev = self.head
            for _ in range(idx-1):
                prev = prev.get_next()
            prev.set_next(prev.get_next().get_next())
        self.count -= 1
def dump_list(self):
tempnode = self.head
while tempnode != None:
print("Node: ",tempnode.get_data())
tempnode = tempnode.get_next()
# create a linked list and exercise it (example values below are arbitrary)
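itemlist = LinkedList()
for value in (38, 49, 13, 15):
    itemlist.insert(value)
print("Count:", itemlist.get_count())
itemlist.deleteAt(0)  # drop the current head (15)
itemlist.dump_list()  # prints 13, 49, 38 in insertion-reversed order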
| [
"[email protected]"
]
| |
0ea6c1d03a86d7ca86b5f5f804c1422b102cb628 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_spites.py | d2c6c8a7bc2b2a2463e64b4751f5d767476db7b2 | [
"MIT"
]
| permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py |
from xai.brain.wordbase.verbs._spite import _SPITE
#calss header
class _SPITES(_SPITE, ):
def __init__(self,):
_SPITE.__init__(self)
self.name = "SPITES"
self.specie = 'verbs'
self.basic = "spite"
self.jsondata = {}
| [
"[email protected]"
]
| |
037699b5f44d1f196eb5a8aac2be877a7d4491f3 | 089b396be42accf7dedd6a935b8cb29d43f25e2c | /core/urls.py | d30152bfd637967c2d79e3491e09b842841d8b8f | [
"MIT"
]
| permissive | edgarslabans/django-ajax-xhr_nio | b4dcce4b9d5d73a60e912cc0cc926e5c78f737ba | b33ab12fc17a3fb98812c12259f97525f3746bd2 | refs/heads/main | 2023-08-22T21:16:28.266245 | 2023-08-22T11:38:48 | 2023-08-22T11:38:48 | 681,173,754 | 0 | 0 | MIT | 2023-08-21T12:35:21 | 2023-08-21T12:35:19 | null | UTF-8 | Python | false | false | 416 | py | from django.conf import settings
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path
from todos.views import home, todo, todos
urlpatterns = [
path('admin/', admin.site.urls),
path('todos/<int:todoId>/', todo, name="todo"),
path('todos/', todos, name="todos"),
path('', home),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| [
"[email protected]"
]
| |
b531a7e2091cb595f280d47d3bef514cc6dba92b | ec030fab06dab9a94835a5d308fb8411111be0b4 | /myprintgoogle.py | 47bb714ad8cd0381fa217c1d8dcc3361b5b82582 | []
| no_license | mortadagzar/Python_Kivy_MobileApps | e464319cb7825c57bbfb44f95b4aca15e3734988 | d941a8d2da00a2420a47b07224e3ed876698ea23 | refs/heads/master | 2020-04-08T03:35:51.097417 | 2019-03-08T05:17:07 | 2019-03-08T05:17:07 | 158,982,437 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 453 | py | from google.cloud import storage
def upload_blob(bucket_name, source_file_name, destination_blob_name):
"""Uploads a file to the bucket."""
storage_client = storage.Client()
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.upload_from_filename(source_file_name)
print('File {} uploaded to {}.'.format(
source_file_name,
destination_blob_name))
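# Example invocation (illustrative only; the bucket and object names are
# placeholders, not values from this project):
#   upload_blob('my-bucket', 'local/photo.jpg', 'uploads/photo.jpg')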
print("its google")
| [
"[email protected]"
]
| |
325ab1c396c76a85f683b839c99242b74a6e8b4f | 3a9b154aa9d5e379683476f80f30630bf44d2102 | /Server_v1/dictionary/forms/__init__.py | e5b398dcc84336f39778bbd6993647906cfd6c52 | []
| no_license | KevinDon/py_amazon_analysis | 81995e360d2b536e1df6e515aae9457054edae29 | 13b5fbb046ca6516ac3a47e8f7867baf358011f4 | refs/heads/master | 2022-12-13T00:27:27.511783 | 2019-08-14T11:45:53 | 2019-08-14T11:45:53 | 185,160,162 | 0 | 1 | null | 2022-12-10T05:38:15 | 2019-05-06T08:56:40 | TSQL | UTF-8 | Python | false | false | 76 | py | from .DataDictionaryForm import *
from .DataDictionaryCategoryForm import *
| [
"[email protected]"
]
| |
33d73097852c91a54cb2e24f3a6ee3f6e61acb17 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/127/usersdata/174/36355/submittedfiles/ex11.py | 6ab9a8927f2e4d904e182bedbdc3a5c7e569d9e1 | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | # -*- coding: utf-8 -*-
d1 = float(input('Day:'))
m1 = float(input('Month:'))
a1 = float(input('Year:'))
d2 = float(input('Day:'))
m2 = float(input('Month:'))
a2 = float(input('Year:'))
if a1 > a2:
    print('DATE 1')
elif a1 < a2:
    print('DATE 2')
else:
    if m1 > m2:
        print('DATE 1')
    elif m1 < m2:
        print('DATE 2')
    else:
        if d1 > d2:
            print('DATE 1')
        elif d1 < d2:
            print('DATE 2')
        else:
            print('EQUAL') | [
"[email protected]"
]
| |
1176ee354b130c04c89c6d31bfccc0c8cd78fc12 | a78b5014f654658efc533fab715fae01d5b88134 | /mailpy/contrib/pelican/view.py | bf3404991946cc5c811ac003111f8d1c82eec1de | [
"BSD-3-Clause"
]
| permissive | dn0/mailpy | 4410d1d64753790bc5ef33060cdbbac9525eaa4a | db382d9d67a34444b8492bb0766b7320e46a0ac0 | refs/heads/master | 2016-08-11T10:37:15.709466 | 2015-10-22T19:38:25 | 2015-10-22T19:38:25 | 36,691,583 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,054 | py | # -*- coding: utf-8 -*-
from functools import wraps
from datetime import datetime
import os
import re
from mailpy.view import MailView
from mailpy.response import TextMailResponse
from mailpy.exceptions import MailViewError
from mailpy.contrib.filelock import FileLock, FileLockTimeout
from mailpy.contrib.pelican.api import PelicanAPI
from mailpy.contrib.pelican.utils import stringify, slugify
from mailpy.contrib.pelican.content import RstArticle
from mailpy.contrib.pelican.exceptions import FileNotFound, FileAlreadyExists, MultipleFilesFound, UnknownFileFormat
__all__ = ('PelicanMailView',)
def lock(fun):
"""Lock decorator"""
@wraps(fun)
def wrap(obj, request, *args, **kwargs):
flock = FileLock(obj.lock_file)
try:
flock.acquire()
except FileLockTimeout as exc:
raise MailViewError(request, 'Locked: %s' % exc, status_code=423)
try:
return fun(obj, request, *args, **kwargs)
finally:
flock.release()
return wrap
class PelicanMailView(MailView):
"""
Pelican mail view.
"""
settings_file = NotImplemented
article_class = RstArticle # Used only for new articles
article_file_name = '%Y-%m-%d-{slug}' # Without extension; valid placeholders: {slug} and strftime() directives
papi_class = PelicanAPI
papi_settings = ()
site_url = None
authors = ()
lock_file = None
detect_image_attachments = frozenset(('.jpg', '.jpeg', '.png', '.bmp', '.gif', '.tif', '.tiff'))
_valid_content_maintypes = frozenset(('text', 'image', 'audio', 'video', 'application'))
_valid_text_content_type = frozenset(('text/plain',))
_ignored_file_content_types = frozenset(('text/x-vcard', 'text/vcard',
'application/pgp-signature',
'application/x-pkcs12',
'application/x-pkcs7-signature',
'application/x-pkcs7-mime',
'application/pkcs12',
'application/pkcs7-signature',
'application/pkcs7-mime'))
def __init__(self):
super(PelicanMailView, self).__init__()
# Initialize the Pelican API
self.papi = self.papi_class(self.settings_file, **dict(self.papi_settings))
self.lock_file = self.lock_file or self.settings_file + '.lock'
@property
def _site_url(self):
"""Return site URL"""
return self.site_url or self.papi.site_url
def _get_author_from_email(self, email, default=None):
"""Helper for mail views"""
for author, emails in self.authors:
if email in emails:
return author
return default
def _create_article_slug(self, title, articles):
"""Create unique article slug from title"""
slug = orig_slug = slugify(title)
slugs = self.papi.get_article_slugs(articles=articles) # dict {slug: article}
i = 1
while slug in slugs:
i += 1
slug = '%s-%d' % (orig_slug, i)
return slug
def __create_article_filename(self, slug, addon=''):
"""Generate new article filename"""
name = datetime.now().strftime(self.article_file_name)
filename = name.format(slug=slug)
return '%s%s%s' % (filename, addon, self.article_class.extension)
def _create_article_filename(self, slug, articles):
"""Create new unique filename from title"""
filename = self.__create_article_filename(slug)
filenames = set(a.filename for a in articles)
i = 1
while filename in filenames:
i += 1
filename = self.__create_article_filename(slug, addon='-%d' % i)
return filename
def _create_article(self, title):
"""Create new PelicanArticle object"""
articles = self.papi.articles
slug = self._create_article_slug(title, articles)
filename = self._create_article_filename(slug, articles)
return self.article_class(self.papi.content_path, filename)
def _create_static_filename(self, maintype, orig_filename):
"""Create proper filename for static file according to original filename"""
if maintype == 'image':
directory = self.papi.images_dir
else:
directory = self.papi.files_dir
orig_filename = stringify(orig_filename).strip()
if not orig_filename:
orig_filename = 'noname'
filename = os.path.join(directory, orig_filename)
static_files = set(self.papi.get_static_files(directory))
name, ext = os.path.splitext(filename)
i = 1
while filename in static_files:
i += 1
filename = '%s-%d%s' % (name, i, ext)
return filename
@staticmethod
def _get_msg_text(msg_part, fallback_charset=None):
"""Return text in mime message converted into unicode str"""
content = msg_part.get_payload(decode=True)
charset = msg_part.get_content_charset() or fallback_charset or 'ascii'
return content.decode(charset, 'replace')
@staticmethod
def _edit_msg_text(text):
"""Process text extracted from mail message and return text suitable for article content"""
# Fix automatic links injected by mail clients, e.g. "<www.google.com> <http://www.google.com>"
return re.sub(r'([^\s]+) <([\w\+]+:/*)?\1/?>(?!`_)', r'\1', text)
def _get_msg_content(self, msg, article):
"""Parse message and retrieve text content and additional file attachments"""
text = []
files = []
for part in msg.walk():
content_type = part.get_content_type()
maintype = part.get_content_maintype()
if maintype in self._valid_content_maintypes:
orig_filename = part.get_filename()
if orig_filename: # Attached file
if content_type in self._ignored_file_content_types:
continue # Ignore vcard, digital signatures and stuff like this
filename = self._create_static_filename(maintype, orig_filename)
ext = os.path.splitext(orig_filename)[1].lower()
if maintype == 'image' or ext in self.detect_image_attachments:
text_data = article.image(orig_filename, '{filename}/%s' % filename)
else:
text_data = article.internal_link(orig_filename, '{filename}/%s' % filename)
text.append(text_data)
files.append(self.papi.get_static_file(filename, content=part.get_payload(decode=True),
encoding=part.get_content_charset())) # Store raw
elif content_type in self._valid_text_content_type: # Article text
msg_text = self._get_msg_text(part, msg.get_charset()) # Decode using content charset
text.append(self._edit_msg_text(msg_text))
return '\n\n'.join(text), files
def _get_article_metadata(self, request, article, text):
"""Create article metadata"""
metadata = {
'date': datetime.now().strftime('%Y-%m-%d %H:%M'),
'authors': self._get_author_from_email(request.sender, request.sender),
}
new_text, parsed_metadata = article.get_text_metadata(text)
metadata.update(parsed_metadata)
return new_text, metadata
@staticmethod
def _save_article(request, article, static_files):
"""Save article file and all static files; Delete already created files in case of an error"""
created = []
try:
for static_file in static_files:
static_file.save()
created.append(static_file.full_path)
try:
article.save()
except FileAlreadyExists:
raise MailViewError(request, 'Article "%s" already exists' % article, status_code=406)
else:
created.append(article.full_path)
except Exception as exc:
for f in created:
# noinspection PyBroadException
try:
os.remove(f)
except:
pass
raise exc # Re-raise original exception
return created
# noinspection PyUnusedLocal
@staticmethod
def _delete_article(request, article):
"""Delete article"""
# TODO: delete related static files
article.delete()
return [article.full_path]
def _get_article(self, request, title_or_filename):
"""Fetch existing article according to title or filename"""
try:
try:
return self.papi.get_article(title_or_filename)
except UnknownFileFormat:
                subject = re.sub(r'^(re:\s*)+', '', title_or_filename, flags=re.IGNORECASE)
                return self.papi.get_article_by_slug(slugify(subject))
except FileNotFound:
raise MailViewError(request, 'Article "%s" was not found' % title_or_filename, status_code=404)
except MultipleFilesFound:
err = 'Found multiple articles related to the title "%s".\n' \
'Please use the filename to find specific article.' % title_or_filename
raise MailViewError(request, err, status_code=406)
def _commit_and_publish(self, commit_msg, **commit_kwargs):
"""Commit to git if repo_path is set and update html files"""
if commit_msg and self.papi.repo_path:
self.papi.commit(commit_msg, **commit_kwargs)
self.papi.publish()
def _response(self, request, msg, **kwargs):
"""Create nice mail response"""
site_url = self.site_url
if site_url and site_url.startswith('http'):
msg += '\n\n--\n%s\n' % site_url
return TextMailResponse(request, msg, **kwargs)
def get(self, request):
"""Return list of blog posts or content of one blog post depending on the subject"""
filename = request.subject.strip()
if filename:
article = self._get_article(request, filename)
res = article.load()
else:
res = '\n'.join(a.filename for a in self.papi.articles)
return self._response(request, res)
@lock
def post(self, request):
"""Create new blog post, commit and rebuild the html output"""
title = request.subject.strip()
if not title:
raise MailViewError(request, 'Subject (title) is required')
article = self._create_article(title)
text, static_files = self._get_msg_content(request, article)
text, metadata = self._get_article_metadata(request, article, text)
article.compose(title, text, metadata)
created = self._save_article(request, article, static_files)
commit_msg = 'Added article %s' % article.filename
if static_files:
commit_msg += ' + static files:\n\t+ %s' % '\n\t+ '.join(i.filename for i in static_files)
self._commit_and_publish(commit_msg, add=created)
sep = '*' * 40
out = 'Article "%s" was successfully created\n\n%s\n%s\n%s' % (article.filename, sep, article.content, sep)
return self._response(request, out)
@lock
def delete(self, request):
"""Delete one blog post, commit and rebuild the html output"""
filename = request.subject.strip()
if not filename:
raise MailViewError(request, 'Subject (filename) is required')
article = self._get_article(request, filename)
deleted = self._delete_article(request, article)
self._commit_and_publish('Deleted article %s' % article, remove=deleted)
return self._response(request, 'Article "%s" was successfully deleted' % article.filename)
| [
"[email protected]"
]
| |
11f9cc4c53607b22d92b19a8e39858302b699a6c | 7b3711d4c6d7284255ba0270d49d120f984bf7c6 | /problems/1456_maximum_number_of_vowels_in_a_substring_of_given_length.py | b8c1ee441ba0fe9a18e4fc45c9b3521258e986f6 | []
| no_license | loganyu/leetcode | 2d336f30feb55379aaf8bf0273d00e11414e31df | 77c206305dd5cde0a249365ce7591a644effabfc | refs/heads/master | 2023-08-18T09:43:10.124687 | 2023-08-18T00:44:51 | 2023-08-18T00:44:51 | 177,875,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,042 | py | '''
Given a string s and an integer k, return the maximum number of vowel letters in any substring of s with length k.
Vowel letters in English are 'a', 'e', 'i', 'o', and 'u'.
Example 1:
Input: s = "abciiidef", k = 3
Output: 3
Explanation: The substring "iii" contains 3 vowel letters.
Example 2:
Input: s = "aeiou", k = 2
Output: 2
Explanation: Any substring of length 2 contains 2 vowels.
Example 3:
Input: s = "leetcode", k = 3
Output: 2
Explanation: "lee", "eet" and "ode" contain 2 vowels.
Constraints:
1 <= s.length <= 10^5
s consists of lowercase English letters.
1 <= k <= s.length
'''
class Solution:
def maxVowels(self, s: str, k: int) -> int:
vowels = {'a', 'e', 'i', 'o', 'u'}
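        # Count the vowels in the first window s[0:k].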
count = 0
for i in range(k):
count += int(s[i] in vowels)
answer = count
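        # Slide the window one position at a time: count the entering character, discard the leaving one.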
for i in range(k, len(s)):
count += int(s[i] in vowels)
count -= int(s[i - k] in vowels)
answer = max(answer, count)
return answer
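# Illustrative quick checks, mirroring the examples in the docstring above:
if __name__ == '__main__':
    assert Solution().maxVowels("abciiidef", 3) == 3
    assert Solution().maxVowels("aeiou", 2) == 2
    assert Solution().maxVowels("leetcode", 3) == 2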
| [
"[email protected]"
]
| |
597713e0d9f271c187c3ffd7c3cec16228a0eb6d | 7390417d66411000e18156bf3ec6389f3a4aa3dc | /website/feature_importances_correlations/between_targets/page.py | 173578411f3867740fbdfbd59855634900e31e2d | [
"MIT"
]
| permissive | HMS-AgeVSSurvival/Website | fb911e10eedd3212f1440e57a97061a50cc4e33d | 298000aee6ab951a7f90e6bb4ca4716997a0398b | refs/heads/main | 2023-07-26T06:23:39.064024 | 2021-09-14T09:15:27 | 2021-09-14T09:15:27 | 388,773,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,560 | py | from website.app import APP
import dash_bootstrap_components as dbc
import dash_html_components as html
from dash.dependencies import Input, Output
from website.feature_importances_correlations.between_targets.tabs.all_categories import get_all_categories
from website.feature_importances_correlations.between_targets.tabs.custom_categories import get_custom_categories
@APP.callback(
Output("tab_content_feature_importances_correlations_between_targets", "children"),
Input("tab_manager_feature_importances_correlations_between_targets", "active_tab"),
)
def _fill_tab(
active_tab,
):
if active_tab == "feature_importances_correlations_between_targets_custom_categories":
return get_custom_categories()
else: # active_tab == "feature_importances_correlations_between_targets_all_categories":
return get_all_categories()
LAYOUT = html.Div(
[
dbc.Tabs(
[
dbc.Tab(
label="Custom categories",
tab_id="feature_importances_correlations_between_targets_custom_categories",
),
dbc.Tab(
label="All categories", tab_id="feature_importances_correlations_between_targets_all_categories"
),
],
id="tab_manager_feature_importances_correlations_between_targets",
active_tab="feature_importances_correlations_between_targets_custom_categories",
),
html.Div(id="tab_content_feature_importances_correlations_between_targets"),
]
)
| [
"[email protected]"
]
| |
deb0389d1cc707e4fbb2f91c535f9f8f36f9d5dc | 1b554b6c970ba31bffae8434611e307770fa2e95 | /lof/generator.py | e32c81ea71f3ccd889b232fb83343ac33986dba4 | [
"MIT"
]
| permissive | mkaraev/lof | 0d1821b04b0137d982638276e5d6fdb06c7d3441 | 19be33d1283842069af0dd0776027b24676aac5e | refs/heads/main | 2023-06-25T07:09:51.701689 | 2021-07-24T22:10:26 | 2021-07-24T22:10:26 | 389,283,255 | 0 | 0 | MIT | 2021-07-25T07:00:12 | 2021-07-25T07:00:12 | null | UTF-8 | Python | false | false | 2,993 | py | import importlib
import json
from typing import Dict, Optional
from fastapi import FastAPI, Request, Response
from fastapi.responses import JSONResponse
def create_route(endpoint: str, handler: str, method: str, app: FastAPI):
method = getattr(app, method)
@method(endpoint)
async def _method(request: Request, response: Response):
_handler = handler.split(".")
module = ".".join(_handler[0:-1])
_handler = _handler[-1]
my_module = importlib.import_module(module)
_handler = getattr(my_module, _handler)
event = await prepare_api_gateway_event(request)
result = _handler(event, {})
status_code = result.get("statusCode") or result.get("status_code") or 200
if result.get("body"):
content = result.get("body")
else:
content = result
for header, value in result.get("headers", {}).items():
response.headers[header] = value
if status_code == 204:
response = Response(status_code=status_code)
else:
response = JSONResponse(
content=content, status_code=status_code, headers=response.headers
)
return response
def get_query_params(multi_params: Optional[Dict]) -> Dict:
params = {}
if multi_params:
for param in multi_params:
params[param] = multi_params[param][-1]
return params
def get_multi_value_params(url: str) -> Optional[Dict]:
    """Extract params from the url for multiValueQueryStringParameters."""
url = str(url).split("/")[-1]
params = url.split("?")[-1]
params = params.split("&")
multi_query_params = {}
if len(params) == 1:
params = []
for param in params:
name, value = param.split("=")
if not multi_query_params.get(name):
multi_query_params[name] = [value]
else:
multi_query_params[name].append(value)
if not multi_query_params:
multi_query_params = None
return multi_query_params
async def prepare_api_gateway_event(request: Request) -> Dict:
body = None
try:
body = await request.json()
except Exception:
pass
multi_params = get_multi_value_params(request.url)
headers = {}
for header in request.headers.items():
headers[header[0]] = header[1]
event = {
"resource": str(request.base_url),
"path": str(request.url),
"httpMethod": request.method,
"requestContext": {
"resourcePath": "/",
"httpMethod": request.method,
"path": str(request.base_url),
},
"headers": headers,
"multiValueHeaders": None,
"queryStringParameters": get_query_params(multi_params),
"multiValueQueryStringParameters": multi_params,
"pathParameters": request.path_params,
"stageVariables": None,
"body": json.dumps(body),
"isBase64Encoded": False,
}
return event
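# A minimal wiring sketch (assumes a module ``handlers`` exposing a Lambda-style
# function ``hello.handler(event, context)``; the module and handler names are
# illustrative, not part of this package):
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   create_route("/hello", "handlers.hello.handler", "get", app)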
| [
"[email protected]"
]
| |
e724b791e0662abba8b8bc7979733f388d1ca514 | 0fccee4c738449f5e0a8f52ea5acabf51db0e910 | /genfragments/ThirteenTeV/SUSYGluGlu/SUSYGluGluToBBHTohhTo2Tau2B_M-300_TuneCUETP8M1_13TeV-pythia8_filter_cfi.py | 352f91727e0d5853e6653e614b6f39233fbe62da | []
| no_license | cms-sw/genproductions | f308ffaf3586c19b29853db40e6d662e937940ff | dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4 | refs/heads/master | 2023-08-30T17:26:02.581596 | 2023-08-29T14:53:43 | 2023-08-29T14:53:43 | 11,424,867 | 69 | 987 | null | 2023-09-14T12:41:28 | 2013-07-15T14:18:33 | Python | UTF-8 | Python | false | false | 4,945 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(1),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
crossSection = cms.untracked.double(518.3),
maxEventsToPrint = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring('Higgs:useBSM = on',
'HiggsBSM:gg2H2bbbar = on',
'35:m0 = 300',
'25:m0 = 125',
'35:onMode = off',
'35:onIfMatch = 25 25',
'25:onMode = off',
'25:onIfMatch = 5 -5',
'25:onIfMatch = 15 -15'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
bbgenfilter = cms.EDFilter("MCMultiParticleFilter",
Status = cms.vint32(23, 23),
src = cms.InputTag('generator'),
ParticleID = cms.vint32(5, -5),
PtMin = cms.vdouble(0, 0),
NumRequired = cms.int32(1),
EtaMax = cms.vdouble(9999, 9999),
AcceptMore = cms.bool(True)
)
tautaugenfilter = cms.EDFilter("MCMultiParticleFilter",
Status = cms.vint32(23, 23),
src = cms.InputTag('generator'),
ParticleID = cms.vint32(15, -15),
PtMin = cms.vdouble(0,0),
NumRequired = cms.int32(1),
EtaMax = cms.vdouble(9999, 9999),
AcceptMore = cms.bool(True)
)
ProductionFilterSequence = cms.Sequence(generator + bbgenfilter + tautaugenfilter)
configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('\$Revision$'),
name = cms.untracked.string('\$Source$'),
annotation = cms.untracked.string('bbH (H->hh->tautaubb), 13TeV, mH = 300GeV, filtered. TuneCUETP8M1')
)
| [
"[email protected]"
]
| |
44e0524543acf5ef0dae068943b35982c863e360 | 9ecf55bf2601e0d4f74e71f4903d2fd9e0871fd6 | /my_seg_keras/v2_unet_street/test_generator.py | bdf7e240c0cac4b018e854ff41d915a48e308ba9 | []
| no_license | qq191513/mySeg | 02bc9803cde43907fc5d96dc6a6a6371f2bef6fe | 4337e6a0ca50b8ccbf6ed9b6254f2aec814b24db | refs/heads/master | 2020-04-10T09:57:37.811133 | 2019-06-26T08:21:23 | 2019-06-26T08:21:23 | 160,951,962 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,395 | py | import numpy as np
import matplotlib.pyplot as plt
from v1 import config as cfg
import time
from data_process.use_generator import imageSegmentationGenerator
def split_batch_to_pic_list(img):
    # Split a 4-D batch tensor of images into a list of individual 3-D images
batch_size = img.shape[0]
img_list = np.split(img,batch_size,axis=0)
for index,img in enumerate(img_list):
img = np.squeeze(img,axis=0)
img_list[index] = img
return img_list
def plt_imshow_data(data):
    # Coerce to a standard format and dimensionality to avoid plotting bugs
data = np.asarray(data)
if data.ndim == 3:
if data.shape[2] == 1:
data = data[:, :, 0]
plt.imshow(data)
plt.show()
time.sleep(2)
def plt_imshow_two_pics(data_1,data_2):
    # Coerce to a standard format and dimensionality to avoid plotting bugs
data_1 = np.asarray(data_1)
if data_1.ndim == 3:
if data_1.shape[2] == 1:
data_1 = data_1[:, :, 0]
data_2 = np.asarray(data_2)
if data_2.ndim == 3:
if data_2.shape[2] == 1:
data_2 = data_2[:, :, 0]
plt.subplot(1, 2, 1)
plt.imshow(data_1)
plt.subplot(1, 2, 2)
plt.imshow(data_2)
plt.show()
def seg_vec_to_pic(seg_vec,restore_pic_shape,colors,n_classes):
    # Convert a flat vector of length w*h*n_classes into a w*h*3 image, painting each class with a (randomly generated) color
seg_img = np.zeros(shape=restore_pic_shape)
the_shape = restore_pic_shape
w, h = the_shape[0],the_shape[1]
    seg_vec = seg_vec.reshape((w, h, -1)) # reshape the flat w*h*n_classes vector into (w, h, n_classes)
    # n_classes channels in total; give each class layer its own color
for c in range(n_classes):
seg_img[:, :, 0] += (seg_vec[:, :, c] * (colors[c][0])).astype('uint8')
seg_img[:, :, 1] += (seg_vec[:, :, c] * (colors[c][1])).astype('uint8')
seg_img[:, :, 2] += (seg_vec[:, :, c] * (colors[c][2])).astype('uint8')
seg_img = seg_img / 255.0
seg_img = seg_img.astype('float32')
return seg_img
def use_generator_to_show(images_path , segs_path , batch_size,
n_classes , input_height , input_width , output_height , output_width):
    batch_size_n = 0 # index of the image to inspect within each batch
plt.figure()
colors = [(np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)) for _ in
range(n_classes)]
    # Use the generator: each iteration yields one batch of images (im_fn) and segmentation vectors (seg_vec)
for im_fn , seg_vec in imageSegmentationGenerator(images_path , segs_path , batch_size,
n_classes , input_height , input_width , output_height , output_width):
        pics_group = split_batch_to_pic_list(im_fn) # split the batch into a list of images
        pic = pics_group[batch_size_n] # take the batch_size_n-th image to inspect
        # plt_imshow_data(pic) # show a single image with matplotlib
print('img shape: ',im_fn.shape)
print('seg shape: ',seg_vec.shape)
        seg_vec = split_batch_to_pic_list(seg_vec) # split the batch into a list
        seg_vec = seg_vec[batch_size_n] # take the matching segmentation vector
        seg_img = seg_vec_to_pic(seg_vec,pic.shape,colors,n_classes)
        plt_imshow_two_pics(pic,seg_img) # show the image and its colored mask side by side
time.sleep(1)
print('train dataset')
use_generator_to_show(cfg.train_images , cfg.train_annotations , cfg.train_batch_size,
cfg.n_classes, cfg.input_shape[0], cfg.input_shape[1], cfg.output_shape[0],cfg.output_shape[1])
# print('valid dataset')
# use_generator_to_show(cfg.val_images , cfg.val_annotations , cfg.train_batch_size,
# cfg.n_classes , cfg.input_height , cfg.input_width , cfg.output_height , cfg.output_width)
| [
"[email protected]"
]
| |
1920bee1faeb280feb1f63b7b78c650bc8c772d4 | 2b29095a4f8a60e6ad2f09dd257dc8a9ceb04ebd | /misfits/tools/smooth/__init__.py | 2658ea2efa922b41405685907facd4a792e93f0c | []
| no_license | sholmbo/misfits | f82601cdf5778c4ea57c3d9fed8aea3cd3b641f9 | e34a0ba0b62948840a6bcb1c28d340b8c613dd66 | refs/heads/master | 2023-02-25T13:17:48.670735 | 2021-02-02T15:12:12 | 2021-02-02T15:30:11 | 277,625,987 | 0 | 1 | null | 2021-02-02T15:18:35 | 2020-07-06T19:04:35 | Python | UTF-8 | Python | false | false | 132 | py | from .lowpass import LowPass
from .boxcar import Boxcar
from .gaussian import Gaussian
from .smoothingspline import SmoothingSpline
| [
"[email protected]"
]
| |
9a9bc9fac9a12e1a306ff9724f080209bd2e6cf5 | cc9a6fa4012c58f66d735e20486b9a1df877d1b7 | /Strings/Integer To Roman.py | 6ce9d5509a8706b487d0cf4902e31d2557ad5670 | []
| no_license | sharmaji27/InterviewBit-Problems | 6e5acb6d45296b60df8632a02b0aa272dcde8f28 | 09054fdab0350a86268cfe5eb55edc2071067b2b | refs/heads/master | 2023-08-27T23:08:34.003883 | 2021-10-20T05:33:46 | 2021-10-20T05:33:46 | 265,551,578 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,129 | py | '''
Given an integer A, convert it to a roman numeral, and return a string corresponding to its roman numeral version
Note : This question has a lot of scope of clarification from the interviewer. Please take a moment to think of all the needed clarifications and see the expected response using “See Expected Output” For the purpose of this question, https://projecteuler.net/about=roman_numerals has very detailed explanations.
Input Format
The only argument given is integer A.
Output Format
Return a string denoting roman numeral version of A.
Constraints
1 <= A <= 3999
For Example
Input 1:
A = 5
Output 1:
"V"
Input 2:
A = 14
Output 2:
"XIV"
'''
class Solution:
# @param A : integer
    # @return a string
def intToRoman(self, A):
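        # Precomputed Roman numerals for each decimal digit in the ones, tens, hundreds and thousands place.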
I = ['','I','II','III','IV','V','VI','VII','VIII','IX','X']
X = ['','X','XX','XXX','XL','L','LX','LXX','LXXX','XC','C']
C = ['','C','CC','CCC','CD','D','DC','DCC','DCCC','CM','M']
M = ['','M','MM','MMM']
return(M[A//1000] + C[(A%1000)//100] + X[(A%100)//10] + I[A%10]) | [
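# Illustrative quick checks (the lookup tables cover 1 <= A <= 3999, matching the constraint):
if __name__ == '__main__':
    assert Solution().intToRoman(5) == 'V'
    assert Solution().intToRoman(14) == 'XIV'
    assert Solution().intToRoman(3999) == 'MMMCMXCIX'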
"[email protected]"
]
| |
505ecf386e4dc812d1ffee11c7b7ce8a2b19b3fa | 28f088b5356e66780c4bad204564bff92f910f02 | /src/python/pants/base/exception_sink_integration_test.py | bdbc6b8ff6855afb1a1bf2eb685a4f9e39400f25 | [
"Apache-2.0"
]
| permissive | wonlay/pants | 57dcd99f82cdb2e37fcb7c563ec2bccf797ee7b7 | 53c66503b6898e83c9c9596e56cde5ad9ed6a0d3 | refs/heads/master | 2023-03-06T03:23:08.602817 | 2022-05-05T23:41:32 | 2022-05-05T23:41:32 | 24,695,709 | 0 | 0 | Apache-2.0 | 2023-03-01T11:59:58 | 2014-10-01T21:15:29 | Python | UTF-8 | Python | false | false | 4,970 | py | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
import signal
import time
from pathlib import Path
from typing import List, Tuple
import pytest
from pants.base.build_environment import get_buildroot
from pants.base.exception_sink import ExceptionSink
from pants.testutil.pants_integration_test import run_pants_with_workdir
from pants.util.dirutil import read_file
from pants_test.pantsd.pantsd_integration_test_base import PantsDaemonIntegrationTestBase
pytestmark = pytest.mark.platform_specific_behavior
def lifecycle_stub_cmdline() -> List[str]:
# Load the testprojects pants-plugins to get some testing tasks and subsystems.
testproject_backend_src_dir = os.path.join(
get_buildroot(), "testprojects/pants-plugins/src/python"
)
testproject_backend_pkg_name = "test_pants_plugin"
lifecycle_stub_cmdline = [
"--no-pantsd",
f"--pythonpath=+['{testproject_backend_src_dir}']",
f"--backend-packages=+['{testproject_backend_pkg_name}']",
# This task will always raise an exception.
"lifecycle-stub-goal",
]
return lifecycle_stub_cmdline
def get_log_file_paths(workdir: str, pid: int) -> Tuple[str, str]:
pid_specific_log_file = ExceptionSink.exceptions_log_path(for_pid=pid, in_dir=workdir)
assert os.path.isfile(pid_specific_log_file)
shared_log_file = ExceptionSink.exceptions_log_path(in_dir=workdir)
assert os.path.isfile(shared_log_file)
assert pid_specific_log_file != shared_log_file
return (pid_specific_log_file, shared_log_file)
def assert_unhandled_exception_log_matches(pid: int, file_contents: str) -> None:
regex_str = f"""\
timestamp: ([^\n]+)
process title: ([^\n]+)
sys\\.argv: ([^\n]+)
pid: {pid}
Exception caught: \\([^)]*\\)
(.|\n)*
Exception message:.*
"""
assert re.match(regex_str, file_contents)
def assert_graceful_signal_log_matches(pid: int, signum, signame, contents: str) -> None:
regex_str = """\
timestamp: ([^\n]+)
process title: ([^\n]+)
sys\\.argv: ([^\n]+)
pid: {pid}
Signal {signum} \\({signame}\\) was raised\\. Exiting with failure\\.
""".format(
pid=pid, signum=signum, signame=signame
)
assert re.search(regex_str, contents)
def test_logs_unhandled_exception(tmp_path: Path) -> None:
pants_run = run_pants_with_workdir(
lifecycle_stub_cmdline(),
workdir=tmp_path.as_posix(),
# The backtrace should be omitted when --print-stacktrace=False.
print_stacktrace=False,
extra_env={"_RAISE_EXCEPTION_ON_IMPORT": "True"},
)
pants_run.assert_failure()
regex = "exception during import!"
assert re.search(regex, pants_run.stderr)
pid_specific_log_file, shared_log_file = get_log_file_paths(tmp_path.as_posix(), pants_run.pid)
assert_unhandled_exception_log_matches(pants_run.pid, read_file(pid_specific_log_file))
assert_unhandled_exception_log_matches(pants_run.pid, read_file(shared_log_file))
class ExceptionSinkIntegrationTest(PantsDaemonIntegrationTestBase):
hermetic = False
def test_dumps_logs_on_signal(self):
"""Send signals which are handled, but don't get converted into a KeyboardInterrupt."""
signal_names = {
signal.SIGQUIT: "SIGQUIT",
signal.SIGTERM: "SIGTERM",
}
for (signum, signame) in signal_names.items():
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
pid = ctx.checker.assert_started()
os.kill(pid, signum)
time.sleep(5)
# Check that the logs show a graceful exit by signal.
pid_specific_log_file, shared_log_file = get_log_file_paths(ctx.workdir, pid)
assert_graceful_signal_log_matches(
pid, signum, signame, read_file(pid_specific_log_file)
)
assert_graceful_signal_log_matches(pid, signum, signame, read_file(shared_log_file))
def test_dumps_traceback_on_sigabrt(self):
# SIGABRT sends a traceback to the log file for the current process thanks to
# faulthandler.enable().
with self.pantsd_successful_run_context() as ctx:
ctx.runner(["help"])
pid = ctx.checker.assert_started()
os.kill(pid, signal.SIGABRT)
time.sleep(5)
# Check that the logs show an abort signal and the beginning of a traceback.
pid_specific_log_file, shared_log_file = get_log_file_paths(ctx.workdir, pid)
regex_str = """\
Fatal Python error: Aborted
Thread [^\n]+ \\(most recent call first\\):
"""
assert re.search(regex_str, read_file(pid_specific_log_file))
# faulthandler.enable() only allows use of a single logging file at once for fatal tracebacks.
assert "" == read_file(shared_log_file)
| [
"[email protected]"
]
| |
fffdf977cf0f7526c91b672b614f4123a1b4ffb7 | 992969b8b0beb53bf884938ae20f5d56bb3878f2 | /rules/taxonomic_classification/Snakefile | cfeaba79fdeb1f60bd767ff30569e1b24c9ae5ba | [
"BSD-3-Clause"
]
| permissive | dahak-metagenomics/taco-taxonomic-classification | c6f0120e7d5e979c03d4c6d0778756960f46e593 | 854cae4f1b2427746a1faa6a0e0aefbfb11c5523 | refs/heads/master | 2020-03-12T21:29:04.347344 | 2018-05-03T16:47:32 | 2018-05-03T16:47:32 | 130,829,061 | 0 | 0 | BSD-3-Clause | 2018-12-16T03:00:58 | 2018-04-24T09:23:14 | Python | UTF-8 | Python | false | false | 298 | import utils
include: "taxonomic_classification.settings"
include: "biocontainers.rule"
include: "sourmash_sbt.rule"
include: "calculate_signatures.rule"
include: "trimmed_data.rule"
include: "kaiju.rule"
include: "kaiju2krona.rule"
include: "filter_taxa.rule"
include: "krona_visualization.rule"
| [
"[email protected]"
]
| ||
49ac68fcf23d7d889d0bd59ad259e4465769c696 | 1dae87abcaf49f1d995d03c0ce49fbb3b983d74a | /programs/subroutines/Feshbach ramp10ms 0-47A.sub.py | 393291c0239b4dbeeb5ba83566e5ced7dd8e41ea | []
| no_license | BEC-Trento/BEC1-data | 651cd8e5f15a7d9848f9921b352e0830c08f27dd | f849086891bc68ecf7447f62962f791496d01858 | refs/heads/master | 2023-03-10T19:19:54.833567 | 2023-03-03T22:59:01 | 2023-03-03T22:59:01 | 132,161,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,992 | py | prg_comment = ""
prg_version = "0.5.1"
def program(prg, cmd):
prg.add(0, "Delta 1 Current", 0.000000)
prg.add(1000, "Delta 1 Current", 0.470000)
prg.add(2000, "Delta 1 Current", 0.940000)
prg.add(3000, "Delta 1 Current", 1.410000)
prg.add(4000, "Delta 1 Current", 1.880000)
prg.add(5000, "Delta 1 Current", 2.350000)
prg.add(6000, "Delta 1 Current", 2.820000)
prg.add(7000, "Delta 1 Current", 3.290000)
prg.add(8000, "Delta 1 Current", 3.760000)
prg.add(9000, "Delta 1 Current", 4.230000)
prg.add(10000, "Delta 1 Current", 4.700000)
prg.add(11000, "Delta 1 Current", 5.170000)
prg.add(12000, "Delta 1 Current", 5.640000)
prg.add(13000, "Delta 1 Current", 6.110000)
prg.add(14000, "Delta 1 Current", 6.580000)
prg.add(15000, "Delta 1 Current", 7.050000)
prg.add(16000, "Delta 1 Current", 7.520000)
prg.add(17000, "Delta 1 Current", 7.990000)
prg.add(18000, "Delta 1 Current", 8.460000)
prg.add(19000, "Delta 1 Current", 8.930000)
prg.add(20000, "Delta 1 Current", 9.400000)
prg.add(21000, "Delta 1 Current", 9.870000)
prg.add(22000, "Delta 1 Current", 10.340000)
prg.add(23000, "Delta 1 Current", 10.810000)
prg.add(24000, "Delta 1 Current", 11.280000)
prg.add(25000, "Delta 1 Current", 11.750000)
prg.add(26000, "Delta 1 Current", 12.220000)
prg.add(27000, "Delta 1 Current", 12.690000)
prg.add(28000, "Delta 1 Current", 13.160000)
prg.add(29000, "Delta 1 Current", 13.630000)
prg.add(30000, "Delta 1 Current", 14.100000)
prg.add(31000, "Delta 1 Current", 14.570000)
prg.add(32000, "Delta 1 Current", 15.040000)
prg.add(33000, "Delta 1 Current", 15.510000)
prg.add(34000, "Delta 1 Current", 15.980000)
prg.add(35000, "Delta 1 Current", 16.450000)
prg.add(36000, "Delta 1 Current", 16.920000)
prg.add(37000, "Delta 1 Current", 17.390000)
prg.add(38000, "Delta 1 Current", 17.860000)
prg.add(39000, "Delta 1 Current", 18.330000)
prg.add(40000, "Delta 1 Current", 18.800000)
prg.add(41000, "Delta 1 Current", 19.270000)
prg.add(42000, "Delta 1 Current", 19.740000)
prg.add(43000, "Delta 1 Current", 20.210000)
prg.add(44000, "Delta 1 Current", 20.680000)
prg.add(45000, "Delta 1 Current", 21.150000)
prg.add(46000, "Delta 1 Current", 21.620000)
prg.add(47000, "Delta 1 Current", 22.090000)
prg.add(48000, "Delta 1 Current", 22.560000)
prg.add(49000, "Delta 1 Current", 23.030000)
prg.add(50000, "Delta 1 Current", 23.500000)
prg.add(51000, "Delta 1 Current", 23.970000)
prg.add(52000, "Delta 1 Current", 24.440000)
prg.add(53000, "Delta 1 Current", 24.910000)
prg.add(54000, "Delta 1 Current", 25.380000)
prg.add(55000, "Delta 1 Current", 25.850000)
prg.add(56000, "Delta 1 Current", 26.320000)
prg.add(57000, "Delta 1 Current", 26.790000)
prg.add(58000, "Delta 1 Current", 27.260000)
prg.add(59000, "Delta 1 Current", 27.730000)
prg.add(60000, "Delta 1 Current", 28.200000)
prg.add(61000, "Delta 1 Current", 28.670000)
prg.add(62000, "Delta 1 Current", 29.140000)
prg.add(63000, "Delta 1 Current", 29.610000)
prg.add(64000, "Delta 1 Current", 30.080000)
prg.add(65000, "Delta 1 Current", 30.550000)
prg.add(66000, "Delta 1 Current", 31.020000)
prg.add(67000, "Delta 1 Current", 31.490000)
prg.add(68000, "Delta 1 Current", 31.960000)
prg.add(69000, "Delta 1 Current", 32.430000)
prg.add(70000, "Delta 1 Current", 32.900000)
prg.add(71000, "Delta 1 Current", 33.370000)
prg.add(72000, "Delta 1 Current", 33.840000)
prg.add(73000, "Delta 1 Current", 34.310000)
prg.add(74000, "Delta 1 Current", 34.780000)
prg.add(75000, "Delta 1 Current", 35.250000)
prg.add(76000, "Delta 1 Current", 35.720000)
prg.add(77000, "Delta 1 Current", 36.190000)
prg.add(78000, "Delta 1 Current", 36.660000)
prg.add(79000, "Delta 1 Current", 37.130000)
prg.add(80000, "Delta 1 Current", 37.600000)
prg.add(81000, "Delta 1 Current", 38.070000)
prg.add(82000, "Delta 1 Current", 38.540000)
prg.add(83000, "Delta 1 Current", 39.010000)
prg.add(84000, "Delta 1 Current", 39.480000)
prg.add(85000, "Delta 1 Current", 39.950000)
prg.add(86000, "Delta 1 Current", 40.420000)
prg.add(87000, "Delta 1 Current", 40.890000)
prg.add(88000, "Delta 1 Current", 41.360000)
prg.add(89000, "Delta 1 Current", 41.830000)
prg.add(90000, "Delta 1 Current", 42.300000)
prg.add(91000, "Delta 1 Current", 42.770000)
prg.add(92000, "Delta 1 Current", 43.240000)
prg.add(93000, "Delta 1 Current", 43.710000)
prg.add(94000, "Delta 1 Current", 44.180000)
prg.add(95000, "Delta 1 Current", 44.650000)
prg.add(96000, "Delta 1 Current", 45.120000)
prg.add(97000, "Delta 1 Current", 45.590000)
prg.add(98000, "Delta 1 Current", 46.060000)
prg.add(99000, "Delta 1 Current", 46.530000)
prg.add(100000, "Delta 1 Current", 47.000000)
return prg
| [
"[email protected]"
]
| |
bc770e9ab63ca5279016744344e0868a658a80a9 | 257bd63361aa846ffdacdc15edaecf84c6364e78 | /psou2/pyanal1/apack1/ex7_t-test.py | 0753bfe9b286ee0d0bd0745aa8b0a95a5414b8f0 | []
| no_license | gom4851/hcjeon | 86dcfd05ce47a13d066f13fe187d6a63142fb9fe | 59a00ca9499f30e50127bb16eb510553e88ace43 | refs/heads/master | 2020-06-04T23:16:08.632278 | 2019-01-15T09:54:08 | 2019-01-15T09:54:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,682 | py | '''
Created on 2018. 11. 29.
When there are two groups
Samples obtained from two mutually independent groups - e.g. men's vs. women's test scores, the heights of class A vs. class B, or incomes in Gyeonggi-do vs. Chungcheong-do - are called independent samples (two samples).
Exercise) Test the difference in the mean Python exam scores of two groups, male and female
Male = [75, 85, 100, 72.5, 86.5]
female = [63.2, 76, 52, 100, 70]
'''
from scipy import stats
import pandas as pd
from numpy import average
from pandas.io.parsers import read_csv
# Null hypothesis: there is no difference in the mean Python exam scores of the male and female groups.
# Alternative hypothesis: there is a difference in the mean Python exam scores of the male and female groups.
male = [75, 85, 100, 72.5, 86.5]
female = [63.2, 76, 52, 100, 70]
two_sam = stats.ttest_ind(male, female) # ttest_ind: t-test on two independent samples; used when there are two groups.
#two_sam = stats.ttest_ind(male, female, equal_var=True)
print(two_sam)
# t value (test statistic): statistic=1.233193127514512, p-value=0.2525076844853278
t, p = two_sam
print('t statistic: {}, p-value: {}'.format(t, p))
# p-value 0.2525 > 0.05, so the null hypothesis is accepted.
print('female mean: ', average(female))
print('male mean: ', average(male))
print('**' * 30)
'''
Exercise) Run a test on the mean exam scores for two teaching methods - two_sample.csv
'''
data = pd.read_csv("../testdata/two_sample.csv")
print(data.head())
df = data[['method', 'score']]
print(df.head())
# Null hypothesis: there is no difference in mean exam scores between the two teaching methods.
# Alternative hypothesis: there is a difference in mean exam scores between the two teaching methods.
m1 = df[df['method'] == 1] # teaching method 1
m2 = df[df['method'] == 2] # teaching method 2
score1 = m1['score'] # scores for teaching method 1
score2 = m2['score'] # scores for teaching method 2
#print('score1')
#print('score2')
# sco1 = score1.fillna())
sco1 = score1.fillna(score1.mean()) # replace NaN with the column mean
sco2 = score2.fillna(score2.mean())
#print(sco2)
# Check normality: histogram, shapiro()
result = stats.ttest_ind(sco1, sco2)
stat, pv = result
print('t statistic: {}, p-value: {}'.format(stat, pv))
# t statistic: -0.1964, p-value: 0.8450
# p-value 0.8450 > 0.05, so the null hypothesis is accepted.
print("**" * 30)
'''
* Paired samples t-test: testing the mean difference of two dependent groups
Treating before-treatment and after-treatment as separate populations, samples in which the before and after
observations from the same subjects are matched one-to-one are called paired samples.
Comparing the means of paired groups is mainly used to reveal how much of an effect a treatment had, by comparing each subject's observations before and after the treatment.
Since this is not a between-group comparison, there is no need to test for equal variances.
Exercise) Weights of 9 people before abdominal surgery and the weight change after the surgery
baseline = [67.2, 67.4, 71.5, 77.6, 86.0, 89.1, 59.5, 81.9, 105.5]
follow_up = [62.4, 64.6, 70.4, 62.6, 80.1, 73.2, 58.2, 71.0, 101.0]
'''
import numpy as np
import seaborn as sns
import matplotlib.pylab as plt
import scipy as sp
# Just trying it out with random values first
np.random.seed(0)
x1 = np.random.normal(100, 10, 100)
x2 = np.random.normal(97, 10, 100)
#print(x1)
# To check normality with a histogram
sns.distplot(x1, kde=False, fit=sp.stats.norm)
sns.distplot(x2, kde=False, fit=sp.stats.norm)
#plt.show()
print(stats.ttest_rel(x1, x2))
# Exercise) Weights of 9 people before abdominal surgery and the weight change after the surgery
baseline = [67.2, 67.4, 71.5, 77.6, 86.0, 89.1, 59.5, 81.9, 105.5]
follow_up = [62.4, 64.6, 70.4, 62.6, 80.1, 73.2, 58.2, 71.0, 101.0]
# Null hypothesis: there is no difference in weight before and after abdominal surgery.
# Alternative hypothesis: there is a difference in weight before and after abdominal surgery.
paired_sam = stats.ttest_rel(baseline, follow_up)
print('t statistic: %.5f, p-value: %.5f' % paired_sam)
# t statistic: 3.66812, p-value: 0.00633
# p-value 0.00633 < 0.05, so the null hypothesis is rejected.
print("**" * 30)
'''
Proportion tests in inferential statistics
- Characteristics of proportion tests
: Verify whether a group's proportion equals some specific value.
: Decide whether to reject the null hypothesis based on the proportion-difference test statistic.
'''
'''
# one-sample
At company A, 45 out of 100 people smoke. National statistics say the national smoking rate is 35%. Is the proportion the same?
'''
# Null hypothesis: the proportion is the same as the national smoking rate of 35%.
# Alternative hypothesis: the proportion differs from the national smoking rate of 35%.
from statsmodels.stats.proportion import proportions_ztest # z-test for a proportion (normal approximation)
count = np.array([45]) # number of successes
nobs = np.array([100]) # total number of observations
val = 0.35 # hypothesized proportion
#print(count)
z, p = proportions_ztest(count, nobs, val) # proportions_ztest: used for proportion tests in inferential statistics.
print('z value: ', z)
print('p-value: ', p)
# Interpretation: p-value 0.04442318 < 0.05, so the null hypothesis is rejected; the proportions differ.
print("**" * 30)
'''
# two-sample
100 out of 300 people at company A drink coffee, and 170 out of 400 people at company B drink coffee. Are the proportions the same?
'''
# Null hypothesis: the proportions of coffee drinkers at company A and company B are the same.
# Alternative hypothesis: the proportions of coffee drinkers at company A and company B are different.
count = np.array([100, 170]) # numbers of successes
nobs = np.array([300, 400]) # total numbers of observations
z, p = proportions_ztest(count, nobs, value=0)
print('z value: ', z)
print('p-value: ', p)
# Interpretation: p-value 0.013 < 0.05, so the null hypothesis is rejected; the proportions differ.
"[email protected]"
]
| |
9d600cce89991aa6203255279e0ca1a38c358019 | 9dda81fb80905226b10e69d4c20f72533f2b68bd | /word4u/settings.py | 428f2f62207b14eda69e375e388e9fc7056bd2e4 | []
| no_license | SardarDawar/graphicStore-ecommerce | 2c610d6ef4e62d98659bf98d3d80af3d1926aa3f | 1d9208d27e4ba69bedf7a34b7e54bcc540d26438 | refs/heads/master | 2023-02-02T13:45:31.335032 | 2020-12-15T18:52:43 | 2020-12-15T18:52:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,231 | py | """
Django settings for word4u project.
Generated by 'django-admin startproject' using Django 3.1.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'd*mwv+8mla!+u^kze0!o3l#qlfjd(%z&!-=svuhzqwy!m8)8rw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'home',
'products',
'personalise',
'bag',
'checkout',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'word4u.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
os.path.join(BASE_DIR, 'templates', 'allauth'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'bag.context_processors.cart',
],
},
},
]
AUTHENTICATION_BACKENDS = [
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
]
SITE_ID = 1
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ACCOUNT_AUTHENTICATION_METHOD = 'username_email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_SIGNUP_EMAIL_ENTER_TWICE = True
ACCOUNT_USERNAME_MIN_LENGTH = 4
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/'
WSGI_APPLICATION = 'word4u.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR, 'static'),)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
BUNDLE_DISCOUNT_THRESHOLD = 29.99
##### Cart Session ######
CART_SESSION_ID = 'cart' | [
"[email protected]"
]
| |
044ee99b8b988b65ecbc2c2dec1ecf70dd5fdc3e | 4f7319f90b9ea3bfdae13a59885b917db6037164 | /utils.py | 25d30a4ee9fd42b0e27142b1d9901d93f4fa377a | []
| no_license | DmytroKaminskiy/hillel-git | c78eb3b52b0a827527421cb2fef6e09eebf23e6d | d42f7d8f441333f83cfe5e57b7aa744737891acc | refs/heads/main | 2023-02-12T01:16:55.017896 | 2021-01-14T19:26:23 | 2021-01-14T19:26:23 | 323,099,692 | 0 | 0 | null | 2020-12-24T19:20:24 | 2020-12-20T15:10:59 | Python | UTF-8 | Python | false | false | 4,124 | py | def generate_password(length: int) -> str:
"""
generate password with given length
Homework
    the function must return a string of random characters of the given length.
"""
return ''
def encrypt_message(message: str) -> str:
"""
encrypt message
    encrypt the message with an algorithm:
    shift every character along the ASCII table by a given distance.
"""
key = 2
return ''.join(
chr(num + key)
for num in map(ord, message)
)
def lucky_number(ticket: str) -> bool:
"""
lucky number (tram ticket)
667766 - is lucky (6 + 6 + 7 == 7 + 6 + 6)
сумма первых трех числе должна равняться сумме последних трех чисел
"""
return True
def fizz_buzz(num: int) -> str:
"""
fizz buzz
    if the number is divisible by three, the program must return the word "Fizz",
    and for numbers divisible by five - the word "Buzz".
    If the number is divisible by both 3 and 5, the program must return the word "FizzBuzz";
    in all other cases, the number as a string
"""
return ''
def password_is_strong(password) -> bool:
"""
    is the password strong
    (has a digit, lowercase and uppercase characters, and is at least 10 characters long)
    return True if the password is strong
    Criteria:
    1. The password must contain at least one digit
    2. The password must contain at least one lowercase character
    3. The password must contain at least one uppercase character
    4. The password must be at least 10 characters long
"""
return True
def number_is_prime(num: int) -> bool:
"""
number is prime
    takes a number as input
    return True if the number is prime
https://ru.wikipedia.org/wiki/%D0%9F%D1%80%D0%BE%D1%81%D1%82%D0%BE%D0%B5_%D1%87%D0%B8%D1%81%D0%BB%D0%BE#:~:text=2%2C%203%2C%205%2C%207,%D1%87%D0%B8%D1%81%D0%BB%D0%B0%20%D0%B1%D1%8B%D1%82%D1%8C%20%D0%BF%D1%80%D0%BE%D1%81%D1%82%D1%8B%D0%BC%20%D0%BD%D0%B0%D0%B7%D1%8B%D0%B2%D0%B0%D0%B5%D1%82%D1%81%D1%8F%20%D0%BF%D1%80%D0%BE%D1%81%D1%82%D0%BE%D1%82%D0%BE%D0%B9.
"""
return True
def decrypt_message(message: str) -> str:
"""
decrypt message
    the inverse of encrypt_message:
    decrypt the message using the given key
"""
return ''
def volume_of_sphere(radius: float) -> float:
"""
Volume of a Sphere
    takes the radius of the sphere as input.
    Compute the volume of the sphere and round the result to two decimal places
round to 2 places
"""
return 0.0
def days_diff(start_date: ..., end_date: ...) -> int:
"""
calculate number of days between two dates.
    find the difference between two dates
"""
return 0
def prs(client_choice: str) -> bool:
"""
paper rock scissors
    take the client's choice from a list of values (e.g. ['p', 'r', 's'])
    generate a random choice on the server
    implement a rock-paper-scissors game between client and server
"""
return True
def integer_as_roman(integer: int) -> str:
"""
***
integer to Roman Number
    return the value as a Roman numeral
"""
return ''
if __name__ == '__main__':
assert encrypt_message('Dima') == 'Fkoc'
| [
"[email protected]"
]
| |
16479781a6eaec6b633cfb0c482675ebfec21d1c | f1fe131614660a04e4fe4ad27b5183ffe2b2a6e4 | /2020/22a.py | adf2ab439b15c787a0786490d4ad786d1b4b22f6 | [
"MIT"
]
| permissive | msullivan/advent-of-code | 5873228d562c1069d6516ee99943013bc91c4caa | e52c93e2ffa1e598f23e6d0e356b54c5e82ee61d | refs/heads/master | 2023-01-04T07:14:46.367051 | 2023-01-03T19:04:12 | 2023-01-03T19:04:12 | 47,717,709 | 9 | 4 | MIT | 2022-12-07T19:39:52 | 2015-12-09T20:41:00 | Python | UTF-8 | Python | false | false | 721 | py | #!/usr/bin/env python3
import sys
import re
def extract(s):
return [int(x) for x in re.findall(r'-?\d+', s)]
def main(args):
data = [x.strip().split('\n') for x in sys.stdin.read().split('\n\n')]
# data = [s.strip() for s in sys.stdin]
d1 = [int(x) for x in data[0][1:]]
d2 = [int(x) for x in data[1][1:]]
while d1 and d2:
x1 = d1.pop(0)
x2 = d2.pop(0)
if x1 > x2:
d1.append(x1)
d1.append(x2)
else:
d2.append(x2)
d2.append(x1)
print(d1, d2)
xs = list(reversed(d1 + d2))
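    # Score the winning deck (only one of d1/d2 is non-empty here):
    # the bottom card is worth 1x its value, the top card len(xs)x.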
s = 0
for i, x in enumerate(xs):
s += (i+1)*x
print(s)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| [
"[email protected]"
]
| |
951f0b687ce2c59a9cbbd6d74b48fae3832ff058 | 77311ad9622a7d8b88707d7cee3f44de7c8860cb | /res/scripts/client/gui/miniclient/lobby/header/account_popover.py | f40f37a1d090fd11f2dd1dc02f387086e216e1ab | []
| no_license | webiumsk/WOT-0.9.14-CT | 9b193191505a4560df4e872e022eebf59308057e | cfe0b03e511d02c36ce185f308eb48f13ecc05ca | refs/heads/master | 2021-01-10T02:14:10.830715 | 2016-02-14T11:59:59 | 2016-02-14T11:59:59 | 51,606,676 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 2,474 | py | # 2016.02.14 12:38:06 Central Europe (standard time)
# Embedded file name: scripts/client/gui/miniclient/lobby/header/account_popover.py
from gui.Scaleform.locale.MINICLIENT import MINICLIENT
from gui.shared.utils.functions import makeTooltip
from helpers import aop
from helpers.i18n import makeString as _ms
class ClanBtnsUnavailableAspect(aop.Aspect):
def atReturn(self, cd):
original_return_value = cd.returned
warnTooltip = makeTooltip(None, None, None, _ms(MINICLIENT.ACCOUNTPOPOVER_WARNING))
original_return_value['btnTooltip'] = warnTooltip
original_return_value['requestInviteBtnTooltip'] = warnTooltip
original_return_value['searchClanTooltip'] = warnTooltip
original_return_value['isOpenInviteBtnEnabled'] = False
original_return_value['isSearchClanBtnEnabled'] = False
original_return_value['btnEnabled'] = False
return original_return_value
class MyClanInvitesBtnUnavailableAspect(aop.Aspect):
def atReturn(self, cd):
original_return_value = cd.returned
original_return_value['inviteBtnTooltip'] = makeTooltip(None, None, None, _ms(MINICLIENT.ACCOUNTPOPOVER_WARNING))
original_return_value['inviteBtnEnabled'] = False
return original_return_value
class ClanBtnsUnavailable(aop.Pointcut):
def __init__(self):
aop.Pointcut.__init__(self, 'gui.Scaleform.daapi.view.lobby.header.AccountPopover', 'AccountPopover', '_getClanBtnsParams', aspects=(ClanBtnsUnavailableAspect,))
class MyClanInvitesBtnUnavailable(aop.Pointcut):
def __init__(self):
aop.Pointcut.__init__(self, 'gui.Scaleform.daapi.view.lobby.header.AccountPopover', 'AccountPopover', '_getMyInvitesBtnParams', aspects=(MyClanInvitesBtnUnavailableAspect,))
class CrewButtonStatusAspect(aop.Aspect):
def atCall(self, cd):
cd.avoid()
return {'isEnabled': False,
'disabledTooltip': _ms('#menu:header/account/popover/crew_button/disabledTooltip')}
class CrewButtonStatusPointcut(aop.Pointcut):
def __init__(self):
aop.Pointcut.__init__(self, 'gui.Scaleform.daapi.view.lobby.header.AccountPopover', 'AccountPopover', '_crewDataButtonStatus', aspects=(CrewButtonStatusAspect,))
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\miniclient\lobby\header\account_popover.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.02.14 12:38:06 Central Europe (standard time)
| [
"[email protected]"
]
| |
560fd9677d919732ed6f5c0c072bed734de5e606 | 626b14ce13986b6d5e03143e151004247659625a | /Day66-75/code/myutils.py | e013c27a881545e2cececd53c451c5dc84ae478a | []
| no_license | Focavn/Python-100-Days | c7586ecf7ae3f1fd42f024558bb998be23ee9df8 | d8de6307aeff9fe31fd752bd7725b9cc3fbc084b | refs/heads/master | 2021-08-08T17:57:02.025178 | 2020-09-17T11:58:04 | 2020-09-17T11:58:04 | 220,427,144 | 0 | 0 | null | 2019-11-08T08:59:43 | 2019-11-08T08:59:41 | null | UTF-8 | Python | false | false | 202 | py | from functools import wraps
def coroutine(fn):
@wraps(fn)
def wrapper(*args, **kwargs):
gen = fn(*args, **kwargs)
next(gen)
return gen
return wrapper
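# Illustrative usage sketch (a hypothetical running-average coroutine):
# the decorator advances a fresh generator to its first `yield`, so callers
# can `send()` values immediately without priming it by hand.
if __name__ == '__main__':
    @coroutine
    def averager():
        total, count, current = 0.0, 0, None
        while True:
            value = yield current  # yields the running average so far
            total += value
            count += 1
            current = total / count

    avg = averager()
    print(avg.send(10))  # 10.0
    print(avg.send(30))  # 20.0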
| [
"[email protected]"
]
| |
72653be3b07d719ad692e5faa92e16a8f3b42b58 | 9adc810b07f7172a7d0341f0b38088b4f5829cf4 | /experiments/ashvin/icml2020/process_data/consolidate.py | ce20ffe52de5597a4618268be84969ec69785382 | [
"MIT"
]
| permissive | Asap7772/railrl_evalsawyer | 7ee9358b5277b9ddf2468f0c6d28beb92a5a0879 | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | refs/heads/main | 2023-05-29T10:00:50.126508 | 2021-06-18T03:08:12 | 2021-06-18T03:08:12 | 375,810,557 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,393 | py | import pickle
import glob
import numpy as np
def print_stats(data):
returns = []
path_lengths = []
print("num trajectories", len(data))
for path in data:
rewards = path["rewards"]
returns.append(np.sum(rewards))
path_lengths.append(len(rewards))
print("returns")
print("min", np.min(returns))
print("max", np.max(returns))
print("mean", np.mean(returns))
print("std", np.std(returns))
print("path lengths")
print("min", np.min(path_lengths))
print("max", np.max(path_lengths))
print("mean", np.mean(path_lengths))
print("std", np.std(path_lengths))
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc1/run5/id0/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc2.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run4/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc3.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run4/id*/video_*vae.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc3_vae.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run4/id*/video_*env.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc3_env.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/pen/demo-bc5/run6/id*/video_*vae.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/pen_bc4_vae.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/door/demo-bc5/run2/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/door_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/hammer/demo-bc1/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/hammer_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/relocate/demo-bc1/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/relocate_bc1.npy"
# input_patterns = [
# "/home/ashvin/data/s3doodad/ashvin/icml2020/hand/door/bc/bc-data1/run0/id*/video_*.p",
# ]
# output_file = "/home/ashvin/data/s3doodad/demos/icml2020/hand/door_bc2.npy"
input_patterns = [
"/media/ashvin/data2/s3doodad/ashvin/rfeatures/rlbench/open-drawer-vision3/td3bc-with-state3/run0/id0/video_*_vae.p",
]
output_file = "/home/ashvin/data/s3doodad/demos/icml2020/rlbench/rlbench_bc1.npy"
data = []
for pattern in input_patterns:
for file in glob.glob(pattern):
d = pickle.load(open(file, "rb"))
print(file, len(d))
for path in d: # for deleting image observations
for i in range(len(path["observations"])):
ob = path["observations"][i]
keys = list(ob.keys())
for key in keys:
if key != "state_observation":
del ob[key]
data.extend(d)
pickle.dump(data, open(output_file, "wb"))
print(output_file)
print_stats(data)
| [
"[email protected]"
]
| |
d955f629b3a6c204796080da55b86f3e501fa3d8 | 3312b5066954cbf96c79ef3e1f3d582b31ebc5ae | /colegend/events/migrations/0003_auto_20161127_1814.py | dd7c171af96b65ca770497884530df6b420a88a0 | []
| no_license | Eraldo/colegend | d3f3c2c37f3bade7a3a1e10d307d49db225fe7f5 | 2e7b9d27887d7663b8d0d1930c2397c98e9fa1fc | refs/heads/master | 2021-01-16T23:32:09.245967 | 2020-10-07T12:12:14 | 2020-10-07T12:12:14 | 21,119,074 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 779 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-27 17:14
from __future__ import unicode_literals
import colegend.cms.blocks
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
class Migration(migrations.Migration):
dependencies = [
('events', '0002_auto_20160711_2253'),
]
operations = [
migrations.AlterField(
model_name='eventspage',
name='content',
field=wagtail.core.fields.StreamField((('heading', colegend.cms.blocks.HeadingBlock()), ('rich_text', colegend.cms.blocks.RichTextBlock()), ('image', colegend.cms.blocks.ImageBlock()), ('embed', colegend.cms.blocks.EmbedBlock()), ('html', wagtail.core.blocks.RawHTMLBlock())), blank=True),
),
]
| [
"[email protected]"
]
| |
f43e887280e075b652207566e76e69e00e5fbf4d | 9f495456202ecbfdcbc17aae96f8db47116f7adf | /myenv/lib/python3.6/site-packages/django_extensions/management/commands/runprofileserver.py | c65bcf518017fb2c17af005a471b54fe29720d33 | []
| no_license | nknaveenkumar760/pythontutorial | 8dfae178e5ffa1942722a3754bd1b0c1fc99aa3b | 22df07acad252040c6b9b68c935fef5add9cf974 | refs/heads/master | 2022-02-19T18:06:40.386333 | 2019-08-02T08:12:11 | 2019-08-02T08:12:11 | 171,852,495 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,958 | py | # -*- coding: utf-8 -*-
"""
runprofileserver.py
Starts a lightweight Web server with profiling enabled.
Credits for kcachegrind support taken from lsprofcalltree.py go to:
David Allouche
Jp Calderone & Itamar Shtull-Trauring
Johan Dahlin
"""
import sys
from datetime import datetime
from django.conf import settings
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application
from django_extensions.management.utils import signalcommand
USE_STATICFILES = 'django.contrib.staticfiles' in settings.INSTALLED_APPS
class KCacheGrind(object):
def __init__(self, profiler):
self.data = profiler.getstats()
self.out_file = None
def output(self, out_file):
self.out_file = out_file
self.out_file.write('events: Ticks\n')
self._print_summary()
for entry in self.data:
self._entry(entry)
def _print_summary(self):
max_cost = 0
for entry in self.data:
totaltime = int(entry.totaltime * 1000)
max_cost = max(max_cost, totaltime)
self.out_file.write('summary: %d\n' % (max_cost,))
def _entry(self, entry):
out_file = self.out_file
code = entry.code
if isinstance(code, str):
out_file.write('fn=%s\n' % code)
else:
out_file.write('fl=%s\n' % code.co_filename)
out_file.write('fn=%s\n' % code.co_name)
inlinetime = int(entry.inlinetime * 1000)
if isinstance(code, str):
out_file.write('0 %s\n' % inlinetime)
else:
out_file.write('%d %d\n' % (code.co_firstlineno, inlinetime))
# recursive calls are counted in entry.calls
if entry.calls:
calls = entry.calls
else:
calls = []
if isinstance(code, str):
lineno = 0
else:
lineno = code.co_firstlineno
for subentry in calls:
self._subentry(lineno, subentry)
out_file.write("\n")
def _subentry(self, lineno, subentry):
out_file = self.out_file
code = subentry.code
if isinstance(code, str):
out_file.write('cfn=%s\n' % code)
out_file.write('calls=%d 0\n' % (subentry.callcount,))
else:
out_file.write('cfl=%s\n' % code.co_filename)
out_file.write('cfn=%s\n' % code.co_name)
out_file.write('calls=%d %d\n' % (subentry.callcount, code.co_firstlineno))
totaltime = int(subentry.totaltime * 1000)
out_file.write('%d %d\n' % (lineno, totaltime))
class Command(BaseCommand):
help = "Starts a lightweight Web server with profiling enabled."
args = '[optional port number, or ipaddr:port]'
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'addrport', nargs='?',
help='Optional port number, or ipaddr:port'
)
parser.add_argument(
'--noreload', action='store_false', dest='use_reloader',
default=True,
help='Tells Django to NOT use the auto-reloader.')
parser.add_argument(
'--nothreading', action='store_false', dest='use_threading', default=True,
help='Tells Django to NOT use threading.',
)
parser.add_argument(
'--prof-path', dest='prof_path', default='/tmp',
help='Specifies the directory which to save profile information '
'in.'
)
parser.add_argument(
'--prof-file', dest='prof_file',
default='{path}.{duration:06d}ms.{time}',
help='Set filename format, default if '
'"{path}.{duration:06d}ms.{time}".'
)
parser.add_argument(
'--nomedia', action='store_true', dest='no_media', default=False,
help='Do not profile MEDIA_URL'
)
parser.add_argument(
'--use-cprofile', action='store_true', dest='use_cprofile',
default=False,
help='Use cProfile if available, this is disabled per default '
'because of incompatibilities.'
)
parser.add_argument(
'--kcachegrind', action='store_true', dest='use_lsprof',
default=False,
help='Create kcachegrind compatible lsprof files, this requires '
'and automatically enables cProfile.'
)
if USE_STATICFILES:
parser.add_argument(
'--nostatic', action="store_false", dest='use_static_handler',
default=True,
help='Tells Django to NOT automatically serve static files '
'at STATIC_URL.')
parser.add_argument(
'--insecure', action="store_true", dest='insecure_serving',
default=False,
help='Allows serving static files even if DEBUG is False.')
@signalcommand
def handle(self, addrport='', *args, **options):
import django
import socket
import errno
from django.core.servers.basehttp import run
if args:
            raise CommandError('Usage is runprofileserver %s' % self.args)
if not addrport:
addr = ''
port = '8000'
else:
try:
addr, port = addrport.split(':')
except ValueError:
addr, port = '', addrport
if not addr:
addr = '127.0.0.1'
if not port.isdigit():
raise CommandError("%r is not a valid port number." % port)
use_reloader = options['use_reloader']
shutdown_message = options.get('shutdown_message', '')
no_media = options['no_media']
quit_command = (sys.platform == 'win32') and 'CTRL-BREAK' or 'CONTROL-C'
def inner_run():
import os
import time
try:
import hotshot
HAS_HOTSHOT = True
except ImportError:
HAS_HOTSHOT = False # python 3.x
USE_CPROFILE = options['use_cprofile']
USE_LSPROF = options['use_lsprof']
if USE_LSPROF:
USE_CPROFILE = True
if USE_CPROFILE:
try:
import cProfile
USE_CPROFILE = True
except ImportError:
print("cProfile disabled, module cannot be imported!")
USE_CPROFILE = False
if USE_LSPROF and not USE_CPROFILE:
raise CommandError("Kcachegrind compatible output format required cProfile from Python 2.5")
if not HAS_HOTSHOT and not USE_CPROFILE:
raise CommandError("Hotshot profile library not found. (and not using cProfile)")
prof_path = options['prof_path']
prof_file = options['prof_file']
if not prof_file.format(path='1', duration=2, time=3):
prof_file = '{path}.{duration:06d}ms.{time}'
print("Filename format is wrong. Default format used: '{path}.{duration:06d}ms.{time}'.")
def get_exclude_paths():
exclude_paths = []
media_url = getattr(settings, 'MEDIA_URL', None)
if media_url:
exclude_paths.append(media_url)
static_url = getattr(settings, 'STATIC_URL', None)
if static_url:
exclude_paths.append(static_url)
return exclude_paths
def make_profiler_handler(inner_handler):
def handler(environ, start_response):
path_info = environ['PATH_INFO']
# when using something like a dynamic site middleware is could be necessary
# to refetch the exclude_paths every time since they could change per site.
if no_media and any(path_info.startswith(p) for p in get_exclude_paths()):
return inner_handler(environ, start_response)
path_name = path_info.strip("/").replace('/', '.') or "root"
profname = "%s.%d.prof" % (path_name, time.time())
profname = os.path.join(prof_path, profname)
if USE_CPROFILE:
prof = cProfile.Profile()
else:
prof = hotshot.Profile(profname)
start = datetime.now()
try:
return prof.runcall(inner_handler, environ, start_response)
finally:
# seeing how long the request took is important!
elap = datetime.now() - start
elapms = elap.seconds * 1000.0 + elap.microseconds / 1000.0
if USE_LSPROF:
kg = KCacheGrind(prof)
with open(profname, 'w') as f:
kg.output(f)
elif USE_CPROFILE:
prof.dump_stats(profname)
profname2 = prof_file.format(path=path_name, duration=int(elapms), time=int(time.time()))
profname2 = os.path.join(prof_path, "%s.prof" % profname2)
if not USE_CPROFILE:
prof.close()
os.rename(profname, profname2)
return handler
print("Validating models...")
if hasattr(self, 'check'):
self.check(display_num_errors=True)
else:
self.validate(display_num_errors=True)
print("\nDjango version %s, using settings %r" % (django.get_version(), settings.SETTINGS_MODULE))
print("Development server is running at http://%s:%s/" % (addr, port))
print("Quit the server with %s." % quit_command)
try:
handler = get_internal_wsgi_application()
if USE_STATICFILES:
use_static_handler = options['use_static_handler']
insecure_serving = options['insecure_serving']
if use_static_handler and (settings.DEBUG or insecure_serving):
handler = StaticFilesHandler(handler)
handler = make_profiler_handler(handler)
run(addr, int(port), handler, threading=options['use_threading'])
except socket.error as e:
# Use helpful error messages instead of ugly tracebacks.
ERRORS = {
errno.EACCES: "You don't have permission to access that port.",
errno.EADDRINUSE: "That port is already in use.",
errno.EADDRNOTAVAIL: "That IP address can't be assigned-to.",
}
try:
error_text = ERRORS[e.errno]
except (AttributeError, KeyError):
error_text = str(e)
sys.stderr.write(self.style.ERROR("Error: %s" % error_text) + '\n')
# Need to use an OS exit because sys.exit doesn't work in a thread
os._exit(1)
except KeyboardInterrupt:
if shutdown_message:
print(shutdown_message)
sys.exit(0)
if use_reloader:
from django.utils import autoreload
autoreload.main(inner_run)
else:
inner_run()
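# Typical invocation (the flag values are illustrative; see the options defined above):
#   python manage.py runprofileserver 8000 --use-cprofile --prof-path=/tmp/profiles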
| [
"[email protected]"
]
| |
3294f3bed75b66731462f43071b989c78c1010b7 | a0801d0e7325b31f0383fc68517e208680bb36d6 | /ProjectEuler/142.py | 85d8bf0fe2a06c44ae1c87bb45a122ac7a0c6bae | []
| no_license | conormccauley1999/CompetitiveProgramming | bd649bf04438817c7fa4755df2c2c7727273b073 | a7e188767364be40f625612af3d16182f2d8d4de | refs/heads/master | 2023-05-14T13:19:32.678134 | 2023-05-11T16:07:33 | 2023-05-11T16:07:33 | 179,089,010 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | MX = 1000000
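# Project Euler 142 ("Perfect Square Collection"): find the smallest x + y + z
# with x > y > z > 0 such that x+y, x-y, x+z, x-z, y+z and y-z are all perfect
# squares. The nested loops pick candidate squares s1..s6 and recover x, y, z,
# and the final all(...) check verifies every required sum and difference.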
squares = set([n * n for n in range(1, MX)])
for i in range(1, MX):
s1 = i * i
for j in range(1, i):
s2 = j * j
s3 = s1 - s2
if s3 not in squares: continue
for k in range(1, j):
s4 = k * k
s5 = s1 - s4
s6 = s2 - s5
if s5 not in squares or s6 not in squares: continue
x = (s1 + s6) // 2
y = (s3 + s5) // 2
z = (s2 - s4) // 2
if all(s in squares for s in [x+y,x-y,x+z,x-z,y+z,y-z]):
print(x + y + z)
quit()
# --- gs1/api/migrations/0001_initial.py (repo: ajitexl/restfrmaework) ---
# Generated by Django 3.1.1 on 2020-10-08 15:51
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('roll', models.IntegerField()),
('city', models.CharField(max_length=100)),
],
),
]
# --- whitebox/whitebox_tools.py (repo: deTrident/whitebox-python, MIT license) ---
#!/usr/bin/env python3
''' This file is intended to be a helper for running whitebox-tools plugins from a Python script.
See whitebox_example.py for an example of how to use it.
'''
# This script is part of the WhiteboxTools geospatial library.
# Authors: Dr. John Lindsay
# Created: 28/11/2017
# Last Modified: 25/07/2019
# License: MIT
from __future__ import print_function
import os
from os import path
import sys
import platform
import re
# import shutil
from subprocess import CalledProcessError, Popen, PIPE, STDOUT
import zipfile
import tarfile
import shutil
import urllib.request
import pkg_resources
def download_wbt():
'''
    Download WhiteboxTools pre-compiled binary for first-time use
'''
# print("Your operating system: {}".format(platform.system()))
package_name = "whitebox"
# Get package directory
pkg_dir = os.path.dirname(pkg_resources.resource_filename(package_name, 'whitebox_tools.py'))
exe_dir = os.path.join(pkg_dir, "WBT") # Get directory of WhiteboxTools executable file
work_dir = os.path.join(pkg_dir, "testdata") # Set working directory
try:
if not os.path.exists(exe_dir): # Download WhiteboxTools executable file if non-existent
print("Downloading WhiteboxTools pre-compiled binary for first time use ...")
if platform.system() == "Windows":
url = "https://jblindsay.github.io/ghrg/WhiteboxTools/WhiteboxTools_win_amd64.zip"
elif platform.system() == "Darwin":
url = "https://jblindsay.github.io/ghrg/WhiteboxTools/WhiteboxTools_darwin_amd64.zip"
elif platform.system() == "Linux":
url = "https://jblindsay.github.io/ghrg/WhiteboxTools/WhiteboxTools_linux_amd64.tar.xz"
else:
print("WhiteboxTools is not yet supported on {}!".format(platform.system()))
exit()
zip_name = os.path.join(pkg_dir, os.path.basename(url)) # Get WhiteboxTools zip file name
urllib.request.urlretrieve(url, zip_name) # Download WhiteboxTools
zip_ext = os.path.splitext(zip_name)[1] # Get downloaded zip file extension
print("Decompressing {} ...".format(os.path.basename(url)))
if zip_ext == ".zip": # Decompress Windows/Mac OS zip file
with zipfile.ZipFile(zip_name, "r") as zip_ref:
zip_ref.extractall(pkg_dir)
else: # Decompress Linux tar file
with tarfile.open(zip_name, "r") as tar_ref:
tar_ref.extractall(pkg_dir)
print("WhiteboxTools package directory: {}".format(pkg_dir))
exe_ext = "" # file extension for MacOS/Linux
if platform.system() == 'Windows':
exe_ext = '.exe'
exe_name = "whitebox_tools{}".format(exe_ext)
exe_path = os.path.join(exe_dir, exe_name)
if platform.system() != "Windows": # grant executable permission
os.system("chmod 755 " + exe_path)
exe_path_new = os.path.join(pkg_dir, exe_name)
shutil.copy(exe_path, exe_path_new)
if not os.path.exists(work_dir):
print("Downloading testdata ...")
os.mkdir(work_dir)
dem_url = "https://github.com/jblindsay/whitebox-tools/raw/master/testdata/DEM.tif"
dep_url = "https://github.com/jblindsay/whitebox-tools/raw/master/testdata/DEM.dep"
urllib.request.urlretrieve(dem_url, os.path.join(work_dir, "DEM.tif"))
urllib.request.urlretrieve(dep_url, os.path.join(work_dir, "DEM.dep"))
except:
print("Unexpected error:", sys.exc_info()[0])
raise
def default_callback(value):
'''
A simple default callback that outputs using the print function. When
tools are called without providing a custom callback, this function
will be used to print to standard output.
'''
print(value)
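# Usage sketch (illustrative, not part of the original API surface): any
# single-argument callable can serve as a callback, e.g. collecting tool
# output instead of printing it (the tool name and file names are hypothetical):
#
#   captured = []
#   wbt = WhiteboxTools()
#   wbt.run_tool('slope', ["--dem='DEM.tif'", "--output='slope.tif'"],
#                callback=captured.append)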
def to_camelcase(name):
'''
Convert snake_case name to CamelCase name
'''
return ''.join(x.title() for x in name.split('_'))
def to_snakecase(name):
'''
Convert CamelCase name to snake_case name
'''
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
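# For example (illustrative): to_camelcase('lidar_info') returns 'LidarInfo'
# and to_snakecase('LidarInfo') returns 'lidar_info'; together these let
# callers use Python-style names for the executable's CamelCase tool names.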
class WhiteboxTools(object):
'''
An object for interfacing with the WhiteboxTools executable.
'''
def __init__(self):
if platform.system() == 'Windows':
self.ext = '.exe'
else:
self.ext = ''
self.exe_name = "whitebox_tools{}".format(self.ext)
# self.exe_path = os.path.dirname(shutil.which(
# self.exe_name) or path.dirname(path.abspath(__file__)))
# self.exe_path = os.path.dirname(os.path.join(os.path.realpath(__file__)))
self.exe_path = path.dirname(path.abspath(__file__))
self.work_dir = ""
self.verbose = True
self.cancel_op = False
self.default_callback = default_callback
download_wbt()
def set_whitebox_dir(self, path_str):
'''
Sets the directory to the WhiteboxTools executable file.
'''
self.exe_path = path_str
def set_working_dir(self, path_str):
'''
Sets the working directory, i.e. the directory in which
the data files are located. By setting the working
directory, tool input parameters that are files need only
specify the file name rather than the complete file path.
'''
self.work_dir = path.normpath(path_str)
def set_verbose_mode(self, val=True):
'''
Sets verbose mode. If verbose mode is False, tools will not
print output messages. Tools will frequently provide substantial
feedback while they are operating, e.g. updating progress for
various sub-routines. When the user has scripted a workflow
that ties many tools in sequence, this level of tool output
can be problematic. By setting verbose mode to False, these
messages are suppressed and tools run as background processes.
'''
self.verbose = val
def run_tool(self, tool_name, args, callback=None):
'''
Runs a tool and specifies tool arguments.
Returns 0 if completes without error.
Returns 1 if error encountered (details are sent to callback).
Returns 2 if process is cancelled by user.
'''
try:
if callback is None:
callback = self.default_callback
os.chdir(self.exe_path)
args2 = []
args2.append("." + path.sep + self.exe_name)
args2.append("--run=\"{}\"".format(to_camelcase(tool_name)))
if self.work_dir.strip() != "":
args2.append("--wd=\"{}\"".format(self.work_dir))
for arg in args:
args2.append(arg)
# args_str = args_str[:-1]
# a.append("--args=\"{}\"".format(args_str))
if self.verbose:
args2.append("-v")
if self.verbose:
cl = ""
for v in args2:
cl += v + " "
callback(cl.strip() + "\n")
proc = Popen(args2, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
while True:
line = proc.stdout.readline()
sys.stdout.flush()
if line != '':
if not self.cancel_op:
callback(line.strip())
else:
self.cancel_op = False
proc.terminate()
return 2
else:
break
return 0
except (OSError, ValueError, CalledProcessError) as err:
callback(str(err))
return 1
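    # Minimal usage sketch for run_tool (paths and file names are hypothetical):
    #
    #   wbt = WhiteboxTools()
    #   wbt.set_working_dir('/path/to/data')
    #   ret = wbt.run_tool('slope', ["--dem='DEM.tif'", "--output='slope.tif'"])
    #   # ret is 0 on success, 1 on error, 2 if the run was cancelled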
def help(self):
'''
Retrieves the help description for WhiteboxTools.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("-h")
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = ""
while True:
line = proc.stdout.readline()
if line != '':
ret += line
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
def license(self):
'''
Retrieves the license information for WhiteboxTools.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("--license")
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = ""
while True:
line = proc.stdout.readline()
if line != '':
ret += line
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
def version(self):
'''
Retrieves the version information for WhiteboxTools.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("--version")
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = ""
while True:
line = proc.stdout.readline()
if line != '':
ret += line
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
def tool_help(self, tool_name=''):
'''
Retrieves the help description for a specific tool.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("--toolhelp={}".format(to_camelcase(tool_name)))
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = ""
while True:
line = proc.stdout.readline()
if line != '':
ret += line
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
def tool_parameters(self, tool_name):
'''
Retrieves the tool parameter descriptions for a specific tool.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("--toolparameters={}".format(to_camelcase(tool_name)))
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = ""
while True:
line = proc.stdout.readline()
if line != '':
ret += line
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
def toolbox(self, tool_name=''):
'''
Retrieve the toolbox for a specific tool.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("--toolbox={}".format(to_camelcase(tool_name)))
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = ""
while True:
line = proc.stdout.readline()
if line != '':
ret += line
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
def view_code(self, tool_name):
'''
Opens a web browser to view the source code for a specific tool
on the projects source code repository.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("--viewcode={}".format(to_camelcase(tool_name)))
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = ""
while True:
line = proc.stdout.readline()
if line != '':
ret += line
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
def list_tools(self, keywords=[]):
'''
Lists all available tools in WhiteboxTools.
'''
try:
os.chdir(self.exe_path)
args = []
args.append("." + os.path.sep + self.exe_name)
args.append("--listtools")
if len(keywords) > 0:
for kw in keywords:
args.append(kw)
proc = Popen(args, shell=False, stdout=PIPE,
stderr=STDOUT, bufsize=1, universal_newlines=True)
ret = {}
line = proc.stdout.readline() # skip number of available tools header
while True:
line = proc.stdout.readline()
if line != '':
if line.strip() != '':
name, descr = line.split(':')
ret[to_snakecase(name.strip())] = descr.strip()
else:
break
return ret
except (OSError, ValueError, CalledProcessError) as err:
return err
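    # For example (illustrative): WhiteboxTools().list_tools(['lidar']) returns
    # a dict mapping snake_case tool names to one-line descriptions, filtered
    # to tools whose metadata matches the given keywords.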
########################################################################
# The following methods are convenience methods for each available tool.
# This needs updating whenever new tools are added to the WhiteboxTools
# library. They can be generated automatically using the
# whitebox_plugin_generator.py script. It would also be possible to
# discover plugins at runtime and monkey-patch their methods using
# MethodType. However, this would not be as useful since it would
# restrict the ability for text editors and IDEs to use autocomplete.
########################################################################
##############
# Data Tools #
##############
def add_point_coordinates_to_table(self, i, callback=None):
"""Modifies the attribute table of a point vector by adding fields containing each point's X and Y coordinates.
Keyword arguments:
i -- Input vector Points file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('add_point_coordinates_to_table', args, callback) # returns 1 if error
def clean_vector(self, i, output, callback=None):
"""Removes null features and lines/polygons with fewer than the required number of vertices.
Keyword arguments:
i -- Input vector file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('clean_vector', args, callback) # returns 1 if error
def convert_nodata_to_zero(self, i, output, callback=None):
"""Converts nodata values in a raster to zero.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('convert_nodata_to_zero', args, callback) # returns 1 if error
def convert_raster_format(self, i, output, callback=None):
"""Converts raster data from one format to another.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('convert_raster_format', args, callback) # returns 1 if error
def csv_points_to_vector(self, i, output, xfield=0, yfield=1, epsg=None, callback=None):
"""Converts a CSV text file to vector points.
Keyword arguments:
i -- Input CSV file (i.e. source of data to be imported).
output -- Output vector file.
xfield -- X field number (e.g. 0 for first field).
yfield -- Y field number (e.g. 1 for second field).
epsg -- EPSG projection (e.g. 2958).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--xfield={}".format(xfield))
args.append("--yfield={}".format(yfield))
if epsg is not None: args.append("--epsg='{}'".format(epsg))
return self.run_tool('csv_points_to_vector', args, callback) # returns 1 if error
def export_table_to_csv(self, i, output, headers=True, callback=None):
"""Exports an attribute table to a CSV text file.
Keyword arguments:
i -- Input vector file.
        output -- Output CSV file.
headers -- Export field names as file header?.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if headers: args.append("--headers")
return self.run_tool('export_table_to_csv', args, callback) # returns 1 if error
def join_tables(self, input1, pkey, input2, fkey, import_field, callback=None):
"""Merge a vector's attribute table with another table based on a common field.
Keyword arguments:
input1 -- Input primary vector file (i.e. the table to be modified).
pkey -- Primary key field.
input2 -- Input foreign vector file (i.e. source of data to be imported).
fkey -- Foreign key field.
import_field -- Imported field (all fields will be imported if not specified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--pkey='{}'".format(pkey))
args.append("--input2='{}'".format(input2))
args.append("--fkey='{}'".format(fkey))
args.append("--import_field='{}'".format(import_field))
return self.run_tool('join_tables', args, callback) # returns 1 if error
def lines_to_polygons(self, i, output, callback=None):
"""Converts vector polylines to polygons.
Keyword arguments:
i -- Input vector line file.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('lines_to_polygons', args, callback) # returns 1 if error
def merge_table_with_csv(self, i, pkey, csv, fkey, import_field=None, callback=None):
"""Merge a vector's attribute table with a table contained within a CSV text file.
Keyword arguments:
i -- Input primary vector file (i.e. the table to be modified).
pkey -- Primary key field.
csv -- Input CSV file (i.e. source of data to be imported).
fkey -- Foreign key field.
import_field -- Imported field (all fields will be imported if not specified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--pkey='{}'".format(pkey))
args.append("--csv='{}'".format(csv))
args.append("--fkey='{}'".format(fkey))
if import_field is not None: args.append("--import_field='{}'".format(import_field))
return self.run_tool('merge_table_with_csv', args, callback) # returns 1 if error
def merge_vectors(self, inputs, output, callback=None):
"""Combines two or more input vectors of the same ShapeType creating a single, new output vector.
Keyword arguments:
inputs -- Input vector files.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('merge_vectors', args, callback) # returns 1 if error
def modify_no_data_value(self, i, new_value="-32768.0", callback=None):
"""Converts nodata values in a raster to zero.
Keyword arguments:
i -- Input raster file.
new_value -- New NoData value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--new_value={}".format(new_value))
return self.run_tool('modify_no_data_value', args, callback) # returns 1 if error
def multi_part_to_single_part(self, i, output, exclude_holes=True, callback=None):
"""Converts a vector file containing multi-part features into a vector containing only single-part features.
Keyword arguments:
i -- Input vector line or polygon file.
output -- Output vector line or polygon file.
exclude_holes -- Exclude hole parts from the feature splitting? (holes will continue to belong to their features in output.).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if exclude_holes: args.append("--exclude_holes")
return self.run_tool('multi_part_to_single_part', args, callback) # returns 1 if error
def new_raster_from_base(self, base, output, value="nodata", data_type="float", callback=None):
"""Creates a new raster using a base image.
Keyword arguments:
base -- Input base raster file.
output -- Output raster file.
value -- Constant value to fill raster with; either 'nodata' or numeric value.
data_type -- Output raster data type; options include 'double' (64-bit), 'float' (32-bit), and 'integer' (signed 16-bit) (default is 'float').
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--base='{}'".format(base))
args.append("--output='{}'".format(output))
args.append("--value={}".format(value))
args.append("--data_type={}".format(data_type))
return self.run_tool('new_raster_from_base', args, callback) # returns 1 if error
def polygons_to_lines(self, i, output, callback=None):
"""Converts vector polygons to polylines.
Keyword arguments:
i -- Input vector polygon file.
output -- Output vector lines file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('polygons_to_lines', args, callback) # returns 1 if error
def print_geo_tiff_tags(self, i, callback=None):
"""Prints the tags within a GeoTIFF.
Keyword arguments:
i -- Input GeoTIFF file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('print_geo_tiff_tags', args, callback) # returns 1 if error
def raster_to_vector_lines(self, i, output, callback=None):
"""Converts a raster lines features into a vector of the POLYLINE shapetype.
Keyword arguments:
i -- Input raster lines file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('raster_to_vector_lines', args, callback) # returns 1 if error
def raster_to_vector_points(self, i, output, callback=None):
"""Converts a raster dataset to a vector of the POINT shapetype.
Keyword arguments:
i -- Input raster file.
output -- Output vector points file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('raster_to_vector_points', args, callback) # returns 1 if error
def reinitialize_attribute_table(self, i, callback=None):
"""Reinitializes a vector's attribute table deleting all fields but the feature ID (FID).
Keyword arguments:
i -- Input vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('reinitialize_attribute_table', args, callback) # returns 1 if error
def remove_polygon_holes(self, i, output, callback=None):
"""Removes holes within the features of a vector polygon file.
Keyword arguments:
i -- Input vector polygon file.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('remove_polygon_holes', args, callback) # returns 1 if error
def set_nodata_value(self, i, output, back_value=0.0, callback=None):
"""Assign a specified value in an input image to the NoData value.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
back_value -- Background value to set to nodata.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--back_value={}".format(back_value))
return self.run_tool('set_nodata_value', args, callback) # returns 1 if error
def single_part_to_multi_part(self, i, output, field=None, callback=None):
"""Converts a vector file containing multi-part features into a vector containing only single-part features.
Keyword arguments:
i -- Input vector line or polygon file.
field -- Grouping ID field name in attribute table.
output -- Output vector line or polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if field is not None: args.append("--field='{}'".format(field))
args.append("--output='{}'".format(output))
return self.run_tool('single_part_to_multi_part', args, callback) # returns 1 if error
def vector_lines_to_raster(self, i, output, field="FID", nodata=True, cell_size=None, base=None, callback=None):
"""Converts a vector containing polylines into a raster.
Keyword arguments:
i -- Input vector lines file.
field -- Input field name in attribute table.
output -- Output raster file.
nodata -- Background value to set to NoData. Without this flag, it will be set to 0.0.
cell_size -- Optionally specified cell size of output raster. Not used when base raster is specified.
base -- Optionally specified input base raster file. Not used when a cell size is specified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field={}".format(field))
args.append("--output='{}'".format(output))
if nodata: args.append("--nodata")
if cell_size is not None: args.append("--cell_size='{}'".format(cell_size))
if base is not None: args.append("--base='{}'".format(base))
return self.run_tool('vector_lines_to_raster', args, callback) # returns 1 if error
def vector_points_to_raster(self, i, output, field="FID", assign="last", nodata=True, cell_size=None, base=None, callback=None):
"""Converts a vector containing points into a raster.
Keyword arguments:
i -- Input vector Points file.
field -- Input field name in attribute table.
output -- Output raster file.
assign -- Assignment operation, where multiple points are in the same grid cell; options include 'first', 'last' (default), 'min', 'max', 'sum'.
nodata -- Background value to set to NoData. Without this flag, it will be set to 0.0.
cell_size -- Optionally specified cell size of output raster. Not used when base raster is specified.
base -- Optionally specified input base raster file. Not used when a cell size is specified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field={}".format(field))
args.append("--output='{}'".format(output))
args.append("--assign={}".format(assign))
if nodata: args.append("--nodata")
if cell_size is not None: args.append("--cell_size='{}'".format(cell_size))
if base is not None: args.append("--base='{}'".format(base))
return self.run_tool('vector_points_to_raster', args, callback) # returns 1 if error
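    # Usage sketch (hypothetical inputs): rasterize point elevations, keeping
    # the maximum value where several points fall in the same cell:
    #
    #   wbt.vector_points_to_raster('points.shp', 'points.tif', field='ELEV',
    #                               assign='max', cell_size=10.0)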
def vector_polygons_to_raster(self, i, output, field="FID", nodata=True, cell_size=None, base=None, callback=None):
"""Converts a vector containing polygons into a raster.
Keyword arguments:
i -- Input vector polygons file.
field -- Input field name in attribute table.
output -- Output raster file.
nodata -- Background value to set to NoData. Without this flag, it will be set to 0.0.
cell_size -- Optionally specified cell size of output raster. Not used when base raster is specified.
base -- Optionally specified input base raster file. Not used when a cell size is specified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field={}".format(field))
args.append("--output='{}'".format(output))
if nodata: args.append("--nodata")
if cell_size is not None: args.append("--cell_size='{}'".format(cell_size))
if base is not None: args.append("--base='{}'".format(base))
return self.run_tool('vector_polygons_to_raster', args, callback) # returns 1 if error
################
# GIS Analysis #
################
def aggregate_raster(self, i, output, agg_factor=2, type="mean", callback=None):
"""Aggregates a raster to a lower resolution.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
agg_factor -- Aggregation factor, in pixels.
type -- Statistic used to fill output pixels.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--agg_factor={}".format(agg_factor))
args.append("--type={}".format(type))
return self.run_tool('aggregate_raster', args, callback) # returns 1 if error
def block_maximum_gridding(self, i, field, output, use_z=False, cell_size=None, base=None, callback=None):
"""Creates a raster grid based on a set of vector points and assigns grid values using a block maximum scheme.
Keyword arguments:
i -- Input vector Points file.
field -- Input field name in attribute table.
use_z -- Use z-coordinate instead of field?.
output -- Output raster file.
cell_size -- Optionally specified cell size of output raster. Not used when base raster is specified.
base -- Optionally specified input base raster file. Not used when a cell size is specified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field='{}'".format(field))
if use_z: args.append("--use_z")
args.append("--output='{}'".format(output))
if cell_size is not None: args.append("--cell_size='{}'".format(cell_size))
if base is not None: args.append("--base='{}'".format(base))
return self.run_tool('block_maximum_gridding', args, callback) # returns 1 if error
def block_minimum_gridding(self, i, field, output, use_z=False, cell_size=None, base=None, callback=None):
"""Creates a raster grid based on a set of vector points and assigns grid values using a block minimum scheme.
Keyword arguments:
i -- Input vector Points file.
field -- Input field name in attribute table.
use_z -- Use z-coordinate instead of field?.
output -- Output raster file.
cell_size -- Optionally specified cell size of output raster. Not used when base raster is specified.
base -- Optionally specified input base raster file. Not used when a cell size is specified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field='{}'".format(field))
if use_z: args.append("--use_z")
args.append("--output='{}'".format(output))
if cell_size is not None: args.append("--cell_size='{}'".format(cell_size))
if base is not None: args.append("--base='{}'".format(base))
return self.run_tool('block_minimum_gridding', args, callback) # returns 1 if error
def centroid(self, i, output, text_output=False, callback=None):
"""Calculates the centroid, or average location, of raster polygon objects.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
text_output -- Optional text output.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if text_output: args.append("--text_output")
return self.run_tool('centroid', args, callback) # returns 1 if error
def centroid_vector(self, i, output, callback=None):
"""Identifes the centroid point of a vector polyline or polygon feature or a group of vector points.
Keyword arguments:
i -- Input vector file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('centroid_vector', args, callback) # returns 1 if error
def clump(self, i, output, diag=True, zero_back=False, callback=None):
"""Groups cells that form discrete areas, assigning them unique identifiers.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
diag -- Flag indicating whether diagonal connections should be considered.
zero_back -- Flag indicating whether zero values should be treated as a background.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if diag: args.append("--diag")
if zero_back: args.append("--zero_back")
return self.run_tool('clump', args, callback) # returns 1 if error
def construct_vector_tin(self, i, output, field=None, use_z=False, callback=None):
"""Creates a vector triangular irregular network (TIN) for a set of vector points.
Keyword arguments:
i -- Input vector points file.
field -- Input field name in attribute table.
use_z -- Use the 'z' dimension of the Shapefile's geometry instead of an attribute field?.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if field is not None: args.append("--field='{}'".format(field))
if use_z: args.append("--use_z")
args.append("--output='{}'".format(output))
return self.run_tool('construct_vector_tin', args, callback) # returns 1 if error
def create_hexagonal_vector_grid(self, i, output, width, orientation="horizontal", callback=None):
"""Creates a hexagonal vector grid.
Keyword arguments:
i -- Input base file.
output -- Output vector polygon file.
width -- The grid cell width.
orientation -- Grid Orientation, 'horizontal' or 'vertical'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--width='{}'".format(width))
args.append("--orientation={}".format(orientation))
return self.run_tool('create_hexagonal_vector_grid', args, callback) # returns 1 if error
def create_plane(self, base, output, gradient=15.0, aspect=90.0, constant=0.0, callback=None):
"""Creates a raster image based on the equation for a simple plane.
Keyword arguments:
base -- Input base raster file.
output -- Output raster file.
gradient -- Slope gradient in degrees (-85.0 to 85.0).
aspect -- Aspect (direction) in degrees clockwise from north (0.0-360.0).
constant -- Constant value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--base='{}'".format(base))
args.append("--output='{}'".format(output))
args.append("--gradient={}".format(gradient))
args.append("--aspect={}".format(aspect))
args.append("--constant={}".format(constant))
return self.run_tool('create_plane', args, callback) # returns 1 if error
def create_rectangular_vector_grid(self, i, output, width, height, xorig=0, yorig=0, callback=None):
"""Creates a rectangular vector grid.
Keyword arguments:
i -- Input base file.
output -- Output vector polygon file.
width -- The grid cell width.
height -- The grid cell height.
xorig -- The grid origin x-coordinate.
yorig -- The grid origin y-coordinate.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--width='{}'".format(width))
args.append("--height='{}'".format(height))
args.append("--xorig={}".format(xorig))
args.append("--yorig={}".format(yorig))
return self.run_tool('create_rectangular_vector_grid', args, callback) # returns 1 if error
def dissolve(self, i, output, field=None, snap=0.0, callback=None):
"""Removes the interior, or shared, boundaries within a vector polygon coverage.
Keyword arguments:
i -- Input vector file.
field -- Dissolve field attribute (optional).
output -- Output vector file.
snap -- Snap tolerance.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if field is not None: args.append("--field='{}'".format(field))
args.append("--output='{}'".format(output))
args.append("--snap={}".format(snap))
return self.run_tool('dissolve', args, callback) # returns 1 if error
def eliminate_coincident_points(self, i, output, tolerance, callback=None):
"""Removes any coincident, or nearly coincident, points from a vector points file.
Keyword arguments:
i -- Input vector file.
output -- Output vector polygon file.
tolerance -- The distance tolerance for points.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--tolerance='{}'".format(tolerance))
return self.run_tool('eliminate_coincident_points', args, callback) # returns 1 if error
def extend_vector_lines(self, i, output, dist, extend="both ends", callback=None):
"""Extends vector lines by a specified distance.
Keyword arguments:
i -- Input vector polyline file.
output -- Output vector polyline file.
dist -- The distance to extend.
extend -- Extend direction, 'both ends' (default), 'line start', 'line end'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--dist='{}'".format(dist))
args.append("--extend={}".format(extend))
return self.run_tool('extend_vector_lines', args, callback) # returns 1 if error
def extract_nodes(self, i, output, callback=None):
"""Converts vector lines or polygons into vertex points.
Keyword arguments:
i -- Input vector lines or polygon file.
output -- Output vector points file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('extract_nodes', args, callback) # returns 1 if error
def extract_raster_values_at_points(self, inputs, points, out_text=False, callback=None):
"""Extracts the values of raster(s) at vector point locations.
Keyword arguments:
inputs -- Input raster files.
points -- Input vector points file.
        out_text -- Output point values as text? Otherwise, the only output is to the points file's attribute table.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--points='{}'".format(points))
if out_text: args.append("--out_text")
return self.run_tool('extract_raster_values_at_points', args, callback) # returns 1 if error
def find_lowest_or_highest_points(self, i, output, out_type="lowest", callback=None):
"""Locates the lowest and/or highest valued cells in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output vector points file.
        out_type -- Output type; one of 'lowest' (default), 'highest', and 'both'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--out_type={}".format(out_type))
return self.run_tool('find_lowest_or_highest_points', args, callback) # returns 1 if error
def idw_interpolation(self, i, field, output, use_z=False, weight=2.0, radius=None, min_points=None, cell_size=None, base=None, callback=None):
"""Interpolates vector points into a raster surface using an inverse-distance weighted scheme.
Keyword arguments:
i -- Input vector Points file.
field -- Input field name in attribute table.
use_z -- Use z-coordinate instead of field?.
output -- Output raster file.
weight -- IDW weight value.
radius -- Search Radius.
min_points -- Minimum number of points.
cell_size -- Optionally specified cell size of output raster. Not used when base raster is specified.
base -- Optionally specified input base raster file. Not used when a cell size is specified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field='{}'".format(field))
if use_z: args.append("--use_z")
args.append("--output='{}'".format(output))
args.append("--weight={}".format(weight))
if radius is not None: args.append("--radius='{}'".format(radius))
if min_points is not None: args.append("--min_points='{}'".format(min_points))
if cell_size is not None: args.append("--cell_size='{}'".format(cell_size))
if base is not None: args.append("--base='{}'".format(base))
return self.run_tool('idw_interpolation', args, callback) # returns 1 if error
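    # Usage sketch (hypothetical shapefile and field): interpolate an 'ELEV'
    # attribute onto a 10 m grid with the default inverse-distance weight:
    #
    #   wbt.idw_interpolation('points.shp', 'ELEV', 'surface.tif',
    #                         weight=2.0, cell_size=10.0)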
def layer_footprint(self, i, output, callback=None):
"""Creates a vector polygon footprint of the area covered by a raster grid or vector layer.
Keyword arguments:
i -- Input raster or vector file.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('layer_footprint', args, callback) # returns 1 if error
def medoid(self, i, output, callback=None):
"""Calculates the medoid for a series of vector features contained in a shapefile.
Keyword arguments:
i -- Input vector file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('medoid', args, callback) # returns 1 if error
def minimum_bounding_box(self, i, output, criterion="area", features=True, callback=None):
"""Creates a vector minimum bounding rectangle around vector features.
Keyword arguments:
i -- Input vector file.
output -- Output vector polygon file.
criterion -- Minimization criterion; options include 'area' (default), 'length', 'width', and 'perimeter'.
features -- Find the minimum bounding rectangles around each individual vector feature.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--criterion={}".format(criterion))
if features: args.append("--features")
return self.run_tool('minimum_bounding_box', args, callback) # returns 1 if error
def minimum_bounding_circle(self, i, output, features=True, callback=None):
"""Delineates the minimum bounding circle (i.e. smallest enclosing circle) for a group of vectors.
Keyword arguments:
i -- Input vector file.
output -- Output vector polygon file.
features -- Find the minimum bounding circle around each individual vector feature.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if features: args.append("--features")
return self.run_tool('minimum_bounding_circle', args, callback) # returns 1 if error
def minimum_bounding_envelope(self, i, output, features=True, callback=None):
"""Creates a vector axis-aligned minimum bounding rectangle (envelope) around vector features.
Keyword arguments:
i -- Input vector file.
output -- Output vector polygon file.
        features -- Find the minimum bounding envelope around each individual vector feature.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if features: args.append("--features")
return self.run_tool('minimum_bounding_envelope', args, callback) # returns 1 if error
def minimum_convex_hull(self, i, output, features=True, callback=None):
"""Creates a vector convex polygon around vector features.
Keyword arguments:
i -- Input vector file.
output -- Output vector polygon file.
features -- Find the hulls around each vector feature.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if features: args.append("--features")
return self.run_tool('minimum_convex_hull', args, callback) # returns 1 if error
def nearest_neighbour_gridding(self, i, field, output, use_z=False, cell_size=None, base=None, max_dist=None, callback=None):
"""Creates a raster grid based on a set of vector points and assigns grid values using the nearest neighbour.
Keyword arguments:
i -- Input vector Points file.
field -- Input field name in attribute table.
use_z -- Use z-coordinate instead of field?.
output -- Output raster file.
cell_size -- Optionally specified cell size of output raster. Not used when base raster is specified.
base -- Optionally specified input base raster file. Not used when a cell size is specified.
max_dist -- Maximum search distance (optional).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field='{}'".format(field))
if use_z: args.append("--use_z")
args.append("--output='{}'".format(output))
if cell_size is not None: args.append("--cell_size='{}'".format(cell_size))
if base is not None: args.append("--base='{}'".format(base))
if max_dist is not None: args.append("--max_dist='{}'".format(max_dist))
return self.run_tool('nearest_neighbour_gridding', args, callback) # returns 1 if error
def polygon_area(self, i, callback=None):
"""Calculates the area of vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('polygon_area', args, callback) # returns 1 if error
def polygon_long_axis(self, i, output, callback=None):
"""This tool can be used to map the long axis of polygon features.
Keyword arguments:
i -- Input vector polygons file.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('polygon_long_axis', args, callback) # returns 1 if error
def polygon_perimeter(self, i, callback=None):
"""Calculates the perimeter of vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('polygon_perimeter', args, callback) # returns 1 if error
def polygon_short_axis(self, i, output, callback=None):
"""This tool can be used to map the short axis of polygon features.
Keyword arguments:
i -- Input vector polygons file.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('polygon_short_axis', args, callback) # returns 1 if error
def raster_area(self, i, output=None, out_text=False, units="grid cells", zero_back=False, callback=None):
"""Calculates the area of polygons or classes within a raster image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
out_text -- Would you like to output polygon areas to text?.
units -- Area units; options include 'grid cells' and 'map units'.
zero_back -- Flag indicating whether zero values should be treated as a background.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
if out_text: args.append("--out_text")
args.append("--units={}".format(units))
if zero_back: args.append("--zero_back")
return self.run_tool('raster_area', args, callback) # returns 1 if error
def raster_cell_assignment(self, i, output, assign="column", callback=None):
"""Assign row or column number to cells.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
assign -- Which variable would you like to assign to grid cells? Options include 'column', 'row', 'x', and 'y'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--assign={}".format(assign))
return self.run_tool('raster_cell_assignment', args, callback) # returns 1 if error
def reclass(self, i, output, reclass_vals, assign_mode=False, callback=None):
"""Reclassifies the values in a raster image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
reclass_vals -- Reclassification triplet values (new value; from value; to less than), e.g. '0.0;0.0;1.0;1.0;1.0;2.0'.
assign_mode -- Optional Boolean flag indicating whether to operate in assign mode, reclass_vals values are interpreted as new value; old value pairs.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--reclass_vals='{}'".format(reclass_vals))
if assign_mode: args.append("--assign_mode")
return self.run_tool('reclass', args, callback) # returns 1 if error
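    # Example sketch (hypothetical rasters): using the triplet format described
    # above, assign class 1 to cells in [0, 100) and class 2 to cells in [100, 200):
    #
    #   wbt.reclass('dem.tif', 'classes.tif',
    #               reclass_vals='1.0;0.0;100.0;2.0;100.0;200.0')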
def reclass_equal_interval(self, i, output, interval=10.0, start_val=None, end_val=None, callback=None):
"""Reclassifies the values in a raster image based on equal-ranges.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
interval -- Class interval size.
start_val -- Optional starting value (default is input minimum value).
end_val -- Optional ending value (default is input maximum value).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--interval={}".format(interval))
if start_val is not None: args.append("--start_val='{}'".format(start_val))
if end_val is not None: args.append("--end_val='{}'".format(end_val))
return self.run_tool('reclass_equal_interval', args, callback) # returns 1 if error
def reclass_from_file(self, i, reclass_file, output, callback=None):
"""Reclassifies the values in a raster image using reclass ranges in a text file.
Keyword arguments:
i -- Input raster file.
reclass_file -- Input text file containing reclass ranges.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--reclass_file='{}'".format(reclass_file))
args.append("--output='{}'".format(output))
return self.run_tool('reclass_from_file', args, callback) # returns 1 if error
def smooth_vectors(self, i, output, filter=3, callback=None):
"""Smooths a vector coverage of either a POLYLINE or POLYGON base ShapeType.
Keyword arguments:
i -- Input vector POLYLINE or POLYGON file.
output -- Output vector file.
filter -- The filter size, any odd integer greater than or equal to 3; default is 3.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
return self.run_tool('smooth_vectors', args, callback) # returns 1 if error
def tin_gridding(self, i, output, resolution, field=None, use_z=False, callback=None):
"""Creates a raster grid based on a triangular irregular network (TIN) fitted to vector points.
Keyword arguments:
i -- Input vector points file.
field -- Input field name in attribute table.
use_z -- Use the 'z' dimension of the Shapefile's geometry instead of an attribute field?.
output -- Output raster file.
resolution -- Output raster's grid resolution.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if field is not None: args.append("--field='{}'".format(field))
if use_z: args.append("--use_z")
args.append("--output='{}'".format(output))
args.append("--resolution='{}'".format(resolution))
return self.run_tool('tin_gridding', args, callback) # returns 1 if error
def vector_hex_binning(self, i, output, width, orientation="horizontal", callback=None):
"""Hex-bins a set of vector points.
Keyword arguments:
i -- Input base file.
output -- Output vector polygon file.
width -- The grid cell width.
orientation -- Grid Orientation, 'horizontal' or 'vertical'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--width='{}'".format(width))
args.append("--orientation={}".format(orientation))
return self.run_tool('vector_hex_binning', args, callback) # returns 1 if error
def voronoi_diagram(self, i, output, callback=None):
"""Creates a vector Voronoi diagram for a set of vector points.
Keyword arguments:
i -- Input vector points file.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('voronoi_diagram', args, callback) # returns 1 if error
###############################
# GIS Analysis/Distance Tools #
###############################
def buffer_raster(self, i, output, size, gridcells=False, callback=None):
"""Maps a distance-based buffer around each non-background (non-zero/non-nodata) grid cell in an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
size -- Buffer size.
gridcells -- Optional flag to indicate that the 'size' threshold should be measured in grid cells instead of the default map units.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--size='{}'".format(size))
if gridcells: args.append("--gridcells")
return self.run_tool('buffer_raster', args, callback) # returns 1 if error
def cost_allocation(self, source, backlink, output, callback=None):
"""Identifies the source cell to which each grid cell is connected by a least-cost pathway in a cost-distance analysis.
Keyword arguments:
source -- Input source raster file.
backlink -- Input backlink raster file generated by the cost-distance tool.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--source='{}'".format(source))
args.append("--backlink='{}'".format(backlink))
args.append("--output='{}'".format(output))
return self.run_tool('cost_allocation', args, callback) # returns 1 if error
def cost_distance(self, source, cost, out_accum, out_backlink, callback=None):
"""Performs cost-distance accumulation on a cost surface and a group of source cells.
Keyword arguments:
source -- Input source raster file.
cost -- Input cost (friction) raster file.
out_accum -- Output cost accumulation raster file.
out_backlink -- Output backlink raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--source='{}'".format(source))
args.append("--cost='{}'".format(cost))
args.append("--out_accum='{}'".format(out_accum))
args.append("--out_backlink='{}'".format(out_backlink))
return self.run_tool('cost_distance', args, callback) # returns 1 if error
def cost_pathway(self, destination, backlink, output, zero_background=False, callback=None):
"""Performs cost-distance pathway analysis using a series of destination grid cells.
Keyword arguments:
destination -- Input destination raster file.
backlink -- Input backlink raster file generated by the cost-distance tool.
output -- Output cost pathway raster file.
zero_background -- Flag indicating whether zero values should be treated as a background.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--destination='{}'".format(destination))
args.append("--backlink='{}'".format(backlink))
args.append("--output='{}'".format(output))
if zero_background: args.append("--zero_background")
return self.run_tool('cost_pathway', args, callback) # returns 1 if error
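    # Workflow sketch (hypothetical files): cost_distance produces the
    # accumulation and backlink rasters that cost_allocation and cost_pathway
    # consume downstream:
    #
    #   wbt.cost_distance('sources.tif', 'friction.tif', 'accum.tif', 'backlink.tif')
    #   wbt.cost_pathway('destinations.tif', 'backlink.tif', 'paths.tif')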
def euclidean_allocation(self, i, output, callback=None):
"""Assigns grid cells in the output raster the value of the nearest target cell in the input image, measured by the Shih and Wu (2004) Euclidean distance transform.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('euclidean_allocation', args, callback) # returns 1 if error
def euclidean_distance(self, i, output, callback=None):
"""Calculates the Shih and Wu (2004) Euclidean distance transform.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('euclidean_distance', args, callback) # returns 1 if error
##############################
# GIS Analysis/Overlay Tools #
##############################
def average_overlay(self, inputs, output, callback=None):
"""Calculates the average for each grid cell from a group of raster images.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('average_overlay', args, callback) # returns 1 if error
def clip(self, i, clip, output, callback=None):
"""Extract all the features, or parts of features, that overlap with the features of the clip vector.
Keyword arguments:
i -- Input vector file.
clip -- Input clip polygon vector file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--clip='{}'".format(clip))
args.append("--output='{}'".format(output))
return self.run_tool('clip', args, callback) # returns 1 if error
def clip_raster_to_polygon(self, i, polygons, output, maintain_dimensions=False, callback=None):
"""Clips a raster to a vector polygon.
Keyword arguments:
i -- Input raster file.
polygons -- Input vector polygons file.
output -- Output raster file.
maintain_dimensions -- Flag indicating whether the input raster's dimensions should be maintained.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--polygons='{}'".format(polygons))
args.append("--output='{}'".format(output))
if maintain_dimensions: args.append("--maintain_dimensions")
return self.run_tool('clip_raster_to_polygon', args, callback) # returns 1 if error
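# Example (illustrative sketch): clipping both vector and raster data to a
# study-area polygon. File names are hypothetical placeholders.
#
#   wbt.clip(i='roads.shp', clip='study_area.shp', output='roads_clipped.shp')
#   wbt.clip_raster_to_polygon(i='dem.tif', polygons='study_area.shp',
#                              output='dem_clipped.tif',
#                              maintain_dimensions=False)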
def count_if(self, inputs, output, value, callback=None):
"""Counts the number of occurrences of a specified value in a cell-stack of rasters.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
value -- Search value (e.g. countif value = 5.0).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
args.append("--value='{}'".format(value))
return self.run_tool('count_if', args, callback) # returns 1 if error
def difference(self, i, overlay, output, callback=None):
"""Outputs the features that occur in one of the two vector inputs but not both, i.e. no overlapping features.
Keyword arguments:
i -- Input vector file.
overlay -- Input overlay vector file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--overlay='{}'".format(overlay))
args.append("--output='{}'".format(output))
return self.run_tool('difference', args, callback) # returns 1 if error
def erase(self, i, erase, output, callback=None):
"""Removes all the features, or parts of features, that overlap with the features of the erase vector polygon.
Keyword arguments:
i -- Input vector file.
erase -- Input erase polygon vector file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--erase='{}'".format(erase))
args.append("--output='{}'".format(output))
return self.run_tool('erase', args, callback) # returns 1 if error
def erase_polygon_from_raster(self, i, polygons, output, callback=None):
"""Erases (cuts out) a vector polygon from a raster.
Keyword arguments:
i -- Input raster file.
polygons -- Input vector polygons file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--polygons='{}'".format(polygons))
args.append("--output='{}'".format(output))
return self.run_tool('erase_polygon_from_raster', args, callback) # returns 1 if error
def highest_position(self, inputs, output, callback=None):
"""Identifies the stack position of the maximum value within a raster stack on a cell-by-cell basis.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('highest_position', args, callback) # returns 1 if error
def intersect(self, i, overlay, output, snap=0.0, callback=None):
"""Identifies the parts of features in common between two input vector layers.
Keyword arguments:
i -- Input vector file.
overlay -- Input overlay vector file.
output -- Output vector file.
snap -- Snap tolerance.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--overlay='{}'".format(overlay))
args.append("--output='{}'".format(output))
args.append("--snap={}".format(snap))
return self.run_tool('intersect', args, callback) # returns 1 if error
def line_intersections(self, input1, input2, output, callback=None):
"""Identifies points where the features of two vector line layers intersect.
Keyword arguments:
input1 -- Input vector polyline file.
input2 -- Input vector polyline file.
output -- Output vector point file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('line_intersections', args, callback) # returns 1 if error
def lowest_position(self, inputs, output, callback=None):
"""Identifies the stack position of the minimum value within a raster stack on a cell-by-cell basis.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('lowest_position', args, callback) # returns 1 if error
def max_absolute_overlay(self, inputs, output, callback=None):
"""Evaluates the maximum absolute value for each grid cell from a stack of input rasters.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('max_absolute_overlay', args, callback) # returns 1 if error
def max_overlay(self, inputs, output, callback=None):
"""Evaluates the maximum value for each grid cell from a stack of input rasters.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('max_overlay', args, callback) # returns 1 if error
def merge_line_segments(self, i, output, snap=0.0, callback=None):
"""Merges vector line segments into larger features.
Keyword arguments:
i -- Input vector file.
output -- Output vector file.
snap -- Snap tolerance.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--snap={}".format(snap))
return self.run_tool('merge_line_segments', args, callback) # returns 1 if error
def min_absolute_overlay(self, inputs, output, callback=None):
"""Evaluates the minimum absolute value for each grid cell from a stack of input rasters.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('min_absolute_overlay', args, callback) # returns 1 if error
def min_overlay(self, inputs, output, callback=None):
"""Evaluates the minimum value for each grid cell from a stack of input rasters.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('min_overlay', args, callback) # returns 1 if error
def percent_equal_to(self, inputs, comparison, output, callback=None):
"""Calculates the percentage of a raster stack that have cell values equal to an input on a cell-by-cell basis.
Keyword arguments:
inputs -- Input raster files.
comparison -- Input comparison raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--comparison='{}'".format(comparison))
args.append("--output='{}'".format(output))
return self.run_tool('percent_equal_to', args, callback) # returns 1 if error
def percent_greater_than(self, inputs, comparison, output, callback=None):
"""Calculates the percentage of a raster stack that have cell values greather than an input on a cell-by-cell basis.
Keyword arguments:
inputs -- Input raster files.
comparison -- Input comparison raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--comparison='{}'".format(comparison))
args.append("--output='{}'".format(output))
return self.run_tool('percent_greater_than', args, callback) # returns 1 if error
def percent_less_than(self, inputs, comparison, output, callback=None):
"""Calculates the percentage of a raster stack that have cell values less than an input on a cell-by-cell basis.
Keyword arguments:
inputs -- Input raster files.
comparison -- Input comparison raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--comparison='{}'".format(comparison))
args.append("--output='{}'".format(output))
return self.run_tool('percent_less_than', args, callback) # returns 1 if error
def pick_from_list(self, inputs, pos_input, output, callback=None):
"""Outputs the value from a raster stack specified by a position raster.
Keyword arguments:
inputs -- Input raster files.
pos_input -- Input position raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--pos_input='{}'".format(pos_input))
args.append("--output='{}'".format(output))
return self.run_tool('pick_from_list', args, callback) # returns 1 if error
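# Example (illustrative sketch): combining highest_position and
# pick_from_list to extract, for each cell, the value of whichever raster
# in a stack holds the maximum. File names are hypothetical.
#
#   stack = 'jan.tif;feb.tif;mar.tif'
#   wbt.highest_position(inputs=stack, output='pos.tif')
#   wbt.pick_from_list(inputs=stack, pos_input='pos.tif', output='max_val.tif')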
def polygonize(self, inputs, output, callback=None):
"""Creates a polygon layer from two or more intersecting line features contained in one or more input vector line files.
Keyword arguments:
inputs -- Input vector polyline file.
output -- Output vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('polygonize', args, callback) # returns 1 if error
def split_with_lines(self, i, split, output, callback=None):
"""Splits the lines or polygons in one layer using the lines in another layer.
Keyword arguments:
i -- Input vector line or polygon file.
split -- Input vector polyline file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--split='{}'".format(split))
args.append("--output='{}'".format(output))
return self.run_tool('split_with_lines', args, callback) # returns 1 if error
def sum_overlay(self, inputs, output, callback=None):
"""Calculates the sum for each grid cell from a group of raster images.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('sum_overlay', args, callback) # returns 1 if error
def symmetrical_difference(self, i, overlay, output, snap=0.0, callback=None):
"""Outputs the features that occur in one of the two vector inputs but not both, i.e. no overlapping features.
Keyword arguments:
i -- Input vector file.
overlay -- Input overlay vector file.
output -- Output vector file.
snap -- Snap tolerance.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--overlay='{}'".format(overlay))
args.append("--output='{}'".format(output))
args.append("--snap={}".format(snap))
return self.run_tool('symmetrical_difference', args, callback) # returns 1 if error
def union(self, i, overlay, output, snap=0.0, callback=None):
"""Splits vector layers at their overlaps, creating a layer containing all the portions from both input and overlay layers.
Keyword arguments:
i -- Input vector file.
overlay -- Input overlay vector file.
output -- Output vector file.
snap -- Snap tolerance.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--overlay='{}'".format(overlay))
args.append("--output='{}'".format(output))
args.append("--snap={}".format(snap))
return self.run_tool('union', args, callback) # returns 1 if error
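# Example (illustrative sketch): the standard overlay suite. The `snap`
# tolerance (in xy units) merges nearly coincident vertices and can help
# suppress sliver polygons; 0.0 disables snapping. File names are
# hypothetical.
#
#   wbt.intersect(i='parcels.shp', overlay='flood_zone.shp',
#                 output='parcels_in_zone.shp', snap=0.001)
#   wbt.union(i='parcels.shp', overlay='flood_zone.shp',
#             output='parcels_union.shp', snap=0.001)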
def weighted_overlay(self, factors, weights, output, cost=None, constraints=None, scale_max=1.0, callback=None):
"""Performs a weighted sum on multiple input rasters after converting each image to a common scale. The tool performs a multi-criteria evaluation (MCE).
Keyword arguments:
factors -- Input factor raster files.
weights -- Weight values, contained in quotes and separated by commas or semicolons. Must have the same number as factors.
output -- Output raster file.
cost -- Flags identifying which factors are cost (i.e. minimized) factors, contained in quotes and separated by commas or semicolons. Must have the same number as factors.
constraints -- Input constraints raster files.
scale_max -- Suitability scale maximum value (common values are 1.0, 100.0, and 255.0).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--factors='{}'".format(factors))
args.append("--weights='{}'".format(weights))
if cost is not None: args.append("--cost='{}'".format(cost))
if constraints is not None: args.append("--constraints='{}'".format(constraints))
args.append("--output='{}'".format(output))
args.append("--scale_max={}".format(scale_max))
return self.run_tool('weighted_overlay', args, callback) # returns 1 if error
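# Example (illustrative sketch): a simple multi-criteria evaluation (MCE).
# Weights are passed as one quoted, delimited string with a value per
# factor; the 'true'/'false' cost-flag format below is an assumption about
# the expected input, and file names are hypothetical.
#
#   wbt.weighted_overlay(factors='slope.tif;dist_roads.tif;soil_suit.tif',
#                        weights='0.5;0.3;0.2',
#                        cost='true;true;false',
#                        output='suitability.tif',
#                        scale_max=100.0)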
def weighted_sum(self, inputs, weights, output, callback=None):
"""Performs a weighted-sum overlay on multiple input raster images.
Keyword arguments:
inputs -- Input raster files.
weights -- Weight values, contained in quotes and separated by commas or semicolons.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--weights='{}'".format(weights))
args.append("--output='{}'".format(output))
return self.run_tool('weighted_sum', args, callback) # returns 1 if error
##################################
# GIS Analysis/Patch Shape Tools #
##################################
def boundary_shape_complexity(self, i, output, callback=None):
"""Calculates the complexity of the boundaries of raster polygons.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('boundary_shape_complexity', args, callback) # returns 1 if error
def compactness_ratio(self, i, callback=None):
"""Calculates the compactness ratio (A/P), a measure of shape complexity, for vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('compactness_ratio', args, callback) # returns 1 if error
def edge_proportion(self, i, output, output_text=False, callback=None):
"""Calculate the proportion of cells in a raster polygon that are edge cells.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
output_text -- Flag indicating whether a text report should also be output.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if output_text: args.append("--output_text")
return self.run_tool('edge_proportion', args, callback) # returns 1 if error
def elongation_ratio(self, i, callback=None):
"""Calculates the elongation ratio for vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('elongation_ratio', args, callback) # returns 1 if error
def find_patch_or_class_edge_cells(self, i, output, callback=None):
"""Finds all cells located on the edge of patch or class features.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('find_patch_or_class_edge_cells', args, callback) # returns 1 if error
def hole_proportion(self, i, callback=None):
"""Calculates the proportion of the total area of a polygon's holes relative to the area of the polygon's hull.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('hole_proportion', args, callback) # returns 1 if error
def linearity_index(self, i, callback=None):
"""Calculates the linearity index for vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('linearity_index', args, callback) # returns 1 if error
def narrowness_index(self, i, output, callback=None):
"""Calculates the narrowness of raster polygons.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('narrowness_index', args, callback) # returns 1 if error
def patch_orientation(self, i, callback=None):
"""Calculates the orientation of vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('patch_orientation', args, callback) # returns 1 if error
def perimeter_area_ratio(self, i, callback=None):
"""Calculates the perimeter-area ratio of vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('perimeter_area_ratio', args, callback) # returns 1 if error
def radius_of_gyration(self, i, output, text_output=False, callback=None):
"""Calculates the distance of cells from their polygon's centroid.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
text_output -- Optional text output.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if text_output: args.append("--text_output")
return self.run_tool('radius_of_gyration', args, callback) # returns 1 if error
def related_circumscribing_circle(self, i, callback=None):
"""Calculates the related circumscribing circle of vector polygons.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('related_circumscribing_circle', args, callback) # returns 1 if error
def shape_complexity_index(self, i, callback=None):
"""Calculates overall polygon shape complexity or irregularity.
Keyword arguments:
i -- Input vector polygon file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('shape_complexity_index', args, callback) # returns 1 if error
def shape_complexity_index_raster(self, i, output, callback=None):
"""Calculates the complexity of raster polygons or classes.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('shape_complexity_index_raster', args, callback) # returns 1 if error
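# Example (illustrative sketch): computing several per-polygon shape
# metrics. These vector tools take no output file; it is assumed here that
# each appends its metric as a new field in the input's attribute table.
#
#   for metric in (wbt.compactness_ratio, wbt.elongation_ratio,
#                  wbt.linearity_index, wbt.perimeter_area_ratio):
#       metric(i='patches.shp')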
############################
# Geomorphometric Analysis #
############################
def aspect(self, dem, output, zfactor=1.0, callback=None):
"""Calculates an aspect raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('aspect', args, callback) # returns 1 if error
def average_normal_vector_angular_deviation(self, dem, output, filter=11, callback=None):
"""Calculates the circular variance of aspect at a scale for a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filter -- Size of the filter kernel.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
return self.run_tool('average_normal_vector_angular_deviation', args, callback) # returns 1 if error
def circular_variance_of_aspect(self, dem, output, filter=11, callback=None):
"""Calculates the circular variance of aspect at a scale for a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filter -- Size of the filter kernel.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
return self.run_tool('circular_variance_of_aspect', args, callback) # returns 1 if error
def dev_from_mean_elev(self, dem, output, filterx=11, filtery=11, callback=None):
"""Calculates deviation from mean elevation.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('dev_from_mean_elev', args, callback) # returns 1 if error
def diff_from_mean_elev(self, dem, output, filterx=11, filtery=11, callback=None):
"""Calculates difference from mean elevation (equivalent to a high-pass filter).
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('diff_from_mean_elev', args, callback) # returns 1 if error
def directional_relief(self, dem, output, azimuth=0.0, max_dist=None, callback=None):
"""Calculates relief for cells in an input DEM for a specified direction.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
azimuth -- Wind azimuth in degrees.
max_dist -- Optional maximum search distance (unspecified if none; in xy units).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--azimuth={}".format(azimuth))
if max_dist is not None: args.append("--max_dist='{}'".format(max_dist))
return self.run_tool('directional_relief', args, callback) # returns 1 if error
def downslope_index(self, dem, output, drop=2.0, out_type="tangent", callback=None):
"""Calculates the Hjerdt et al. (2004) downslope index.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
drop -- Vertical drop value (default is 2.0).
out_type -- Output type, options include 'tangent', 'degrees', 'radians', 'distance' (default is 'tangent').
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--drop={}".format(drop))
args.append("--out_type={}".format(out_type))
return self.run_tool('downslope_index', args, callback) # returns 1 if error
def edge_density(self, dem, output, filter=11, norm_diff=5.0, zfactor=1.0, callback=None):
"""Calculates the density of edges, or breaks-in-slope within DEMs.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filter -- Size of the filter kernel.
norm_diff -- Maximum difference in normal vectors, in degrees.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
args.append("--norm_diff={}".format(norm_diff))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('edge_density', args, callback) # returns 1 if error
def elev_above_pit(self, dem, output, callback=None):
"""Calculate the elevation of each grid cell above the nearest downstream pit cell or grid edge cell.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('elev_above_pit', args, callback) # returns 1 if error
def elev_percentile(self, dem, output, filterx=11, filtery=11, sig_digits=2, callback=None):
"""Calculates the elevation percentile raster from a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
sig_digits -- Number of significant digits.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("--sig_digits={}".format(sig_digits))
return self.run_tool('elev_percentile', args, callback) # returns 1 if error
def elev_relative_to_min_max(self, dem, output, callback=None):
"""Calculates the elevation of a location relative to the minimum and maximum elevations in a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('elev_relative_to_min_max', args, callback) # returns 1 if error
def elev_relative_to_watershed_min_max(self, dem, watersheds, output, callback=None):
"""Calculates the elevation of a location relative to the minimum and maximum elevations in a watershed.
Keyword arguments:
dem -- Input raster DEM file.
watersheds -- Input raster watersheds file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--watersheds='{}'".format(watersheds))
args.append("--output='{}'".format(output))
return self.run_tool('elev_relative_to_watershed_min_max', args, callback) # returns 1 if error
def feature_preserving_smoothing(self, dem, output, filter=11, norm_diff=15.0, num_iter=3, max_diff=0.5, zfactor=1.0, callback=None):
"""Reduces short-scale variation in an input DEM using a modified Sun et al. (2007) algorithm.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filter -- Size of the filter kernel.
norm_diff -- Maximum difference in normal vectors, in degrees.
num_iter -- Number of iterations.
max_diff -- Maximum allowable absolute elevation change (optional).
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
args.append("--norm_diff={}".format(norm_diff))
args.append("--num_iter={}".format(num_iter))
args.append("--max_diff={}".format(max_diff))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('feature_preserving_smoothing', args, callback) # returns 1 if error
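# Example (illustrative sketch): smoothing a DEM before computing surface
# derivatives, so that curvature and slope products are less contaminated
# by short-scale noise. File names are hypothetical.
#
#   wbt.feature_preserving_smoothing(dem='dem.tif', output='dem_smooth.tif',
#                                    filter=11, norm_diff=15.0, num_iter=3)
#   wbt.profile_curvature(dem='dem_smooth.tif', output='prof_curv.tif')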
def fetch_analysis(self, dem, output, azimuth=0.0, hgt_inc=0.05, callback=None):
"""Performs an analysis of fetch or upwind distance to an obstacle.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
azimuth -- Wind azimuth, in degrees.
hgt_inc -- Height increment value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--azimuth={}".format(azimuth))
args.append("--hgt_inc={}".format(hgt_inc))
return self.run_tool('fetch_analysis', args, callback) # returns 1 if error
def fill_missing_data(self, i, output, filter=11, weight=2.0, callback=None):
"""Fills NoData holes in a DEM.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filter -- Filter size (cells).
weight -- IDW weight value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
args.append("--weight={}".format(weight))
return self.run_tool('fill_missing_data', args, callback) # returns 1 if error
def find_ridges(self, dem, output, line_thin=True, callback=None):
"""Identifies potential ridge and peak grid cells.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
line_thin -- Optional flag indicating whether post-processing line-thinning should be performed.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if line_thin: args.append("--line_thin")
return self.run_tool('find_ridges', args, callback) # returns 1 if error
def hillshade(self, dem, output, azimuth=315.0, altitude=30.0, zfactor=1.0, callback=None):
"""Calculates a hillshade raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
azimuth -- Illumination source azimuth in degrees.
altitude -- Illumination source altitude in degrees.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--azimuth={}".format(azimuth))
args.append("--altitude={}".format(altitude))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('hillshade', args, callback) # returns 1 if error
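# Example (illustrative sketch): first-order land-surface parameters. For a
# DEM in geographic coordinates (degrees) with elevations in metres, a
# latitude-dependent z-factor is commonly applied; the value below is a
# hypothetical illustration, not a universal constant.
#
#   wbt.slope(dem='dem.tif', output='slope.tif', zfactor=1.0)
#   wbt.aspect(dem='dem.tif', output='aspect.tif', zfactor=1.0)
#   wbt.hillshade(dem='dem_geographic.tif', output='hs.tif',
#                 azimuth=315.0, altitude=30.0,
#                 zfactor=1.0 / (111320.0 * 0.707))  # e.g. ~45 deg latitude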
def horizon_angle(self, dem, output, azimuth=0.0, max_dist=None, callback=None):
"""Calculates horizon angle (maximum upwind slope) for each grid cell in an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
azimuth -- Wind azimuth in degrees.
max_dist -- Optional maximum search distance (unspecified if none; in xy units).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--azimuth={}".format(azimuth))
if max_dist is not None: args.append("--max_dist='{}'".format(max_dist))
return self.run_tool('horizon_angle', args, callback) # returns 1 if error
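# Example (illustrative sketch): wind-exposure analysis for a given wind
# direction, combining fetch distance, horizon angle, and directional
# relief with a shared azimuth. File names are hypothetical.
#
#   az = 270.0  # westerly wind
#   wbt.fetch_analysis(dem='dem.tif', output='fetch.tif', azimuth=az)
#   wbt.horizon_angle(dem='dem.tif', output='horizon.tif', azimuth=az,
#                     max_dist=5000.0)
#   wbt.directional_relief(dem='dem.tif', output='relief.tif', azimuth=az)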
def hypsometric_analysis(self, inputs, output, watershed=None, callback=None):
"""Calculates a hypsometric curve for one or more DEMs.
Keyword arguments:
inputs -- Input DEM files.
output -- Output HTML file (default name will be based on input file if unspecified).
watershed -- Input watershed files (optional).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
if watershed is not None: args.append("--watershed='{}'".format(watershed))
args.append("--output='{}'".format(output))
return self.run_tool('hypsometric_analysis', args, callback) # returns 1 if error
def max_anisotropy_dev(self, dem, out_mag, out_scale, max_scale, min_scale=3, step=2, callback=None):
"""Calculates the maximum anisotropy (directionality) in elevation deviation over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
out_mag -- Output raster DEVmax magnitude file.
out_scale -- Output raster DEVmax scale file.
min_scale -- Minimum search neighbourhood radius in grid cells.
max_scale -- Maximum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--out_mag='{}'".format(out_mag))
args.append("--out_scale='{}'".format(out_scale))
args.append("--min_scale={}".format(min_scale))
args.append("--max_scale='{}'".format(max_scale))
args.append("--step={}".format(step))
return self.run_tool('max_anisotropy_dev', args, callback) # returns 1 if error
def max_anisotropy_dev_signature(self, dem, points, output, max_scale, min_scale=1, step=1, callback=None):
"""Calculates the anisotropy in deviation from mean for points over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
points -- Input vector points file.
output -- Output HTML file.
min_scale -- Minimum search neighbourhood radius in grid cells.
max_scale -- Maximum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--points='{}'".format(points))
args.append("--output='{}'".format(output))
args.append("--min_scale={}".format(min_scale))
args.append("--max_scale='{}'".format(max_scale))
args.append("--step={}".format(step))
return self.run_tool('max_anisotropy_dev_signature', args, callback) # returns 1 if error
def max_branch_length(self, dem, output, log=False, callback=None):
"""Lindsay and Seibert's (2013) branch length index is used to map drainage divides or ridge lines.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
log -- Optional flag to request the output be log-transformed.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if log: args.append("--log")
return self.run_tool('max_branch_length', args, callback) # returns 1 if error
def max_difference_from_mean(self, dem, out_mag, out_scale, min_scale, max_scale, step=1, callback=None):
"""Calculates the maximum difference from mean elevation over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
out_mag -- Output raster DIFFmax magnitude file.
out_scale -- Output raster DIFFmax scale file.
min_scale -- Minimum search neighbourhood radius in grid cells.
max_scale -- Maximum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--out_mag='{}'".format(out_mag))
args.append("--out_scale='{}'".format(out_scale))
args.append("--min_scale='{}'".format(min_scale))
args.append("--max_scale='{}'".format(max_scale))
args.append("--step={}".format(step))
return self.run_tool('max_difference_from_mean', args, callback) # returns 1 if error
def max_downslope_elev_change(self, dem, output, callback=None):
"""Calculates the maximum downslope change in elevation between a grid cell and its eight downslope neighbors.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('max_downslope_elev_change', args, callback) # returns 1 if error
def max_elev_dev_signature(self, dem, points, output, min_scale, max_scale, step=10, callback=None):
"""Calculates the maximum elevation deviation over a range of spatial scales and for a set of points.
Keyword arguments:
dem -- Input raster DEM file.
points -- Input vector points file.
output -- Output HTML file.
min_scale -- Minimum search neighbourhood radius in grid cells.
max_scale -- Maximum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--points='{}'".format(points))
args.append("--output='{}'".format(output))
args.append("--min_scale='{}'".format(min_scale))
args.append("--max_scale='{}'".format(max_scale))
args.append("--step={}".format(step))
return self.run_tool('max_elev_dev_signature', args, callback) # returns 1 if error
def max_elevation_deviation(self, dem, out_mag, out_scale, min_scale, max_scale, step=1, callback=None):
"""Calculates the maximum elevation deviation over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
out_mag -- Output raster DEVmax magnitude file.
out_scale -- Output raster DEVmax scale file.
min_scale -- Minimum search neighbourhood radius in grid cells.
max_scale -- Maximum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--out_mag='{}'".format(out_mag))
args.append("--out_scale='{}'".format(out_scale))
args.append("--min_scale='{}'".format(min_scale))
args.append("--max_scale='{}'".format(max_scale))
args.append("--step={}".format(step))
return self.run_tool('max_elevation_deviation', args, callback) # returns 1 if error
def min_downslope_elev_change(self, dem, output, callback=None):
"""Calculates the minimum downslope change in elevation between a grid cell and its eight downslope neighbors.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('min_downslope_elev_change', args, callback) # returns 1 if error
def multiscale_roughness(self, dem, out_mag, out_scale, max_scale, min_scale=1, step=1, callback=None):
"""Calculates surface roughness over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
out_mag -- Output raster roughness magnitude file.
out_scale -- Output raster roughness scale file.
min_scale -- Minimum search neighbourhood radius in grid cells.
max_scale -- Maximum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--out_mag='{}'".format(out_mag))
args.append("--out_scale='{}'".format(out_scale))
args.append("--min_scale={}".format(min_scale))
args.append("--max_scale='{}'".format(max_scale))
args.append("--step={}".format(step))
return self.run_tool('multiscale_roughness', args, callback) # returns 1 if error
def multiscale_roughness_signature(self, dem, points, output, max_scale, min_scale=1, step=1, callback=None):
"""Calculates the surface roughness for points over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
points -- Input vector points file.
output -- Output HTML file.
min_scale -- Minimum search neighbourhood radius in grid cells.
max_scale -- Maximum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--points='{}'".format(points))
args.append("--output='{}'".format(output))
args.append("--min_scale={}".format(min_scale))
args.append("--max_scale='{}'".format(max_scale))
args.append("--step={}".format(step))
return self.run_tool('multiscale_roughness_signature', args, callback) # returns 1 if error
def multiscale_std_dev_normals(self, dem, out_mag, out_scale, min_scale=1, step=1, num_steps=10, step_nonlinearity=1.0, callback=None):
"""Calculates surface roughness over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
out_mag -- Output raster roughness magnitude file.
out_scale -- Output raster roughness scale file.
min_scale -- Minimum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
num_steps -- Number of steps.
step_nonlinearity -- Step nonlinearity factor (1.0-2.0 is typical).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--out_mag='{}'".format(out_mag))
args.append("--out_scale='{}'".format(out_scale))
args.append("--min_scale={}".format(min_scale))
args.append("--step={}".format(step))
args.append("--num_steps={}".format(num_steps))
args.append("--step_nonlinearity={}".format(step_nonlinearity))
return self.run_tool('multiscale_std_dev_normals', args, callback) # returns 1 if error
def multiscale_std_dev_normals_signature(self, dem, points, output, min_scale=1, step=1, num_steps=10, step_nonlinearity=1.0, callback=None):
"""Calculates the surface roughness for points over a range of spatial scales.
Keyword arguments:
dem -- Input raster DEM file.
points -- Input vector points file.
output -- Output HTML file.
min_scale -- Minimum search neighbourhood radius in grid cells.
step -- Step size as any positive non-zero integer.
num_steps -- Number of steps.
step_nonlinearity -- Step nonlinearity factor (1.0-2.0 is typical).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--points='{}'".format(points))
args.append("--output='{}'".format(output))
args.append("--min_scale={}".format(min_scale))
args.append("--step={}".format(step))
args.append("--num_steps={}".format(num_steps))
args.append("--step_nonlinearity={}".format(step_nonlinearity))
return self.run_tool('multiscale_std_dev_normals_signature', args, callback) # returns 1 if error
def multiscale_topographic_position_image(self, local, meso, broad, output, lightness=1.2, callback=None):
"""Creates a multiscale topographic position image from three DEVmax rasters of differing spatial scale ranges.
Keyword arguments:
local -- Input local-scale topographic position (DEVmax) raster file.
meso -- Input meso-scale topographic position (DEVmax) raster file.
broad -- Input broad-scale topographic position (DEVmax) raster file.
output -- Output raster file.
lightness -- Image lightness value (default is 1.2).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--local='{}'".format(local))
args.append("--meso='{}'".format(meso))
args.append("--broad='{}'".format(broad))
args.append("--output='{}'".format(output))
args.append("--lightness={}".format(lightness))
return self.run_tool('multiscale_topographic_position_image', args, callback) # returns 1 if error
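# Example (illustrative sketch): building a multiscale topographic position
# (MTP) colour composite from three DEVmax rasters spanning local, meso,
# and broad scale ranges. Scale ranges and file names are hypothetical.
#
#   wbt.max_elevation_deviation(dem='dem.tif', out_mag='dev_local.tif',
#                               out_scale='s_local.tif', min_scale=3,
#                               max_scale=25, step=2)
#   wbt.max_elevation_deviation(dem='dem.tif', out_mag='dev_meso.tif',
#                               out_scale='s_meso.tif', min_scale=27,
#                               max_scale=250, step=10)
#   wbt.max_elevation_deviation(dem='dem.tif', out_mag='dev_broad.tif',
#                               out_scale='s_broad.tif', min_scale=260,
#                               max_scale=1000, step=25)
#   wbt.multiscale_topographic_position_image(local='dev_local.tif',
#                                             meso='dev_meso.tif',
#                                             broad='dev_broad.tif',
#                                             output='mtp.tif')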
def num_downslope_neighbours(self, dem, output, callback=None):
"""Calculates the number of downslope neighbours to each grid cell in a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('num_downslope_neighbours', args, callback) # returns 1 if error
def num_upslope_neighbours(self, dem, output, callback=None):
"""Calculates the number of upslope neighbours to each grid cell in a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('num_upslope_neighbours', args, callback) # returns 1 if error
def pennock_landform_class(self, dem, output, slope=3.0, prof=0.1, plan=0.0, zfactor=1.0, callback=None):
"""Classifies hillslope zones based on slope, profile curvature, and plan curvature.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
slope -- Slope threshold value, in degrees (default is 3.0).
prof -- Profile curvature threshold value (default is 0.1).
plan -- Plan curvature threshold value (default is 0.0).
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--slope={}".format(slope))
args.append("--prof={}".format(prof))
args.append("--plan={}".format(plan))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('pennock_landform_class', args, callback) # returns 1 if error
def percent_elev_range(self, dem, output, filterx=3, filtery=3, callback=None):
"""Calculates percent of elevation range from a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('percent_elev_range', args, callback) # returns 1 if error
def plan_curvature(self, dem, output, zfactor=1.0, callback=None):
"""Calculates a plan (contour) curvature raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('plan_curvature', args, callback) # returns 1 if error
def profile(self, lines, surface, output, callback=None):
"""Plots profiles from digital surface models.
Keyword arguments:
lines -- Input vector line file.
surface -- Input raster surface file.
output -- Output HTML file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--lines='{}'".format(lines))
args.append("--surface='{}'".format(surface))
args.append("--output='{}'".format(output))
return self.run_tool('profile', args, callback) # returns 1 if error
def profile_curvature(self, dem, output, zfactor=1.0, callback=None):
"""Calculates a profile curvature raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('profile_curvature', args, callback) # returns 1 if error
def relative_aspect(self, dem, output, azimuth=0.0, zfactor=1.0, callback=None):
"""Calculates relative aspect (relative to a user-specified direction) from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
azimuth -- Illumination source azimuth.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--azimuth={}".format(azimuth))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('relative_aspect', args, callback) # returns 1 if error
def relative_topographic_position(self, dem, output, filterx=11, filtery=11, callback=None):
"""Calculates the relative topographic position index from a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('relative_topographic_position', args, callback) # returns 1 if error
def remove_off_terrain_objects(self, dem, output, filter=11, slope=15.0, callback=None):
"""Removes off-terrain objects from a raster digital elevation model (DEM).
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filter -- Filter size (cells).
slope -- Slope threshold value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
args.append("--slope={}".format(slope))
return self.run_tool('remove_off_terrain_objects', args, callback) # returns 1 if error
def ruggedness_index(self, dem, output, zfactor=1.0, callback=None):
"""Calculates the Riley et al.'s (1999) terrain ruggedness index from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('ruggedness_index', args, callback) # returns 1 if error
def sediment_transport_index(self, sca, slope, output, sca_exponent=0.4, slope_exponent=1.3, callback=None):
"""Calculates the sediment transport index.
Keyword arguments:
sca -- Input raster specific contributing area (SCA) file.
slope -- Input raster slope file.
output -- Output raster file.
sca_exponent -- SCA exponent value.
slope_exponent -- Slope exponent value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--sca='{}'".format(sca))
args.append("--slope='{}'".format(slope))
args.append("--output='{}'".format(output))
args.append("--sca_exponent={}".format(sca_exponent))
args.append("--slope_exponent={}".format(slope_exponent))
return self.run_tool('sediment_transport_index', args, callback) # returns 1 if error
def slope(self, dem, output, zfactor=1.0, callback=None):
"""Calculates a slope raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('slope', args, callback) # returns 1 if error
def slope_vs_elevation_plot(self, inputs, output, watershed=None, callback=None):
"""Creates a slope vs. elevation plot for one or more DEMs.
Keyword arguments:
inputs -- Input DEM files.
output -- Output HTML file (default name will be based on input file if unspecified).
watershed -- Input watershed files (optional).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
if watershed is not None: args.append("--watershed='{}'".format(watershed))
args.append("--output='{}'".format(output))
return self.run_tool('slope_vs_elevation_plot', args, callback) # returns 1 if error
def spherical_std_dev_of_normals(self, dem, output, filter=11, callback=None):
"""Calculates the spherical standard deviation of surface normals for a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
filter -- Size of the filter kernel.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
return self.run_tool('spherical_std_dev_of_normals', args, callback) # returns 1 if error
def standard_deviation_of_slope(self, i, output, zfactor=1.0, filterx=11, filtery=11, callback=None):
"""Calculates the standard deviation of slope from an input DEM.
Keyword arguments:
i -- Input raster DEM file.
output -- Output raster DEM file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('standard_deviation_of_slope', args, callback) # returns 1 if error
def stream_power_index(self, sca, slope, output, exponent=1.0, callback=None):
"""Calculates the relative stream power index.
Keyword arguments:
sca -- Input raster specific contributing area (SCA) file.
slope -- Input raster slope file.
output -- Output raster file.
exponent -- SCA exponent value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--sca='{}'".format(sca))
args.append("--slope='{}'".format(slope))
args.append("--output='{}'".format(output))
args.append("--exponent={}".format(exponent))
return self.run_tool('stream_power_index', args, callback) # returns 1 if error
def surface_area_ratio(self, dem, output, callback=None):
"""Calculates a the surface area ratio of each grid cell in an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('surface_area_ratio', args, callback) # returns 1 if error
def tangential_curvature(self, dem, output, zfactor=1.0, callback=None):
"""Calculates a tangential curvature raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('tangential_curvature', args, callback) # returns 1 if error
def total_curvature(self, dem, output, zfactor=1.0, callback=None):
"""Calculates a total curvature raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zfactor -- Optional multiplier for when the vertical and horizontal units are not the same.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--zfactor={}".format(zfactor))
return self.run_tool('total_curvature', args, callback) # returns 1 if error
def viewshed(self, dem, stations, output, height=2.0, callback=None):
"""Identifies the viewshed for a point or set of points.
Keyword arguments:
dem -- Input raster DEM file.
stations -- Input viewing station vector file.
output -- Output raster file.
height -- Viewing station height, in z units.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--stations='{}'".format(stations))
args.append("--output='{}'".format(output))
args.append("--height={}".format(height))
return self.run_tool('viewshed', args, callback) # returns 1 if error
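# Example (illustrative sketch): a viewshed from one or more observation
# points, with stations assumed to sit 2 z-units above the ground surface.
# File names are hypothetical.
#
#   wbt.viewshed(dem='dem.tif', stations='towers.shp',
#                output='viewshed.tif', height=2.0)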
def visibility_index(self, dem, output, height=2.0, res_factor=2, callback=None):
"""Estimates the relative visibility of sites in a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
height -- Viewing station height, in z units.
res_factor -- The resolution factor determines the density of measured viewsheds.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--height={}".format(height))
args.append("--res_factor={}".format(res_factor))
return self.run_tool('visibility_index', args, callback) # returns 1 if error
def wetness_index(self, sca, slope, output, callback=None):
"""Calculates the topographic wetness index, Ln(A / tan(slope)).
Keyword arguments:
sca -- Input raster specific contributing area (SCA) file.
slope -- Input raster slope file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--sca='{}'".format(sca))
args.append("--slope='{}'".format(slope))
args.append("--output='{}'".format(output))
return self.run_tool('wetness_index', args, callback) # returns 1 if error
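# Example (a hedged sketch, not from the original source): wetness_index
# expects a specific contributing area (SCA) raster and a slope raster,
# which can be derived beforehand, e.g. (assuming a wrapper instance `wbt`,
# hypothetical file names, and a slope raster produced elsewhere):
#
#     wbt.d_inf_flow_accumulation('dem.tif', 'sca.tif', out_type='sca')
#     wbt.wetness_index('sca.tif', 'slope.tif', 'twi.tif')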
#########################
# Hydrological Analysis #
#########################
def average_flowpath_slope(self, dem, output, callback=None):
"""Measures the average slope gradient from each grid cell to all upslope divide cells.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('average_flowpath_slope', args, callback) # returns 1 if error
def average_upslope_flowpath_length(self, dem, output, callback=None):
"""Measures the average length of all upslope flowpaths draining each grid cell.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('average_upslope_flowpath_length', args, callback) # returns 1 if error
def basins(self, d8_pntr, output, esri_pntr=False, callback=None):
"""Identifies drainage basins that drain to the DEM edge.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('basins', args, callback) # returns 1 if error
def breach_depressions(self, dem, output, max_depth=None, max_length=None, flat_increment=None, fill_pits=False, callback=None):
"""Breaches all of the depressions in a DEM using Lindsay's (2016) algorithm. This should be preferred over depression filling in most cases.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
max_depth -- Optional maximum breach depth (default is Inf).
max_length -- Optional maximum breach channel length (in grid cells; default is Inf).
flat_increment -- Optional elevation increment applied to flat areas.
fill_pits -- Optional flag indicating whether to fill single-cell pits.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if max_depth is not None: args.append("--max_depth='{}'".format(max_depth))
if max_length is not None: args.append("--max_length='{}'".format(max_length))
if flat_increment is not None: args.append("--flat_increment='{}'".format(flat_increment))
if fill_pits: args.append("--fill_pits")
return self.run_tool('breach_depressions', args, callback) # returns 1 if error
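# Example (illustrative, assumptions flagged): hydrologically conditioning a
# DEM before flow routing, using hypothetical file names on a wrapper
# instance `wbt`:
#
#     wbt.breach_depressions('dem.tif', 'dem_breached.tif',
#                            max_depth=10.0, fill_pits=True)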
def breach_single_cell_pits(self, dem, output, callback=None):
"""Removes single-cell pits from an input DEM by breaching.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('breach_single_cell_pits', args, callback) # returns 1 if error
def d8_flow_accumulation(self, dem, output, out_type="cells", log=False, clip=False, callback=None):
"""Calculates a D8 flow accumulation raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
out_type -- Output type; one of 'cells' (default), 'catchment area', and 'specific contributing area'.
log -- Optional flag to request the output be log-transformed.
clip -- Optional flag to request clipping the display max by 1%.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--out_type={}".format(out_type))
if log: args.append("--log")
if clip: args.append("--clip")
return self.run_tool('d8_flow_accumulation', args, callback) # returns 1 if error
def d8_mass_flux(self, dem, loading, efficiency, absorption, output, callback=None):
"""Performs a D8 mass flux calculation.
Keyword arguments:
dem -- Input raster DEM file.
loading -- Input loading raster file.
efficiency -- Input efficiency raster file.
absorption -- Input absorption raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--loading='{}'".format(loading))
args.append("--efficiency='{}'".format(efficiency))
args.append("--absorption='{}'".format(absorption))
args.append("--output='{}'".format(output))
return self.run_tool('d8_mass_flux', args, callback) # returns 1 if error
def d8_pointer(self, dem, output, esri_pntr=False, callback=None):
"""Calculates a D8 flow pointer raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('d8_pointer', args, callback) # returns 1 if error
def d_inf_flow_accumulation(self, dem, output, out_type="Specific Contributing Area", threshold=None, log=False, clip=False, callback=None):
"""Calculates a D-infinity flow accumulation raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
out_type -- Output type; one of 'cells', 'sca' (default), and 'ca'.
threshold -- Optional convergence threshold parameter, in grid cells; default is infinity.
log -- Optional flag to request the output be log-transformed.
clip -- Optional flag to request clipping the display max by 1%.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--out_type={}".format(out_type))
if threshold is not None: args.append("--threshold='{}'".format(threshold))
if log: args.append("--log")
if clip: args.append("--clip")
return self.run_tool('d_inf_flow_accumulation', args, callback) # returns 1 if error
def d_inf_mass_flux(self, dem, loading, efficiency, absorption, output, callback=None):
"""Performs a D-infinity mass flux calculation.
Keyword arguments:
dem -- Input raster DEM file.
loading -- Input loading raster file.
efficiency -- Input efficiency raster file.
absorption -- Input absorption raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--loading='{}'".format(loading))
args.append("--efficiency='{}'".format(efficiency))
args.append("--absorption='{}'".format(absorption))
args.append("--output='{}'".format(output))
return self.run_tool('d_inf_mass_flux', args, callback) # returns 1 if error
def d_inf_pointer(self, dem, output, callback=None):
"""Calculates a D-infinity flow pointer (flow direction) raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('d_inf_pointer', args, callback) # returns 1 if error
def depth_in_sink(self, dem, output, zero_background=False, callback=None):
"""Measures the depth of sinks (depressions) in a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zero_background -- Flag indicating whether the background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if zero_background: args.append("--zero_background")
return self.run_tool('depth_in_sink', args, callback) # returns 1 if error
def downslope_distance_to_stream(self, dem, streams, output, callback=None):
"""Measures distance to the nearest downslope stream cell.
Keyword arguments:
dem -- Input raster DEM file.
streams -- Input raster streams file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
return self.run_tool('downslope_distance_to_stream', args, callback) # returns 1 if error
def downslope_flowpath_length(self, d8_pntr, output, watersheds=None, weights=None, esri_pntr=False, callback=None):
"""Calculates the downslope flowpath length from each cell to basin outlet.
Keyword arguments:
d8_pntr -- Input D8 pointer raster file.
watersheds -- Optional input watershed raster file.
weights -- Optional input weights raster file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
if watersheds is not None: args.append("--watersheds='{}'".format(watersheds))
if weights is not None: args.append("--weights='{}'".format(weights))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('downslope_flowpath_length', args, callback) # returns 1 if error
def elevation_above_stream(self, dem, streams, output, callback=None):
"""Calculates the elevation of cells above the nearest downslope stream cell.
Keyword arguments:
dem -- Input raster DEM file.
streams -- Input raster streams file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
return self.run_tool('elevation_above_stream', args, callback) # returns 1 if error
def elevation_above_stream_euclidean(self, dem, streams, output, callback=None):
"""Calculates the elevation of cells above the nearest (Euclidean distance) stream cell.
Keyword arguments:
dem -- Input raster DEM file.
streams -- Input raster streams file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
return self.run_tool('elevation_above_stream_euclidean', args, callback) # returns 1 if error
def fd8_flow_accumulation(self, dem, output, out_type="specific contributing area", exponent=1.1, threshold=None, log=False, clip=False, callback=None):
"""Calculates an FD8 flow accumulation raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
out_type -- Output type; one of 'cells', 'specific contributing area' (default), and 'catchment area'.
exponent -- Optional exponent parameter; default is 1.1.
threshold -- Optional convergence threshold parameter, in grid cells; default is infinity.
log -- Optional flag to request the output be log-transformed.
clip -- Optional flag to request clipping the display max by 1%.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--out_type={}".format(out_type))
args.append("--exponent={}".format(exponent))
if threshold is not None: args.append("--threshold='{}'".format(threshold))
if log: args.append("--log")
if clip: args.append("--clip")
return self.run_tool('fd8_flow_accumulation', args, callback) # returns 1 if error
def fd8_pointer(self, dem, output, callback=None):
"""Calculates an FD8 flow pointer raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('fd8_pointer', args, callback) # returns 1 if error
def fill_burn(self, dem, streams, output, callback=None):
"""Burns streams into a DEM using the FillBurn (Saunders, 1999) method.
Keyword arguments:
dem -- Input raster DEM file.
streams -- Input vector streams file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
return self.run_tool('fill_burn', args, callback) # returns 1 if error
def fill_depressions(self, dem, output, fix_flats=True, flat_increment=None, callback=None):
"""Fills all of the depressions in a DEM. Depression breaching should be preferred in most cases.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
fix_flats -- Optional flag indicating whether flat areas should have a small gradient applied.
flat_increment -- Optional elevation increment applied to flat areas.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if fix_flats: args.append("--fix_flats")
if flat_increment is not None: args.append("--flat_increment='{}'".format(flat_increment))
return self.run_tool('fill_depressions', args, callback) # returns 1 if error
def fill_single_cell_pits(self, dem, output, callback=None):
"""Raises pit cells to the elevation of their lowest neighbour.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('fill_single_cell_pits', args, callback) # returns 1 if error
def find_no_flow_cells(self, dem, output, callback=None):
"""Finds grid cells with no downslope neighbours.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('find_no_flow_cells', args, callback) # returns 1 if error
def find_parallel_flow(self, d8_pntr, streams, output, callback=None):
"""Finds areas of parallel flow in D8 flow direction rasters.
Keyword arguments:
d8_pntr -- Input D8 pointer raster file.
streams -- Input raster streams file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
return self.run_tool('find_parallel_flow', args, callback) # returns 1 if error
def flatten_lakes(self, dem, lakes, output, callback=None):
"""Flattens lake polygons in a raster DEM.
Keyword arguments:
dem -- Input raster DEM file.
lakes -- Input lakes vector polygons file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--lakes='{}'".format(lakes))
args.append("--output='{}'".format(output))
return self.run_tool('flatten_lakes', args, callback) # returns 1 if error
def flood_order(self, dem, output, callback=None):
"""Assigns each DEM grid cell its order in the sequence of inundations that are encountered during a search starting from the edges, moving inward at increasing elevations.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('flood_order', args, callback) # returns 1 if error
def flow_accumulation_full_workflow(self, dem, out_dem, out_pntr, out_accum, out_type="Specific Contributing Area", log=False, clip=False, esri_pntr=False, callback=None):
"""Resolves all of the depressions in a DEM, outputting a breached DEM, an aspect-aligned non-divergent flow pointer, and a flow accumulation raster.
Keyword arguments:
dem -- Input raster DEM file.
out_dem -- Output raster DEM file.
out_pntr -- Output raster flow pointer file.
out_accum -- Output raster flow accumulation file.
out_type -- Output type; one of 'cells', 'sca' (default), and 'ca'.
log -- Optional flag to request the output be log-transformed.
clip -- Optional flag to request clipping the display max by 1%.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--out_dem='{}'".format(out_dem))
args.append("--out_pntr='{}'".format(out_pntr))
args.append("--out_accum='{}'".format(out_accum))
args.append("--out_type={}".format(out_type))
if log: args.append("--log")
if clip: args.append("--clip")
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('flow_accumulation_full_workflow', args, callback) # returns 1 if error
def flow_length_diff(self, d8_pntr, output, esri_pntr=False, callback=None):
"""Calculates the local maximum absolute difference in downslope flowpath length, useful in mapping drainage divides and ridges.
Keyword arguments:
d8_pntr -- Input D8 pointer raster file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('flow_length_diff', args, callback) # returns 1 if error
def hillslopes(self, d8_pntr, streams, output, esri_pntr=False, callback=None):
"""Identifies the individual hillslopes draining to each link in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('hillslopes', args, callback) # returns 1 if error
def impoundment_size_index(self, dem, output, damlength, out_type="depth", callback=None):
"""Calculates the impoundment size resulting from damming a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output file.
out_type -- Output type; one of 'depth' (default), 'volume', and 'area'.
damlength -- Maximum length of the dam.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--out_type={}".format(out_type))
args.append("--damlength='{}'".format(damlength))
return self.run_tool('impoundment_size_index', args, callback) # returns 1 if error
def isobasins(self, dem, output, size, callback=None):
"""Divides a landscape into nearly equal sized drainage basins (i.e. watersheds).
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
size -- Target basin size, in grid cells.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--size='{}'".format(size))
return self.run_tool('isobasins', args, callback) # returns 1 if error
def jenson_snap_pour_points(self, pour_pts, streams, output, snap_dist, callback=None):
"""Moves outlet points used to specify points of interest in a watershedding operation to the nearest stream cell.
Keyword arguments:
pour_pts -- Input vector pour points (outlet) file.
streams -- Input raster streams file.
output -- Output vector file.
snap_dist -- Maximum snap distance in map units.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--pour_pts='{}'".format(pour_pts))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
args.append("--snap_dist='{}'".format(snap_dist))
return self.run_tool('jenson_snap_pour_points', args, callback) # returns 1 if error
def longest_flowpath(self, dem, basins, output, callback=None):
"""Delineates the longest flowpaths for a group of subbasins or watersheds.
Keyword arguments:
dem -- Input raster DEM file.
basins -- Input raster basins file.
output -- Output vector file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--basins='{}'".format(basins))
args.append("--output='{}'".format(output))
return self.run_tool('longest_flowpath', args, callback) # returns 1 if error
def max_upslope_flowpath_length(self, dem, output, callback=None):
"""Measures the maximum length of all upslope flowpaths draining each grid cell.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('max_upslope_flowpath_length', args, callback) # returns 1 if error
def num_inflowing_neighbours(self, dem, output, callback=None):
"""Computes the number of inflowing neighbours to each cell in an input DEM based on the D8 algorithm.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
return self.run_tool('num_inflowing_neighbours', args, callback) # returns 1 if error
def raise_walls(self, i, dem, output, breach=None, height=100.0, callback=None):
"""Raises walls in a DEM along a line or around a polygon, e.g. a watershed.
Keyword arguments:
i -- Input vector lines or polygons file.
breach -- Optional input vector breach lines.
dem -- Input raster DEM file.
output -- Output raster file.
height -- Wall height.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if breach is not None: args.append("--breach='{}'".format(breach))
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--height={}".format(height))
return self.run_tool('raise_walls', args, callback) # returns 1 if error
def rho8_pointer(self, dem, output, esri_pntr=False, callback=None):
"""Calculates a stochastic Rho8 flow pointer raster from an input DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('rho8_pointer', args, callback) # returns 1 if error
def sink(self, dem, output, zero_background=False, callback=None):
"""Identifies the depressions in a DEM, giving each feature a unique identifier.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if zero_background: args.append("--zero_background")
return self.run_tool('sink', args, callback) # returns 1 if error
def snap_pour_points(self, pour_pts, flow_accum, output, snap_dist, callback=None):
"""Moves outlet points used to specify points of interest in a watershedding operation to the cell with the highest flow accumulation in its neighbourhood.
Keyword arguments:
pour_pts -- Input vector pour points (outlet) file.
flow_accum -- Input raster D8 flow accumulation file.
output -- Output vector file.
snap_dist -- Maximum snap distance in map units.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--pour_pts='{}'".format(pour_pts))
args.append("--flow_accum='{}'".format(flow_accum))
args.append("--output='{}'".format(output))
args.append("--snap_dist='{}'".format(snap_dist))
return self.run_tool('snap_pour_points', args, callback) # returns 1 if error
def stochastic_depression_analysis(self, dem, output, rmse, range, iterations=100, callback=None):
"""Preforms a stochastic analysis of depressions within a DEM.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output file.
rmse -- The DEM's root-mean-square-error (RMSE), in z units. This determines error magnitude.
range -- The error field's correlation length, in xy-units.
iterations -- The number of iterations.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--rmse='{}'".format(rmse))
args.append("--range='{}'".format(range))
args.append("--iterations={}".format(iterations))
return self.run_tool('stochastic_depression_analysis', args, callback) # returns 1 if error
def strahler_order_basins(self, d8_pntr, streams, output, esri_pntr=False, callback=None):
"""Identifies Strahler-order basins from an input stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('strahler_order_basins', args, callback) # returns 1 if error
def subbasins(self, d8_pntr, streams, output, esri_pntr=False, callback=None):
"""Identifies the catchments, or sub-basin, draining to each link in a stream network.
Keyword arguments:
d8_pntr -- Input D8 pointer raster file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('subbasins', args, callback) # returns 1 if error
def trace_downslope_flowpaths(self, seed_pts, d8_pntr, output, esri_pntr=False, zero_background=False, callback=None):
"""Traces downslope flowpaths from one or more target sites (i.e. seed points).
Keyword arguments:
seed_pts -- Input vector seed points file.
d8_pntr -- Input D8 pointer raster file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--seed_pts='{}'".format(seed_pts))
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('trace_downslope_flowpaths', args, callback) # returns 1 if error
def unnest_basins(self, d8_pntr, pour_pts, output, esri_pntr=False, callback=None):
"""Extract whole watersheds for a set of outlet points.
Keyword arguments:
d8_pntr -- Input D8 pointer raster file.
pour_pts -- Input vector pour points (outlet) file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--pour_pts='{}'".format(pour_pts))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('unnest_basins', args, callback) # returns 1 if error
def watershed(self, d8_pntr, pour_pts, output, esri_pntr=False, callback=None):
"""Identifies the watershed, or drainage basin, draining to a set of target cells.
Keyword arguments:
d8_pntr -- Input D8 pointer raster file.
pour_pts -- Input vector pour points (outlet) file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--pour_pts='{}'".format(pour_pts))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('watershed', args, callback) # returns 1 if error
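# Example (a minimal sketch, not from the original source): a typical
# watershed-delineation sequence built from the tools above, assuming a
# wrapper instance `wbt` and hypothetical file names:
#
#     wbt.breach_depressions('dem.tif', 'dem_b.tif')
#     wbt.d8_pointer('dem_b.tif', 'pntr.tif')
#     wbt.d8_flow_accumulation('dem_b.tif', 'facc.tif')
#     wbt.snap_pour_points('outlets.shp', 'facc.tif', 'snapped.shp',
#                          snap_dist=50.0)
#     wbt.watershed('pntr.tif', 'snapped.shp', 'wshed.tif')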
##########################
# Image Processing Tools #
##########################
def change_vector_analysis(self, date1, date2, magnitude, direction, callback=None):
"""Performs a change vector analysis on a two-date multi-spectral dataset.
Keyword arguments:
date1 -- Input raster files for the earlier date.
date2 -- Input raster files for the later date.
magnitude -- Output vector magnitude raster file.
direction -- Output vector direction raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--date1='{}'".format(date1))
args.append("--date2='{}'".format(date2))
args.append("--magnitude='{}'".format(magnitude))
args.append("--direction='{}'".format(direction))
return self.run_tool('change_vector_analysis', args, callback) # returns 1 if error
def closing(self, i, output, filterx=11, filtery=11, callback=None):
"""A closing is a mathematical morphology operation involving an erosion (min filter) of a dilation (max filter) set.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('closing', args, callback) # returns 1 if error
def create_colour_composite(self, red, green, blue, output, opacity=None, enhance=True, zeros=False, callback=None):
"""Creates a colour-composite image from three bands of multispectral imagery.
Keyword arguments:
red -- Input red band image file.
green -- Input green band image file.
blue -- Input blue band image file.
opacity -- Input opacity band image file (optional).
output -- Output colour composite file.
enhance -- Optional flag indicating whether a balance contrast enhancement is performed.
zeros -- Optional flag to indicate if zeros are nodata values.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--red='{}'".format(red))
args.append("--green='{}'".format(green))
args.append("--blue='{}'".format(blue))
if opacity is not None: args.append("--opacity='{}'".format(opacity))
args.append("--output='{}'".format(output))
if enhance: args.append("--enhance")
if zeros: args.append("--zeros")
return self.run_tool('create_colour_composite', args, callback) # returns 1 if error
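# Example (illustrative only): building a contrast-enhanced RGB composite
# from three hypothetical band files, assuming a wrapper instance `wbt`:
#
#     wbt.create_colour_composite('band4.tif', 'band3.tif', 'band2.tif',
#                                 'composite.tif', enhance=True)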
def flip_image(self, i, output, direction="vertical", callback=None):
"""Reflects an image in the vertical or horizontal axis.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
direction -- Direction of reflection; options include 'v' (vertical), 'h' (horizontal), and 'b' (both).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--direction={}".format(direction))
return self.run_tool('flip_image', args, callback) # returns 1 if error
def ihs_to_rgb(self, intensity, hue, saturation, red=None, green=None, blue=None, output=None, callback=None):
"""Converts intensity, hue, and saturation (IHS) images into red, green, and blue (RGB) images.
Keyword arguments:
intensity -- Input intensity file.
hue -- Input hue file.
saturation -- Input saturation file.
red -- Output red band file. Optionally specified if colour-composite not specified.
green -- Output green band file. Optionally specified if colour-composite not specified.
blue -- Output blue band file. Optionally specified if colour-composite not specified.
output -- Output colour-composite file. Only used if individual bands are not specified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--intensity='{}'".format(intensity))
args.append("--hue='{}'".format(hue))
args.append("--saturation='{}'".format(saturation))
if red is not None: args.append("--red='{}'".format(red))
if green is not None: args.append("--green='{}'".format(green))
if blue is not None: args.append("--blue='{}'".format(blue))
if output is not None: args.append("--output='{}'".format(output))
return self.run_tool('ihs_to_rgb', args, callback) # returns 1 if error
def image_stack_profile(self, inputs, points, output, callback=None):
"""Plots an image stack profile (i.e. signature) for a set of points and multispectral images.
Keyword arguments:
inputs -- Input multispectral image files.
points -- Input vector points file.
output -- Output HTML file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--points='{}'".format(points))
args.append("--output='{}'".format(output))
return self.run_tool('image_stack_profile', args, callback) # returns 1 if error
def integral_image(self, i, output, callback=None):
"""Transforms an input image (summed area table) into its integral image equivalent.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('integral_image', args, callback) # returns 1 if error
def k_means_clustering(self, inputs, output, classes, out_html=None, max_iterations=10, class_change=2.0, initialize="diagonal", min_class_size=10, callback=None):
"""Performs a k-means clustering operation on a multi-spectral dataset.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
out_html -- Output HTML report file.
classes -- Number of classes.
max_iterations -- Maximum number of iterations.
class_change -- Minimum percent of cells changed between iterations before completion.
initialize -- Method used to initialize the cluster centres.
min_class_size -- Minimum class size, in pixels.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
if out_html is not None: args.append("--out_html='{}'".format(out_html))
args.append("--classes='{}'".format(classes))
args.append("--max_iterations={}".format(max_iterations))
args.append("--class_change={}".format(class_change))
args.append("--initialize={}".format(initialize))
args.append("--min_class_size={}".format(min_class_size))
return self.run_tool('k_means_clustering', args, callback) # returns 1 if error
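# Example (hedged sketch): unsupervised classification of a multi-band
# stack. The `inputs` argument is a single string naming several rasters; a
# semicolon separator is assumed here as the wrapper's multi-input
# convention (an assumption, not confirmed by this file):
#
#     wbt.k_means_clustering('b1.tif;b2.tif;b3.tif', 'classes.tif',
#                            classes=5, out_html='report.html')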
def line_thinning(self, i, output, callback=None):
"""Performs line thinning a on Boolean raster image; intended to be used with the RemoveSpurs tool.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('line_thinning', args, callback) # returns 1 if error
def modified_k_means_clustering(self, inputs, output, out_html=None, start_clusters=1000, merge_dist=None, max_iterations=10, class_change=2.0, callback=None):
"""Performs a modified k-means clustering operation on a multi-spectral dataset.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
out_html -- Output HTML report file.
start_clusters -- Initial number of clusters.
merge_dist -- Cluster merger distance.
max_iterations -- Maximum number of iterations.
class_change -- Minimum percent of cells changed between iterations before completion.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
if out_html is not None: args.append("--out_html='{}'".format(out_html))
args.append("--start_clusters={}".format(start_clusters))
if merge_dist is not None: args.append("--merge_dist='{}'".format(merge_dist))
args.append("--max_iterations={}".format(max_iterations))
args.append("--class_change={}".format(class_change))
return self.run_tool('modified_k_means_clustering', args, callback) # returns 1 if error
def mosaic(self, inputs, output, method="cc", callback=None):
"""Mosaics two or more images together.
Keyword arguments:
inputs -- Input raster files.
output -- Output raster file.
method -- Resampling method; options include 'nn' (nearest neighbour), 'bilinear', and 'cc' (cubic convolution).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
args.append("--method={}".format(method))
return self.run_tool('mosaic', args, callback) # returns 1 if error
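# Example (illustrative): mosaicking several hypothetical tiles with
# nearest-neighbour resampling, assuming the same semicolon-joined
# multi-input convention noted above:
#
#     wbt.mosaic('tile1.tif;tile2.tif;tile3.tif', 'mosaic.tif', method='nn')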
def mosaic_with_feathering(self, input1, input2, output, method="cc", weight=4.0, callback=None):
"""Mosaics two images together using a feathering technique in overlapping areas to reduce edge-effects.
Keyword arguments:
input1 -- Input raster file to modify.
input2 -- Input reference raster file.
output -- Output raster file.
method -- Resampling method; options include 'nn' (nearest neighbour), 'bilinear', and 'cc' (cubic convolution).
weight -- .
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
args.append("--method={}".format(method))
args.append("--weight={}".format(weight))
return self.run_tool('mosaic_with_feathering', args, callback) # returns 1 if error
def normalized_difference_index(self, input1, input2, output, clip=0.0, correction=0.0, callback=None):
"""Calculate a normalized-difference index (NDI) from two bands of multispectral image data.
Keyword arguments:
input1 -- Input image 1 (e.g. near-infrared band).
input2 -- Input image 2 (e.g. red band).
output -- Output raster file.
clip -- Optional amount to clip the distribution tails by, in percent.
correction -- Optional adjustment value (e.g. 1, or 0.16 for the optimal soil adjusted vegetation index, OSAVI).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
args.append("--clip={}".format(clip))
args.append("--correction={}".format(correction))
return self.run_tool('normalized_difference_index', args, callback) # returns 1 if error
def opening(self, i, output, filterx=11, filtery=11, callback=None):
"""An opening is a mathematical morphology operation involving a dilation (max filter) of an erosion (min filter) set.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('opening', args, callback) # returns 1 if error
def remove_spurs(self, i, output, iterations=10, callback=None):
"""Removes the spurs (pruning operation) from a Boolean line image; intended to be used on the output of the LineThinning tool.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
iterations -- Maximum number of iterations.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--iterations={}".format(iterations))
return self.run_tool('remove_spurs', args, callback) # returns 1 if error
def resample(self, inputs, destination, method="cc", callback=None):
"""Resamples one or more input images into a destination image.
Keyword arguments:
inputs -- Input raster files.
destination -- Destination raster file.
method -- Resampling method; options include 'nn' (nearest neighbour), 'bilinear', and 'cc' (cubic convolution).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--destination='{}'".format(destination))
args.append("--method={}".format(method))
return self.run_tool('resample', args, callback) # returns 1 if error
def rgb_to_ihs(self, intensity, hue, saturation, red=None, green=None, blue=None, composite=None, callback=None):
"""Converts red, green, and blue (RGB) images into intensity, hue, and saturation (IHS) images.
Keyword arguments:
red -- Input red band image file. Optionally specified if colour-composite not specified.
green -- Input green band image file. Optionally specified if colour-composite not specified.
blue -- Input blue band image file. Optionally specified if colour-composite not specified.
composite -- Input colour-composite image file. Only used if individual bands are not specified.
intensity -- Output intensity raster file.
hue -- Output hue raster file.
saturation -- Output saturation raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
if red is not None: args.append("--red='{}'".format(red))
if green is not None: args.append("--green='{}'".format(green))
if blue is not None: args.append("--blue='{}'".format(blue))
if composite is not None: args.append("--composite='{}'".format(composite))
args.append("--intensity='{}'".format(intensity))
args.append("--hue='{}'".format(hue))
args.append("--saturation='{}'".format(saturation))
return self.run_tool('rgb_to_ihs', args, callback) # returns 1 if error
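# Example (a sketch, not from the original docs): a round trip between RGB
# and IHS colour spaces via a composite image, with hypothetical file names;
# note that rgb_to_ihs takes its three output rasters as the leading
# positional arguments:
#
#     wbt.rgb_to_ihs('i.tif', 'h.tif', 's.tif', composite='rgb.tif')
#     wbt.ihs_to_rgb('i.tif', 'h.tif', 's.tif', output='rgb_back.tif')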
def split_colour_composite(self, i, red=None, green=None, blue=None, callback=None):
"""This tool splits an RGB colour composite image into seperate multispectral images.
Keyword arguments:
i -- Input colour composite image file.
red -- Output red band file.
green -- Output green band file.
blue -- Output blue band file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if red is not None: args.append("--red='{}'".format(red))
if green is not None: args.append("--green='{}'".format(green))
if blue is not None: args.append("--blue='{}'".format(blue))
return self.run_tool('split_colour_composite', args, callback) # returns 1 if error
def thicken_raster_line(self, i, output, callback=None):
"""Thickens single-cell wide lines within a raster image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('thicken_raster_line', args, callback) # returns 1 if error
def tophat_transform(self, i, output, filterx=11, filtery=11, variant="white", callback=None):
"""Performs either a white or black top-hat transform on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
variant -- Optional variant value. Options include 'white' and 'black'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("--variant={}".format(variant))
return self.run_tool('tophat_transform', args, callback) # returns 1 if error
def write_function_memory_insertion(self, input1, input2, output, input3=None, callback=None):
"""Performs a write function memory insertion for single-band multi-date change detection.
Keyword arguments:
input1 -- Input raster file associated with the first date.
input2 -- Input raster file associated with the second date.
input3 -- Optional input raster file associated with the third date.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
if input3 is not None: args.append("--input3='{}'".format(input3))
args.append("--output='{}'".format(output))
return self.run_tool('write_function_memory_insertion', args, callback) # returns 1 if error
##################################
# Image Processing Tools/Filters #
##################################
def adaptive_filter(self, i, output, filterx=11, filtery=11, threshold=2.0, callback=None):
"""Performs an adaptive filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
threshold -- Difference from mean threshold, in standard deviations.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("--threshold={}".format(threshold))
return self.run_tool('adaptive_filter', args, callback) # returns 1 if error
def bilateral_filter(self, i, output, sigma_dist=0.75, sigma_int=1.0, callback=None):
"""A bilateral filter is an edge-preserving smoothing filter introduced by Tomasi and Manduchi (1998).
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
sigma_dist -- Standard deviation in distance in pixels.
sigma_int -- Standard deviation in intensity in pixels.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--sigma_dist={}".format(sigma_dist))
args.append("--sigma_int={}".format(sigma_int))
return self.run_tool('bilateral_filter', args, callback) # returns 1 if error
def conservative_smoothing_filter(self, i, output, filterx=3, filtery=3, callback=None):
"""Performs a conservative-smoothing filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('conservative_smoothing_filter', args, callback) # returns 1 if error
def corner_detection(self, i, output, callback=None):
"""Identifies corner patterns in boolean images using hit-and-miss pattern matching.
Keyword arguments:
i -- Input boolean image.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('corner_detection', args, callback) # returns 1 if error
def diff_of_gaussian_filter(self, i, output, sigma1=2.0, sigma2=4.0, callback=None):
"""Performs a Difference of Gaussian (DoG) filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
sigma1 -- Standard deviation distance in pixels.
sigma2 -- Standard deviation distance in pixels.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--sigma1={}".format(sigma1))
args.append("--sigma2={}".format(sigma2))
return self.run_tool('diff_of_gaussian_filter', args, callback) # returns 1 if error
def diversity_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Assigns each cell in the output grid the number of different values in a moving window centred on each grid cell in the input raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('diversity_filter', args, callback) # returns 1 if error
def edge_preserving_mean_filter(self, i, output, threshold, filter=11, callback=None):
"""Performs a simple edge-preserving mean filter on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filter -- Size of the filter kernel.
threshold -- Maximum difference in values.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filter={}".format(filter))
args.append("--threshold='{}'".format(threshold))
return self.run_tool('edge_preserving_mean_filter', args, callback) # returns 1 if error
def emboss_filter(self, i, output, direction="n", clip=0.0, callback=None):
"""Performs an emboss filter on an image, similar to a hillshade operation.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
direction -- Filter direction; options include 'n', 's', 'e', 'w', 'ne', 'se', 'nw', 'sw'.
clip -- Optional amount to clip the distribution tails by, in percent.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--direction={}".format(direction))
args.append("--clip={}".format(clip))
return self.run_tool('emboss_filter', args, callback) # returns 1 if error
def fast_almost_gaussian_filter(self, i, output, sigma=1.8, callback=None):
"""Performs a fast approximate Gaussian filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
sigma -- Standard deviation distance in pixels.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--sigma={}".format(sigma))
return self.run_tool('fast_almost_gaussian_filter', args, callback) # returns 1 if error
def gaussian_filter(self, i, output, sigma=0.75, callback=None):
"""Performs a Gaussian filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
sigma -- Standard deviation distance in pixels.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--sigma={}".format(sigma))
return self.run_tool('gaussian_filter', args, callback) # returns 1 if error
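# Example (illustrative): light Gaussian smoothing of a noisy raster,
# assuming a wrapper instance `wbt` and hypothetical file names:
#
#     wbt.gaussian_filter('image.tif', 'smoothed.tif', sigma=1.0)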
def high_pass_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Performs a high-pass filter on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('high_pass_filter', args, callback) # returns 1 if error
def high_pass_median_filter(self, i, output, filterx=11, filtery=11, sig_digits=2, callback=None):
"""Performs a high pass median filter on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
sig_digits -- Number of significant digits.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("--sig_digits={}".format(sig_digits))
return self.run_tool('high_pass_median_filter', args, callback) # returns 1 if error
def k_nearest_mean_filter(self, i, output, filterx=11, filtery=11, k=5, callback=None):
"""A k-nearest mean filter is a type of edge-preserving smoothing filter.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
k -- k-value in pixels; this is the number of nearest-valued neighbours to use.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("-k={}".format(k))
return self.run_tool('k_nearest_mean_filter', args, callback) # returns 1 if error
def laplacian_filter(self, i, output, variant="3x3(1)", clip=0.0, callback=None):
"""Performs a Laplacian filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
variant -- Optional variant value. Options include 3x3(1), 3x3(2), 3x3(3), 3x3(4), 5x5(1), and 5x5(2) (default is 3x3(1)).
clip -- Optional amount to clip the distribution tails by, in percent.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--variant={}".format(variant))
args.append("--clip={}".format(clip))
return self.run_tool('laplacian_filter', args, callback) # returns 1 if error
def laplacian_of_gaussian_filter(self, i, output, sigma=0.75, callback=None):
"""Performs a Laplacian-of-Gaussian (LoG) filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
sigma -- Standard deviation in pixels.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--sigma={}".format(sigma))
return self.run_tool('laplacian_of_gaussian_filter', args, callback) # returns 1 if error
def lee_sigma_filter(self, i, output, filterx=11, filtery=11, sigma=10.0, m=5.0, callback=None):
"""Performs a Lee (Sigma) smoothing filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
sigma -- Sigma value; should be related to the standard deviation of the distribution of image speckle noise.
m -- M-threshold value; the minimum allowable number of pixels within the intensity range.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("--sigma={}".format(sigma))
args.append("-m={}".format(m))
return self.run_tool('lee_sigma_filter', args, callback) # returns 1 if error
def line_detection_filter(self, i, output, variant="vertical", absvals=False, clip=0.0, callback=None):
"""Performs a line-detection filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
variant -- Optional variant value. Options include 'vertical' ('v'), 'horizontal' ('h'), '45', and '135' (default is 'vertical').
absvals -- Optional flag indicating whether outputs should be absolute values.
clip -- Optional amount to clip the distribution tails by, in percent.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--variant={}".format(variant))
if absvals: args.append("--absvals")
args.append("--clip={}".format(clip))
return self.run_tool('line_detection_filter', args, callback) # returns 1 if error
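# For example (hypothetical file names; `wbt` is an instance of this class):
#
#   # highlight horizontal linear features, reporting absolute filter responses
#   wbt.line_detection_filter('image.tif', 'h_lines.tif',
#                             variant='horizontal', absvals=True, clip=1.0)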
def majority_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Assigns each cell in the output grid the most frequently occurring value (mode) in a moving window centred on each grid cell in the input raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('majority_filter', args, callback) # returns 1 if error
def maximum_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Assigns each cell in the output grid the maximum value in a moving window centred on each grid cell in the input raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('maximum_filter', args, callback) # returns 1 if error
def mean_filter(self, i, output, filterx=3, filtery=3, callback=None):
"""Performs a mean filter (low-pass filter) on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('mean_filter', args, callback) # returns 1 if error
def median_filter(self, i, output, filterx=11, filtery=11, sig_digits=2, callback=None):
"""Performs a median filter on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
sig_digits -- Number of significant digits.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("--sig_digits={}".format(sig_digits))
return self.run_tool('median_filter', args, callback) # returns 1 if error
def minimum_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Assigns each cell in the output grid the minimum value in a moving window centred on each grid cell in the input raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('minimum_filter', args, callback) # returns 1 if error
def olympic_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Performs an olympic smoothing filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('olympic_filter', args, callback) # returns 1 if error
def percentile_filter(self, i, output, filterx=11, filtery=11, sig_digits=2, callback=None):
"""Performs a percentile filter on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
sig_digits -- Number of significant digits.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
args.append("--sig_digits={}".format(sig_digits))
return self.run_tool('percentile_filter', args, callback) # returns 1 if error
def prewitt_filter(self, i, output, clip=0.0, callback=None):
"""Performs a Prewitt edge-detection filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
clip -- Optional amount to clip the distribution tails by, in percent.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--clip={}".format(clip))
return self.run_tool('prewitt_filter', args, callback) # returns 1 if error
def range_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Assigns each cell in the output grid the range of values in a moving window centred on each grid cell in the input raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('range_filter', args, callback) # returns 1 if error
def roberts_cross_filter(self, i, output, clip=0.0, callback=None):
"""Performs a Robert's cross edge-detection filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
clip -- Optional amount to clip the distribution tails by, in percent.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--clip={}".format(clip))
return self.run_tool('roberts_cross_filter', args, callback) # returns 1 if error
def scharr_filter(self, i, output, clip=0.0, callback=None):
"""Performs a Scharr edge-detection filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
clip -- Optional amount to clip the distribution tails by, in percent.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--clip={}".format(clip))
return self.run_tool('scharr_filter', args, callback) # returns 1 if error
def sobel_filter(self, i, output, variant="3x3", clip=0.0, callback=None):
"""Performs a Sobel edge-detection filter on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
variant -- Optional variant value. Options include 3x3 and 5x5 (default is 3x3).
clip -- Optional amount to clip the distribution tails by, in percent (default is 0.0).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--variant={}".format(variant))
args.append("--clip={}".format(clip))
return self.run_tool('sobel_filter', args, callback) # returns 1 if error
def standard_deviation_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Assigns each cell in the output grid the standard deviation of values in a moving window centred on each grid cell in the input raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('standard_deviation_filter', args, callback) # returns 1 if error
def total_filter(self, i, output, filterx=11, filtery=11, callback=None):
"""Performs a total filter on an input image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
filterx -- Size of the filter kernel in the x-direction.
filtery -- Size of the filter kernel in the y-direction.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--filterx={}".format(filterx))
args.append("--filtery={}".format(filtery))
return self.run_tool('total_filter', args, callback) # returns 1 if error
def unsharp_masking(self, i, output, sigma=0.75, amount=100.0, threshold=0.0, callback=None):
"""An image sharpening technique that enhances edges.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
sigma -- Standard deviation distance in pixels.
amount -- A percentage that controls the magnitude of each overshoot.
threshold -- Controls the minimal brightness change that will be sharpened.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--sigma={}".format(sigma))
args.append("--amount={}".format(amount))
args.append("--threshold={}".format(threshold))
return self.run_tool('unsharp_masking', args, callback) # returns 1 if error
def user_defined_weights_filter(self, i, weights, output, center="center", normalize=False, callback=None):
"""Performs a user-defined weights filter on an image.
Keyword arguments:
i -- Input raster file.
weights -- Input weights file.
output -- Output raster file.
center -- Kernel center cell; options include 'center', 'upper-left', 'upper-right', 'lower-left', 'lower-right'.
normalize -- Normalize kernel weights? This can reduce edge effects and lessen the impact of data gaps (nodata), but it is not suitable when the kernel weights sum to zero.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--weights='{}'".format(weights))
args.append("--output='{}'".format(output))
args.append("--center={}".format(center))
if normalize: args.append("--normalize")
return self.run_tool('user_defined_weights_filter', args, callback) # returns 1 if error
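# A sketch of a custom-kernel call (hypothetical file names; `wbt` is an
# instance of this class). The weights file defines the kernel, so e.g. an
# emboss-like kernel stored in 'kernel.txt' could be applied with:
#
#   wbt.user_defined_weights_filter('image.tif', 'kernel.txt', 'embossed.tif',
#                                   center='center', normalize=False)
#
# normalize=False is appropriate here because emboss-style kernels sum to
# zero, which the normalization option is not suited to.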
############################################
# Image Processing Tools/Image Enhancement #
############################################
def balance_contrast_enhancement(self, i, output, band_mean=100.0, callback=None):
"""Performs a balance contrast enhancement on a colour-composite image of multispectral data.
Keyword arguments:
i -- Input colour composite image file.
output -- Output raster file.
band_mean -- Band mean value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--band_mean={}".format(band_mean))
return self.run_tool('balance_contrast_enhancement', args, callback) # returns 1 if error
def correct_vignetting(self, i, pp, output, focal_length=304.8, image_width=228.6, n=4.0, callback=None):
"""Corrects the darkening of images towards corners.
Keyword arguments:
i -- Input raster file.
pp -- Input principal point file.
output -- Output raster file.
focal_length -- Camera focal length, in millimeters.
image_width -- Distance between photograph edges, in millimeters.
n -- The 'n' parameter.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--pp='{}'".format(pp))
args.append("--output='{}'".format(output))
args.append("--focal_length={}".format(focal_length))
args.append("--image_width={}".format(image_width))
args.append("-n={}".format(n))
return self.run_tool('correct_vignetting', args, callback) # returns 1 if error
def direct_decorrelation_stretch(self, i, output, k=0.5, clip=1.0, callback=None):
"""Performs a direct decorrelation stretch enhancement on a colour-composite image of multispectral data.
Keyword arguments:
i -- Input colour composite image file.
output -- Output raster file.
k -- Achromatic factor (k) ranges between 0 (no effect) and 1 (full saturation stretch), although typical values range from 0.3 to 0.7.
clip -- Optional percent to clip the upper tail by during the stretch.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("-k={}".format(k))
args.append("--clip={}".format(clip))
return self.run_tool('direct_decorrelation_stretch', args, callback) # returns 1 if error
def gamma_correction(self, i, output, gamma=0.5, callback=None):
"""Performs a gamma correction on an input images.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
gamma -- Gamma value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--gamma={}".format(gamma))
return self.run_tool('gamma_correction', args, callback) # returns 1 if error
def gaussian_contrast_stretch(self, i, output, num_tones=256, callback=None):
"""Performs a Gaussian contrast stretch on input images.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
num_tones -- Number of tones in the output image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--num_tones={}".format(num_tones))
return self.run_tool('gaussian_contrast_stretch', args, callback) # returns 1 if error
def histogram_equalization(self, i, output, num_tones=256, callback=None):
"""Performs a histogram equalization contrast enhancment on an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
num_tones -- Number of tones in the output image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--num_tones={}".format(num_tones))
return self.run_tool('histogram_equalization', args, callback) # returns 1 if error
def histogram_matching(self, i, histo_file, output, callback=None):
"""Alters the statistical distribution of a raster image matching it to a specified PDF.
Keyword arguments:
i -- Input raster file.
histo_file -- Input reference probability distribution function (pdf) text file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--histo_file='{}'".format(histo_file))
args.append("--output='{}'".format(output))
return self.run_tool('histogram_matching', args, callback) # returns 1 if error
def histogram_matching_two_images(self, input1, input2, output, callback=None):
"""This tool alters the cumulative distribution function of a raster image to that of another image.
Keyword arguments:
input1 -- Input raster file to modify.
input2 -- Input reference raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('histogram_matching_two_images', args, callback) # returns 1 if error
def min_max_contrast_stretch(self, i, output, min_val, max_val, num_tones=256, callback=None):
"""Performs a min-max contrast stretch on an input greytone image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
min_val -- Lower tail clip value.
max_val -- Upper tail clip value.
num_tones -- Number of tones in the output image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--min_val='{}'".format(min_val))
args.append("--max_val='{}'".format(max_val))
args.append("--num_tones={}".format(num_tones))
return self.run_tool('min_max_contrast_stretch', args, callback) # returns 1 if error
def panchromatic_sharpening(self, pan, output, red=None, green=None, blue=None, composite=None, method="brovey", callback=None):
"""Increases the spatial resolution of image data by combining multispectral bands with panchromatic data.
Keyword arguments:
pan -- Input panchromatic band file.
output -- Output colour composite file.
red -- Input red band image file. Optionally specified if colour-composite not specified.
green -- Input green band image file. Optionally specified if colour-composite not specified.
blue -- Input blue band image file. Optionally specified if colour-composite not specified.
composite -- Input colour-composite image file. Only used if individual bands are not specified.
method -- Options include 'brovey' (default) and 'ihs'.
callback -- Custom function for handling tool text outputs.
"""
args = []
if red is not None: args.append("--red='{}'".format(red))
if green is not None: args.append("--green='{}'".format(green))
if blue is not None: args.append("--blue='{}'".format(blue))
if composite is not None: args.append("--composite='{}'".format(composite))
args.append("--pan='{}'".format(pan))
args.append("--output='{}'".format(output))
args.append("--method={}".format(method))
return self.run_tool('panchromatic_sharpening', args, callback) # returns 1 if error
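# The band and composite inputs are mutually exclusive, so there are two ways
# to call this method (hypothetical file names; `wbt` is an instance of this
# class):
#
#   # supply the three bands individually...
#   wbt.panchromatic_sharpening('pan.tif', 'sharpened.tif',
#                               red='red.tif', green='green.tif',
#                               blue='blue.tif', method='brovey')
#   # ...or supply a single colour composite instead
#   wbt.panchromatic_sharpening('pan.tif', 'sharpened.tif',
#                               composite='rgb.tif', method='ihs')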
def percentage_contrast_stretch(self, i, output, clip=1.0, tail="both", num_tones=256, callback=None):
"""Performs a percentage linear contrast stretch on input images.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
clip -- Optional amount to clip the distribution tails by, in percent.
tail -- Specifies which tails to clip; options include 'upper', 'lower', and 'both' (default is 'both').
num_tones -- Number of tones in the output image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--clip={}".format(clip))
args.append("--tail={}".format(tail))
args.append("--num_tones={}".format(num_tones))
return self.run_tool('percentage_contrast_stretch', args, callback) # returns 1 if error
def sigmoidal_contrast_stretch(self, i, output, cutoff=0.0, gain=1.0, num_tones=256, callback=None):
"""Performs a sigmoidal contrast stretch on input images.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
cutoff -- Cutoff value between 0.0 and 0.95.
gain -- Gain value.
num_tones -- Number of tones in the output image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--cutoff={}".format(cutoff))
args.append("--gain={}".format(gain))
args.append("--num_tones={}".format(num_tones))
return self.run_tool('sigmoidal_contrast_stretch', args, callback) # returns 1 if error
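# For example, a gentle S-shaped stretch (hypothetical file names; `wbt` is an
# instance of this class):
#
#   wbt.sigmoidal_contrast_stretch('image.tif', 'stretched.tif',
#                                  cutoff=0.4, gain=2.0, num_tones=256)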
def standard_deviation_contrast_stretch(self, i, output, stdev=2.0, num_tones=256, callback=None):
"""Performs a standard-deviation contrast stretch on input images.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
stdev -- Standard deviation clip value.
num_tones -- Number of tones in the output image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--stdev={}".format(stdev))
args.append("--num_tones={}".format(num_tones))
return self.run_tool('standard_deviation_contrast_stretch', args, callback) # returns 1 if error
###############
# LiDAR Tools #
###############
def classify_overlap_points(self, i, output, resolution=2.0, filter=False, callback=None):
"""Classifies or filters LAS points in regions of overlapping flight lines.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
resolution -- The size of the square area used to evaluate nearby points in the LiDAR data.
filter -- Filter out points from overlapping flightlines? If false, overlaps will simply be classified.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--resolution={}".format(resolution))
if filter: args.append("--filter")
return self.run_tool('classify_overlap_points', args, callback) # returns 1 if error
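# The filter flag switches the tool between two modes (hypothetical file
# names; `wbt` is an instance of this class):
#
#   # mode 1: keep all points, but classify those in overlap regions
#   wbt.classify_overlap_points('tile.las', 'classified.las', resolution=2.0)
#   # mode 2: drop overlap points entirely
#   wbt.classify_overlap_points('tile.las', 'filtered.las',
#                               resolution=2.0, filter=True)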
def clip_lidar_to_polygon(self, i, polygons, output, callback=None):
"""Clips a LiDAR point cloud to a vector polygon or polygons.
Keyword arguments:
i -- Input LiDAR file.
polygons -- Input vector polygons file.
output -- Output LiDAR file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--polygons='{}'".format(polygons))
args.append("--output='{}'".format(output))
return self.run_tool('clip_lidar_to_polygon', args, callback) # returns 1 if error
def erase_polygon_from_lidar(self, i, polygons, output, callback=None):
"""Erases (cuts out) a vector polygon or polygons from a LiDAR point cloud.
Keyword arguments:
i -- Input LiDAR file.
polygons -- Input vector polygons file.
output -- Output LiDAR file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--polygons='{}'".format(polygons))
args.append("--output='{}'".format(output))
return self.run_tool('erase_polygon_from_lidar', args, callback) # returns 1 if error
def filter_lidar_classes(self, i, output, exclude_cls=None, callback=None):
"""Removes points in a LAS file with certain specified class values.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
exclude_cls -- Optionally exclude classes from interpolation; valid class values range from 0 to 18, based on LAS specifications. Example, --exclude_cls='3,4,5,6,7,18'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if exclude_cls is not None: args.append("--exclude_cls='{}'".format(exclude_cls))
return self.run_tool('filter_lidar_classes', args, callback) # returns 1 if error
def filter_lidar_scan_angles(self, i, output, threshold, callback=None):
"""Removes points in a LAS file with scan angles greater than a threshold.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
threshold -- Scan angle threshold.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--threshold='{}'".format(threshold))
return self.run_tool('filter_lidar_scan_angles', args, callback) # returns 1 if error
def find_flightline_edge_points(self, i, output, callback=None):
"""Identifies points along a flightline's edge in a LAS file.
Keyword arguments:
i -- Input LiDAR file.
output -- Output file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('find_flightline_edge_points', args, callback) # returns 1 if error
def flightline_overlap(self, i=None, output=None, resolution=1.0, callback=None):
"""Reads a LiDAR (LAS) point file and outputs a raster containing the number of overlapping flight lines in each grid cell.
Keyword arguments:
i -- Input LiDAR file.
output -- Output file.
resolution -- Output raster's grid resolution.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--resolution={}".format(resolution))
return self.run_tool('flightline_overlap', args, callback) # returns 1 if error
def las_to_ascii(self, inputs, callback=None):
"""Converts one or more LAS files into ASCII text files.
Keyword arguments:
inputs -- Input LiDAR files.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
return self.run_tool('las_to_ascii', args, callback) # returns 1 if error
def las_to_multipoint_shapefile(self, i=None, callback=None):
"""Converts one or more LAS files into MultipointZ vector Shapefiles. When the input parameter is not specified, the tool grids all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
return self.run_tool('las_to_multipoint_shapefile', args, callback) # returns 1 if error
def las_to_shapefile(self, i=None, callback=None):
"""Converts one or more LAS files into a vector Shapefile of POINT ShapeType.
Keyword arguments:
i -- Input LiDAR file.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
return self.run_tool('las_to_shapefile', args, callback) # returns 1 if error
def lidar_block_maximum(self, i=None, output=None, resolution=1.0, callback=None):
"""Creates a block-maximum raster from an input LAS file. When the input/output parameters are not specified, the tool grids all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file.
output -- Output file.
resolution -- Output raster's grid resolution.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--resolution={}".format(resolution))
return self.run_tool('lidar_block_maximum', args, callback) # returns 1 if error
def lidar_block_minimum(self, i=None, output=None, resolution=1.0, callback=None):
"""Creates a block-minimum raster from an input LAS file. When the input/output parameters are not specified, the tool grids all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file.
output -- Output file.
resolution -- Output raster's grid resolution.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--resolution={}".format(resolution))
return self.run_tool('lidar_block_minimum', args, callback) # returns 1 if error
def lidar_classify_subset(self, base, subset, output, subset_class, nonsubset_class=None, callback=None):
"""Classifies the values in one LiDAR point cloud that correpond with points in a subset cloud.
Keyword arguments:
base -- Input base LiDAR file.
subset -- Input subset LiDAR file.
output -- Output LiDAR file.
subset_class -- Subset point class value (must be 0-18; see LAS specifications).
nonsubset_class -- Non-subset point class value (must be 0-18; see LAS specifications).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--base='{}'".format(base))
args.append("--subset='{}'".format(subset))
args.append("--output='{}'".format(output))
args.append("--subset_class='{}'".format(subset_class))
if nonsubset_class is not None: args.append("--nonsubset_class='{}'".format(nonsubset_class))
return self.run_tool('lidar_classify_subset', args, callback) # returns 1 if error
def lidar_colourize(self, in_lidar, in_image, output, callback=None):
"""Adds the red-green-blue colour fields of a LiDAR (LAS) file based on an input image.
Keyword arguments:
in_lidar -- Input LiDAR file.
in_image -- Input colour image file.
output -- Output LiDAR file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--in_lidar='{}'".format(in_lidar))
args.append("--in_image='{}'".format(in_image))
args.append("--output='{}'".format(output))
return self.run_tool('lidar_colourize', args, callback) # returns 1 if error
def lidar_construct_vector_tin(self, i=None, output=None, returns="all", exclude_cls=None, minz=None, maxz=None, callback=None):
"""Creates a vector triangular irregular network (TIN) fitted to LiDAR points.
Keyword arguments:
i -- Input LiDAR file (including extension).
output -- Output vector TIN file (including extension).
returns -- Point return types to include; options are 'all' (default), 'last', 'first'.
exclude_cls -- Optionally exclude classes from interpolation; valid class values range from 0 to 18, based on LAS specifications. Example, --exclude_cls='3,4,5,6,7,18'.
minz -- Optional minimum elevation for inclusion in interpolation.
maxz -- Optional maximum elevation for inclusion in interpolation.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--returns={}".format(returns))
if exclude_cls is not None: args.append("--exclude_cls='{}'".format(exclude_cls))
if minz is not None: args.append("--minz='{}'".format(minz))
if maxz is not None: args.append("--maxz='{}'".format(maxz))
return self.run_tool('lidar_construct_vector_tin', args, callback) # returns 1 if error
def lidar_elevation_slice(self, i, output, minz=None, maxz=None, cls=False, inclassval=2, outclassval=1, callback=None):
"""Outputs all of the points within a LiDAR (LAS) point file that lie between a specified elevation range.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
minz -- Minimum elevation value (optional).
maxz -- Maximum elevation value (optional).
cls -- Optional boolean flag indicating whether points outside the range should be retained in output but reclassified.
inclassval -- Optional parameter specifying the class value assigned to points within the slice.
outclassval -- Optional parameter specifying the class value assigned to points outside the slice.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if minz is not None: args.append("--minz='{}'".format(minz))
if maxz is not None: args.append("--maxz='{}'".format(maxz))
if cls: args.append("--class")
args.append("--inclassval={}".format(inclassval))
args.append("--outclassval={}".format(outclassval))
return self.run_tool('lidar_elevation_slice', args, callback) # returns 1 if error
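# For example, to reclassify rather than remove points outside a 100-150 m
# elevation band (hypothetical file names; `wbt` is an instance of this
# class):
#
#   wbt.lidar_elevation_slice('cloud.las', 'sliced.las',
#                             minz=100.0, maxz=150.0, cls=True,
#                             inclassval=2, outclassval=1)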
def lidar_ground_point_filter(self, i, output, radius=2.0, min_neighbours=0, slope_threshold=45.0, height_threshold=1.0, classify=True, slope_norm=True, height_above_ground=False, callback=None):
"""Identifies ground points within LiDAR dataset using a slope-based method.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
radius -- Search Radius.
min_neighbours -- The minimum number of neighbouring points within search areas. If fewer points than this threshold are identified during the fixed-radius search, a subsequent kNN search is performed to identify the k nearest neighbours.
slope_threshold -- Maximum inter-point slope to be considered an off-terrain point.
height_threshold -- Inter-point height difference to be considered an off-terrain point.
classify -- Classify points as ground (2) or off-ground (1).
slope_norm -- Perform initial ground slope normalization?
height_above_ground -- Transform output to height above average ground elevation?
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--radius={}".format(radius))
args.append("--min_neighbours={}".format(min_neighbours))
args.append("--slope_threshold={}".format(slope_threshold))
args.append("--height_threshold={}".format(height_threshold))
if classify: args.append("--classify")
if slope_norm: args.append("--slope_norm")
if height_above_ground: args.append("--height_above_ground")
return self.run_tool('lidar_ground_point_filter', args, callback) # returns 1 if error
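# A minimal sketch of ground classification combined with normalization to
# height above ground (hypothetical file names; `wbt` is an instance of this
# class):
#
#   wbt.lidar_ground_point_filter('cloud.las', 'ground.las',
#                                 radius=2.0, slope_threshold=45.0,
#                                 height_threshold=1.0, classify=True,
#                                 height_above_ground=True)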
def lidar_hex_binning(self, i, output, width, orientation="horizontal", callback=None):
"""Hex-bins a set of LiDAR points.
Keyword arguments:
i -- Input base file.
output -- Output vector polygon file.
width -- The grid cell width.
orientation -- Grid Orientation, 'horizontal' or 'vertical'.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--width='{}'".format(width))
args.append("--orientation={}".format(orientation))
return self.run_tool('lidar_hex_binning', args, callback) # returns 1 if error
def lidar_hillshade(self, i, output, azimuth=315.0, altitude=30.0, radius=1.0, callback=None):
"""Calculates a hillshade value for points within a LAS file and stores these data in the RGB field.
Keyword arguments:
i -- Input LiDAR file.
output -- Output file.
azimuth -- Illumination source azimuth in degrees.
altitude -- Illumination source altitude in degrees.
radius -- Search Radius.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--azimuth={}".format(azimuth))
args.append("--altitude={}".format(altitude))
args.append("--radius={}".format(radius))
return self.run_tool('lidar_hillshade', args, callback) # returns 1 if error
def lidar_histogram(self, i, output, parameter="elevation", clip=1.0, callback=None):
"""Creates a histogram of LiDAR data.
Keyword arguments:
i -- Input LiDAR file.
output -- Output HTML file (default name will be based on input file if unspecified).
parameter -- Parameter; options are 'elevation' (default), 'intensity', 'scan angle', 'class'.
clip -- Amount to clip distribution tails (in percent).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--parameter={}".format(parameter))
args.append("--clip={}".format(clip))
return self.run_tool('lidar_histogram', args, callback) # returns 1 if error
def lidar_idw_interpolation(self, i=None, output=None, parameter="elevation", returns="all", resolution=1.0, weight=1.0, radius=2.5, exclude_cls=None, minz=None, maxz=None, callback=None):
"""Interpolates LAS files using an inverse-distance weighted (IDW) scheme. When the input/output parameters are not specified, the tool interpolates all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file (including extension).
output -- Output raster file (including extension).
parameter -- Interpolation parameter; options are 'elevation' (default), 'intensity', 'class', 'return_number', 'number_of_returns', 'scan angle', 'rgb', 'user data'.
returns -- Point return types to include; options are 'all' (default), 'last', 'first'.
resolution -- Output raster's grid resolution.
weight -- IDW weight value.
radius -- Search Radius.
exclude_cls -- Optionally exclude classes from interpolation; valid class values range from 0 to 18, based on LAS specifications. Example, --exclude_cls='3,4,5,6,7,18'.
minz -- Optional minimum elevation for inclusion in interpolation.
maxz -- Optional maximum elevation for inclusion in interpolation.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--parameter={}".format(parameter))
args.append("--returns={}".format(returns))
args.append("--resolution={}".format(resolution))
args.append("--weight={}".format(weight))
args.append("--radius={}".format(radius))
if exclude_cls is not None: args.append("--exclude_cls='{}'".format(exclude_cls))
if minz is not None: args.append("--minz='{}'".format(minz))
if maxz is not None: args.append("--maxz='{}'".format(maxz))
return self.run_tool('lidar_idw_interpolation', args, callback) # returns 1 if error
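# A typical DEM-style call that grids only last-return points, excluding
# vegetation and building classes (hypothetical file names; `wbt` is an
# instance of this class):
#
#   wbt.lidar_idw_interpolation('cloud.las', 'dem.tif',
#                               parameter='elevation', returns='last',
#                               resolution=1.0, weight=2.0, radius=2.5,
#                               exclude_cls='3,4,5,6,7,18')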
def lidar_info(self, i, output=None, vlr=False, geokeys=False, callback=None):
"""Prints information about a LiDAR (LAS) dataset, including header, point return frequency, and classification data and information about the variable length records (VLRs) and geokeys.
Keyword arguments:
i -- Input LiDAR file.
output -- Output HTML file for summary report.
vlr -- Flag indicating whether or not to print the variable length records (VLRs).
geokeys -- Flag indicating whether or not to print the geokeys.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
if vlr: args.append("--vlr")
if geokeys: args.append("--geokeys")
return self.run_tool('lidar_info', args, callback) # returns 1 if error
def lidar_join(self, inputs, output, callback=None):
"""Joins multiple LiDAR (LAS) files into a single LAS file.
Keyword arguments:
inputs -- Input LiDAR files.
output -- Output LiDAR file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
return self.run_tool('lidar_join', args, callback) # returns 1 if error
def lidar_kappa_index(self, input1, input2, output, class_accuracy, resolution=1.0, callback=None):
"""Performs a kappa index of agreement (KIA) analysis on the classifications of two LAS files.
Keyword arguments:
input1 -- Input LiDAR classification file.
input2 -- Input LiDAR reference file.
output -- Output HTML file.
class_accuracy -- Output classification accuracy raster file.
resolution -- Output raster's grid resolution.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
args.append("--class_accuracy='{}'".format(class_accuracy))
args.append("--resolution={}".format(resolution))
return self.run_tool('lidar_kappa_index', args, callback) # returns 1 if error
def lidar_nearest_neighbour_gridding(self, i=None, output=None, parameter="elevation", returns="all", resolution=1.0, radius=2.5, exclude_cls=None, minz=None, maxz=None, callback=None):
"""Grids LAS files using nearest-neighbour scheme. When the input/output parameters are not specified, the tool grids all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file (including extension).
output -- Output raster file (including extension).
parameter -- Interpolation parameter; options are 'elevation' (default), 'intensity', 'class', 'return_number', 'number_of_returns', 'scan angle', 'rgb', 'user data'.
returns -- Point return types to include; options are 'all' (default), 'last', 'first'.
resolution -- Output raster's grid resolution.
radius -- Search Radius.
exclude_cls -- Optionally exclude classes from interpolation; valid class values range from 0 to 18, based on LAS specifications. Example, --exclude_cls='3,4,5,6,7,18'.
minz -- Optional minimum elevation for inclusion in interpolation.
maxz -- Optional maximum elevation for inclusion in interpolation.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--parameter={}".format(parameter))
args.append("--returns={}".format(returns))
args.append("--resolution={}".format(resolution))
args.append("--radius={}".format(radius))
if exclude_cls is not None: args.append("--exclude_cls='{}'".format(exclude_cls))
if minz is not None: args.append("--minz='{}'".format(minz))
if maxz is not None: args.append("--maxz='{}'".format(maxz))
return self.run_tool('lidar_nearest_neighbour_gridding', args, callback) # returns 1 if error
def lidar_point_density(self, i=None, output=None, returns="all", resolution=1.0, radius=2.5, exclude_cls=None, minz=None, maxz=None, callback=None):
"""Calculates the spatial pattern of point density for a LiDAR data set. When the input/output parameters are not specified, the tool grids all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file (including extension).
output -- Output raster file (including extension).
returns -- Point return types to include; options are 'all' (default), 'last', 'first'.
resolution -- Output raster's grid resolution.
radius -- Search radius.
exclude_cls -- Optionally exclude classes from interpolation; valid class values range from 0 to 18, based on LAS specifications. Example, --exclude_cls='3,4,5,6,7,18'.
minz -- Optional minimum elevation for inclusion in interpolation.
maxz -- Optional maximum elevation for inclusion in interpolation.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--returns={}".format(returns))
args.append("--resolution={}".format(resolution))
args.append("--radius={}".format(radius))
if exclude_cls is not None: args.append("--exclude_cls='{}'".format(exclude_cls))
if minz is not None: args.append("--minz='{}'".format(minz))
if maxz is not None: args.append("--maxz='{}'".format(maxz))
return self.run_tool('lidar_point_density', args, callback) # returns 1 if error
def lidar_point_stats(self, i=None, resolution=1.0, num_points=True, num_pulses=False, avg_points_per_pulse=True, z_range=False, intensity_range=False, predom_class=False, callback=None):
"""Creates several rasters summarizing the distribution of LAS point data. When the input/output parameters are not specified, the tool works on all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file.
resolution -- Output raster's grid resolution.
num_points -- Flag indicating whether or not to output the number of points (returns) raster.
num_pulses -- Flag indicating whether or not to output the number of pulses raster.
avg_points_per_pulse -- Flag indicating whether or not to output the average number of points (returns) per pulse raster.
z_range -- Flag indicating whether or not to output the elevation range raster.
intensity_range -- Flag indicating whether or not to output the intensity range raster.
predom_class -- Flag indicating whether or not to output the predominant classification raster.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
args.append("--resolution={}".format(resolution))
if num_points: args.append("--num_points")
if num_pulses: args.append("--num_pulses")
if avg_points_per_pulse: args.append("--avg_points_per_pulse")
if z_range: args.append("--z_range")
if intensity_range: args.append("--intensity_range")
if predom_class: args.append("--predom_class")
return self.run_tool('lidar_point_stats', args, callback) # returns 1 if error
def lidar_ransac_planes(self, i, output, radius=2.0, num_iter=50, num_samples=5, threshold=0.35, model_size=8, classify=False, callback=None):
"""Removes outliers (high and low points) in a LiDAR point cloud.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
radius -- Search Radius.
num_iter -- Number of iterations.
num_samples -- Number of sample points on which to build the model.
threshold -- Threshold used to determine inlier points.
model_size -- Acceptable model size.
classify -- Classify points as ground (2) or off-ground (1).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--radius={}".format(radius))
args.append("--num_iter={}".format(num_iter))
args.append("--num_samples={}".format(num_samples))
args.append("--threshold={}".format(threshold))
args.append("--model_size={}".format(model_size))
if classify: args.append("--classify")
return self.run_tool('lidar_ransac_planes', args, callback) # returns 1 if error
def lidar_remove_duplicates(self, i, output, include_z=False, callback=None):
"""Removes duplicate points from a LiDAR data set.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
include_z -- Include z-values in point comparison?
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if include_z: args.append("--include_z")
return self.run_tool('lidar_remove_duplicates', args, callback) # returns 1 if error
def lidar_remove_outliers(self, i, output, radius=2.0, elev_diff=50.0, use_median=False, classify=True, callback=None):
"""Removes outliers (high and low points) in a LiDAR point cloud.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
radius -- Search Radius.
elev_diff -- Max. elevation difference.
use_median -- Optional flag indicating whether to use the difference from median elevation rather than mean.
classify -- Classify points as ground (2) or off-ground (1).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--radius={}".format(radius))
args.append("--elev_diff={}".format(elev_diff))
if use_median: args.append("--use_median")
if classify: args.append("--classify")
return self.run_tool('lidar_remove_outliers', args, callback) # returns 1 if error
def lidar_segmentation(self, i, output, radius=5.0, norm_diff=10.0, maxzdiff=1.0, classes=False, min_size=1, callback=None):
"""Segments a LiDAR point cloud based on normal vectors.
Keyword arguments:
i -- Input LiDAR file.
output -- Output file.
radius -- Search Radius.
norm_diff -- Maximum difference in normal vectors, in degrees.
maxzdiff -- Maximum difference in elevation (z units) between neighbouring points of the same segment.
classes -- Segments don't cross class boundaries.
min_size -- Minimum segment size (number of points).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--radius={}".format(radius))
args.append("--norm_diff={}".format(norm_diff))
args.append("--maxzdiff={}".format(maxzdiff))
if classes: args.append("--classes")
args.append("--min_size={}".format(min_size))
return self.run_tool('lidar_segmentation', args, callback) # returns 1 if error
def lidar_segmentation_based_filter(self, i, output, radius=5.0, norm_diff=2.0, maxzdiff=1.0, classify=False, callback=None):
"""Identifies ground points within LiDAR point clouds using a segmentation based approach.
Keyword arguments:
i -- Input LiDAR file.
output -- Output file.
radius -- Search Radius.
norm_diff -- Maximum difference in normal vectors, in degrees.
maxzdiff -- Maximum difference in elevation (z units) between neighbouring points of the same segment.
classify -- Classify points as ground (2) or off-ground (1).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--radius={}".format(radius))
args.append("--norm_diff={}".format(norm_diff))
args.append("--maxzdiff={}".format(maxzdiff))
if classify: args.append("--classify")
return self.run_tool('lidar_segmentation_based_filter', args, callback) # returns 1 if error
def lidar_thin(self, i, output, resolution=2.0, method="lowest", save_filtered=False, callback=None):
"""Thins a LiDAR point cloud, reducing point density.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
resolution -- The size of the square area used to evaluate nearby points in the LiDAR data.
method -- Point selection method; options are 'first', 'last', 'lowest' (default), 'highest', 'nearest'.
save_filtered -- Save filtered points to separate file?
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--resolution={}".format(resolution))
args.append("--method={}".format(method))
if save_filtered: args.append("--save_filtered")
return self.run_tool('lidar_thin', args, callback) # returns 1 if error
def lidar_thin_high_density(self, i, output, density, resolution=1.0, save_filtered=False, callback=None):
"""Thins points from high density areas within a LiDAR point cloud.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
density -- Max. point density (points / m^3).
resolution -- Resolution of the grid used to evaluate local point density.
save_filtered -- Save filtered points to separate file?
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--resolution={}".format(resolution))
args.append("--density='{}'".format(density))
if save_filtered: args.append("--save_filtered")
return self.run_tool('lidar_thin_high_density', args, callback) # returns 1 if error
def lidar_tile(self, i, width=1000.0, height=1000.0, origin_x=0.0, origin_y=0.0, min_points=2, callback=None):
"""Tiles a LiDAR LAS file into multiple LAS files.
Keyword arguments:
i -- Input LiDAR file.
width -- Width of tiles in the X dimension; default 1000.0.
height -- Height of tiles in the Y dimension.
origin_x -- Origin point X coordinate for tile grid.
origin_y -- Origin point Y coordinate for tile grid.
min_points -- Minimum number of points contained in a tile for it to be saved.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--width={}".format(width))
args.append("--height={}".format(height))
args.append("--origin_x={}".format(origin_x))
args.append("--origin_y={}".format(origin_y))
args.append("--min_points={}".format(min_points))
return self.run_tool('lidar_tile', args, callback) # returns 1 if error
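# For example, to cut a large cloud into 500 m x 500 m tiles, discarding
# near-empty tiles (hypothetical file names; `wbt` is an instance of this
# class):
#
#   wbt.lidar_tile('survey.las', width=500.0, height=500.0,
#                  origin_x=0.0, origin_y=0.0, min_points=100)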
def lidar_tile_footprint(self, output, i=None, hull=False, callback=None):
"""Creates a vector polygon of the convex hull of a LiDAR point cloud. When the input/output parameters are not specified, the tool works with all LAS files contained within the working directory.
Keyword arguments:
i -- Input LiDAR file.
output -- Output vector polygon file.
hull -- Identify the convex hull around points.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if hull: args.append("--hull")
return self.run_tool('lidar_tile_footprint', args, callback) # returns 1 if error
def lidar_tin_gridding(self, i=None, output=None, parameter="elevation", returns="all", resolution=1.0, exclude_cls=None, minz=None, maxz=None, max_triangle_edge_length=None, callback=None):
"""Creates a raster grid based on a Delaunay triangular irregular network (TIN) fitted to LiDAR points.
Keyword arguments:
i -- Input LiDAR file (including extension).
output -- Output raster file (including extension).
parameter -- Interpolation parameter; options are 'elevation' (default), 'intensity', 'class', 'return_number', 'number_of_returns', 'scan angle', 'rgb', 'user data'.
returns -- Point return types to include; options are 'all' (default), 'last', 'first'.
resolution -- Output raster's grid resolution.
exclude_cls -- Optionally exclude classes from interpolation; valid class values range from 0 to 18, based on LAS specifications. Example, --exclude_cls='3,4,5,6,7,18'.
minz -- Optional minimum elevation for inclusion in interpolation.
maxz -- Optional maximum elevation for inclusion in interpolation.
max_triangle_edge_length -- Optional maximum triangle edge length; triangles larger than this size will not be gridded.
callback -- Custom function for handling tool text outputs.
"""
args = []
if i is not None: args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
args.append("--parameter={}".format(parameter))
args.append("--returns={}".format(returns))
args.append("--resolution={}".format(resolution))
if exclude_cls is not None: args.append("--exclude_cls='{}'".format(exclude_cls))
if minz is not None: args.append("--minz='{}'".format(minz))
if maxz is not None: args.append("--maxz='{}'".format(maxz))
if max_triangle_edge_length is not None: args.append("--max_triangle_edge_length='{}'".format(max_triangle_edge_length))
return self.run_tool('lidar_tin_gridding', args, callback) # returns 1 if error
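# A sketch of TIN gridding with long interpolation triangles suppressed, which
# limits artefacts across data gaps (hypothetical file names; `wbt` is an
# instance of this class):
#
#   wbt.lidar_tin_gridding('cloud.las', 'surface.tif',
#                          parameter='elevation', returns='first',
#                          resolution=1.0, max_triangle_edge_length=5.0)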
def lidar_tophat_transform(self, i, output, radius=1.0, callback=None):
"""Performs a white top-hat transform on a Lidar dataset; as an estimate of height above ground, this is useful for modelling the vegetation canopy.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
radius -- Search Radius.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--radius={}".format(radius))
return self.run_tool('lidar_tophat_transform', args, callback) # returns 1 if error
def normal_vectors(self, i, output, radius=1.0, callback=None):
"""Calculates normal vectors for points within a LAS file and stores these data (XYZ vector components) in the RGB field.
Keyword arguments:
i -- Input LiDAR file.
output -- Output LiDAR file.
radius -- Search Radius.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--radius={}".format(radius))
return self.run_tool('normal_vectors', args, callback) # returns 1 if error
def select_tiles_by_polygon(self, indir, outdir, polygons, callback=None):
"""Copies LiDAR tiles overlapping with a polygon into an output directory.
Keyword arguments:
indir -- Input LAS file source directory.
outdir -- Output directory into which LAS files within the polygon are copied.
polygons -- Input vector polygons file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--indir='{}'".format(indir))
args.append("--outdir='{}'".format(outdir))
args.append("--polygons='{}'".format(polygons))
return self.run_tool('select_tiles_by_polygon', args, callback) # returns 1 if error
########################
# Math and Stats Tools #
########################
def And(self, input1, input2, output, callback=None):
"""Performs a logical AND operator on two Boolean raster images.
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('and', args, callback) # returns 1 if error
def Not(self, input1, input2, output, callback=None):
"""Performs a logical NOT operator on two Boolean raster images.
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('not', args, callback) # returns 1 if error
def Or(self, input1, input2, output, callback=None):
"""Performs a logical OR operator on two Boolean raster images.
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('or', args, callback) # returns 1 if error
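# Example (hypothetical rasters; a minimal sketch): And/Not/Or combine
# Boolean rasters cell by cell, e.g. flagging cells that are steep OR wet:
#   wbt.Or("steep.tif", "wet.tif", "steep_or_wet.tif")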
def absolute_value(self, i, output, callback=None):
"""Calculates the absolute value of every cell in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('absolute_value', args, callback) # returns 1 if error
def add(self, input1, input2, output, callback=None):
"""Performs an addition operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('add', args, callback) # returns 1 if error
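# Example (hypothetical DEM; a minimal sketch): either operand of the
# arithmetic tools may be a constant, so offsetting a DEM by 10 m is simply:
#   wbt.add("dem.tif", 10.0, "dem_plus10.tif")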
def anova(self, i, features, output, callback=None):
"""Performs an analysis of variance (ANOVA) test on a raster dataset.
Keyword arguments:
i -- Input raster file.
features -- Feature definition (or class) raster.
output -- Output HTML file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--features='{}'".format(features))
args.append("--output='{}'".format(output))
return self.run_tool('anova', args, callback) # returns 1 if error
def arc_cos(self, i, output, callback=None):
"""Returns the inverse cosine (arccos) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('arc_cos', args, callback) # returns 1 if error
def arc_sin(self, i, output, callback=None):
"""Returns the inverse sine (arcsin) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('arc_sin', args, callback) # returns 1 if error
def arc_tan(self, i, output, callback=None):
"""Returns the inverse tangent (arctan) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('arc_tan', args, callback) # returns 1 if error
def arcosh(self, i, output, callback=None):
"""Returns the inverse hyperbolic cosine (arcosh) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('arcosh', args, callback) # returns 1 if error
def arsinh(self, i, output, callback=None):
"""Returns the inverse hyperbolic sine (arsinh) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('arsinh', args, callback) # returns 1 if error
def artanh(self, i, output, callback=None):
"""Returns the inverse hyperbolic tangent (arctanh) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('artanh', args, callback) # returns 1 if error
def atan2(self, input_y, input_x, output, callback=None):
"""Returns the 2-argument inverse tangent (atan2).
Keyword arguments:
input_y -- Input y raster file or constant value (rise).
input_x -- Input x raster file or constant value (run).
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input_y='{}'".format(input_y))
args.append("--input_x='{}'".format(input_x))
args.append("--output='{}'".format(output))
return self.run_tool('atan2', args, callback) # returns 1 if error
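# Example (hypothetical gradient rasters; a minimal sketch): atan2 derives an
# angle from rise/run components; the output is assumed here to be in radians,
# so chain to_degrees if degrees are needed.
#   wbt.atan2("rise.tif", "run.tif", "angle.tif")
#   wbt.to_degrees("angle.tif", "angle_deg.tif")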
def attribute_correlation(self, i, output=None, callback=None):
"""Performs a correlation analysis on attribute fields from a vector database.
Keyword arguments:
i -- Input vector file.
output -- Output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
return self.run_tool('attribute_correlation', args, callback) # returns 1 if error
def attribute_histogram(self, i, field, output, callback=None):
"""Creates a histogram for the field values of a vector's attribute table.
Keyword arguments:
i -- Input vector file.
field -- Input field name in attribute table.
output -- Output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field='{}'".format(field))
args.append("--output='{}'".format(output))
return self.run_tool('attribute_histogram', args, callback) # returns 1 if error
def attribute_scattergram(self, i, fieldx, fieldy, output, trendline=False, callback=None):
"""Creates a scattergram for two field values of a vector's attribute table.
Keyword arguments:
i -- Input vector file.
fieldx -- Input field name in attribute table for the x-axis.
fieldy -- Input field name in attribute table for the y-axis.
output -- Output HTML file (default name will be based on input file if unspecified).
trendline -- Draw the trendline.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--fieldx='{}'".format(fieldx))
args.append("--fieldy='{}'".format(fieldy))
args.append("--output='{}'".format(output))
if trendline: args.append("--trendline")
return self.run_tool('attribute_scattergram', args, callback) # returns 1 if error
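# Example (hypothetical shapefile and field names; a minimal sketch): plot
# two attribute-table fields against each other with a fitted trendline.
#   wbt.attribute_scattergram("parcels.shp", "AREA", "VALUE",
#                             "scattergram.html", trendline=True)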
def ceil(self, i, output, callback=None):
"""Returns the smallest (closest to negative infinity) value that is greater than or equal to the values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('ceil', args, callback) # returns 1 if error
def cos(self, i, output, callback=None):
"""Returns the cosine (cos) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('cos', args, callback) # returns 1 if error
def cosh(self, i, output, callback=None):
"""Returns the hyperbolic cosine (cosh) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('cosh', args, callback) # returns 1 if error
def crispness_index(self, i, output=None, callback=None):
"""Calculates the Crispness Index, which is used to quantify how crisp (or conversely how fuzzy) a probability image is.
Keyword arguments:
i -- Input raster file.
output -- Optional output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
if output is not None: args.append("--output='{}'".format(output))
return self.run_tool('crispness_index', args, callback) # returns 1 if error
def cross_tabulation(self, input1, input2, output, callback=None):
"""Performs a cross-tabulation on two categorical images.
Keyword arguments:
input1 -- Input raster file 1.
input2 -- Input raster file 2.
output -- Output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('cross_tabulation', args, callback) # returns 1 if error
def cumulative_distribution(self, i, output, callback=None):
"""Converts a raster image to its cumulative distribution function.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('cumulative_distribution', args, callback) # returns 1 if error
def decrement(self, i, output, callback=None):
"""Decreases the values of each grid cell in an input raster by 1.0 (see also InPlaceSubtract).
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('decrement', args, callback) # returns 1 if error
def divide(self, input1, input2, output, callback=None):
"""Performs a division operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('divide', args, callback) # returns 1 if error
def equal_to(self, input1, input2, output, callback=None):
"""Performs a equal-to comparison operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('equal_to', args, callback) # returns 1 if error
def exp(self, i, output, callback=None):
"""Returns the exponential (base e) of values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('exp', args, callback) # returns 1 if error
def exp2(self, i, output, callback=None):
"""Returns the exponential (base 2) of values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('exp2', args, callback) # returns 1 if error
def floor(self, i, output, callback=None):
"""Returns the largest (closest to positive infinity) value that is less than or equal to the values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('floor', args, callback) # returns 1 if error
def greater_than(self, input1, input2, output, incl_equals=False, callback=None):
"""Performs a greater-than comparison operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
incl_equals -- Perform a greater-than-or-equal-to operation.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
if incl_equals: args.append("--incl_equals")
return self.run_tool('greater_than', args, callback) # returns 1 if error
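# Example (hypothetical DEM; a minimal sketch): the comparison tools emit
# Boolean rasters, so thresholding elevation at 1000 m is:
#   wbt.greater_than("dem.tif", 1000.0, "above_1000.tif", incl_equals=True)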
def image_autocorrelation(self, inputs, output, contiguity="Rook", callback=None):
"""Performs Moran's I analysis on two or more input images.
Keyword arguments:
inputs -- Input raster files.
contiguity -- Contiguity type.
output -- Output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--contiguity={}".format(contiguity))
args.append("--output='{}'".format(output))
return self.run_tool('image_autocorrelation', args, callback) # returns 1 if error
def image_correlation(self, inputs, output=None, callback=None):
"""Performs image correlation on two or more input images.
Keyword arguments:
inputs -- Input raster files.
output -- Output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
if output is not None: args.append("--output='{}'".format(output))
return self.run_tool('image_correlation', args, callback) # returns 1 if error
def image_regression(self, input1, input2, output, out_residuals=None, standardize=False, scattergram=False, num_samples=1000, callback=None):
"""Performs image regression analysis on two input images.
Keyword arguments:
input1 -- Input raster file (independent variable, X).
input2 -- Input raster file (dependent variable, Y).
output -- Output HTML file for regression summary report.
out_residuals -- Output raster regression residual file.
standardize -- Optional flag indicating whether to standardize the residuals map.
scattergram -- Optional flag indicating whether to output a scattergram.
num_samples -- Number of samples used to create scattergram.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
if out_residuals is not None: args.append("--out_residuals='{}'".format(out_residuals))
if standardize: args.append("--standardize")
if scattergram: args.append("--scattergram")
args.append("--num_samples={}".format(num_samples))
return self.run_tool('image_regression', args, callback) # returns 1 if error
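# Example (hypothetical band rasters; a minimal sketch): regress one band on
# another and keep a standardized residuals raster for inspection.
#   wbt.image_regression("band4.tif", "band5.tif", "regression.html",
#                        out_residuals="residuals.tif", standardize=True)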
def in_place_add(self, input1, input2, callback=None):
"""Performs an in-place addition operation (input1 += input2).
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file or constant value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
return self.run_tool('in_place_add', args, callback) # returns 1 if error
def in_place_divide(self, input1, input2, callback=None):
"""Performs an in-place division operation (input1 /= input2).
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file or constant value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
return self.run_tool('in_place_divide', args, callback) # returns 1 if error
def in_place_multiply(self, input1, input2, callback=None):
"""Performs an in-place multiplication operation (input1 *= input2).
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file or constant value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
return self.run_tool('in_place_multiply', args, callback) # returns 1 if error
def in_place_subtract(self, input1, input2, callback=None):
"""Performs an in-place subtraction operation (input1 -= input2).
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file or constant value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
return self.run_tool('in_place_subtract', args, callback) # returns 1 if error
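# Note on the in_place_* tools (a minimal sketch; file names hypothetical):
# they overwrite input1 on disk instead of writing a separate output, e.g.
#   wbt.in_place_subtract("dem_copy.tif", "geoid.tif")
# modifies dem_copy.tif itself, so operate on a copy if the original matters.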
def increment(self, i, output, callback=None):
"""Increases the values of each grid cell in an input raster by 1.0. (see also InPlaceAdd).
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('increment', args, callback) # returns 1 if error
def integer_division(self, input1, input2, output, callback=None):
"""Performs an integer division operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('integer_division', args, callback) # returns 1 if error
def is_no_data(self, i, output, callback=None):
"""Identifies NoData valued pixels in an image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('is_no_data', args, callback) # returns 1 if error
def kappa_index(self, input1, input2, output, callback=None):
"""Performs a kappa index of agreement (KIA) analysis on two categorical raster files.
Keyword arguments:
input1 -- Input classification raster file.
input2 -- Input reference raster file.
output -- Output HTML file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('kappa_index', args, callback) # returns 1 if error
def ks_test_for_normality(self, i, output, num_samples=None, callback=None):
"""Evaluates whether the values in a raster are normally distributed.
Keyword arguments:
i -- Input raster file.
output -- Output HTML file.
num_samples -- Number of samples. Leave blank to use whole image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if num_samples is not None: args.append("--num_samples='{}'".format(num_samples))
return self.run_tool('ks_test_for_normality', args, callback) # returns 1 if error
def less_than(self, input1, input2, output, incl_equals=False, callback=None):
"""Performs a less-than comparison operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
incl_equals -- Perform a less-than-or-equal-to operation.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
if incl_equals: args.append("--incl_equals")
return self.run_tool('less_than', args, callback) # returns 1 if error
def list_unique_values(self, i, field, output, callback=None):
"""Lists the unique values contained in a field witin a vector's attribute table.
Keyword arguments:
i -- Input raster file.
field -- Input field name in attribute table.
output -- Output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field='{}'".format(field))
args.append("--output='{}'".format(output))
return self.run_tool('list_unique_values', args, callback) # returns 1 if error
def ln(self, i, output, callback=None):
"""Returns the natural logarithm of values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('ln', args, callback) # returns 1 if error
def log10(self, i, output, callback=None):
"""Returns the base-10 logarithm of values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('log10', args, callback) # returns 1 if error
def log2(self, i, output, callback=None):
"""Returns the base-2 logarithm of values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('log2', args, callback) # returns 1 if error
def max(self, input1, input2, output, callback=None):
"""Performs a MAX operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('max', args, callback) # returns 1 if error
def min(self, input1, input2, output, callback=None):
"""Performs a MIN operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('min', args, callback) # returns 1 if error
def modulo(self, input1, input2, output, callback=None):
"""Performs a modulo operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('modulo', args, callback) # returns 1 if error
def multiply(self, input1, input2, output, callback=None):
"""Performs a multiplication operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('multiply', args, callback) # returns 1 if error
def negate(self, i, output, callback=None):
"""Changes the sign of values in a raster or the 0-1 values of a Boolean raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('negate', args, callback) # returns 1 if error
def not_equal_to(self, input1, input2, output, callback=None):
"""Performs a not-equal-to comparison operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('not_equal_to', args, callback) # returns 1 if error
def paired_sample_t_test(self, input1, input2, output, num_samples=None, callback=None):
"""Performs a 2-sample K-S test for significant differences on two input rasters.
Keyword arguments:
input1 -- First input raster file.
input2 -- Second input raster file.
output -- Output HTML file.
num_samples -- Number of samples. Leave blank to use whole image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
if num_samples is not None: args.append("--num_samples='{}'".format(num_samples))
return self.run_tool('paired_sample_t_test', args, callback) # returns 1 if error
def power(self, input1, input2, output, callback=None):
"""Raises the values in grid cells of one rasters, or a constant value, by values in another raster or constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('power', args, callback) # returns 1 if error
def principal_component_analysis(self, inputs, output, num_comp=None, standardized=False, callback=None):
"""Performs a principal component analysis (PCA) on a multi-spectral dataset.
Keyword arguments:
inputs -- Input raster files.
output -- Output HTML report file.
num_comp -- Number of component images to output; must be less than or equal to the number of input images.
standardized -- Perform standardized PCA?
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--inputs='{}'".format(inputs))
args.append("--output='{}'".format(output))
if num_comp is not None: args.append("--num_comp='{}'".format(num_comp))
if standardized: args.append("--standardized")
return self.run_tool('principal_component_analysis', args, callback) # returns 1 if error
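# Example (hypothetical band list; a minimal sketch): multi-input tools take
# 'inputs' as a single delimited string; the ';' separator below is an
# assumption -- check the tool's CLI help for the exact delimiter.
#   bands = ";".join("band{}.tif".format(n) for n in range(1, 5))
#   wbt.principal_component_analysis(bands, "pca_report.html",
#                                    num_comp=3, standardized=True)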
def quantiles(self, i, output, num_quantiles=5, callback=None):
"""Transforms raster values into quantiles.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
num_quantiles -- Number of quantiles.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--num_quantiles={}".format(num_quantiles))
return self.run_tool('quantiles', args, callback) # returns 1 if error
def random_field(self, base, output, callback=None):
"""Creates an image containing random values.
Keyword arguments:
base -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--base='{}'".format(base))
args.append("--output='{}'".format(output))
return self.run_tool('random_field', args, callback) # returns 1 if error
def random_sample(self, base, output, num_samples=1000, callback=None):
"""Creates an image containing randomly located sample grid cells with unique IDs.
Keyword arguments:
base -- Input raster file.
output -- Output raster file.
num_samples -- Number of samples.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--base='{}'".format(base))
args.append("--output='{}'".format(output))
args.append("--num_samples={}".format(num_samples))
return self.run_tool('random_sample', args, callback) # returns 1 if error
def raster_histogram(self, i, output, callback=None):
"""Creates a histogram from raster values.
Keyword arguments:
i -- Input raster file.
output -- Output HTML file (default name will be based on input file if unspecified).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('raster_histogram', args, callback) # returns 1 if error
def raster_summary_stats(self, i, callback=None):
"""Measures a rasters min, max, average, standard deviation, num. non-nodata cells, and total.
Keyword arguments:
i -- Input raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
return self.run_tool('raster_summary_stats', args, callback) # returns 1 if error
def reciprocal(self, i, output, callback=None):
"""Returns the reciprocal (i.e. 1 / z) of values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('reciprocal', args, callback) # returns 1 if error
def rescale_value_range(self, i, output, out_min_val, out_max_val, clip_min=None, clip_max=None, callback=None):
"""Performs a min-max contrast stretch on an input greytone image.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
out_min_val -- New minimum value in output image.
out_max_val -- New maximum value in output image.
clip_min -- Optional lower tail clip value.
clip_max -- Optional upper tail clip value.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--out_min_val='{}'".format(out_min_val))
args.append("--out_max_val='{}'".format(out_max_val))
if clip_min is not None: args.append("--clip_min='{}'".format(clip_min))
if clip_max is not None: args.append("--clip_max='{}'".format(clip_max))
return self.run_tool('rescale_value_range', args, callback) # returns 1 if error
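# Example (hypothetical image and clip bounds; a minimal sketch): stretch a
# greytone image to 0-255, clipping raw values outside [10, 900] first.
#   wbt.rescale_value_range("grey.tif", "grey_8bit.tif", 0.0, 255.0,
#                           clip_min=10.0, clip_max=900.0)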
def root_mean_square_error(self, i, base, callback=None):
"""Calculates the RMSE and other accuracy statistics.
Keyword arguments:
i -- Input raster file.
base -- Input base raster file used for comparison.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--base='{}'".format(base))
return self.run_tool('root_mean_square_error', args, callback) # returns 1 if error
def round(self, i, output, callback=None):
"""Rounds the values in an input raster to the nearest integer value.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('round', args, callback) # returns 1 if error
def sin(self, i, output, callback=None):
"""Returns the sine (sin) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('sin', args, callback) # returns 1 if error
def sinh(self, i, output, callback=None):
"""Returns the hyperbolic sine (sinh) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('sinh', args, callback) # returns 1 if error
def square(self, i, output, callback=None):
"""Squares the values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('square', args, callback) # returns 1 if error
def square_root(self, i, output, callback=None):
"""Returns the square root of the values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('square_root', args, callback) # returns 1 if error
def subtract(self, input1, input2, output, callback=None):
"""Performs a differencing operation on two rasters or a raster and a constant value.
Keyword arguments:
input1 -- Input raster file or constant value.
input2 -- Input raster file or constant value.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('subtract', args, callback) # returns 1 if error
def tan(self, i, output, callback=None):
"""Returns the tangent (tan) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('tan', args, callback) # returns 1 if error
def tanh(self, i, output, callback=None):
"""Returns the hyperbolic tangent (tanh) of each values in a raster.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('tanh', args, callback) # returns 1 if error
def to_degrees(self, i, output, callback=None):
"""Converts a raster from radians to degrees.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('to_degrees', args, callback) # returns 1 if error
def to_radians(self, i, output, callback=None):
"""Converts a raster from degrees to radians.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('to_radians', args, callback) # returns 1 if error
def trend_surface(self, i, output, order=1, callback=None):
"""Estimates the trend surface of an input raster file.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
order -- Polynomial order (1 to 10).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
args.append("--order={}".format(order))
return self.run_tool('trend_surface', args, callback) # returns 1 if error
def trend_surface_vector_points(self, i, field, output, cell_size, order=1, callback=None):
"""Estimates a trend surface from vector points.
Keyword arguments:
i -- Input vector Points file.
field -- Input field name in attribute table.
output -- Output raster file.
order -- Polynomial order (1 to 10).
cell_size -- Cell size of the output raster.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--field='{}'".format(field))
args.append("--output='{}'".format(output))
args.append("--order={}".format(order))
args.append("--cell_size='{}'".format(cell_size))
return self.run_tool('trend_surface_vector_points', args, callback) # returns 1 if error
def truncate(self, i, output, num_decimals=None, callback=None):
"""Truncates the values in a raster to the desired number of decimal places.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
num_decimals -- Number of decimals left after truncation (default is zero).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
if num_decimals is not None: args.append("--num_decimals='{}'".format(num_decimals))
return self.run_tool('truncate', args, callback) # returns 1 if error
def turning_bands_simulation(self, base, output, range, iterations=1000, callback=None):
"""Creates an image containing random values based on a turning-bands simulation.
Keyword arguments:
base -- Input base raster file.
output -- Output file.
range -- The field's range, in xy-units, related to the extent of spatial autocorrelation.
iterations -- The number of iterations.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--base='{}'".format(base))
args.append("--output='{}'".format(output))
args.append("--range='{}'".format(range))
args.append("--iterations={}".format(iterations))
return self.run_tool('turning_bands_simulation', args, callback) # returns 1 if error
def two_sample_ks_test(self, input1, input2, output, num_samples=None, callback=None):
"""Performs a 2-sample K-S test for significant differences on two input rasters.
Keyword arguments:
input1 -- First input raster file.
input2 -- Second input raster file.
output -- Output HTML file.
num_samples -- Number of samples. Leave blank to use whole image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
if num_samples is not None: args.append("--num_samples='{}'".format(num_samples))
return self.run_tool('two_sample_ks_test', args, callback) # returns 1 if error
def wilcoxon_signed_rank_test(self, input1, input2, output, num_samples=None, callback=None):
"""Performs a 2-sample K-S test for significant differences on two input rasters.
Keyword arguments:
input1 -- First input raster file.
input2 -- Second input raster file.
output -- Output HTML file.
num_samples -- Number of samples. Leave blank to use whole image.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
if num_samples is not None: args.append("--num_samples='{}'".format(num_samples))
return self.run_tool('wilcoxon_signed_rank_test', args, callback) # returns 1 if error
def xor(self, input1, input2, output, callback=None):
"""Performs a logical XOR operator on two Boolean raster images.
Keyword arguments:
input1 -- Input raster file.
input2 -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input1='{}'".format(input1))
args.append("--input2='{}'".format(input2))
args.append("--output='{}'".format(output))
return self.run_tool('xor', args, callback) # returns 1 if error
def z_scores(self, i, output, callback=None):
"""Standardizes the values in an input raster by converting to z-scores.
Keyword arguments:
i -- Input raster file.
output -- Output raster file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--output='{}'".format(output))
return self.run_tool('z_scores', args, callback) # returns 1 if error
def zonal_statistics(self, i, features, output=None, stat="mean", out_table=None, callback=None):
"""Extracts descriptive statistics for a group of patches in a raster.
Keyword arguments:
i -- Input data raster file.
features -- Input feature definition raster file.
output -- Output raster file.
stat -- Statistic to extract, including 'mean', 'median', 'minimum', 'maximum', 'range', 'standard deviation', and 'total'.
out_table -- Output HTML Table file.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--input='{}'".format(i))
args.append("--features='{}'".format(features))
if output is not None: args.append("--output='{}'".format(output))
args.append("--stat={}".format(stat))
if out_table is not None: args.append("--out_table='{}'".format(out_table))
return self.run_tool('zonal_statistics', args, callback) # returns 1 if error
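# Example (hypothetical rasters; a minimal sketch): mean elevation per
# watershed, written both as a raster and as an HTML table.
#   wbt.zonal_statistics("dem.tif", "watersheds.tif", output="zone_mean.tif",
#                        stat="mean", out_table="zone_stats.html")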
###########################
# Stream Network Analysis #
###########################
def burn_streams_at_roads(self, dem, streams, roads, output, width=None, callback=None):
"""Rasterizes vector streams based on Lindsay (2016) method.
Keyword arguments:
dem -- Input raster digital elevation model (DEM) file.
streams -- Input vector streams file.
roads -- Input vector roads file.
output -- Output raster file.
width -- Maximum road embankment width, in map units.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--streams='{}'".format(streams))
args.append("--roads='{}'".format(roads))
args.append("--output='{}'".format(output))
if width is not None: args.append("--width='{}'".format(width))
return self.run_tool('burn_streams_at_roads', args, callback) # returns 1 if error
def distance_to_outlet(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Calculates the distance of stream grid cells to the channel network outlet cell.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('distance_to_outlet', args, callback) # returns 1 if error
def extract_streams(self, flow_accum, output, threshold, zero_background=False, callback=None):
"""Extracts stream grid cells from a flow accumulation raster.
Keyword arguments:
flow_accum -- Input raster D8 flow accumulation file.
output -- Output raster file.
threshold -- Threshold in flow accumulation values for channelization.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--flow_accum='{}'".format(flow_accum))
args.append("--output='{}'".format(output))
args.append("--threshold='{}'".format(threshold))
if zero_background: args.append("--zero_background")
return self.run_tool('extract_streams', args, callback) # returns 1 if error
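# Example (hypothetical file names; a minimal sketch): a typical channel
# network workflow chains flow accumulation into extraction and ordering.
#   wbt.extract_streams("flow_accum.tif", "streams.tif", threshold=1000.0)
#   wbt.strahler_stream_order("d8_pntr.tif", "streams.tif", "strahler.tif")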
def extract_valleys(self, dem, output, variant="LQ", line_thin=True, filter=5, callback=None):
"""Identifies potential valley bottom grid cells based on local topolography alone.
Keyword arguments:
dem -- Input raster DEM file.
output -- Output raster file.
variant -- Options include 'LQ' (lower quartile), 'JandR' (Johnston and Rosenfeld), and 'PandD' (Peucker and Douglas); default is 'LQ'.
line_thin -- Optional flag indicating whether post-processing line-thinning should be performed.
filter -- Optional argument (only used when variant='LQ') providing the filter size, in grid cells, used for LQ filtering (default is 5).
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
args.append("--variant={}".format(variant))
if line_thin: args.append("--line_thin")
args.append("--filter={}".format(filter))
return self.run_tool('extract_valleys', args, callback) # returns 1 if error
def farthest_channel_head(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Calculates the distance to the furthest upstream channel head for each stream cell.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('farthest_channel_head', args, callback) # returns 1 if error
def find_main_stem(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Finds the main stem, based on stream lengths, of each stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('find_main_stem', args, callback) # returns 1 if error
def hack_stream_order(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Assigns the Hack stream order to each tributary in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('hack_stream_order', args, callback) # returns 1 if error
def horton_stream_order(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Assigns the Horton stream order to each tributary in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('horton_stream_order', args, callback) # returns 1 if error
def length_of_upstream_channels(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Calculates the total length of channels upstream.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('length_of_upstream_channels', args, callback) # returns 1 if error
def long_profile(self, d8_pntr, streams, dem, output, esri_pntr=False, callback=None):
"""Plots the stream longitudinal profiles for one or more rivers.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
dem -- Input raster DEM file.
output -- Output HTML file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('long_profile', args, callback) # returns 1 if error
def long_profile_from_points(self, d8_pntr, points, dem, output, esri_pntr=False, callback=None):
"""Plots the longitudinal profiles from flow-paths initiating from a set of vector points.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
points -- Input vector points file.
dem -- Input raster DEM file.
output -- Output HTML file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--points='{}'".format(points))
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('long_profile_from_points', args, callback) # returns 1 if error
def raster_streams_to_vector(self, streams, d8_pntr, output, esri_pntr=False, callback=None):
"""Converts a raster stream file into a vector file.
Keyword arguments:
streams -- Input raster streams file.
d8_pntr -- Input raster D8 pointer file.
output -- Output vector file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--streams='{}'".format(streams))
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('raster_streams_to_vector', args, callback) # returns 1 if error
def rasterize_streams(self, streams, base, output, nodata=True, feature_id=False, callback=None):
"""Rasterizes vector streams based on Lindsay (2016) method.
Keyword arguments:
streams -- Input vector streams file.
base -- Input base raster file.
output -- Output raster file.
nodata -- Use NoData value for background?
feature_id -- Use feature number as output value?
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--streams='{}'".format(streams))
args.append("--base='{}'".format(base))
args.append("--output='{}'".format(output))
if nodata: args.append("--nodata")
if feature_id: args.append("--feature_id")
return self.run_tool('rasterize_streams', args, callback) # returns 1 if error
def remove_short_streams(self, d8_pntr, streams, output, min_length, esri_pntr=False, callback=None):
"""Removes short first-order streams from a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
min_length -- Minimum tributary length (in map units) used for network pruning.
esri_pntr -- D8 pointer uses the ESRI style scheme.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
args.append("--min_length='{}'".format(min_length))
if esri_pntr: args.append("--esri_pntr")
return self.run_tool('remove_short_streams', args, callback) # returns 1 if error
def shreve_stream_magnitude(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Assigns the Shreve stream magnitude to each link in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('shreve_stream_magnitude', args, callback) # returns 1 if error
def strahler_stream_order(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Assigns the Strahler stream order to each link in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('strahler_stream_order', args, callback) # returns 1 if error
def stream_link_class(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Identifies the exterior/interior links and nodes in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('stream_link_class', args, callback) # returns 1 if error
def stream_link_identifier(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Assigns a unique identifier to each link in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('stream_link_identifier', args, callback) # returns 1 if error
def stream_link_length(self, d8_pntr, linkid, output, esri_pntr=False, zero_background=False, callback=None):
"""Estimates the length of each link (or tributary) in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
linkid -- Input raster streams link ID (or tributary ID) file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--linkid='{}'".format(linkid))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('stream_link_length', args, callback) # returns 1 if error
def stream_link_slope(self, d8_pntr, linkid, dem, output, esri_pntr=False, zero_background=False, callback=None):
"""Estimates the average slope of each link (or tributary) in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
linkid -- Input raster streams link ID (or tributary ID) file.
dem -- Input raster DEM file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--linkid='{}'".format(linkid))
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('stream_link_slope', args, callback) # returns 1 if error
def stream_slope_continuous(self, d8_pntr, streams, dem, output, esri_pntr=False, zero_background=False, callback=None):
"""Estimates the slope of each grid cell in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
dem -- Input raster DEM file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--dem='{}'".format(dem))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('stream_slope_continuous', args, callback) # returns 1 if error
def topological_stream_order(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Assigns each link in a stream network its topological order.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('topological_stream_order', args, callback) # returns 1 if error
def tributary_identifier(self, d8_pntr, streams, output, esri_pntr=False, zero_background=False, callback=None):
"""Assigns a unique identifier to each tributary in a stream network.
Keyword arguments:
d8_pntr -- Input raster D8 pointer file.
streams -- Input raster streams file.
output -- Output raster file.
esri_pntr -- D8 pointer uses the ESRI style scheme.
zero_background -- Flag indicating whether a background value of zero should be used.
callback -- Custom function for handling tool text outputs.
"""
args = []
args.append("--d8_pntr='{}'".format(d8_pntr))
args.append("--streams='{}'".format(streams))
args.append("--output='{}'".format(output))
if esri_pntr: args.append("--esri_pntr")
if zero_background: args.append("--zero_background")
return self.run_tool('tributary_identifier', args, callback) # returns 1 if error
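# --- Usage sketch (editor's addition, not part of the upstream wrapper) ---
# Assuming the enclosing class is the WhiteboxTools wrapper and can be
# constructed with no arguments (an assumption), and that the raster file
# names below are purely illustrative:
#
#     wbt = WhiteboxTools()
#     ret = wbt.strahler_stream_order("d8_pointer.tif", "streams.tif",
#                                     "strahler_order.tif", zero_background=True)
#     if ret == 1:
#         print("tool reported an error")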
| [
"[email protected]"
]
| |
2d71ffa6cfe0c24d3bfbca19a207ee4695593d52 | c6057a6cc2cf02aa6b9aa877402e84b9cb0bf596 | /commands.py | 3a15d2dbb00813a78c73f9957bd202d12eb83de3 | []
| no_license | DollaR84/HotSound | 372b3e7bd585fa1dc14e31d9fbd0364f7293e1ce | a969f2faac6dfef3b12f69960052f33f77c0de61 | refs/heads/master | 2023-03-25T18:20:56.941975 | 2020-04-08T07:55:08 | 2020-04-08T07:55:08 | 351,373,891 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,502 | py | """
Commands for graphical interface.
Created on 12.02.2020
@author: Ruslan Dolovanyuk
"""
import webbrowser
from dialogs.dialogs import About
from dialogs.dialogs import Message
from linker import Linker
from player import Player
import version
import wx
class Commands:
"""Helper class, contains command for bind events, menu and buttons."""
def __init__(self, drawer):
"""Initialization commands class."""
self.drawer = drawer
self.phrases = self.drawer.phrases
self.message = Message(self.drawer)
self.linker = Linker()
self.player = Player()
self.config = self.drawer.config
self.config.get_outputs = self.player.get_outputs
self.player.config = self.config
self.wildcard = 'Wave files (*.wav)|*.wav|' \
'All files (*.*)|*.*'
self.__mods = [
wx.WXK_CONTROL,
wx.WXK_SHIFT,
wx.WXK_ALT,
wx.WXK_WINDOWS_LEFT,
wx.WXK_WINDOWS_RIGHT,
wx.WXK_WINDOWS_MENU,
]
self.set_window()
def set_window(self):
"""Set size and position window from saving data."""
self.drawer.SetPosition(self.config.get_pos())
self.drawer.SetSize(self.config.get_size())
self.drawer.Layout()
def donate(self, event):
"""Run donate hyperlink in browser."""
webbrowser.open(self.config.donate_url)
def about(self, event):
"""Run about dialog."""
About(
self.drawer,
self.phrases.about.title,
self.phrases.about.name,
version.VERSION,
self.phrases.about.author
).ShowModal()
def close(self, event):
"""Close event for button close."""
self.drawer.Close(True)
def close_window(self, event):
"""Close window event."""
self.config.set_pos(self.drawer.GetScreenPosition())
self.config.set_size(self.drawer.GetSize())
self.config.close()
self.linker.close()
self.player.close()
self.drawer.Destroy()
def options(self, event):
"""Run settings dialog."""
self.config.open_settings(self.drawer)
def get_path_file(self):
"""Return path wave file."""
path = ''
file_dlg = wx.FileDialog(self.drawer, self.phrases.titles.choice_file, '', '', self.wildcard, style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if wx.ID_OK == file_dlg.ShowModal():
path = file_dlg.GetPath()
file_dlg.Destroy()
return path
def process(self, event):
"""Main process eventer for button."""
keycode = event.GetKeyCode()
if not keycode in self.__mods:
if event.CmdDown() and event.ShiftDown():
self.linker.del_link(keycode)
self.drawer.data.SetValue(self.linker.get_all_links())
self.drawer.Layout()
elif event.CmdDown():
path = self.get_path_file()
if path != '':
self.linker.set_link(keycode, path)
self.drawer.data.SetValue(self.linker.get_all_links())
self.drawer.Layout()
else:
path = self.linker.get_link(keycode)
if path is not None:
self.player.play(path)
event.Skip()
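# Linker contract assumed by process() above (editor's summary; signatures
# inferred only from the calls in this class, not from linker.py itself):
#     linker.set_link(keycode, path)  -> bind a .wav file to a key code
#     linker.get_link(keycode)        -> bound path, or None
#     linker.del_link(keycode)        -> remove a binding
#     linker.get_all_links()          -> printable summary of all bindings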
| [
"[email protected]"
]
| |
f0e7679a16f42089ed8ff7242c54678d82bf28ce | 633944f913050debf0764c2a29cf3e88f912670e | /v8/depot_tools/bootstrap-3.8.0b1.chromium.1_bin/python3/lib/python3.8/distutils/tests/test_util.py | e66f9562fdf6124dbdfe93a456b961c2118e9dcc | [
"BSD-3-Clause",
"bzip2-1.0.6",
"SunPro",
"Apache-2.0"
]
| permissive | bopopescu/V8-lgtm | 0474c2ff39baf754f556ef57619ceae93e7320fd | da307e2f7abfca5fa0e860a809de6cd07fd1b72b | refs/heads/master | 2022-02-16T19:10:54.008520 | 2019-09-25T07:51:13 | 2019-09-25T07:51:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | ../../../../../.cipd/pkgs/2/_current/lib/python3.8/distutils/tests/test_util.py | [
"[email protected]"
]
| |
f131821fc8232df89ad26250894a2c7c8d50ae4a | 8ef8e6818c977c26d937d09b46be0d748022ea09 | /cv/detection/autoassign/pytorch/mmdet/core/bbox/coder/base_bbox_coder.py | 0872bf008b42d3e9056ce17ea135c7d8ba18c92a | [
"Apache-2.0"
]
| permissive | Deep-Spark/DeepSparkHub | eb5996607e63ccd2c706789f64b3cc0070e7f8ef | 9d643e88946fc4a24f2d4d073c08b05ea693f4c5 | refs/heads/master | 2023-09-01T11:26:49.648759 | 2023-08-25T01:50:18 | 2023-08-25T01:50:18 | 534,133,249 | 7 | 6 | Apache-2.0 | 2023-03-28T02:54:59 | 2022-09-08T09:07:01 | Python | UTF-8 | Python | false | false | 514 | py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
class BaseBBoxCoder(metaclass=ABCMeta):
"""Base bounding box coder."""
def __init__(self, **kwargs):
pass
@abstractmethod
def encode(self, bboxes, gt_bboxes):
"""Encode deltas between bboxes and ground truth boxes."""
@abstractmethod
def decode(self, bboxes, bboxes_pred):
"""Decode the predicted bboxes according to prediction and base
boxes."""
| [
"[email protected]"
]
| |
4dad4fd1062e9275b9c9a467a5b2a23d31a1b62d | 3d06eeebdd598efba25d29d7e3d03d90ede1bfbd | /15_lesson(django)/itProger/blog/migrations/0001_initial.py | 2f69d752487a523064c5052cc3a50ad5bc00220d | []
| no_license | duk1edev/itproger | 58bdd16088dec7864585d318935b118ce584874d | 786f94fff6d816f3f978bd8c24c3d985ffd5ffb2 | refs/heads/master | 2021-01-02T02:43:32.684100 | 2020-03-28T18:10:25 | 2020-03-28T18:10:25 | 239,443,309 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 897 | py | # Generated by Django 3.0.4 on 2020-03-13 23:17
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='News',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('text', models.TextField()),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
]
| |
e46a595d613fae013a7af518621b8c2516c05609 | 1ad01be757bd497c24f0e46e5fe88f6d018796c3 | /alertweet/actions/__init__.py | ee6b302ce52b383d36365a2bfb98eaabd52dff63 | []
| no_license | jgsogo/twitter-alert | e80d0146fba33bf331d2d114c3ba43459ad3bd10 | b2a7911841f89be08f2364f8833d13549aad69b2 | refs/heads/master | 2016-08-03T20:04:36.116295 | 2015-07-31T13:03:31 | 2015-07-31T13:03:31 | 40,003,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 114 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from .stdout import StdOutAction
from .dump_place import DumpPlace | [
"[email protected]"
]
| |
1f433a635d0821dae159098a8435f766ab8150a4 | f3b233e5053e28fa95c549017bd75a30456eb50c | /mcl1_input/L56/56-60_MD_NVT_rerun/set.py | 5e3aecefef9c6d535f5cd40500ef9874db041e1b | []
| no_license | AnguseZhang/Input_TI | ddf2ed40ff1c0aa24eea3275b83d4d405b50b820 | 50ada0833890be9e261c967d00948f998313cb60 | refs/heads/master | 2021-05-25T15:02:38.858785 | 2020-02-18T16:57:04 | 2020-02-18T16:57:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | import os
dir = '/mnt/scratch/songlin3/run/mcl1/L56/MD/ti_one-step/56_60/'
filesdir = dir + 'files/'
temp_equiin = filesdir + 'temp_equi.in'
temp_prodin = filesdir + 'temp_prod.in'
temp_pbs = filesdir + 'temp.pbs'
lambd = [ 0.00922, 0.04794, 0.11505, 0.20634, 0.31608, 0.43738, 0.56262, 0.68392, 0.79366, 0.88495, 0.95206, 0.99078]
for j in lambd:
os.system("rm -r %6.5f" %(j))
os.system("mkdir %6.5f" %(j))
os.chdir("%6.5f" %(j))
os.system("rm *")
workdir = dir + "%6.5f" %(j) + '/'
# equilibration input (equi.in)
eqin = workdir + "%6.5f_equi.in" %(j)
os.system("cp %s %s" %(temp_equiin, eqin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, eqin))
# production input (prod.in)
prodin = workdir + "%6.5f_prod.in" %(j)
os.system("cp %s %s" %(temp_prodin, prodin))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, prodin))
# PBS job script
pbs = workdir + "%6.5f.pbs" %(j)
os.system("cp %s %s" %(temp_pbs, pbs))
os.system("sed -i 's/XXX/%6.5f/g' %s" %(j, pbs))
# topology and restart files
os.system("cp ../56-60_merged.prmtop .")
os.system("cp ../0.5_equi_0.rst .")
# submit the PBS job
os.system("qsub %s" %(pbs))
os.chdir(dir)
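# (Editor's summary comment) For every lambda the loop above creates a fresh
# directory named '%6.5f', renders '<lambda>_equi.in', '<lambda>_prod.in' and
# '<lambda>.pbs' from the templates by substituting XXX with the lambda value,
# copies in the shared topology and restart files, and submits the PBS job.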
| [
"[email protected]"
]
| |
d8f2a9320c7bf9881c95e4b343f6339d2052933b | bd02997a44218468b155eda45dd9dd592bb3d124 | /baekjoon_2146_3.py | b09cb7e0c56e5b92cbe28541d979b1e838604d8d | []
| no_license | rheehot/ProblemSolving_Python | 88b1eb303ab97624ae6c97e05393352695038d14 | 4d6dc6aea628f0e6e96530646c66216bf489427f | refs/heads/master | 2023-02-13T03:30:07.039231 | 2021-01-04T06:04:11 | 2021-01-04T06:04:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,968 | py | '''
Problem Solving Baekjoon 2146_3
Author: Injun Son
Date: Dec 21, 2020
'''
import sys
sys.setrecursionlimit(10 ** 6)
from collections import deque
N = int(input())
graph = [list(map(int, input().split())) for _ in range(N)]
check = [[False] * N for _ in range(N)]
dx = [-1, 0, 0, 1]
dy = [0, 1, -1, 0]
ans = sys.maxsize
count = 1
def print_board(board):
for i in range(N):
for j in range(N):
print(board[i][j], end=" ")
print("")
# DFS helper: label every cell of one island with the same number (grouping)
def dfs(y, x):
global count
check[y][x] = True
graph[y][x] = count
for i in range(4):
ny, nx = y + dy[i], x + dx[i]
if ny < 0 or ny >= N or nx < 0 or nx >= N:
continue
if check[ny][nx] == False and graph[ny][nx]:
dfs(ny, nx)
def bfs(z):
global ans
dist = [[-1] * N for _ in range(N)]
q = deque()
for i in range(N):  # enqueue every cell belonging to island z
for j in range(N):
if graph[i][j] == z:
q.append([i, j])
dist[i][j] = 0
while q:
y, x = q.popleft()
for i in range(4):
ny, nx = y + dy[i], x + dx[i]
if ny < 0 or ny >= N or nx < 0 or nx >= N:
continue
# reached a cell of a different island
if graph[ny][nx] > 0 and graph[ny][nx] != z:
ans = min(ans, dist[y][x])
return
# if the cell is sea and has not been reclaimed yet, extend the distance by one
if graph[ny][nx] == 0 and dist[ny][nx] == -1:
dist[ny][nx] = dist[y][x] + 1
q.append([ny, nx])
for i in range(N):
for j in range(N):
if check[i][j] == False and graph[i][j] == 1:
dfs(i, j)
count += 1
# for every island label, reclaim sea cells outward with BFS until another island is reached
for i in range(1, count):
bfs(i)
print(ans)
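# (Editor's note) Input format inferred from the reads above: first line N,
# then N rows of N space-separated 0/1 values (1 = land). The answer is the
# minimum number of sea cells that must be reclaimed to connect two islands.
# Cost: island labeling is O(N^2) DFS; then one O(N^2) BFS per island.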
| [
"[email protected]"
]
| |
b1a25edd516630842446acf3d6b63f6392f82110 | 41249d7d4ca9950b9c6fee89bf7e2c1929629767 | /results/lmg_optimizations_50spins_criticalpoint_diffConstraints_20200617/script_lmg_crab4freq_neldermead_bound08_fixedInitialPoints_tf2.py | 3d216507ee0ed4e33aa36db3e1ff63d419d92a85 | [
"MIT"
]
| permissive | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | f739b3baad1d2aadda576303bb0bbe9d48ec204a | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | refs/heads/master | 2022-11-22T00:44:09.998199 | 2020-07-21T08:35:28 | 2020-07-21T08:35:28 | 281,237,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
import src.protocol_ansatz as protocol_ansatz
from src.utils import autonumber_filename, basic_logger_configuration
output_file_name = os.path.basename(__file__)[7:-3] + '.csv'
output_file_name = autonumber_filename(output_file_name)
basic_logger_configuration(filename=output_file_name[:-3] + 'log')
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
num_frequencies = 4
protocol = protocol_ansatz.CRABProtocolAnsatz(num_frequencies=num_frequencies)
protocol.generate_rnd_frequencies_each_tf = False
for idx in range(num_frequencies):
protocol.hyperpars['nuk' + str(idx + 1)] = 0
protocol.fill_hyperpar_value(y0=0, y1=1)
results = optimization.find_best_protocol(
problem_specification=dict(
model='lmg',
model_parameters=dict(num_spins=50),
task=dict(initial_intensity=0, final_intensity=1)
),
optimization_specs=dict(
protocol=protocol,
protocol_options=dict(num_frequencies=num_frequencies),
optimization_method='Nelder-Mead',
parameters_constraints=[-8, 8],
initial_parameters=[0] * (2 * num_frequencies),
optimization_options=dict(maxiter=1e5, maxfev=1e5,
xatol=1e-8, fatol=1e-8, adaptive=True)
),
other_options=dict(
scan_times=np.linspace(0.01, 2, 100)
)
)
# ------ save results to file
results.to_csv(output_file_name)
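# (Editor's summary comment) With num_frequencies = 4 the Nelder-Mead search
# runs over 2 * num_frequencies = 8 CRAB amplitudes, all initialized to 0 and
# constrained to [-8, 8], for each of the 100 protocol durations in [0.01, 2].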
| [
"[email protected]"
]
| |
931f4d5d28fbb6cd5865f176db4d037f806c9964 | d86e9d59784097a7262fa9337585a36bd58a6d29 | /cvxbenchmarks/lib/data/epsilon/epopt/problems/robust_pca.py | d0cb3237223924c12733872e4563a3ff724d0b0c | []
| no_license | nishi951/cvxbenchmarks | 2ae36e75c42c8bd35fafac98bad5d9d88168bd68 | 932141d8e4e929860011bf25c41e941e2f8fbd76 | refs/heads/master | 2021-01-11T07:23:32.260811 | 2018-09-15T22:23:14 | 2018-09-15T22:23:14 | 62,177,196 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 472 | py | import cvxpy as cp
import numpy as np
import scipy.sparse as sp
def create(n, r=10, density=0.1):
np.random.seed(0)
L1 = np.random.randn(n,r)
L2 = np.random.randn(r,n)
L0 = L1.dot(L2)
S0 = sp.rand(n, n, density)
S0.data = 10*np.random.randn(len(S0.data))
M = L0 + S0
lam = 0.1
L = cp.Variable(n, n)
S = cp.Variable(n, n)
f = cp.norm(L, "nuc") + lam*cp.norm1(S)
C = [L + S == M]
return cp.Problem(cp.Minimize(f), C)
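# Usage sketch (editor's addition). Note that cp.Variable(n, n) above follows
# the pre-1.0 cvxpy API; with cvxpy >= 1.0 the shape is passed as a tuple,
# e.g. cp.Variable((n, n)).
if __name__ == "__main__":
    prob = create(50)
    # assumes an installed conic solver able to handle the nuclear norm (e.g. SCS)
    prob.solve()
    print(prob.status, prob.value)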
| [
"[email protected]"
]
| |
8982bb30bc58de5af180ccd22ab752d94dcc2df1 | c50cf19707ecf44c8e15acf0e994d288fe4f01a7 | /addition/migrations/0005_auto_20160420_1600.py | dbc95f4b80f196cabb9c51f3bd641a0ee163e56f | [
"MIT"
]
| permissive | JeremyParker/idlecars-backend | ee5981356c60161dee05c22e01e5c913e73083c0 | 819cce48e4679d61164b238b81dab0e4d51b8afa | refs/heads/master | 2021-03-16T04:29:43.287760 | 2018-03-03T23:16:02 | 2018-03-03T23:16:02 | 31,734,223 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('addition', '0004_addition_created_time'),
]
operations = [
migrations.RemoveField(
model_name='addition',
name='defensive_cert_image',
),
migrations.AddField(
model_name='addition',
name='ssn',
field=models.CharField(max_length=9, blank=True),
),
]
| [
"[email protected]"
]
| |
6d0d2fb4638956baa4c2192e6ffbab832ad42d15 | d4cb392e60e8864b6b728e54929a6cef040d62f1 | /cmocean/rgb/matter.py | 6e81c7dbaf16b08c4d1a68f5e5fae6827ad40c13 | [
"MIT"
]
| permissive | matplotlib/cmocean | e3150c11556c13a7eb664c9ee0d55e373ef5f64f | 50611a57becd98e4cc5858330d9cb7f10aa49711 | refs/heads/main | 2023-08-20T05:28:25.151849 | 2023-03-20T16:06:48 | 2023-03-20T16:06:48 | 32,095,153 | 203 | 53 | MIT | 2023-09-12T16:36:21 | 2015-03-12T18:42:41 | Python | UTF-8 | Python | false | false | 13,688 | py |
from matplotlib.colors import ListedColormap
from numpy import nan, inf
# Used to reconstruct the colormap in pycam02ucs.cm.viscm
parameters = {'xp': [12.083124471935321, 31.017789474016041, 34.659071205185427, 33.93081485895155, 32.27851430976432, -0.7196049621778116],
'yp': [-13.935705368289632, -11.750936329588001, -1.1912193091968106, 11.553266749895982, 22.829861111111114, 15.377428998505223],
'min_Jp': 15.0,
'max_Jp': 95.0}
cm_data = [[ 0.18517171, 0.05913349, 0.24304267],
[ 0.19008219, 0.06057894, 0.24651605],
[ 0.1950016 , 0.06199021, 0.24996805],
[ 0.19993023, 0.06336778, 0.25339803],
[ 0.2048691 , 0.06471124, 0.25680549],
[ 0.20982353, 0.06601486, 0.26019056],
[ 0.21478769, 0.06728631, 0.26355148],
[ 0.21976179, 0.06852599, 0.26688759],
[ 0.22474604, 0.0697343 , 0.27019819],
[ 0.22974485, 0.07090635, 0.27348302],
[ 0.2347553 , 0.07204628, 0.27674091],
[ 0.23977611, 0.07315619, 0.27997094],
[ 0.24480741, 0.07423648, 0.28317235],
[ 0.24984931, 0.07528753, 0.28634437],
[ 0.25490478, 0.07630599, 0.28948629],
[ 0.25997231, 0.07729426, 0.29259714],
[ 0.26505035, 0.07825478, 0.295676 ],
[ 0.27013893, 0.07918798, 0.29872202],
[ 0.27523807, 0.08009433, 0.30173433],
[ 0.28034773, 0.08097432, 0.30471204],
[ 0.28546798, 0.08182832, 0.30765422],
[ 0.29060135, 0.08265337, 0.31055974],
[ 0.29574478, 0.083454 , 0.3134278 ],
[ 0.30089818, 0.08423083, 0.31625744],
[ 0.30606141, 0.08498453, 0.3190477 ],
[ 0.31123432, 0.08571579, 0.32179759],
[ 0.31641672, 0.08642536, 0.32450613],
[ 0.32160842, 0.08711404, 0.32717232],
[ 0.32680918, 0.08778266, 0.32979513],
[ 0.33201875, 0.08843214, 0.33237355],
[ 0.33723686, 0.08906342, 0.33490656],
[ 0.3424632 , 0.08967752, 0.33739313],
[ 0.34769744, 0.09027551, 0.33983225],
[ 0.35293923, 0.09085853, 0.34222288],
[ 0.35818819, 0.09142777, 0.34456401],
[ 0.36344392, 0.0919845 , 0.34685464],
[ 0.36870599, 0.09253005, 0.34909377],
[ 0.37397397, 0.0930658 , 0.35128042],
[ 0.37924738, 0.09359321, 0.35341364],
[ 0.38452573, 0.0941138 , 0.35549249],
[ 0.38980852, 0.09462915, 0.35751606],
[ 0.39509521, 0.0951409 , 0.35948347],
[ 0.40038526, 0.09565075, 0.36139387],
[ 0.40567812, 0.09616044, 0.36324644],
[ 0.4109732 , 0.09667179, 0.36504042],
[ 0.41626992, 0.09718665, 0.36677507],
[ 0.42156767, 0.09770691, 0.36844971],
[ 0.42686586, 0.09823449, 0.37006371],
[ 0.43216478, 0.09876992, 0.37161577],
[ 0.43746355, 0.09931556, 0.37310547],
[ 0.44276088, 0.0998745 , 0.37453284],
[ 0.44805611, 0.10044877, 0.37589749],
[ 0.45334861, 0.1010404 , 0.37719908],
[ 0.45863773, 0.10165142, 0.37843733],
[ 0.46392333, 0.1022831 , 0.37961155],
[ 0.46920552, 0.10293623, 0.38072076],
[ 0.47448238, 0.10361485, 0.38176614],
[ 0.47975329, 0.10432087, 0.38274769],
[ 0.48501765, 0.1050562 , 0.3836655 ],
[ 0.49027555, 0.10582154, 0.38451884],
[ 0.49552659, 0.10661841, 0.3853076 ],
[ 0.50076923, 0.10745003, 0.38603324],
[ 0.50600291, 0.10831803, 0.38669608],
[ 0.51122751, 0.10922327, 0.38729597],
[ 0.51644309, 0.11016633, 0.38783247],
[ 0.52164803, 0.11115027, 0.38830764],
[ 0.52684188, 0.11217633, 0.38872207],
[ 0.53202486, 0.11324458, 0.38907529],
[ 0.53719631, 0.11435643, 0.38936827],
[ 0.54235522, 0.11551369, 0.38960269],
[ 0.54750131, 0.11671701, 0.38977915],
[ 0.552635 , 0.11796595, 0.38989699],
[ 0.55775491, 0.11926261, 0.38995887],
[ 0.5628607 , 0.12060752, 0.38996572],
[ 0.56795278, 0.12200003, 0.38991698],
[ 0.57303015, 0.12344147, 0.38981494],
[ 0.57809238, 0.12493225, 0.38966089],
[ 0.58313973, 0.1264718 , 0.38945465],
[ 0.58817152, 0.12806077, 0.3891981 ],
[ 0.5931873 , 0.12969941, 0.38889274],
[ 0.59818726, 0.13138711, 0.38853858],
[ 0.60317083, 0.13312418, 0.38813742],
[ 0.60813763, 0.13491065, 0.38769077],
[ 0.61308782, 0.13674584, 0.38719864],
[ 0.61802083, 0.13862993, 0.386663 ],
[ 0.62293634, 0.14056274, 0.38608522],
[ 0.62783454, 0.14254354, 0.38546523],
[ 0.63271477, 0.14457248, 0.38480529],
[ 0.63757687, 0.14664919, 0.38410635],
[ 0.64242081, 0.14877308, 0.38336876],
[ 0.64724605, 0.15094418, 0.38259458],
[ 0.65205246, 0.15316203, 0.38178446],
[ 0.65683983, 0.15542627, 0.38093931],
[ 0.66160768, 0.15773679, 0.38006084],
[ 0.66635592, 0.16009315, 0.37914944],
[ 0.67108412, 0.16249519, 0.37820659],
[ 0.67579188, 0.16494277, 0.37723366],
[ 0.68047906, 0.16743548, 0.37623081],
[ 0.68514508, 0.16997338, 0.37520008],
[ 0.68978961, 0.17255625, 0.37414221],
[ 0.69441228, 0.17518398, 0.37305808],
[ 0.69901252, 0.1778566 , 0.3719494 ],
[ 0.70359001, 0.18057398, 0.37081657],
[ 0.70814412, 0.18333622, 0.36966126],
[ 0.71267433, 0.18614338, 0.36848468],
[ 0.71718017, 0.1889955 , 0.36728743],
[ 0.7216609 , 0.19189279, 0.36607151],
[ 0.72611598, 0.19483537, 0.36483758],
[ 0.73054471, 0.19782348, 0.36358718],
[ 0.73494635, 0.20085737, 0.36232191],
[ 0.73932024, 0.20393732, 0.36104238],
[ 0.74366551, 0.20706364, 0.35975091],
[ 0.74798139, 0.21023669, 0.35844838],
[ 0.75226701, 0.21345683, 0.35713657],
[ 0.75652144, 0.21672446, 0.35581731],
[ 0.76074383, 0.22004003, 0.35449167],
[ 0.76493312, 0.22340394, 0.35316213],
[ 0.76908837, 0.22681667, 0.35183001],
[ 0.7732085 , 0.23027866, 0.35049752],
[ 0.77729245, 0.23379036, 0.34916672],
[ 0.78133913, 0.23735228, 0.34783945],
[ 0.78534739, 0.24096482, 0.3465183 ],
[ 0.78931609, 0.24462845, 0.34520528],
[ 0.79324405, 0.24834357, 0.34390302],
[ 0.79713009, 0.25211056, 0.34261402],
[ 0.80097301, 0.25592978, 0.34134093],
[ 0.80477161, 0.25980154, 0.34008638],
[ 0.80852469, 0.26372603, 0.33885362],
[ 0.81223105, 0.26770354, 0.33764514],
[ 0.81588953, 0.27173408, 0.33646439],
[ 0.819499 , 0.27581774, 0.33531442],
[ 0.82305829, 0.27995459, 0.33419807],
[ 0.82656645, 0.28414414, 0.33311978],
[ 0.83002232, 0.28838659, 0.33208153],
[ 0.83342508, 0.29268122, 0.33108793],
[ 0.8367738 , 0.29702773, 0.33014193],
[ 0.84006752, 0.30142586, 0.32924624],
[ 0.84330599, 0.30587398, 0.32840668],
[ 0.84648807, 0.31037235, 0.32762406],
[ 0.84961352, 0.31491948, 0.32690313],
[ 0.85268212, 0.31951405, 0.32624774],
[ 0.855693 , 0.32415591, 0.32565907],
[ 0.85864675, 0.32884252, 0.32514279],
[ 0.86154301, 0.33357304, 0.32470075],
[ 0.86438132, 0.33834687, 0.32433433],
[ 0.86716293, 0.34316081, 0.32404903],
[ 0.86988775, 0.34801394, 0.32384604],
[ 0.87255551, 0.35290558, 0.3237261 ],
[ 0.8751674 , 0.35783305, 0.32369276],
[ 0.87772497, 0.36279339, 0.32374939],
[ 0.88022743, 0.36778691, 0.32389464],
[ 0.88267554, 0.37281188, 0.32412991],
[ 0.88507136, 0.37786501, 0.32445816],
[ 0.88741638, 0.38294391, 0.32488078],
[ 0.88971012, 0.38804876, 0.32539617],
[ 0.89195365, 0.39317783, 0.32600483],
[ 0.89414811, 0.3983294 , 0.32670706],
[ 0.89629551, 0.40350084, 0.32750381],
[ 0.89839827, 0.40868922, 0.32839594],
[ 0.90045568, 0.41389524, 0.32938102],
[ 0.90246898, 0.41911741, 0.33045857],
[ 0.90443943, 0.42435429, 0.33162793],
[ 0.90636827, 0.42960453, 0.33288831],
[ 0.90825675, 0.43486684, 0.33423877],
[ 0.91010609, 0.44014 , 0.33567825],
[ 0.9119175 , 0.4454229 , 0.33720557],
[ 0.91369216, 0.45071447, 0.33881946],
[ 0.9154312 , 0.45601377, 0.34051856],
[ 0.91713572, 0.4613199 , 0.34230146],
[ 0.91880676, 0.46663207, 0.34416665],
[ 0.92044532, 0.47194958, 0.3461126 ],
[ 0.92205233, 0.47727179, 0.34813776],
[ 0.92362867, 0.48259816, 0.35024054],
[ 0.92517515, 0.4879282 , 0.35241933],
[ 0.92669252, 0.49326154, 0.35467254],
[ 0.92818145, 0.49859784, 0.35699858],
[ 0.92964257, 0.50393685, 0.35939586],
[ 0.93107641, 0.50927839, 0.36186284],
[ 0.93248345, 0.51462233, 0.364398 ],
[ 0.93386574, 0.51996726, 0.36700002],
[ 0.93522801, 0.52530964, 0.36966771],
[ 0.93656551, 0.53065374, 0.37239898],
[ 0.93787845, 0.53599965, 0.37519244],
[ 0.93916701, 0.54134751, 0.37804676],
[ 0.94043129, 0.54669749, 0.38096067],
[ 0.94167798, 0.55204474, 0.38393283],
[ 0.94290654, 0.55739007, 0.38696177],
[ 0.94411192, 0.56273764, 0.39004635],
[ 0.94529398, 0.56808778, 0.39318551],
[ 0.94645531, 0.57343881, 0.39637803],
[ 0.94760645, 0.57878343, 0.39962183],
[ 0.9487348 , 0.58413113, 0.40291682],
[ 0.94984002, 0.58948232, 0.40626218],
[ 0.95092987, 0.5948318 , 0.40965603],
[ 0.95200784, 0.60017752, 0.41309682],
[ 0.95306267, 0.60552755, 0.4165852 ],
[ 0.95409505, 0.61088157, 0.42012037],
[ 0.95512504, 0.61622639, 0.42369803],
[ 0.95613153, 0.6215765 , 0.42732096],
[ 0.95711389, 0.62693242, 0.43098874],
[ 0.95809626, 0.63227868, 0.43469575],
[ 0.95905559, 0.63763057, 0.43844592],
[ 0.95999158, 0.64298842, 0.44223886],
[ 0.96092933, 0.6483366 , 0.44606817],
[ 0.9618419 , 0.65369237, 0.44993943],
[ 0.9627383 , 0.65905026, 0.45385005],
[ 0.9636302 , 0.6644034 , 0.45779644],
[ 0.96449566, 0.6697654 , 0.46178377],
[ 0.96535776, 0.67512254, 0.46580546],
[ 0.96620304, 0.68048317, 0.46986463],
[ 0.9670273 , 0.68584991, 0.4739621 ],
[ 0.96785315, 0.6912098 , 0.47809096],
[ 0.9686504 , 0.6965806 , 0.48225963],
[ 0.96944845, 0.7019456 , 0.48645901],
[ 0.97022616, 0.7073171 , 0.49069515],
[ 0.97099121, 0.71269094, 0.49496543],
[ 0.97174984, 0.71806387, 0.49926753],
[ 0.97248364, 0.72344627, 0.50360719],
[ 0.97322354, 0.7288214 , 0.507974 ],
[ 0.97393124, 0.73421027, 0.51238049],
[ 0.97464928, 0.7395902 , 0.51681211],
[ 0.97533754, 0.7449828 , 0.52128239],
[ 0.97602903, 0.75037081, 0.52577975],
[ 0.97669802, 0.75576791, 0.53031308],
[ 0.97736473, 0.76116379, 0.53487508],
[ 0.97801457, 0.76656615, 0.53947089],
[ 0.97865835, 0.77196968, 0.5440965 ],
[ 0.97928916, 0.77737806, 0.54875439],
[ 0.9799119 , 0.78278902, 0.55344266],
[ 0.98052378, 0.78820414, 0.55816232],
[ 0.98112747, 0.7936223 , 0.56291231],
[ 0.98172055, 0.79904486, 0.56769355],
[ 0.98230724, 0.80446999, 0.57250437],
[ 0.98288171, 0.80990066, 0.57734705],
[ 0.98345358, 0.8153325 , 0.58221775],
[ 0.98400967, 0.82077189, 0.58712176],
[ 0.98456902, 0.82621018, 0.59205135],
[ 0.98510705, 0.83165888, 0.59701657],
[ 0.9856563 , 0.83710333, 0.60200398],
[ 0.98617668, 0.84256188, 0.60703023],
[ 0.98671839, 0.8480122 , 0.61207428],
[ 0.98722803, 0.8534783 , 0.61715857],
[ 0.9877585 , 0.858937 , 0.6222606 ],
[ 0.9882647 , 0.86440831, 0.62739957],
[ 0.98878008, 0.86987788, 0.632561 ],
[ 0.98928575, 0.87535412, 0.63775296],
[ 0.98978682, 0.88083501, 0.64297311],
[ 0.99029495, 0.88631589, 0.64821615],
[ 0.99078259, 0.89180853, 0.65349411],
[ 0.99129627, 0.89729384, 0.65878604],
[ 0.99178436, 0.9027934 , 0.6641148 ],
[ 0.99229377, 0.90828822, 0.66945902],
[ 0.99279222, 0.91379173, 0.67483294],
[ 0.99329154, 0.91929937, 0.68023089],
[ 0.99380347, 0.92480678, 0.68564709],
[ 0.99429361, 0.9303278 , 0.6910969 ]]
test_cm = ListedColormap(cm_data, name=__file__)
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
try:
from viscm import viscm
viscm(test_cm)
except ImportError:
print("viscm not found, falling back on simple display")
plt.imshow(np.linspace(0, 100, 256)[None, :], aspect='auto',
cmap=test_cm)
plt.show()
| [
"[email protected]"
]
| |
aff592a4db92ec929b6c09bb78ddc5edee7e08ec | 1a166165ab8287d01cbb377a13efdb5eff5dfef0 | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations/_public_ip_prefixes_operations.py | 51da2f098d86fa05d77f2239e18387c6c415d319 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
]
| permissive | manoj0806/azure-sdk-for-python | 7a14b202ff80f528abd068bf50334e91001a9686 | aab999792db1132232b2f297c76800590a901142 | refs/heads/master | 2023-04-19T16:11:31.984930 | 2021-04-29T23:19:49 | 2021-04-29T23:19:49 | 363,025,016 | 1 | 0 | MIT | 2021-04-30T04:23:35 | 2021-04-30T04:23:35 | null | UTF-8 | Python | false | false | 29,914 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PublicIPPrefixesOperations:
"""PublicIPPrefixesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
public_ip_prefix_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the PublicIpPrefix.
:type public_ip_prefix_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
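# (Editor's note) Every begin_* coroutine in this class follows the azure-core
# long-running-operation pattern: the private *_initial coroutine sends the
# raw request, AsyncARMPolling polls the operation status, and a saved
# continuation_token can later resume the poller via
# AsyncLROPoller.from_continuation_token.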
async def get(
self,
resource_group_name: str,
public_ip_prefix_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.PublicIPPrefix":
"""Gets the specified public IP prefix in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PublicIPPrefix, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "_models.PublicIPPrefix",
**kwargs
) -> "_models.PublicIPPrefix":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PublicIPPrefix')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "_models.PublicIPPrefix",
**kwargs
) -> AsyncLROPoller["_models.PublicIPPrefix"]:
"""Creates or updates a static or dynamic public IP prefix.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to the create or update public IP prefix operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> "_models.PublicIPPrefix":
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
public_ip_prefix_name: str,
parameters: "_models.TagsObject",
**kwargs
) -> AsyncLROPoller["_models.PublicIPPrefix"]:
"""Updates public IP prefix tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param public_ip_prefix_name: The name of the public IP prefix.
:type public_ip_prefix_name: str
:param parameters: Parameters supplied to update public IP prefix tags.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PublicIPPrefix or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefix]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefix"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
public_ip_prefix_name=public_ip_prefix_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PublicIPPrefix', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'publicIpPrefixName': self._serialize.url("public_ip_prefix_name", public_ip_prefix_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes/{publicIpPrefixName}'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["_models.PublicIPPrefixListResult"]:
"""Gets all the public IP prefixes in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.PublicIPPrefixListResult"]:
"""Gets all public IP prefixes in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PublicIPPrefixListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.PublicIPPrefixListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PublicIPPrefixListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PublicIPPrefixListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/publicIPPrefixes'} # type: ignore
| [
"[email protected]"
]
| |
b331df55a69467e1044e466d91086d475893a6a1 | c554e6a4bd90760fe3924d00f8428a3796e6bd0c | /src/gui/graphWidget.py | e2314ed9b0bfa0db2515115163bd4665fb9835f7 | []
| no_license | Xifax/muscale | 0d84013e5c4252437c64c42681f4d89a78e21512 | 4feba17a845d2df5da7c46aee495c0675c28ea5f | refs/heads/master | 2021-01-23T03:05:33.076385 | 2011-06-13T21:28:44 | 2011-06-13T21:28:44 | 1,459,682 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,003 | py | # -*- coding: utf-8 -*-
'''
Created on Mar 10, 2011
@author: Yadavito
'''
# internal #
import os
import Image
# own #
from utility.const import RES, ICONS, TOOLBAR_ICONS,\
TEMP, LINE_WITH, ICO_GRAPH, LEGEND,\
GRAPH, COPY, SCALE
from utility.tools import clearFolderContents
# external #
from PyQt4 import QtGui
from PyQt4.QtCore import Qt, QObject, QEvent, QSize
import matplotlib
matplotlib.use('Agg')
from matplotlib.backends.backend_qt4agg import \
FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import \
NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.figure import Figure
from matplotlib.pyplot import setp
from matplotlib.legend import DraggableLegend
from matplotlib import font_manager
import numpy as np
class Filter(QObject):
"""Status message mouse click filter"""
def eventFilter(self, object, event):
if event.type() == QEvent.HoverEnter:
object.toolbar.show()
if event.type() == QEvent.HoverLeave:
object.toolbar.hide()
if event.type() == QEvent.MouseButtonPress:
pass
return False
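# (Editor's note) This filter is installed on MplWidget below via
# installEventFilter(); hovering the widget shows the embedded matplotlib
# toolbar, leaving hides it, and mouse presses currently fall through (the
# MouseButtonPress branch is a deliberate no-op).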
class MplCanvas(FigureCanvas):
"""Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
# define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
# notify the system of updated policy
FigureCanvas.updateGeometry(self)
class MplWidget(QtGui.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, tools, toolbar=True, menu=True, parent=None):
# initialization of Qt MainWindow widget
QtGui.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = MplCanvas()
# create a vertical box layout
self.layout = QtGui.QVBoxLayout()
# add mpl widget to layout
self.layout.addWidget(self.canvas)
# reference to toolsFrame
self.tool = tools
if toolbar:
# add navigation toolbar to layout
self.toolbar = NavigationToolbar(self.canvas, self)
self.layout.addWidget(self.toolbar)
# enable hover event handling
self.setAttribute(Qt.WA_Hover)
# create and install event filter
self.filter = Filter(self)
self.installEventFilter(self.filter)
# hide toolbar
self.initComponents()
else:
self.toolbar = None
# set the layout to th vertical box
self.setLayout(self.layout)
# active lines list
self.lines = []
# legend
self.legend = None
# autoscale
self.canvas.ax.autoscale_view(True, True, True)
if menu:
# setup context menu
self.setContextMenuPolicy(Qt.ActionsContextMenu)
self.initActions()
self.alwaysAutoScale.setChecked(True)
#-------------- initialization ---------------#
def initComponents(self):
if self.toolbar is not None:
self.toolbar.hide()
self.newIcons()
def initActions(self):
# toolbar
self.toggleLegendAction = QtGui.QAction(QtGui.QIcon(RES + ICONS + LEGEND), 'Toggle legend',
self, triggered=self.toggleLegend)
self.toggleLegendAction.setCheckable(True)
if self.toolbar is not None:
self.toolbar.addAction(self.toggleLegendAction)
# context menu
self.addAction(self.toggleLegendAction)
self.addAction(QtGui.QAction(QtGui.QIcon(RES + ICONS + COPY),'Copy data to table',
self, triggered=self.toTable))
self.addAction(QtGui.QAction(QtGui.QIcon(RES + ICONS + GRAPH),'Plot data in tools',
self, triggered=self.toGraphTool))
self.addAction(QtGui.QAction(QtGui.QIcon(RES + ICONS + SCALE), 'Autoscale',
self, triggered=self.updateScale))
self.alwaysAutoScale = QtGui.QAction('Scale on update', self)
self.alwaysAutoScale.setCheckable(True)
self.selectLinesMenu = QtGui.QMenu()
self.selectLines = (QtGui.QAction('Plots', self))
self.selectLines.setMenu(self.selectLinesMenu)
aSep = QtGui.QAction('', self)
aSep.setSeparator(True)
self.addAction(aSep)
self.addAction(self.selectLines)
self.addAction(self.alwaysAutoScale)
def newIcons(self):
for position in range(0, self.toolbar.layout().count()):
widget = self.toolbar.layout().itemAt(position).widget()
if isinstance(widget, QtGui.QToolButton):
icon = QtGui.QIcon(RES + ICONS + TOOLBAR_ICONS[position])
self.toolbar.layout().itemAt(position).widget().setIcon(icon)
self.toolbar.setIconSize(QSize(ICO_GRAPH, ICO_GRAPH))
def resetGraphicEffect(self):
if self.graphicsEffect() is not None:
self.graphicsEffect().setEnabled(False)
#------------- plotting methods ---------------#
## Hides axes in widget.
# @param axes Widget axes form canvas.
@staticmethod
def hideAxes(axes):
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
## Clears widget canvas, removing all data and clearing figure.
# @param repaint_axes Add standard plot after clearing figure.
def clearCanvas(self, repaint_axes=True):
self.canvas.ax.clear()
self.canvas.fig.clear()
if repaint_axes:
self.canvas.ax = self.canvas.fig.add_subplot(111)
## Update existing data or plot anew.
# @param data List or array to plot/update.
# @param line Which line (by index) to update (if any).
# @param label Data label (new or existing).
# @param style Line style (solid, dashed, dotted).
# @param color Line color.
def updatePlot(self, data, line=0, label=None, style='solid', color=None):
if not self.canvas.ax.has_data():
if label is not None:
if color is not None:
self.lines = self.canvas.ax.plot(data, label=label, linestyle=style, color=color)
else:
self.lines = self.canvas.ax.plot(data, label=label, linestyle=style)
else:
if color is not None:
self.lines = self.canvas.ax.plot(data, linestyle=style, color=color)
else:
self.lines = self.canvas.ax.plot(data, linestyle=style)
else:
if not self.lines:
self.lines = self.canvas.ax.get_lines()
if label is not None:
if label not in [l._label for l in self.lines]:
if color is not None:
self.lines.extend(self.canvas.ax.plot(data, label=label, linestyle=style, color=color))
else:
self.lines.extend(self.canvas.ax.plot(data, label=label, linestyle=style))
line = len(self.lines) - 1
else:
line = [l._label for l in self.lines].index(label)
line_to_update = self.lines[line]
if len(data) != len(line_to_update._x):
# x, y ~ data in y
line_to_update.set_data(np.arange(len(data)), data)
else:
# in case data length stays the same
line_to_update.set_data(line_to_update._x, data)
self.canvas.draw()
self.updateLegend()
self.updateLinesSubmenu()
if self.alwaysAutoScale.isChecked():
self.updateScale()
## Plots scalogram for wavelet decomposition.
# @param data Wavelet coefficients in matrix.
# @param top Axis position.
# @param colorbar Shows colorbar for data levels.
# @param power Scales resulting graph by power of 2.
def scalogram(self, data, top=True, colorbar=True, power=False):
# self.resetGraphicEffect()
self.clearCanvas()
x = np.arange(len(data[0]))
y = np.arange(len(data))
if power:
contour = self.canvas.ax.contourf(x, y, np.abs(data) ** 2)
else:
contour = self.canvas.ax.contourf(x, y, np.abs(data))
if colorbar:
self.canvas.fig.colorbar(contour, ax=self.canvas.ax, orientation='vertical', format='%2.1f')
if top:
self.canvas.ax.set_ylim((y[-1], y[0]))
else:
self.canvas.ax.set_ylim((y[0], y[-1]))
self.canvas.ax.set_xlim((x[0], x[-1]))
# self.canvas.ax.set_ylabel('scales')
self.canvas.draw()
## Plots list of arrays with shared x/y axes.
# @param data Arrays to plot (list or matrix).
def multiline(self, data):
# self.resetGraphicEffect()
# abscissa
axprops = dict(yticks=[])
# ordinate
yprops = dict(rotation=0,
horizontalalignment='right',
verticalalignment='center',
x=-0.01)
# level/figure ratio
ratio = 1. / len(data)
# positioning (fractions of total figure)
left = 0.1
bottom = 1.0
width = 0.85
space = 0.035
height = ratio - space
# legend
label = 'Lvl %d'
i = 0
bottom -= height
ax = self.canvas.fig.add_axes([left, bottom - space, width, height], **axprops)
ax.plot(data[i])
setp(ax.get_xticklabels(), visible=False)
ax.set_ylabel(label % i, **yprops)
i += 1
axprops['sharex'] = ax
axprops['sharey'] = ax
while i < len(data):
bottom -= height
ax = self.canvas.fig.add_axes([left, bottom, width, height], **axprops)
ax.plot(data[i], label='Lvl' + str(i))
ax.set_ylabel(label % i, **yprops)
i += 1
if i != len(data):
setp(ax.get_xticklabels(), visible=False)
#----------------- actions -----------------#
def getTopParent(self):
widget = self.parentWidget()
while True:
if widget.parentWidget() is None:
return widget
else:
widget = widget.parentWidget()
def toggleLegend(self):
self.updateLegend()
def updateLegend(self):
#NB: sometimes induces random exceptions from legend.py -> offsetbox.py
try:
prop = font_manager.FontProperties(size=11)
self.legend = DraggableLegend(self.canvas.ax.legend(fancybox=True, shadow=True, prop=prop))
if self.toggleLegendAction.isChecked():
self.legend.legend.set_visible(True)
else:
self.legend.legend.set_visible(False)
self.canvas.draw()
except Exception, e:
pass
def toTable(self):
try:
if len(self.canvas.ax.get_lines()) > 1:
for line in self.canvas.ax.get_lines():
for action in self.selectLinesMenu.actions():
if action.isChecked():
if line._label == action.text():
self.tool.updateTable(line._y, line._label)
else:
line = self.canvas.ax.get_lines()[0]
self.tool.updateTable(line._y, line._label)
self.getTopParent().messageInfo.showInfo('Copied to table')
except Exception:
pass
def toGraphTool(self):
try:
if len(self.canvas.ax.get_lines()) > 1:
for line in self.canvas.ax.get_lines():
for action in self.selectLinesMenu.actions():
if action.isChecked():
if line._label == action.text():
self.tool.updatePlot(line._y)
else:
line = self.canvas.ax.get_lines()[0]
self.tool.updatePlot(line._y)
self.getTopParent().messageInfo.showInfo("Updated tool's graph")
except Exception:
pass
def selectAllSublines(self):
for action in self.selectLinesMenu.actions():
if action.isCheckable():
action.setChecked(True)
def selectNoneSublines(self):
for action in self.selectLinesMenu.actions():
if action.isCheckable():
action.setChecked(False)
def selectInverseSublines(self):
for action in self.selectLinesMenu.actions():
if action.isCheckable():
action.setChecked(not action.isChecked())
def updateLinesSubmenu(self):
if len(self.canvas.ax.get_lines()) > 1:
self.selectLines.setEnabled(True)
self.selectLinesMenu.clear()
for line in self.canvas.ax.get_lines():
lineSelectAction = QtGui.QAction(line._label, self)
lineSelectAction.setCheckable(True)
self.selectLinesMenu.addAction(lineSelectAction)
self.selectLinesMenu.actions()[0].setChecked(True)
# additional methods
self.selectLinesMenu.addSeparator()
self.selectLinesMenu.addAction(QtGui.QAction('All', self, triggered=self.selectAllSublines))
self.selectLinesMenu.addAction(QtGui.QAction('None', self, triggered=self.selectNoneSublines))
self.selectLinesMenu.addAction(QtGui.QAction('Inverse', self, triggered=self.selectInverseSublines))
else:
self.selectLines.setEnabled(False)
def updateScale(self):
try:
# get new max/min values
x_max = max([max(line._x) for line in self.canvas.ax.get_lines()])
x_min = min([min(line._x) for line in self.canvas.ax.get_lines()])
y_max = max([max(line._y) for line in self.canvas.ax.get_lines()])
y_min = min([min(line._y) for line in self.canvas.ax.get_lines()])
# update axes
self.canvas.ax.set_xlim(x_min, x_max)
self.canvas.ax.set_ylim(y_min, y_max)
self.canvas.draw()
except Exception, e:
pass
def saveFigure(self, name='figure', format='png', transparency=False):
if format == 'bmp':
self.canvas.fig.savefig(RES + TEMP + name + '.png', transparent=transparency)
img = Image.open(RES + TEMP + name + '.png')
img.load()
if len(img.split()) == 4:
r, g, b, a = img.split()
img = Image.merge('RGB', (r, g, b))
img.save(RES + TEMP + name + '.bmp')
os.unlink(RES + TEMP + name + '.png')
else:
self.canvas.fig.savefig(RES + TEMP + name + '.' + format, transparent=transparency)
#------------------ utils ------------------#
## Generates previews for specified data.
# @param data List of arrays.
@staticmethod
def generatePreviews(data):
# if data is of array type, it's preferable to use .any()
if len(data) > 0:
# temp folder
tmp = RES + TEMP
# prepare folder
if not os.path.exists(tmp):
os.makedirs(tmp)
# clear temp folder contents
clearFolderContents(tmp)
level = 0
for array in data:
matplotlib.pyplot.figure(figsize=(6, 3))
matplotlib.pyplot.axis('off')
matplotlib.pyplot.plot(array, color='w', linewidth=LINE_WITH)
matplotlib.pyplot.savefig(RES + TEMP + str(level) + '.png', transparent=True)
level += 1
| [
"[email protected]"
]
| |
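A minimal embedding sketch for the MplWidget above (PyQt4-era, to match the file). Passing tools=None is an assumption that holds until the 'Copy data to table' or 'Plot data in tools' context actions fire, since self.tool is only dereferenced there:

    import sys
    from PyQt4 import QtGui

    app = QtGui.QApplication(sys.argv)
    widget = MplWidget(tools=None)                 # toolbar and context menu enabled
    widget.updatePlot([1, 4, 2, 8], label='demo')  # first call plots; later calls update in place
    widget.show()
    app.exec_()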
6fe457cbf36eb7f66a569eb0d932fb84baac199e | 1e9c67785cd2a07fbd12b63bd93a2eba2272f237 | /gcn1/parameters.py | 90f131240639bd833f76ced834fd592e8188d5d6 | []
| no_license | monisha-jega/mmd | 2975d0f77bce4db38795fa201f515f35498f0eb3 | d4f9d2c94409c2877ff5a5a2242e7e7ed2f87921 | refs/heads/master | 2022-07-20T17:01:39.043859 | 2020-05-16T23:31:35 | 2020-05-16T23:31:35 | 264,543,426 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,170 | py | # train_dir = "/scratch/scratch2/monishaj/dataset/v1/valid/"
# val_dir = "/scratch/scratch2/monishaj/dataset/v1/valid/"
# test_dir = "/scratch/scratch2/monishaj/dataset/v1/test/"
# data_dump_dir = "/scratch/scratch2/monishaj/image_data_dump_SN/"
# model_dump_dir = "/scratch/scratch2/monishaj/image_model_dump_SN/"
# #Must contain ImageUrlToIndex.pkl and annoy.ann
# annoy_dir = '/scratch/scratch2/monishaj/image_annoy_index/'
# #Word embeddings file
# embed_file = '../../GoogleNews-vectors-negative300.bin'
train_dir = "../../dataset/v1/train/"
val_dir = "../../dataset/v1/valid/"
test_dir = "../../dataset/v1/test/"
data_dump_dir = "data_dump/"
model_dump_dir = "model_dump/"
#Must contain ImageUrlToIndex.pkl and annoy.ann
annoy_dir = '../../raw_catalog/image_annoy_index/'
#Word embeddings file
embed_file = '../../GoogleNews-vectors-negative300.bin'
start_word = "</s>"
start_word_id = 0
end_word = "</e>"
end_word_id = 1
pad_word = "<pad>"
pad_word_id = 2
unk_word = "<unk>"
unk_word_id = 3
max_dialogue_len = max_context_len = 20 #Time steps for dialogue (Number of utterances in a dialogue)
#max_dialogue_len is used while preprocessing data, while max_context_len is used during training
max_utter_len = 30 #Time steps for utterance (Number of words in an utterance)
num_neg_images_sample = 100 #Number of negative images to train against
num_neg_images_use = 5 #Number of negative images to test against
num_images_in_context = 5
num_nodes = max_context_len * (1 + num_images_in_context)
word_embedding_size = 300
image_size = 4096
image_embedding_size = 512
cell_state_size = 512
gcn_layer1_out_size = 500
batch_size = 10 #best value - 64
vocab_freq_cutoff = 4 #best value - 4
learning_rate=0.0004 #best value - 0.0004
max_gradient_norm = 0.1 #best value - 0.1
epochs = 10 #best value - Early stopping
use_random_neg_images = False #If True, random images are sampled for negative images (used in read_data_*.py)
use_images = False #If False, will use 0s for image instead of loading from annoy file
restore_trained = False #If True, will restore latest model from checkpoint
| [
"[email protected]"
]
| |
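Illustrative only: how the special token ids and max_utter_len above are typically combined to frame, truncate, and pad one utterance; the helper name is ours, not from the repository:

    def pad_utterance(word_ids):
        # </s> ... </e> framing, truncated to fit, then <pad> up to max_utter_len
        seq = [start_word_id] + word_ids[:max_utter_len - 2] + [end_word_id]
        return seq + [pad_word_id] * (max_utter_len - len(seq))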
13f35bebc83121ef7c3b39ca0bd9121a7e9981ce | d09c6ff7114f69a9326883c5b9fcc70fa994e8a2 | /_pycharm_skeletons/renderdoc/GLFBO.py | 22543193e763b3990fca10ebdc726c68f718ce92 | [
"MIT"
]
| permissive | Lex-DRL/renderdoc-py-stubs | 3dd32d23c0c8219bb66387e6078244cff453cd83 | 75d280e4f500ded506f3315a49fc432b37ab4fa6 | refs/heads/master | 2020-08-22T16:55:39.336657 | 2019-11-03T01:21:26 | 2019-11-03T01:21:26 | 216,441,308 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | # encoding: utf-8
# module renderdoc
# from P:\1-Scripts\_Python\Py-Autocomplete\renderdoc.pyd
# by generator 1.146
# no doc

# imports
import enum as __enum

from .SwigPyObject import SwigPyObject


class GLFBO(SwigPyObject):
    """ Describes the contents of a framebuffer object. """

    def __eq__(self, *args, **kwargs):  # real signature unknown
        """ Return self==value. """
        pass

    def __ge__(self, *args, **kwargs):  # real signature unknown
        """ Return self>=value. """
        pass

    def __gt__(self, *args, **kwargs):  # real signature unknown
        """ Return self>value. """
        pass

    def __hash__(self, *args, **kwargs):  # real signature unknown
        """ Return hash(self). """
        pass

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass

    def __le__(self, *args, **kwargs):  # real signature unknown
        """ Return self<=value. """
        pass

    def __lt__(self, *args, **kwargs):  # real signature unknown
        """ Return self<value. """
        pass

    @staticmethod  # known case of __new__
    def __new__(*args, **kwargs):  # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass

    def __ne__(self, *args, **kwargs):  # real signature unknown
        """ Return self!=value. """
        pass

    colorAttachments = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """The list of :class:`GLAttachment` with the framebuffer color attachments."""

    depthAttachment = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """The :class:`GLAttachment` with the framebuffer depth attachment."""

    drawBuffers = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """The list of draw buffer indices into the :data:`colorAttachments` attachment list."""

    readBuffer = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """The read buffer index in the :data:`colorAttachments` attachment list."""

    resourceId = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """The :class:`ResourceId` of the framebuffer."""

    stencilAttachment = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """The :class:`GLAttachment` with the framebuffer stencil attachment."""

    this = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    thisown = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default

    __dict__ = None  # (!) real value is ''
| [
"[email protected]"
]
| |
82d0e61c36c862b9836cbf9fa197313ee1b5d21a | 5a0488b1e1b3cb9423ab5537598cd2e927deada1 | /com/kute/date/caltime.py | 28d7f63207063afa069969fede799cf1ce8f4878 | []
| no_license | dajima/purepythontest | c276796a90ded77cc033b717a01425a64c5fe729 | 1e0c446f9f6d2bf1f38ab2aafec5af914cf66293 | refs/heads/master | 2021-06-07T04:15:34.450197 | 2016-11-03T14:27:28 | 2016-11-03T14:27:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 858 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'kute'
# __mtime__ = '16/7/31 17:04'

"""
Decorators that measure a function's execution time.
"""

from functools import wraps
import time


def perf_counter(function):
    @wraps(function)
    def _caltime(*args, **kwargs):
        start = time.perf_counter()
        result = function(*args, **kwargs)
        end = time.perf_counter()
        print("The function [%s] took %s s in total." % (function.__name__, end - start))
        return result
    return _caltime


def process_time(function):
    @wraps(function)
    def _caltime(*args, **kwargs):
        start = time.process_time()
        result = function(*args, **kwargs)
        end = time.process_time()
        print("The function [%s] took %s s in total." % (function.__name__, end - start))
        return result
    return _caltime
| [
"[email protected]"
]
| |
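A usage sketch for the decorators above (Python 3.3+, which time.perf_counter and time.process_time require):

    @perf_counter
    def busy_sum(n):
        return sum(i * i for i in range(n))

    busy_sum(10 ** 6)  # prints the wall-clock timing line, then returns the result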
115102adbceafbbd78127a07c46c81fafef049d7 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-edas/aliyunsdkedas/request/v20170801/ListScaleOutEcuRequest.py | 618118a73801ecbaed62a1509ab2bac80d1200da | [
"Apache-2.0"
]
| permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,117 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

from aliyunsdkcore.request import RoaRequest


class ListScaleOutEcuRequest(RoaRequest):

    def __init__(self):
        RoaRequest.__init__(self, 'Edas', '2017-08-01', 'ListScaleOutEcu')
        self.set_uri_pattern('/pop/v5/resource/scale_out_ecu_list')
        self.set_method('POST')

    def get_Mem(self):
        return self.get_query_params().get('Mem')

    def set_Mem(self, Mem):
        self.add_query_param('Mem', Mem)

    def get_LogicalRegionId(self):
        return self.get_query_params().get('LogicalRegionId')

    def set_LogicalRegionId(self, LogicalRegionId):
        self.add_query_param('LogicalRegionId', LogicalRegionId)

    def get_AppId(self):
        return self.get_query_params().get('AppId')

    def set_AppId(self, AppId):
        self.add_query_param('AppId', AppId)

    def get_GroupId(self):
        return self.get_query_params().get('GroupId')

    def set_GroupId(self, GroupId):
        self.add_query_param('GroupId', GroupId)

    def get_InstanceNum(self):
        return self.get_query_params().get('InstanceNum')

    def set_InstanceNum(self, InstanceNum):
        self.add_query_param('InstanceNum', InstanceNum)

    def get_Cpu(self):
        return self.get_query_params().get('Cpu')

    def set_Cpu(self, Cpu):
        self.add_query_param('Cpu', Cpu)

    def get_ClusterId(self):
        return self.get_query_params().get('ClusterId')

    def set_ClusterId(self, ClusterId):
        self.add_query_param('ClusterId', ClusterId)
| [
"[email protected]"
]
| |
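A sketch of issuing the request above through the standard aliyunsdkcore client flow; the region id and credentials are placeholders:

    from aliyunsdkcore.client import AcsClient

    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = ListScaleOutEcuRequest()
    request.set_ClusterId('<cluster-id>')
    request.set_InstanceNum(2)
    response = client.do_action_with_exception(request)  # raw JSON bytes on success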
f834ec50bc964f06617f54a3c83792264173427b | ec1fad1e16d24f51987acba26c1b4014cbed0e96 | /python/xgbserver/xgbserver/__main__.py | 8d64b763e7a79c82e256e3e7e49deb18ca3d2197 | [
"Apache-2.0"
]
| permissive | Jeffwan/kfserving | 4127b811fd9b778903c0c7572b0bc687e1997efd | 47a6303173dab27b157ca77c72d62b847d099d21 | refs/heads/master | 2020-06-05T03:06:11.606236 | 2019-10-06T01:47:10 | 2019-10-06T01:47:10 | 192,292,123 | 0 | 0 | Apache-2.0 | 2019-06-17T06:59:43 | 2019-06-17T06:59:41 | null | UTF-8 | Python | false | false | 1,289 | py | # Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import kfserving
import argparse

from xgbserver import XGBoostModel

DEFAULT_MODEL_NAME = "default"
DEFAULT_LOCAL_MODEL_DIR = "/tmp/model"

parser = argparse.ArgumentParser(parents=[kfserving.server.parser])  # pylint:disable=c-extension-no-member
parser.add_argument('--model_dir', required=True,
                    help='A URI pointer to the model directory')
parser.add_argument('--model_name', default=DEFAULT_MODEL_NAME,
                    help='The name that the model is served under.')
args, _ = parser.parse_known_args()

if __name__ == "__main__":
    model = XGBoostModel(args.model_name, args.model_dir)
    model.load()
    kfserving.KFServer().start([model])  # pylint:disable=c-extension-no-member
| [
"[email protected]"
]
| |
63a925ca4475c05114f2fa5a5e84e1cfd5c070e2 | 3432efd194137e1d0cb05656eb547c9992229f02 | /test1014/other/5.py | 2163452ba68ab81662362b7241b90f2aba765a8c | []
| no_license | zhanganxia/other_code | 31747d7689ae1e91fcf3f9f758df130246e7d495 | 8d09d9d0b6d6a1a9b8755487f926ac6fafd761fa | refs/heads/master | 2021-09-04T02:22:38.632685 | 2018-01-14T15:37:14 | 2018-01-14T15:37:14 | 107,007,482 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | #encoding=utf-8
a = input("Enter the first number: ")
b = input("Enter the second number: ")
c = input("Enter the third number: ")
d = input("Enter the fourth number: ")
e = input("Enter the fifth number: ")
num1 = int(a)
num2 = int(b)
num3 = int(c)
num4 = int(d)
num5 = int(e)
sum = num1 + num2 + num3 + num4 + num5
print(sum)
| [
"[email protected]"
]
| |
c971d3ead94eb08032a310c9abc1c648f30c516b | 7e69c60c23fce92463c78774b5968d3320c715c9 | /django_covid/django_covid/wsgi.py | b570b88fa5b444fdc628f862febd2e87d82a55f9 | []
| no_license | hwet-j/Python | 5128d114cf7257067f68cfb1db502e4f762ac8cc | 3e6f36be665932588a576f44ebb0107a4f350613 | refs/heads/master | 2023-04-08T17:52:31.607225 | 2021-04-17T05:25:02 | 2021-04-17T05:25:02 | 353,336,473 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | """
WSGI config for django_covid project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""

import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_covid.settings')

application = get_wsgi_application()
| [
"[email protected]"
]
| |
1acb6d67a1ae657f90c2e4402322ec6d567f9adc | 32c56293475f49c6dd1b0f1334756b5ad8763da9 | /google-cloud-sdk/lib/third_party/kubernetes/client/models/v1_network_policy_port.py | 85bb6625d011a03814dc504e0395a5ee14e54a4d | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
]
| permissive | bopopescu/socialliteapp | b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494 | 85bb264e273568b5a0408f733b403c56373e2508 | refs/heads/master | 2022-11-20T03:01:47.654498 | 2020-02-01T20:29:43 | 2020-02-01T20:29:43 | 282,403,750 | 0 | 0 | MIT | 2020-07-25T08:31:59 | 2020-07-25T08:31:59 | null | UTF-8 | Python | false | false | 3,880 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NetworkPolicyPort(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {'port': 'object', 'protocol': 'str'}
attribute_map = {'port': 'port', 'protocol': 'protocol'}
def __init__(self, port=None, protocol=None):
"""
V1NetworkPolicyPort - a model defined in Swagger
"""
self._port = None
self._protocol = None
self.discriminator = None
if port is not None:
self.port = port
if protocol is not None:
self.protocol = protocol
@property
def port(self):
"""
Gets the port of this V1NetworkPolicyPort.
The port on the given protocol. This can either be a numerical or named
port on a pod. If this field is not provided, this matches all port
names and numbers.
:return: The port of this V1NetworkPolicyPort.
:rtype: object
"""
return self._port
@port.setter
def port(self, port):
"""
Sets the port of this V1NetworkPolicyPort.
The port on the given protocol. This can either be a numerical or named
port on a pod. If this field is not provided, this matches all port
names and numbers.
:param port: The port of this V1NetworkPolicyPort.
:type: object
"""
self._port = port
@property
def protocol(self):
"""
Gets the protocol of this V1NetworkPolicyPort.
The protocol (TCP, UDP, or SCTP) which traffic must match. If not
specified, this field defaults to TCP.
:return: The protocol of this V1NetworkPolicyPort.
:rtype: str
"""
return self._protocol
@protocol.setter
def protocol(self, protocol):
"""
Sets the protocol of this V1NetworkPolicyPort.
The protocol (TCP, UDP, or SCTP) which traffic must match. If not
specified, this field defaults to TCP.
:param protocol: The protocol of this V1NetworkPolicyPort.
:type: str
"""
self._protocol = protocol
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1NetworkPolicyPort):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| [
"[email protected]"
]
| |
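An illustrative round-trip with the generated model above (the values are examples):

    port = V1NetworkPolicyPort(port=8080, protocol='TCP')
    assert port.to_dict() == {'port': 8080, 'protocol': 'TCP'}
    print(port)  # __repr__ delegates to to_str(), i.e. pformat(to_dict())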
20af141d1a5395d48a83884c91f1a07c01b172f3 | 2734b77a68f6d7e22e8b823418ad1c59fe1a34af | /opengever/core/upgrades/20190415161809_add_nightly_jobs_settings/upgrade.py | 110a092ff386b9a2072439d981ca1fcc3b5104cd | []
| no_license | 4teamwork/opengever.core | 5963660f5f131bc12fd0a5898f1d7c8f24a5e2b1 | a01bec6c00d203c21a1b0449f8d489d0033c02b7 | refs/heads/master | 2023-08-30T23:11:27.914905 | 2023-08-25T14:27:15 | 2023-08-25T14:27:15 | 9,788,097 | 19 | 8 | null | 2023-09-14T13:28:56 | 2013-05-01T08:28:16 | Python | UTF-8 | Python | false | false | 187 | py | from ftw.upgrade import UpgradeStep
class AddNightlyJobsSettings(UpgradeStep):
    """Add nightly jobs settings.
    """

    def __call__(self):
        self.install_upgrade_profile()
| [
"[email protected]"
]
| |
6582416d65803d8038c2fe7b1d4310ac23d17dbb | 51a936439315f892e0cb4db33ca4c9a6e60c127e | /app/controllers/stats.py | c9eed0785e6d36128393c21c43595b08435aa2ba | [
"MIT"
]
| permissive | june07/packagecontrol.io | a27dfcb797f396027bcafa29392db6cf3fef80c2 | 9f5eb7e3392e6bc2ad979ad32d3dd27ef9c00b20 | refs/heads/master | 2023-02-21T15:41:54.903531 | 2021-08-14T01:14:52 | 2021-08-14T01:15:07 | 172,954,020 | 1 | 0 | NOASSERTION | 2019-02-27T16:51:47 | 2019-02-27T16:51:46 | null | UTF-8 | Python | false | false | 354 | py | from datetime import datetime, timedelta
from bottle import route

from ..models import system_stats
from ..render import render


@route('/stats', name='stats')
def stats_controller():
    data = system_stats.fetch('1 days')
    data['date'] = datetime.utcnow().replace(hour=0, minute=0, second=0) - timedelta(days=2)
    return render('stats', data)
| [
"[email protected]"
]
| |
7d5a8a1bee2752016f38cb31f378a9bf78332833 | 6c174c0cbff5f3403d8034a13c2b8cefff2dd364 | /dfttapptest/database/cookie/jira.py | d8fe5f60dc94c88c89b05e93d14e1e6114387d28 | []
| no_license | xiaominwanglast/uiautomator | 0416e217538527c02e544e559b2d996554b10b20 | 7ce47cda6ac03b7eb707929dd2e0428132ff255f | refs/heads/master | 2021-09-12T12:37:54.286397 | 2018-04-16T10:03:57 | 2018-04-16T10:03:57 | 106,253,765 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 751 | py | #coding:utf-8
import urllib
import requests

data = {'os_username': 'wangxiaomin',
        'os_password': 'wang12345',
        'os_destination': '',
        'user_role': '',
        'atl_token': '',
        'login': 'Log In'}
url = 'http://jira.dfshurufa.com/login.jsp'
session = requests.Session()
header = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.6 Safari/537.36'}
session.post(url=url, headers=header, data=data, timeout=3)
rq = session.get('http://jira.dfshurufa.com/issues/?jql=project%20%3D%20TTAND%20AND%20issuetype%20%3D%20%E7%BC%BA%E9%99%B7%20AND%20status%20in%20(Open%2C%20Reopened)%20AND%20resolution%20%3D%20Unresolved%20ORDER%20BY%20priority%20DESC%2C%20updated%20DESC', headers=header)
print rq.text
| [
"[email protected]"
]
| |
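One robustness note on the script above: the login POST's outcome is never checked before scraping. A small sketch, under the assumption that this JIRA instance serves login.jsp again when credentials fail:

    resp = session.post(url=url, headers=header, data=data, timeout=3)
    if resp.status_code != 200 or 'login.jsp' in resp.url:
        raise RuntimeError('JIRA login failed')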
7b6e633b612a2474b21935d9902db2f20a237d70 | 0c7d7b24a8d453fc1a9c2f27a08f3c4cfa46ec3b | /recipes/sota/2019/raw_lm_corpus/get_gb_books_by_id.py | 7e6b484fc9636ef947b6756f67c42d7b1bb9bece | [
"BSD-3-Clause",
"MIT"
]
| permissive | piEYj/wav2letter | e6ae462eeeb6a4374f8280c8fa15d8f194c60215 | 49fbb1392e69b5194c077df9847505ec995b4e3d | refs/heads/main | 2023-09-06T01:08:48.837731 | 2021-11-12T14:13:41 | 2021-11-12T14:15:15 | 444,344,109 | 1 | 0 | NOASSERTION | 2022-01-04T08:37:19 | 2022-01-04T08:37:19 | null | UTF-8 | Python | false | false | 1,059 | py | import argparse
import os
import sys

from multiprocessing.pool import ThreadPool

from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


def get_one_book(book_id, outdir):
    eprint("Getting book with id", book_id)
    text = strip_headers(load_etext(book_id)).strip()
    newpath = os.path.join(outdir, str(book_id) + ".body.txt")
    with open(newpath, "w") as outfile:
        outfile.write(text)


def main():
    parser = argparse.ArgumentParser("Grabs Gutenberg books by ID from a file")
    parser.add_argument("--idfile", type=str, required=True)
    parser.add_argument("--outdir", type=str, required=True)
    args = parser.parse_args()

    if not os.path.exists(args.idfile):
        raise RuntimeError("idfile not found")

    with open(args.idfile, "r") as infile:
        ids = [(int(line.strip()), args.outdir) for line in infile]

    pool = ThreadPool(80)
    pool.starmap(get_one_book, ids)


if __name__ == "__main__":
    main()
| [
"[email protected]"
]
| |
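A hypothetical invocation of the script above (one Gutenberg id per line in the id file; the paths are examples):

    # shell: python get_gb_books_by_id.py --idfile ids.txt --outdir books/
    # or call the worker directly for a single title:
    get_one_book(2701, "books/")  # 2701 is Project Gutenberg's id for Moby-Dick; writes books/2701.body.txt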
6a87d1e1ce9879012a9742449f94015df579147d | 62e45255088abb536e9ea6fcbe497e83bad171a0 | /ippython/funciones_es_par.py | 055dff2bfd12db09351dbd03b0618033e06494a5 | []
| no_license | jmery24/python | a24f562c8d893a97a5d9011e9283eba948b8b6dc | 3e35ac9c9efbac4ff20374e1dfa75a7af6003ab9 | refs/heads/master | 2020-12-25T21:56:17.063767 | 2015-06-18T04:59:05 | 2015-06-18T04:59:05 | 36,337,473 | 0 | 0 | null | 2015-05-27T02:26:54 | 2015-05-27T02:26:54 | null | UTF-8 | Python | false | false | 201 | py | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 11:21:03 2013
@author: daniel
"""
def es_par(num):
return num % 2 == 0
numero = int(raw_input('Escribe un numero: '))
print es_par(numero)
| [
"[email protected]"
]
| |
dfa30680a49a5312290d890edacd03c8a0e44fe5 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/resources/azure-mgmt-msi/azure/mgmt/msi/v2022_01_31_preview/__init__.py | 785d352dea5bec33c870f80744d1867b9bcd5b90 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
]
| permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 925 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from ._managed_service_identity_client import ManagedServiceIdentityClient
from ._version import VERSION

__version__ = VERSION

try:
    from ._patch import __all__ as _patch_all
    from ._patch import *  # pylint: disable=unused-wildcard-import
except ImportError:
    _patch_all = []
from ._patch import patch_sdk as _patch_sdk

__all__ = [
    "ManagedServiceIdentityClient",
]
__all__.extend([p for p in _patch_all if p not in __all__])

_patch_sdk()
| [
"[email protected]"
]
|